Compare commits

..

1 Commits

Author SHA1 Message Date
8053909bd0 Add --git-since option to rojo serve
- Add new GitFilter struct for tracking files changed since a Git reference
- Only sync changed (added/deleted/modified) files to Roblox Studio
- Files remain acknowledged once synced, even if content is reverted
- Add enhanced logging for debugging sync issues
- Force acknowledge project structure to prevent 'Cannot sync a model as a place' errors
2026-01-19 22:02:59 +01:00
73 changed files with 889 additions and 1198 deletions

3
.gitignore vendored
View File

@@ -23,6 +23,3 @@
# Macos file system junk
._*
.DS_STORE
# JetBrains IDEs
/.idea/

View File

@@ -31,14 +31,8 @@ Making a new release? Simply add the new header with the version and date undern
## Unreleased
* Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179])
* Implemented support for the "name" property in meta/model JSON files. ([#1187])
* Fixed instance replacement fallback failing when too many instances needed to be replaced. ([#1192])
* Fixed a bug where MacOS paths weren't being handled correctly. ([#1201])
[#1179]: https://github.com/rojo-rbx/rojo/pull/1179
[#1187]: https://github.com/rojo-rbx/rojo/pull/1187
[#1192]: https://github.com/rojo-rbx/rojo/pull/1192
[#1201]: https://github.com/rojo-rbx/rojo/pull/1201
## [7.7.0-rc.1] (November 27th, 2025)

1
Cargo.lock generated
View File

@@ -1319,7 +1319,6 @@ dependencies = [
"fs-err",
"notify",
"serde",
"tempfile",
]
[[package]]

View File

@@ -1,7 +1,6 @@
# memofs Changelog
## Unreleased Changes
* Added `Vfs::canonicalize`. [#1201]
## 0.3.1 (2025-11-27)
* Added `Vfs::exists`. [#1169]

View File

@@ -19,6 +19,3 @@ crossbeam-channel = "0.5.12"
fs-err = "2.11.0"
notify = "4.0.17"
serde = { version = "1.0.197", features = ["derive"] }
[dev-dependencies]
tempfile = "3.10.1"

View File

@@ -232,33 +232,6 @@ impl VfsBackend for InMemoryFs {
}
}
// NOTE: Rojo is expected to prepend the current working directory to any
// relative path before it is stored in MemoFS; this implementation errors
// if no prepended absolute path is found. It only performs a lexical
// normalization within the context of the provided path.
// Example: "/Users/username/project/../other/file.txt" ->
// "/Users/username/other/file.txt"
// Erroneous example: "/Users/../../other/file.txt" -> "/other/file.txt"
// This is not very robust; proper path normalization — or at least a
// warning when context is missing and the path cannot be fully
// canonicalized — would be an improvement.
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf> {
    use std::path::Component;

    // Lexically fold the components, resolving `.` and `..` as we go.
    // Popping past the root is a silent no-op, which is what produces the
    // "erroneous example" behavior documented above.
    let resolved = path
        .components()
        .fold(PathBuf::new(), |mut acc, component| {
            match component {
                Component::ParentDir => {
                    acc.pop();
                }
                Component::CurDir => {}
                other => acc.push(other),
            }
            acc
        });

    // The normalized path must name a known entry to be considered valid.
    let guard = self.inner.lock().unwrap();
    if guard.entries.get(&resolved).is_some() {
        Ok(resolved)
    } else {
        not_found(&resolved)
    }
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
let inner = self.inner.lock().unwrap();

View File

@@ -77,7 +77,6 @@ pub trait VfsBackend: sealed::Sealed + Send + 'static {
fn metadata(&mut self, path: &Path) -> io::Result<Metadata>;
fn remove_file(&mut self, path: &Path) -> io::Result<()>;
fn remove_dir_all(&mut self, path: &Path) -> io::Result<()>;
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf>;
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent>;
fn watch(&mut self, path: &Path) -> io::Result<()>;
@@ -226,11 +225,6 @@ impl VfsInner {
self.backend.metadata(path)
}
fn canonicalize<P: AsRef<Path>>(&mut self, path: P) -> io::Result<PathBuf> {
let path = path.as_ref();
self.backend.canonicalize(path)
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
self.backend.event_receiver()
}
@@ -419,19 +413,6 @@ impl Vfs {
self.inner.lock().unwrap().metadata(path)
}
/// Normalize a path via the underlying backend.
///
/// Roughly equivalent to [`std::fs::canonicalize`][std::fs::canonicalize]. Relative paths are
/// resolved against the backend's current working directory (if applicable) and errors are
/// surfaced directly from the backend.
///
/// [std::fs::canonicalize]: https://doc.rust-lang.org/stable/std/fs/fn.canonicalize.html
#[inline]
pub fn canonicalize<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf> {
let path = path.as_ref();
self.inner.lock().unwrap().canonicalize(path)
}
/// Retrieve a handle to the event receiver for this `Vfs`.
#[inline]
pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
@@ -559,13 +540,6 @@ impl VfsLock<'_> {
self.inner.metadata(path)
}
/// Normalize a path via the underlying backend.
#[inline]
pub fn normalize<P: AsRef<Path>>(&mut self, path: P) -> io::Result<PathBuf> {
let path = path.as_ref();
self.inner.canonicalize(path)
}
/// Retrieve a handle to the event receiver for this `Vfs`.
#[inline]
pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
@@ -581,9 +555,7 @@ impl VfsLock<'_> {
#[cfg(test)]
mod test {
use crate::{InMemoryFs, StdBackend, Vfs, VfsSnapshot};
use std::io;
use std::path::PathBuf;
use crate::{InMemoryFs, Vfs, VfsSnapshot};
/// https://github.com/rojo-rbx/rojo/issues/899
#[test]
@@ -599,62 +571,4 @@ mod test {
"bar\nfoo\n\n"
);
}
/// https://github.com/rojo-rbx/rojo/issues/1200
#[test]
fn canonicalize_in_memory_success() {
let mut imfs = InMemoryFs::new();
let contents = "Lorem ipsum dolor sit amet.".to_string();
imfs.load_snapshot("/test/file.txt", VfsSnapshot::file(contents.to_string()))
.unwrap();
let vfs = Vfs::new(imfs);
assert_eq!(
vfs.canonicalize("/test/nested/../file.txt").unwrap(),
PathBuf::from("/test/file.txt")
);
assert_eq!(
vfs.read_to_string(vfs.canonicalize("/test/nested/../file.txt").unwrap())
.unwrap()
.to_string(),
contents.to_string()
);
}
#[test]
fn canonicalize_in_memory_missing_errors() {
let imfs = InMemoryFs::new();
let vfs = Vfs::new(imfs);
let err = vfs.canonicalize("test").unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::NotFound);
}
#[test]
fn canonicalize_std_backend_success() {
let contents = "Lorem ipsum dolor sit amet.".to_string();
let dir = tempfile::tempdir().unwrap();
let file_path = dir.path().join("file.txt");
fs_err::write(&file_path, contents.to_string()).unwrap();
let vfs = Vfs::new(StdBackend::new());
let canonicalized = vfs.canonicalize(&file_path).unwrap();
assert_eq!(canonicalized, file_path.canonicalize().unwrap());
assert_eq!(
vfs.read_to_string(&canonicalized).unwrap().to_string(),
contents.to_string()
);
}
#[test]
fn canonicalize_std_backend_missing_errors() {
let dir = tempfile::tempdir().unwrap();
let file_path = dir.path().join("test");
let vfs = Vfs::new(StdBackend::new());
let err = vfs.canonicalize(&file_path).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::NotFound);
}
}

View File

@@ -1,5 +1,5 @@
use std::io;
use std::path::{Path, PathBuf};
use std::path::Path;
use crate::{Metadata, ReadDir, VfsBackend, VfsEvent};
@@ -50,10 +50,6 @@ impl VfsBackend for NoopBackend {
Err(io::Error::other("NoopBackend doesn't do anything"))
}
fn canonicalize(&mut self, _path: &Path) -> io::Result<PathBuf> {
Err(io::Error::other("NoopBackend doesn't do anything"))
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
crossbeam_channel::never()
}

View File

@@ -106,10 +106,6 @@ impl VfsBackend for StdBackend {
})
}
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf> {
fs_err::canonicalize(path)
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
self.watcher_receiver.clone()
}

View File

@@ -290,39 +290,31 @@ function ApiContext:open(id)
end
function ApiContext:serialize(ids: { string })
local url = ("%s/api/serialize"):format(self.__baseUrl)
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
local url = ("%s/api/serialize/%s"):format(self.__baseUrl, table.concat(ids, ","))
return Http.post(url, request_body)
:andThen(rejectFailedRequests)
:andThen(Http.Response.json)
:andThen(function(response_body)
if response_body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID")
end
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body)
if body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID")
end
assert(validateApiSerialize(response_body))
assert(validateApiSerialize(body))
return response_body
end)
return body
end)
end
function ApiContext:refPatch(ids: { string })
local url = ("%s/api/ref-patch"):format(self.__baseUrl)
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
local url = ("%s/api/ref-patch/%s"):format(self.__baseUrl, table.concat(ids, ","))
return Http.post(url, request_body)
:andThen(rejectFailedRequests)
:andThen(Http.Response.json)
:andThen(function(response_body)
if response_body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID")
end
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body)
if body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID")
end
assert(validateApiRefPatch(response_body))
assert(validateApiRefPatch(body))
return response_body
end)
return body
end)
end
return ApiContext

View File

@@ -0,0 +1,16 @@
---
source: tests/tests/build.rs
expression: contents
---
<roblox version="4">
<Item class="Folder" referent="0">
<Properties>
<string name="Name">json_model_legacy_name</string>
</Properties>
<Item class="Folder" referent="1">
<Properties>
<string name="Name">Expected Name</string>
</Properties>
</Item>
</Item>
</roblox>

View File

@@ -1,23 +0,0 @@
---
source: tests/tests/build.rs
assertion_line: 109
expression: contents
---
<roblox version="4">
<Item class="DataModel" referent="0">
<Properties>
<string name="Name">model_json_name_input</string>
</Properties>
<Item class="Workspace" referent="1">
<Properties>
<string name="Name">Workspace</string>
<bool name="NeedsPivotMigration">false</bool>
</Properties>
<Item class="StringValue" referent="2">
<Properties>
<string name="Name">/Bar</string>
</Properties>
</Item>
</Item>
</Item>
</roblox>

View File

@@ -1,20 +0,0 @@
---
source: tests/tests/build.rs
assertion_line: 108
expression: contents
---
<roblox version="4">
<Item class="Folder" referent="0">
<Properties>
<string name="Name">slugified_name_roundtrip</string>
</Properties>
<Item class="Script" referent="1">
<Properties>
<string name="Name">/Script</string>
<token name="RunContext">0</token>
<string name="Source"><![CDATA[print("Hello world!")
]]></string>
</Properties>
</Item>
</Item>
</roblox>

View File

@@ -0,0 +1,6 @@
{
"name": "json_model_legacy_name",
"tree": {
"$path": "folder"
}
}

View File

@@ -0,0 +1,4 @@
{
"Name": "Overridden Name",
"ClassName": "Folder"
}

View File

@@ -1,11 +0,0 @@
{
"name": "model_json_name_input",
"tree": {
"$className": "DataModel",
"Workspace": {
"$className": "Workspace",
"$path": "src"
}
}
}

View File

@@ -1,5 +0,0 @@
{
"name": "/Bar",
"className": "StringValue"
}

View File

@@ -1,4 +0,0 @@
{
"name": "/Script"
}

View File

@@ -1,2 +0,0 @@
print("Hello world!")

View File

@@ -1,6 +0,0 @@
{
"name": "slugified_name_roundtrip",
"tree": {
"$path": "src"
}
}

View File

@@ -1,3 +0,0 @@
{
"name": "/Script"
}

View File

@@ -1 +0,0 @@
print("Hello world!")

View File

@@ -1,6 +0,0 @@
---
source: tests/rojo_test/syncback_util.rs
assertion_line: 101
expression: "String::from_utf8_lossy(&output.stdout)"
---

View File

@@ -0,0 +1,73 @@
---
source: tests/rojo_test/syncback_util.rs
expression: src/ChildWithDuplicates.rbxm
---
num_types: 1
num_instances: 3
chunks:
- Inst:
type_id: 0
type_name: Folder
object_format: 0
referents:
- 0
- 1
- 2
- Prop:
type_id: 0
prop_name: AttributesSerialize
prop_type: String
values:
- ""
- ""
- ""
- Prop:
type_id: 0
prop_name: Capabilities
prop_type: SecurityCapabilities
values:
- 0
- 0
- 0
- Prop:
type_id: 0
prop_name: Name
prop_type: String
values:
- DuplicateChild
- DuplicateChild
- ChildWithDuplicates
- Prop:
type_id: 0
prop_name: DefinesCapabilities
prop_type: Bool
values:
- false
- false
- false
- Prop:
type_id: 0
prop_name: SourceAssetId
prop_type: Int64
values:
- -1
- -1
- -1
- Prop:
type_id: 0
prop_name: Tags
prop_type: String
values:
- ""
- ""
- ""
- Prnt:
version: 0
links:
- - 0
- 2
- - 1
- 2
- - 2
- -1
- End

View File

@@ -1,12 +1,9 @@
---
source: tests/rojo_test/syncback_util.rs
assertion_line: 101
expression: "String::from_utf8_lossy(&output.stdout)"
---
Writing src/ChildWithDuplicates/DuplicateChild/.gitkeep
Writing src/ChildWithDuplicates/DuplicateChild1/.gitkeep
Writing src/ChildWithDuplicates.rbxm
Writing src/ChildWithoutDuplicates/Child/.gitkeep
Writing src/ChildWithDuplicates/DuplicateChild
Writing src/ChildWithDuplicates/DuplicateChild1
Writing src/ChildWithoutDuplicates
Writing src/ChildWithoutDuplicates/Child
Removing src/ChildWithDuplicates

View File

@@ -1,13 +0,0 @@
---
source: tests/rojo_test/syncback_util.rs
assertion_line: 101
expression: "String::from_utf8_lossy(&output.stdout)"
---
Writing default.project.json
Writing src/Camera.rbxm
Writing src/Terrain.rbxm
Writing src/_Folder/init.meta.json
Writing src/_Script.meta.json
Writing src/_Script.server.luau
Writing src
Writing src/_Folder

View File

@@ -1,9 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/foo.model.json
---
{
"name": "/Bar",
"className": "StringValue"
}

View File

@@ -1,6 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/ChildWithDuplicates/DuplicateChild1/.gitkeep
---

View File

@@ -1,6 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/ChildWithDuplicates/DuplicateChild/.gitkeep
---

View File

@@ -1,8 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Folder.model.json
---
{
"className": "Folder"
}

View File

@@ -1,8 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Folder/init.meta.json
---
{
"name": "/Folder"
}

View File

@@ -1,8 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Script.meta.json
---
{
"name": "/Script"
}

View File

@@ -1,6 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Script.server.luau
---
print("Hello world!")

View File

@@ -1,8 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Script/init.meta.json
---
{
"name": "/Script"
}

View File

@@ -1,6 +0,0 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/_Script/init.server.luau
---
print("Hello world!")

View File

@@ -1,11 +0,0 @@
{
"name": "model_json_name",
"tree": {
"$className": "DataModel",
"Workspace": {
"$className": "Workspace",
"$path": "src"
}
}
}

View File

@@ -1,5 +0,0 @@
{
"name": "/Bar",
"className": "StringValue"
}

View File

@@ -1,10 +0,0 @@
{
"name": "slugified_name",
"tree": {
"$className": "DataModel",
"Workspace": {
"$className": "Workspace",
"$path": "src"
}
}
}

View File

@@ -1,14 +1,15 @@
use crossbeam_channel::{select, Receiver, RecvError, Sender};
use jod_thread::JoinHandle;
use memofs::{IoResultExt, Vfs, VfsEvent};
use rbx_dom_weak::types::{Ref, Variant};
use std::path::PathBuf;
use std::{
fs,
sync::{Arc, Mutex},
};
use crossbeam_channel::{select, Receiver, RecvError, Sender};
use jod_thread::JoinHandle;
use memofs::{IoResultExt, Vfs, VfsEvent};
use rbx_dom_weak::types::{Ref, Variant};
use crate::{
git::SharedGitFilter,
message_queue::MessageQueue,
snapshot::{
apply_patch_set, compute_patch_set, AppliedPatchSet, InstigatingSource, PatchSet, RojoTree,
@@ -46,11 +47,15 @@ pub struct ChangeProcessor {
impl ChangeProcessor {
/// Spin up the ChangeProcessor, connecting it to the given tree, VFS, and
/// outbound message queue.
///
/// If `git_filter` is provided, it will be refreshed on every VFS event
/// to ensure newly changed files are acknowledged.
pub fn start(
tree: Arc<Mutex<RojoTree>>,
vfs: Arc<Vfs>,
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
tree_mutation_receiver: Receiver<PatchSet>,
git_filter: Option<SharedGitFilter>,
) -> Self {
let (shutdown_sender, shutdown_receiver) = crossbeam_channel::bounded(1);
let vfs_receiver = vfs.event_receiver();
@@ -58,6 +63,7 @@ impl ChangeProcessor {
tree,
vfs,
message_queue,
git_filter,
};
let job_thread = jod_thread::Builder::new()
@@ -111,55 +117,24 @@ struct JobThreadContext {
/// Whenever changes are applied to the DOM, we should push those changes
/// into this message queue to inform any connected clients.
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
/// Optional Git filter for --git-since mode. When set, will be refreshed
/// on every VFS event to ensure newly changed files are acknowledged.
git_filter: Option<SharedGitFilter>,
}
impl JobThreadContext {
/// Computes and applies patches to the DOM for a given file path.
///
/// This function finds the nearest ancestor to the given path that has associated instances
/// in the tree.
/// It then computes and applies changes for each affected instance ID and
/// returns a vector of applied patch sets.
fn apply_patches(&self, path: PathBuf) -> Vec<AppliedPatchSet> {
let mut tree = self.tree.lock().unwrap();
let mut applied_patches = Vec::new();
// Find the nearest ancestor to this path that has
// associated instances in the tree. This helps make sure
// that we handle additions correctly, especially if we
// receive events for descendants of a large tree being
// created all at once.
let mut current_path = path.as_path();
let affected_ids = loop {
let ids = tree.get_ids_at_path(current_path);
log::trace!("Path {} affects IDs {:?}", current_path.display(), ids);
if !ids.is_empty() {
break ids.to_vec();
}
log::trace!("Trying parent path...");
match current_path.parent() {
Some(parent) => current_path = parent,
None => break Vec::new(),
}
};
for id in affected_ids {
if let Some(patch) = compute_and_apply_changes(&mut tree, &self.vfs, id) {
if !patch.is_empty() {
applied_patches.push(patch);
}
}
}
applied_patches
}
fn handle_vfs_event(&self, event: VfsEvent) {
log::trace!("Vfs event: {:?}", event);
// If we have a git filter, refresh it to pick up any new changes.
// This ensures that files modified during the session will be acknowledged.
if let Some(ref git_filter) = self.git_filter {
if let Err(err) = git_filter.refresh() {
log::warn!("Failed to refresh git filter: {:?}", err);
}
}
// Update the VFS immediately with the event.
self.vfs
.commit_event(&event)
@@ -168,16 +143,54 @@ impl JobThreadContext {
// For a given VFS event, we might have many changes to different parts
// of the tree. Calculate and apply all of these changes.
let applied_patches = match event {
VfsEvent::Create(path) | VfsEvent::Write(path) => {
self.apply_patches(self.vfs.canonicalize(&path).unwrap())
}
VfsEvent::Remove(path) => {
// MemoFS does not track parent removals yet, so we can canonicalize
// the parent path safely and then append the removed path's file name.
let parent = path.parent().unwrap();
let file_name = path.file_name().unwrap();
let parent_normalized = self.vfs.canonicalize(parent).unwrap();
self.apply_patches(parent_normalized.join(file_name))
VfsEvent::Create(path) | VfsEvent::Remove(path) | VfsEvent::Write(path) => {
let mut tree = self.tree.lock().unwrap();
let mut applied_patches = Vec::new();
// Find the nearest ancestor to this path that has
// associated instances in the tree. This helps make sure
// that we handle additions correctly, especially if we
// receive events for descendants of a large tree being
// created all at once.
let mut current_path = path.as_path();
let affected_ids = loop {
let ids = tree.get_ids_at_path(current_path);
log::trace!("Path {} affects IDs {:?}", current_path.display(), ids);
if !ids.is_empty() {
break ids.to_vec();
}
log::trace!("Trying parent path...");
match current_path.parent() {
Some(parent) => current_path = parent,
None => break Vec::new(),
}
};
if affected_ids.is_empty() {
log::debug!(
"No instances found for path {} or any of its ancestors",
path.display()
);
} else {
log::debug!(
"Found {} affected instances for path {}",
affected_ids.len(),
path.display()
);
}
for id in affected_ids {
if let Some(patch) = compute_and_apply_changes(&mut tree, &self.vfs, id) {
if !patch.is_empty() {
applied_patches.push(patch);
}
}
}
applied_patches
}
_ => {
log::warn!("Unhandled VFS event: {:?}", event);

View File

@@ -81,7 +81,7 @@ impl BuildCommand {
let vfs = Vfs::new_default();
vfs.set_watch_enabled(self.watch);
let session = ServeSession::new(vfs, project_path)?;
let session = ServeSession::new(vfs, project_path, None)?;
let mut cursor = session.message_queue().cursor();
write_model(&session, &output_path, output_kind)?;

View File

@@ -54,7 +54,7 @@ fn initialize_plugin() -> anyhow::Result<ServeSession> {
in_memory_fs.load_snapshot("/plugin", plugin_snapshot)?;
let vfs = Vfs::new(in_memory_fs);
Ok(ServeSession::new(vfs, "/plugin")?)
Ok(ServeSession::new(vfs, "/plugin", None)?)
}
fn install_plugin() -> anyhow::Result<()> {

View File

@@ -9,7 +9,7 @@ use clap::Parser;
use memofs::Vfs;
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crate::{serve_session::ServeSession, web::LiveServer};
use crate::{git::GitFilter, serve_session::ServeSession, web::LiveServer};
use super::{resolve_path, GlobalOptions};
@@ -31,6 +31,19 @@ pub struct ServeCommand {
/// it has none.
#[clap(long)]
pub port: Option<u16>,
/// Only sync files that have changed since the given Git reference.
///
/// When this option is set, Rojo will only include files that have been
/// modified, added, or are untracked since the specified Git reference
/// (e.g., "HEAD", "main", a commit hash). This is useful for working with
/// large projects where you only want to sync your local changes.
///
/// Scripts that have not changed will still be acknowledged if modified
/// during the session, and all synced instances will have
/// ignoreUnknownInstances set to true to preserve descendants in Studio.
#[clap(long, value_name = "REF")]
pub git_since: Option<String>,
}
impl ServeCommand {
@@ -39,7 +52,19 @@ impl ServeCommand {
let vfs = Vfs::new_default();
let session = Arc::new(ServeSession::new(vfs, project_path)?);
// Set up Git filter if --git-since was specified
let git_filter = if let Some(ref base_ref) = self.git_since {
let repo_root = GitFilter::find_repo_root(&project_path)?;
log::info!(
"Git filter enabled: only syncing files changed since '{}'",
base_ref
);
Some(Arc::new(GitFilter::new(repo_root, base_ref.clone(), &project_path)?))
} else {
None
};
let session = Arc::new(ServeSession::new(vfs, project_path, git_filter)?);
let ip = self
.address
@@ -53,17 +78,25 @@ impl ServeCommand {
let server = LiveServer::new(session);
let _ = show_start_message(ip, port, global.color.into());
let _ = show_start_message(ip, port, self.git_since.as_deref(), global.color.into());
server.start((ip, port).into());
Ok(())
}
}
fn show_start_message(bind_address: IpAddr, port: u16, color: ColorChoice) -> io::Result<()> {
fn show_start_message(
bind_address: IpAddr,
port: u16,
git_since: Option<&str>,
color: ColorChoice,
) -> io::Result<()> {
let mut green = ColorSpec::new();
green.set_fg(Some(Color::Green)).set_bold(true);
let mut yellow = ColorSpec::new();
yellow.set_fg(Some(Color::Yellow)).set_bold(true);
let writer = BufferWriter::stdout(color);
let mut buffer = writer.buffer();
@@ -84,6 +117,13 @@ fn show_start_message(bind_address: IpAddr, port: u16, color: ColorChoice) -> io
buffer.set_color(&green)?;
writeln!(&mut buffer, "{}", port)?;
if let Some(base_ref) = git_since {
buffer.set_color(&ColorSpec::new())?;
write!(&mut buffer, " Mode: ")?;
buffer.set_color(&yellow)?;
writeln!(&mut buffer, "git-since ({})", base_ref)?;
}
writeln!(&mut buffer)?;
buffer.set_color(&ColorSpec::new())?;

View File

@@ -76,7 +76,7 @@ impl SourcemapCommand {
let vfs = Vfs::new_default();
vfs.set_watch_enabled(self.watch);
let session = ServeSession::new(vfs, project_path)?;
let session = ServeSession::new(vfs, project_path, None)?;
let mut cursor = session.message_queue().cursor();
let filter = if self.include_non_scripts {

View File

@@ -73,7 +73,7 @@ impl SyncbackCommand {
vfs.set_watch_enabled(false);
let project_start_timer = Instant::now();
let session_old = ServeSession::new(vfs, path_old.clone())?;
let session_old = ServeSession::new(vfs, path_old.clone(), None)?;
log::debug!(
"Finished opening project in {:0.02}s",
project_start_timer.elapsed().as_secs_f32()

View File

@@ -42,7 +42,7 @@ impl UploadCommand {
let vfs = Vfs::new_default();
let session = ServeSession::new(vfs, project_path)?;
let session = ServeSession::new(vfs, project_path, None)?;
let tree = session.tree();
let inner_tree = tree.inner();

380
src/git.rs Normal file
View File

@@ -0,0 +1,380 @@
//! Git integration for filtering files based on changes since a reference.
use std::{
collections::HashSet,
path::{Path, PathBuf},
process::Command,
sync::{Arc, RwLock},
};
use anyhow::{bail, Context};
/// A filter that tracks which files have been changed since a Git reference.
///
/// When active, only files that have been modified, added, or deleted according
/// to Git will be "acknowledged" and synced to Studio. This allows users to
/// work with large projects where they only want to sync their local changes.
///
/// Once a file is acknowledged (either initially or during the session), it
/// stays acknowledged for the entire session. This prevents files from being
/// deleted in Studio if their content is reverted to match the git reference.
///
/// Both path sets live behind interior `RwLock`s, so a shared reference is
/// enough to refresh or query the filter; presumably this is what lets it be
/// shared across threads (e.g. through the `SharedGitFilter` alias used by
/// the change processor) — verify against that alias's definition.
#[derive(Debug)]
pub struct GitFilter {
/// The Git repository root directory.
repo_root: PathBuf,
/// The Git reference to compare against (e.g., "HEAD", "main", a commit hash).
base_ref: String,
/// Cache of paths that are currently different from the base ref according to git.
/// This is refreshed on every VFS event.
git_changed_paths: RwLock<HashSet<PathBuf>>,
/// Paths that have been acknowledged at any point during this session.
/// Once a path is added here, it stays acknowledged forever (for this session).
/// This prevents files from being deleted if their content is reverted.
session_acknowledged_paths: RwLock<HashSet<PathBuf>>,
}
impl GitFilter {
/// Creates a new GitFilter for the given repository root and base reference.
///
/// `repo_root` must be the root of the Git repository (the directory that
/// contains `.git`). `base_ref` is the Git reference to compare against
/// (e.g., "HEAD", "main", a commit hash). `project_path` is the path to the
/// project being served; it is always acknowledged regardless of git status
/// so that the project structure exists even when there are no changes.
pub fn new(repo_root: PathBuf, base_ref: String, project_path: &Path) -> anyhow::Result<Self> {
    let this = Self {
        repo_root,
        base_ref,
        git_changed_paths: RwLock::new(HashSet::new()),
        session_acknowledged_paths: RwLock::new(HashSet::new()),
    };

    // The project entry point (and its directory) must always be
    // acknowledged, otherwise nothing would sync when git reports no
    // changes at all.
    this.acknowledge_project_path(project_path);

    // Populate the cache with the current set of git changes.
    this.refresh()?;

    Ok(this)
}
/// Acknowledges the project path and its containing directory.
/// This ensures the project structure always exists regardless of git status.
fn acknowledge_project_path(&self, project_path: &Path) {
    // Prefer the canonical form; fall back to the path as given when it
    // cannot be resolved (e.g. it does not exist yet).
    let canon = |p: &Path| p.canonicalize().unwrap_or_else(|_| p.to_path_buf());

    let mut session = self.session_acknowledged_paths.write().unwrap();

    // The project path itself (may be a directory or a .project.json file).
    let canonical = canon(project_path);
    session.insert(canonical.clone());

    // Every ancestor directory of the project path.
    for parent in canonical.ancestors().skip(1) {
        session.insert(parent.to_path_buf());
    }

    // For a project directory, also acknowledge the default project files
    // that may live inside it.
    if project_path.is_dir() {
        for name in &["default.project.json", "default.project.jsonc"] {
            session.insert(canon(&project_path.join(name)));
        }
    }

    // For a .project.json file, also acknowledge its parent directory.
    if let Some(parent) = project_path.parent() {
        session.insert(canon(parent));
    }

    log::debug!(
        "GitFilter: acknowledged project path {} ({} paths total)",
        project_path.display(),
        session.len()
    );
}
/// Finds the Git repository root for the given path.
///
/// Runs `git rev-parse --show-toplevel` from `path` and returns the
/// reported top-level directory. Fails if git cannot be executed or the
/// path is not inside a repository.
pub fn find_repo_root(path: &Path) -> anyhow::Result<PathBuf> {
    let output = Command::new("git")
        .args(["rev-parse", "--show-toplevel"])
        .current_dir(path)
        .output()
        .context("Failed to execute git rev-parse")?;

    if output.status.success() {
        let top_level = String::from_utf8_lossy(&output.stdout);
        Ok(PathBuf::from(top_level.trim()))
    } else {
        let stderr = String::from_utf8_lossy(&output.stderr);
        bail!("Failed to find Git repository root: {}", stderr.trim())
    }
}
/// Refreshes the cache of acknowledged paths by querying Git.
///
/// This should be called when files change to ensure newly modified files
/// are properly acknowledged. Once a path is acknowledged, it stays
/// acknowledged for the entire session (even if the file is reverted).
pub fn refresh(&self) -> anyhow::Result<()> {
let mut git_changed = HashSet::new();
// Get files changed since the base ref (modified, added, deleted)
let diff_output = Command::new("git")
.args(["diff", "--name-only", &self.base_ref])
.current_dir(&self.repo_root)
.output()
.context("Failed to execute git diff")?;
if !diff_output.status.success() {
let stderr = String::from_utf8_lossy(&diff_output.stderr);
bail!("git diff failed: {}", stderr.trim());
}
let diff_files = String::from_utf8_lossy(&diff_output.stdout);
let diff_count = diff_files.lines().filter(|l| !l.is_empty()).count();
if diff_count > 0 {
log::debug!("git diff found {} changed files", diff_count);
}
for line in diff_files.lines() {
if !line.is_empty() {
let path = self.repo_root.join(line);
log::trace!("git diff: acknowledging {}", path.display());
self.acknowledge_path(&path, &mut git_changed);
}
}
// Get untracked files (new files not yet committed)
let untracked_output = Command::new("git")
.args(["ls-files", "--others", "--exclude-standard"])
.current_dir(&self.repo_root)
.output()
.context("Failed to execute git ls-files")?;
if !untracked_output.status.success() {
let stderr = String::from_utf8_lossy(&untracked_output.stderr);
bail!("git ls-files failed: {}", stderr.trim());
}
let untracked_files = String::from_utf8_lossy(&untracked_output.stdout);
for line in untracked_files.lines() {
if !line.is_empty() {
let path = self.repo_root.join(line);
self.acknowledge_path(&path, &mut git_changed);
}
}
// Get staged files (files added to index but not yet committed)
let staged_output = Command::new("git")
.args(["diff", "--name-only", "--cached", &self.base_ref])
.current_dir(&self.repo_root)
.output()
.context("Failed to execute git diff --cached")?;
if staged_output.status.success() {
let staged_files = String::from_utf8_lossy(&staged_output.stdout);
for line in staged_files.lines() {
if !line.is_empty() {
let path = self.repo_root.join(line);
self.acknowledge_path(&path, &mut git_changed);
}
}
}
// Update the git changed paths cache
{
let mut cache = self.git_changed_paths.write().unwrap();
*cache = git_changed.clone();
}
// Merge newly changed paths into session acknowledged paths
// Once acknowledged, a path stays acknowledged for the entire session
{
let mut session = self.session_acknowledged_paths.write().unwrap();
for path in git_changed {
session.insert(path);
}
log::debug!(
"GitFilter refreshed: {} paths acknowledged in session",
session.len()
);
}
Ok(())
}
/// Acknowledges a path and all its ancestors, plus associated meta files.
fn acknowledge_path(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
    // Prefer the canonical form; fall back to the raw path when it cannot
    // be resolved (e.g. the file was deleted).
    let path = match path.canonicalize() {
        Ok(canonical) => canonical,
        Err(_) => path.to_path_buf(),
    };

    // The path itself, then every ancestor directory up to the root.
    acknowledged.insert(path.clone());
    for ancestor in path.ancestors().skip(1) {
        acknowledged.insert(ancestor.to_path_buf());
    }

    // Any meta files that describe this path.
    self.acknowledge_meta_files(&path, acknowledged);
}
/// Acknowledges associated meta files for a given path.
fn acknowledge_meta_files(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
if let Some(parent) = path.parent() {
// For a file like "foo.lua", also acknowledge "foo.meta.json"
// Strip known extensions to get the base name
let base_name = strip_lua_extension(file_name);
let meta_path = parent.join(format!("{}.meta.json", base_name));
if let Ok(canonical) = meta_path.canonicalize() {
acknowledged.insert(canonical);
} else {
acknowledged.insert(meta_path);
}
// For init files, also acknowledge "init.meta.json" in the same directory
if file_name.starts_with("init.") {
let init_meta = parent.join("init.meta.json");
if let Ok(canonical) = init_meta.canonicalize() {
acknowledged.insert(canonical);
} else {
acknowledged.insert(init_meta);
}
}
}
}
}
/// Checks if a path is acknowledged (should be synced).
///
/// Returns `true` if the path or any of its descendants have been changed
/// at any point during this session. Once a file is acknowledged, it stays
/// acknowledged even if its content is reverted to match the git reference.
pub fn is_acknowledged(&self, path: &Path) -> bool {
let session = self.session_acknowledged_paths.read().unwrap();
// Try to canonicalize the path
let canonical = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
// Check if this exact path is acknowledged
if session.contains(&canonical) {
log::trace!("Path {} is directly acknowledged", path.display());
return true;
}
// Also check without canonicalization in case of path differences
if session.contains(path) {
log::trace!("Path {} is acknowledged (non-canonical)", path.display());
return true;
}
// For directories, check if any descendant is acknowledged
// This is done by checking if any acknowledged path starts with this path
for acknowledged in session.iter() {
if acknowledged.starts_with(&canonical) {
log::trace!(
"Path {} has acknowledged descendant {}",
path.display(),
acknowledged.display()
);
return true;
}
// Also check non-canonical
if acknowledged.starts_with(path) {
log::trace!(
"Path {} has acknowledged descendant {} (non-canonical)",
path.display(),
acknowledged.display()
);
return true;
}
}
log::trace!(
"Path {} is NOT acknowledged (canonical: {})",
path.display(),
canonical.display()
);
false
}
/// Returns the base reference being compared against.
pub fn base_ref(&self) -> &str {
&self.base_ref
}
/// Returns the repository root path.
pub fn repo_root(&self) -> &Path {
&self.repo_root
}
/// Explicitly acknowledges a path and all its ancestors.
/// This is useful for ensuring certain paths are always synced regardless of git status.
pub fn force_acknowledge(&self, path: &Path) {
let mut acknowledged = HashSet::new();
self.acknowledge_path(path, &mut acknowledged);
let mut session = self.session_acknowledged_paths.write().unwrap();
for p in acknowledged {
session.insert(p);
}
}
}
/// Strips Lua-related extensions from a file name to get the base name.
///
/// Mirrors the script suffixes recognized by the Lua middleware's syncback
/// path (`.server`, `.client`, and `.plugin` variants of `.lua`/`.luau`), so
/// that e.g. `foo.server.lua` maps to the same `foo.meta.json` that Rojo
/// reads and writes. When no Lua suffix matches, a single ordinary extension
/// is stripped instead.
///
/// Dotfiles such as `.gitignore` are returned unchanged rather than reduced
/// to an empty stem (which would have produced a nonsense `.meta.json` path).
fn strip_lua_extension(file_name: &str) -> &str {
    // Keep this list in sync with the suffixes handled by the Lua
    // middleware's syncback meta-file naming.
    const EXTENSIONS: &[&str] = &[
        ".server.luau",
        ".server.lua",
        ".client.luau",
        ".client.lua",
        ".plugin.luau",
        ".plugin.lua",
        ".luau",
        ".lua",
    ];

    for ext in EXTENSIONS {
        if let Some(base) = file_name.strip_suffix(ext) {
            // An empty base means the whole name *is* the extension
            // (e.g. a file literally named ".lua"); fall through instead.
            if !base.is_empty() {
                return base;
            }
        }
    }

    // No Lua extension matched; strip one ordinary extension. An empty stem
    // (e.g. ".gitignore") means the name has no real base, so keep it whole.
    match file_name.rsplit_once('.') {
        Some((base, _)) if !base.is_empty() => base,
        _ => file_name,
    }
}
/// A thread-safe, shared handle to a [`GitFilter`].
///
/// This is an [`Arc`] alias (not a wrapper type), so one filter instance can
/// be cloned cheaply and used from multiple threads — e.g. passed into both
/// the serve session and the change processor.
pub type SharedGitFilter = Arc<GitFilter>;
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercises `strip_lua_extension` across script suffixes, ordinary
    /// extensions, and extension-less names.
    #[test]
    fn test_strip_lua_extension() {
        // Script suffixes collapse to the bare stem.
        assert_eq!(strip_lua_extension("foo.server.lua"), "foo");
        assert_eq!(strip_lua_extension("foo.server.luau"), "foo");
        assert_eq!(strip_lua_extension("foo.client.lua"), "foo");
        assert_eq!(strip_lua_extension("foo.client.luau"), "foo");
        assert_eq!(strip_lua_extension("foo.lua"), "foo");
        assert_eq!(strip_lua_extension("foo.luau"), "foo");
        assert_eq!(strip_lua_extension("init.server.lua"), "init");

        // Non-Lua files lose exactly one trailing extension.
        assert_eq!(strip_lua_extension("bar.txt"), "bar");
        assert_eq!(strip_lua_extension("data.tar.gz"), "data.tar");

        // Names without any extension pass through unchanged.
        assert_eq!(strip_lua_extension("noextension"), "noextension");
    }
}

View File

@@ -9,6 +9,7 @@ mod tree_view;
mod auth_cookie;
mod change_processor;
mod git;
mod glob;
mod json;
mod lua_ast;
@@ -28,6 +29,7 @@ mod web;
// TODO: Work out what we should expose publicly
pub use git::{GitFilter, SharedGitFilter};
pub use project::*;
pub use rojo_ref::*;
pub use session_id::SessionId;

View File

@@ -13,6 +13,7 @@ use thiserror::Error;
use crate::{
change_processor::ChangeProcessor,
git::SharedGitFilter,
message_queue::MessageQueue,
project::{Project, ProjectError},
session_id::SessionId,
@@ -94,7 +95,14 @@ impl ServeSession {
/// The project file is expected to be loaded out-of-band since it's
/// currently loaded from the filesystem directly instead of through the
/// in-memory filesystem layer.
pub fn new<P: AsRef<Path>>(vfs: Vfs, start_path: P) -> Result<Self, ServeSessionError> {
///
/// If `git_filter` is provided, only files that have changed since the
/// specified Git reference will be synced.
pub fn new<P: AsRef<Path>>(
vfs: Vfs,
start_path: P,
git_filter: Option<SharedGitFilter>,
) -> Result<Self, ServeSessionError> {
let start_path = start_path.as_ref();
let start_time = Instant::now();
@@ -102,12 +110,28 @@ impl ServeSession {
let root_project = Project::load_initial_project(&vfs, start_path)?;
// If git filter is active, ensure the project file location is acknowledged
// This is necessary so the project structure exists even with no git changes
if let Some(ref filter) = git_filter {
filter.force_acknowledge(start_path);
filter.force_acknowledge(&root_project.file_location);
filter.force_acknowledge(root_project.folder_location());
log::debug!(
"Force acknowledged project at {}",
root_project.file_location.display()
);
}
let mut tree = RojoTree::new(InstanceSnapshot::new());
let root_id = tree.get_root_id();
let instance_context =
InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts);
let instance_context = match &git_filter {
Some(filter) => {
InstanceContext::with_git_filter(root_project.emit_legacy_scripts, Arc::clone(filter))
}
None => InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts),
};
log::trace!("Generating snapshot of instances from VFS");
let snapshot = snapshot_from_vfs(&instance_context, &vfs, start_path)?;
@@ -133,6 +157,7 @@ impl ServeSession {
Arc::clone(&vfs),
Arc::clone(&message_queue),
tree_mutation_receiver,
git_filter,
);
Ok(Self {

View File

@@ -8,6 +8,7 @@ use anyhow::Context;
use serde::{Deserialize, Serialize};
use crate::{
git::SharedGitFilter,
glob::Glob,
path_serializer,
project::ProjectNode,
@@ -70,12 +71,6 @@ pub struct InstanceMetadata {
/// A schema provided via a JSON file, if one exists. Will be `None` for
/// all non-JSON middleware.
pub schema: Option<String>,
/// A custom name specified via meta.json or model.json files. If present,
/// this name will be used for the instance while the filesystem name will
/// be slugified to remove illegal characters.
#[serde(skip_serializing_if = "Option::is_none")]
pub specified_name: Option<String>,
}
impl InstanceMetadata {
@@ -88,7 +83,6 @@ impl InstanceMetadata {
specified_id: None,
middleware: None,
schema: None,
specified_name: None,
}
}
@@ -137,13 +131,6 @@ impl InstanceMetadata {
pub fn schema(self, schema: Option<String>) -> Self {
Self { schema, ..self }
}
pub fn specified_name(self, specified_name: Option<String>) -> Self {
Self {
specified_name,
..self
}
}
}
impl Default for InstanceMetadata {
@@ -152,13 +139,27 @@ impl Default for InstanceMetadata {
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstanceContext {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub path_ignore_rules: Arc<Vec<PathIgnoreRule>>,
pub emit_legacy_scripts: bool,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub sync_rules: Vec<SyncRule>,
/// Optional Git filter for --git-since mode. When set, only files that have
/// changed since the specified Git reference will be synced.
#[serde(skip)]
pub git_filter: Option<SharedGitFilter>,
}
impl PartialEq for InstanceContext {
fn eq(&self, other: &Self) -> bool {
// Note: git_filter is intentionally excluded from comparison
// since it's runtime state, not configuration
self.path_ignore_rules == other.path_ignore_rules
&& self.emit_legacy_scripts == other.emit_legacy_scripts
&& self.sync_rules == other.sync_rules
}
}
impl InstanceContext {
@@ -167,6 +168,7 @@ impl InstanceContext {
path_ignore_rules: Arc::new(Vec::new()),
emit_legacy_scripts: emit_legacy_scripts_default().unwrap(),
sync_rules: Vec::new(),
git_filter: None,
}
}
@@ -179,6 +181,36 @@ impl InstanceContext {
}
}
/// Creates a new InstanceContext with a Git filter for --git-since mode.
pub fn with_git_filter(
emit_legacy_scripts: Option<bool>,
git_filter: SharedGitFilter,
) -> Self {
Self {
git_filter: Some(git_filter),
..Self::with_emit_legacy_scripts(emit_legacy_scripts)
}
}
/// Sets the Git filter for this context.
pub fn set_git_filter(&mut self, git_filter: Option<SharedGitFilter>) {
self.git_filter = git_filter;
}
/// Returns true if the given path should be acknowledged (synced).
/// If no git filter is set, all paths are acknowledged.
pub fn is_path_acknowledged(&self, path: &Path) -> bool {
match &self.git_filter {
Some(filter) => filter.is_acknowledged(path),
None => true,
}
}
/// Returns true if a git filter is active.
pub fn has_git_filter(&self) -> bool {
self.git_filter.is_some()
}
/// Extend the list of ignore rules in the context with the given new rules.
pub fn add_path_ignore_rules<I>(&mut self, new_rules: I)
where

View File

@@ -42,7 +42,7 @@ pub fn snapshot_csv(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?]),
.relevant_paths(vec![path.to_path_buf()]),
);
AdjacentMetadata::read_and_apply_all(vfs, path, name, &mut snapshot)?;
@@ -109,14 +109,8 @@ pub fn syncback_csv<'sync>(
if !meta.is_empty() {
let parent = snapshot.path.parent_err()?;
let file_name = snapshot
.path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("");
let meta_stem = file_name.strip_suffix(".csv").unwrap_or(file_name);
fs_snapshot.add_file(
parent.join(format!("{meta_stem}.meta.json")),
parent.join(format!("{}.meta.json", new_inst.name)),
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
)
}

View File

@@ -8,13 +8,10 @@ use memofs::{DirEntry, Vfs};
use crate::{
snapshot::{InstanceContext, InstanceMetadata, InstanceSnapshot, InstigatingSource},
syncback::{
extension_for_middleware, hash_instance, FsSnapshot, SyncbackReturn,
SyncbackSnapshot,
},
syncback::{hash_instance, FsSnapshot, SyncbackReturn, SyncbackSnapshot},
};
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs, Middleware};
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs};
const EMPTY_DIR_KEEP_NAME: &str = ".gitkeep";
@@ -65,19 +62,18 @@ pub fn snapshot_dir_no_meta(
}
}
let normalized_path = vfs.canonicalize(path)?;
let relevant_paths = vec![
normalized_path.clone(),
path.to_path_buf(),
// TODO: We shouldn't need to know about Lua existing in this
// middleware. Should we figure out a way for that function to add
// relevant paths to this middleware?
normalized_path.join("init.lua"),
normalized_path.join("init.luau"),
normalized_path.join("init.server.lua"),
normalized_path.join("init.server.luau"),
normalized_path.join("init.client.lua"),
normalized_path.join("init.client.luau"),
normalized_path.join("init.csv"),
path.join("init.lua"),
path.join("init.luau"),
path.join("init.server.lua"),
path.join("init.server.luau"),
path.join("init.client.lua"),
path.join("init.client.luau"),
path.join("init.csv"),
];
let snapshot = InstanceSnapshot::new()
@@ -94,22 +90,6 @@ pub fn snapshot_dir_no_meta(
Ok(Some(snapshot))
}
/// Splits a filesystem name into (stem, extension) based on middleware type.
/// For directory middleware, the extension is empty. For file middleware,
/// the extension comes from `extension_for_middleware`.
fn split_name_and_ext(name: &str, middleware: Middleware) -> (&str, &str) {
if middleware.is_dir() {
(name, "")
} else {
let ext = extension_for_middleware(middleware);
if let Some(stem) = name.strip_suffix(&format!(".{ext}")) {
(stem, ext)
} else {
(name, "")
}
}
}
pub fn syncback_dir<'sync>(
snapshot: &SyncbackSnapshot<'sync>,
) -> anyhow::Result<SyncbackReturn<'sync>> {
@@ -153,128 +133,65 @@ pub fn syncback_dir_no_meta<'sync>(
let mut children = Vec::new();
let mut removed_children = Vec::new();
// Build the old child map early so it can be used for deduplication below.
let mut old_child_map = HashMap::new();
// We have to enforce unique child names for the file system.
let mut child_names = HashSet::with_capacity(new_inst.children().len());
let mut duplicate_set = HashSet::new();
for child_ref in new_inst.children() {
let child = snapshot.get_new_instance(*child_ref).unwrap();
if !child_names.insert(child.name.to_lowercase()) {
duplicate_set.insert(child.name.as_str());
}
}
if !duplicate_set.is_empty() {
if duplicate_set.len() <= 25 {
anyhow::bail!(
"Instance has children with duplicate name (case may not exactly match):\n {}",
duplicate_set.into_iter().collect::<Vec<&str>>().join(", ")
);
}
anyhow::bail!("Instance has more than 25 children with duplicate names");
}
if let Some(old_inst) = snapshot.old_inst() {
let mut old_child_map = HashMap::with_capacity(old_inst.children().len());
for child in old_inst.children() {
let inst = snapshot.get_old_instance(*child).unwrap();
old_child_map.insert(inst.name(), inst);
}
}
// --- Two-pass collision resolution ---
//
// Pass 1: Collect each child's base filesystem name and old ref, applying
// skip conditions. Track which names are used (lowercased) so we can
// detect collisions.
struct ChildEntry {
new_ref: rbx_dom_weak::types::Ref,
old_ref: Option<rbx_dom_weak::types::Ref>,
base_name: String,
middleware: Middleware,
skip: bool,
}
let mut entries = Vec::with_capacity(new_inst.children().len());
let mut used_names: HashSet<String> = HashSet::with_capacity(new_inst.children().len());
let mut collision_indices: Vec<usize> = Vec::new();
for new_child_ref in new_inst.children() {
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
// Determine old_ref and apply skip conditions.
let old_child = if snapshot.old_inst().is_some() {
old_child_map.remove(new_child.name.as_str())
} else {
None
};
let mut skip = false;
if let Some(ref old) = old_child {
if old.metadata().relevant_paths.is_empty() {
log::debug!(
"Skipping instance {} because it doesn't exist on the disk",
old.name()
);
skip = true;
} else if matches!(
old.metadata().instigating_source,
Some(InstigatingSource::ProjectNode { .. })
) {
log::debug!(
"Skipping instance {} because it originates in a project file",
old.name()
);
skip = true;
}
}
let old_ref = old_child.as_ref().map(|o| o.id());
if skip {
entries.push(ChildEntry {
new_ref: *new_child_ref,
old_ref,
base_name: String::new(),
middleware: Middleware::Dir,
skip: true,
});
continue;
}
let (middleware, base_name) =
snapshot.child_middleware_and_name(*new_child_ref, old_ref)?;
let idx = entries.len();
let lower = base_name.to_lowercase();
if !used_names.insert(lower) {
// Name already claimed — needs resolution.
collision_indices.push(idx);
}
entries.push(ChildEntry {
new_ref: *new_child_ref,
old_ref,
base_name,
middleware,
skip: false,
});
}
// Pass 2: Resolve collisions by appending incrementing suffixes.
for idx in collision_indices {
let entry = &entries[idx];
let (stem, ext) = split_name_and_ext(&entry.base_name, entry.middleware);
let mut counter = 1u32;
loop {
let candidate = if ext.is_empty() {
format!("{stem}{counter}")
for new_child_ref in new_inst.children() {
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
if let Some(old_child) = old_child_map.remove(new_child.name.as_str()) {
if old_child.metadata().relevant_paths.is_empty() {
log::debug!(
"Skipping instance {} because it doesn't exist on the disk",
old_child.name()
);
continue;
} else if matches!(
old_child.metadata().instigating_source,
Some(InstigatingSource::ProjectNode { .. })
) {
log::debug!(
"Skipping instance {} because it originates in a project file",
old_child.name()
);
continue;
}
// This child exists in both doms. Pass it on.
children.push(snapshot.with_joined_path(*new_child_ref, Some(old_child.id()))?);
} else {
format!("{stem}{counter}.{ext}")
};
let lower = candidate.to_lowercase();
if used_names.insert(lower) {
// Safe to mutate — we only visit each collision index once.
let entry = &mut entries[idx];
entry.base_name = candidate;
break;
// The child only exists in the new dom
children.push(snapshot.with_joined_path(*new_child_ref, None)?);
}
counter += 1;
}
}
// Create snapshots from resolved entries.
for entry in &entries {
if entry.skip {
continue;
}
let resolved_path = snapshot.path.join(&entry.base_name);
children.push(snapshot.with_new_path(resolved_path, entry.new_ref, entry.old_ref));
}
// Any children that are in the old dom but not the new one are removed.
if snapshot.old_inst().is_some() {
// Any children that are in the old dom but not the new one are removed.
removed_children.extend(old_child_map.into_values());
} else {
// There is no old instance. Just add every child.
for new_child_ref in new_inst.children() {
children.push(snapshot.with_joined_path(*new_child_ref, None)?);
}
}
let mut fs_snapshot = FsSnapshot::new();
@@ -307,12 +224,6 @@ pub fn syncback_dir_no_meta<'sync>(
mod test {
use super::*;
use std::path::PathBuf;
use crate::{
snapshot::{InstanceMetadata, InstanceSnapshot},
Project, RojoTree, SyncbackData, SyncbackSnapshot,
};
use memofs::{InMemoryFs, VfsSnapshot};
#[test]
@@ -349,302 +260,4 @@ mod test {
insta::assert_yaml_snapshot!(instance_snapshot);
}
fn make_project() -> Project {
serde_json::from_str(r#"{"tree": {"$className": "DataModel"}}"#).unwrap()
}
fn make_vfs() -> Vfs {
let mut imfs = InMemoryFs::new();
imfs.load_snapshot("/root", VfsSnapshot::empty_dir()).unwrap();
Vfs::new(imfs)
}
/// Two children whose Roblox names are identical when lowercased ("Alpha"
/// and "alpha") but live at different filesystem paths because of the
/// `name` property ("Beta/" and "Alpha/" respectively). The dedup check
/// must use the actual filesystem paths, not the raw Roblox names, to
/// avoid a false-positive duplicate error.
#[test]
fn syncback_no_false_duplicate_with_name_prop() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
// Old child A: Roblox name "Alpha", on disk at "/root/Beta"
// (name property maps "Alpha" → "Beta" on the filesystem)
let old_child_a = InstanceSnapshot::new()
.name("Alpha")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root/Beta"))
.relevant_paths(vec![PathBuf::from("/root/Beta")]),
);
// Old child B: Roblox name "alpha", on disk at "/root/Alpha"
let old_child_b = InstanceSnapshot::new()
.name("alpha")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root/Alpha"))
.relevant_paths(vec![PathBuf::from("/root/Alpha")]),
);
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.children(vec![old_child_a, old_child_b])
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
// New state: same two children in Roblox.
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Alpha"));
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("alpha"));
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
assert!(
result.is_ok(),
"should not error when two children have the same lowercased Roblox \
name but map to distinct filesystem paths: {:?}",
result.as_ref().err(),
);
}
/// Two completely new children with the same name get resolved via
/// incrementing suffixes instead of erroring.
#[test]
fn syncback_resolves_sibling_duplicate_names() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
assert!(
result.is_ok(),
"should resolve duplicate names with suffixes, not error: {:?}",
result.as_ref().err(),
);
let children = result.unwrap().children;
let mut names: Vec<String> = children
.iter()
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
.collect();
names.sort();
assert_eq!(names, vec!["Foo", "Foo1"]);
}
/// A new child named "Init" (as a ModuleScript) would naively become
/// "Init.luau", which case-insensitively matches the parent's reserved
/// "init.luau". Syncback must resolve this automatically by prefixing the
/// filesystem name with '_' (→ "_Init.luau") rather than erroring.
#[test]
fn syncback_resolves_init_name_conflict() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(
new_parent,
InstanceBuilder::new("ModuleScript").with_name("Init"),
);
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
assert!(
result.is_ok(),
"should resolve init-name conflict by prefixing '_', not error: {:?}",
result.as_ref().err(),
);
// The child should have been placed at "_Init.luau", not "Init.luau".
let child_file_name = result
.unwrap()
.children
.into_iter()
.next()
.and_then(|c| c.path.file_name().map(|n| n.to_string_lossy().into_owned()))
.unwrap_or_default();
assert!(
child_file_name.starts_with('_'),
"child filesystem name should start with '_' to avoid init collision, \
got: {child_file_name}",
);
}
/// A child whose filesystem name is stored with a slugified prefix (e.g.
/// "_Init") must NOT be blocked — only the bare "init" stem is reserved.
#[test]
fn syncback_allows_slugified_init_name() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
// Existing child: on disk as "_Init" (slugified from a name with an
// illegal character), its stem is "_init" which is not reserved.
let old_child = InstanceSnapshot::new()
.name("Init")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root/_Init"))
.relevant_paths(vec![PathBuf::from("/root/_Init")]),
);
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.children(vec![old_child])
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Init"));
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
assert!(
result.is_ok(),
"should allow a child whose filesystem name is slugified away from \
the reserved 'init' stem: {:?}",
result.as_ref().err(),
);
}
/// Two new children both named "Init" (ModuleScripts) should get
/// "_Init.luau" and "_Init1.luau" respectively.
#[test]
fn syncback_resolves_multiple_init_conflicts() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(
new_parent,
InstanceBuilder::new("ModuleScript").with_name("Init"),
);
new_tree.insert(
new_parent,
InstanceBuilder::new("ModuleScript").with_name("Init"),
);
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
assert!(
result.is_ok(),
"should resolve multiple init conflicts with suffixes: {:?}",
result.as_ref().err(),
);
let children = result.unwrap().children;
let mut names: Vec<String> = children
.iter()
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
.collect();
names.sort();
assert_eq!(names, vec!["_Init.luau", "_Init1.luau"]);
}
}

View File

@@ -32,7 +32,7 @@ pub fn snapshot_json(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);

View File

@@ -35,14 +35,20 @@ pub fn snapshot_json_model(
format!("File is not a valid JSON model: {}", path.display())
})?;
// If the JSON has a name property, preserve it in metadata for syncback
let specified_name = instance.name.clone();
if let Some(top_level_name) = &instance.name {
let new_name = format!("{}.model.json", top_level_name);
// Use the name from JSON if present, otherwise fall back to filename-derived name
if instance.name.is_none() {
instance.name = Some(name.to_owned());
log::warn!(
"Model at path {} had a top-level Name field. \
This field has been ignored since Rojo 6.0.\n\
Consider removing this field and renaming the file to {}.",
new_name,
path.display()
);
}
instance.name = Some(name.to_owned());
let id = instance.id.take().map(RojoRef::new);
let schema = instance.schema.take();
@@ -53,11 +59,10 @@ pub fn snapshot_json_model(
snapshot.metadata = snapshot
.metadata
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context)
.specified_id(id)
.schema(schema)
.specified_name(specified_name);
.schema(schema);
Ok(Some(snapshot))
}
@@ -76,7 +81,6 @@ pub fn syncback_json_model<'sync>(
// schemas will ever exist in one project for it to matter, but it
// could have a performance cost.
model.schema = old_inst.metadata().schema.clone();
model.name = old_inst.metadata().specified_name.clone();
}
Ok(SyncbackReturn {

View File

@@ -88,7 +88,7 @@ pub fn snapshot_lua(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);
@@ -158,23 +158,8 @@ pub fn syncback_lua<'sync>(
if !meta.is_empty() {
let parent_location = snapshot.path.parent_err()?;
let file_name = snapshot
.path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("");
let meta_stem = file_name
.strip_suffix(".server.luau")
.or_else(|| file_name.strip_suffix(".server.lua"))
.or_else(|| file_name.strip_suffix(".client.luau"))
.or_else(|| file_name.strip_suffix(".client.lua"))
.or_else(|| file_name.strip_suffix(".plugin.luau"))
.or_else(|| file_name.strip_suffix(".plugin.lua"))
.or_else(|| file_name.strip_suffix(".luau"))
.or_else(|| file_name.strip_suffix(".lua"))
.unwrap_or(file_name);
fs_snapshot.add_file(
parent_location.join(format!("{meta_stem}.meta.json")),
parent_location.join(format!("{}.meta.json", new_inst.name)),
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
);
}

View File

@@ -10,10 +10,7 @@ use rbx_dom_weak::{
use serde::{Deserialize, Serialize};
use crate::{
json,
resolution::UnresolvedValue,
snapshot::InstanceSnapshot,
syncback::{validate_file_name, SyncbackSnapshot},
json, resolution::UnresolvedValue, snapshot::InstanceSnapshot, syncback::SyncbackSnapshot,
RojoRef,
};
@@ -39,9 +36,6 @@ pub struct AdjacentMetadata {
#[serde(default, skip_serializing_if = "IndexMap::is_empty")]
pub attributes: IndexMap<String, UnresolvedValue>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip)]
pub path: PathBuf,
}
@@ -150,26 +144,6 @@ impl AdjacentMetadata {
}
}
let name = snapshot
.old_inst()
.and_then(|inst| inst.metadata().specified_name.clone())
.or_else(|| {
// Write name when name_for_inst would produce a different
// filesystem stem (slugification or init-prefix).
if snapshot.old_inst().is_none() {
let instance_name = &snapshot.new_inst().name;
if validate_file_name(instance_name).is_err()
|| instance_name.to_lowercase() == "init"
{
Some(instance_name.clone())
} else {
None
}
} else {
None
}
});
Ok(Some(Self {
ignore_unknown_instances: if ignore_unknown_instances {
Some(true)
@@ -181,7 +155,6 @@ impl AdjacentMetadata {
path,
id: None,
schema,
name,
}))
}
@@ -240,26 +213,11 @@ impl AdjacentMetadata {
Ok(())
}
fn apply_name(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
if self.name.is_some() && snapshot.metadata.specified_name.is_some() {
anyhow::bail!(
"cannot specify a name using {} (instance has a name from somewhere else)",
self.path.display()
);
}
if let Some(name) = &self.name {
snapshot.name = name.clone().into();
}
snapshot.metadata.specified_name = self.name.take();
Ok(())
}
pub fn apply_all(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
self.apply_ignore_unknown_instances(snapshot);
self.apply_properties(snapshot)?;
self.apply_id(snapshot)?;
self.apply_schema(snapshot)?;
self.apply_name(snapshot)?;
Ok(())
}
@@ -268,13 +226,11 @@ impl AdjacentMetadata {
///
/// - The number of properties and attributes is 0
/// - `ignore_unknown_instances` is None
/// - `name` is None
#[inline]
pub fn is_empty(&self) -> bool {
self.attributes.is_empty()
&& self.properties.is_empty()
&& self.ignore_unknown_instances.is_none()
&& self.name.is_none()
}
// TODO: Add method to allow selectively applying parts of metadata and
@@ -306,9 +262,6 @@ pub struct DirectoryMetadata {
#[serde(skip_serializing_if = "Option::is_none")]
pub class_name: Option<Ustr>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip)]
pub path: PathBuf,
}
@@ -419,26 +372,6 @@ impl DirectoryMetadata {
}
}
let name = snapshot
.old_inst()
.and_then(|inst| inst.metadata().specified_name.clone())
.or_else(|| {
// Write name when name_for_inst would produce a different
// directory name (slugification or init-prefix).
if snapshot.old_inst().is_none() {
let instance_name = &snapshot.new_inst().name;
if validate_file_name(instance_name).is_err()
|| instance_name.to_lowercase() == "init"
{
Some(instance_name.clone())
} else {
None
}
} else {
None
}
});
Ok(Some(Self {
ignore_unknown_instances: if ignore_unknown_instances {
Some(true)
@@ -451,7 +384,6 @@ impl DirectoryMetadata {
path,
id: None,
schema,
name,
}))
}
@@ -461,7 +393,6 @@ impl DirectoryMetadata {
self.apply_properties(snapshot)?;
self.apply_id(snapshot)?;
self.apply_schema(snapshot)?;
self.apply_name(snapshot)?;
Ok(())
}
@@ -533,33 +464,17 @@ impl DirectoryMetadata {
snapshot.metadata.schema = self.schema.take();
Ok(())
}
/// Applies this metadata's name override to the snapshot.
///
/// Bails out when both this metadata and the snapshot specify a name, as
/// that would make the effective name ambiguous. Otherwise the override
/// (if any) is moved into the snapshot and mirrored into its metadata.
fn apply_name(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
    if let (Some(_), Some(_)) = (&self.name, &snapshot.metadata.specified_name) {
        anyhow::bail!(
            "cannot specify a name using {} (instance has a name from somewhere else)",
            self.path.display()
        );
    }
    if let Some(new_name) = self.name.take() {
        snapshot.name = new_name.clone().into();
        snapshot.metadata.specified_name = Some(new_name);
    } else {
        snapshot.metadata.specified_name = None;
    }
    Ok(())
}
/// Returns whether the metadata is 'empty', meaning it doesn't have anything
/// worth persisting in it. Specifically:
///
/// - The number of properties and attributes is 0
/// - `ignore_unknown_instances` is None
/// - `class_name` is either None or not Some("Folder")
/// - `name` is None
#[inline]
pub fn is_empty(&self) -> bool {
self.attributes.is_empty()
&& self.properties.is_empty()
&& self.ignore_unknown_instances.is_none()
&& self.name.is_none()
&& if let Some(class) = &self.class_name {
class == "Folder"
} else {

View File

@@ -61,6 +61,10 @@ pub use self::{
/// This will inspect the path and find the appropriate middleware for it,
/// taking user-written rules into account. Then, it will attempt to convert
/// the path into an InstanceSnapshot using that middleware.
///
/// If a git filter is active in the context and the path is not acknowledged
/// (i.e., the file hasn't changed since the base git reference), this function
/// returns `Ok(None)` to skip syncing that file.
#[profiling::function]
pub fn snapshot_from_vfs(
context: &InstanceContext,
@@ -72,6 +76,16 @@ pub fn snapshot_from_vfs(
None => return Ok(None),
};
// Check if this path is acknowledged by the git filter.
// If not, skip this path entirely.
if !context.is_path_acknowledged(path) {
log::trace!(
"Skipping path {} (not acknowledged by git filter)",
path.display()
);
return Ok(None);
}
if meta.is_dir() {
let (middleware, dir_name, init_path) = get_dir_middleware(vfs, path)?;
// TODO: Support user defined init paths
@@ -213,6 +227,10 @@ pub enum Middleware {
impl Middleware {
/// Creates a snapshot for the given path from the Middleware with
/// the provided name.
///
/// When a git filter is active in the context, `ignore_unknown_instances`
/// will be set to `true` on all generated snapshots to preserve descendants
/// in Studio that are not tracked by Rojo.
fn snapshot(
&self,
context: &InstanceContext,
@@ -262,6 +280,14 @@ impl Middleware {
};
if let Ok(Some(ref mut snapshot)) = output {
snapshot.metadata.middleware = Some(*self);
// When git filter is active, force ignore_unknown_instances to true
// so that we don't delete children in Studio that aren't tracked.
if context.has_git_filter() {
snapshot.metadata.ignore_unknown_instances = true;
// Also apply this recursively to all children
set_ignore_unknown_instances_recursive(&mut snapshot.children);
}
}
output
}
@@ -365,6 +391,16 @@ impl Middleware {
}
}
/// Recursively sets `ignore_unknown_instances` to `true` on all children.
/// This is used when git filter is active to ensure we don't delete
/// children in Studio that aren't tracked by Rojo.
fn set_ignore_unknown_instances_recursive(children: &mut [InstanceSnapshot]) {
for child in children {
child.metadata.ignore_unknown_instances = true;
set_ignore_unknown_instances_recursive(&mut child.children);
}
}
/// A helper for easily defining a SyncRule. Arguments are passed literally
/// to this macro in the order `include`, `middleware`, `suffix`,
/// and `exclude`. Both `suffix` and `exclude` are optional.

View File

@@ -192,6 +192,17 @@ pub fn snapshot_project_node(
}
(_, None, _, Some(PathNode::Required(path))) => {
// If git filter is active and the path was filtered out, treat it
// as if the path was optional and skip this node.
if context.has_git_filter() {
log::trace!(
"Skipping project node '{}' because its path was filtered by git filter: {}",
instance_name,
path.display()
);
return Ok(None);
}
anyhow::bail!(
"Rojo project referred to a file using $path that could not be turned into a Roblox Instance by Rojo.\n\
Check that the file exists and is a file type known by Rojo.\n\
@@ -282,7 +293,12 @@ pub fn snapshot_project_node(
// If the user didn't specify it AND $path was not specified (meaning
// there's no existing value we'd be stepping on from a project file or meta
// file), set it to true.
if let Some(ignore) = node.ignore_unknown_instances {
//
// When git filter is active, always set to true to preserve descendants
// in Studio that are not tracked by Rojo.
if context.has_git_filter() {
metadata.ignore_unknown_instances = true;
} else if let Some(ignore) = node.ignore_unknown_instances {
metadata.ignore_unknown_instances = ignore;
} else if node.path.is_none() {
// TODO: Introduce a strict mode where $ignoreUnknownInstances is never

View File

@@ -28,7 +28,7 @@ pub fn snapshot_rbxm(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);

View File

@@ -31,7 +31,7 @@ pub fn snapshot_rbxmx(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);

View File

@@ -31,7 +31,7 @@ pub fn snapshot_toml(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);

View File

@@ -28,7 +28,7 @@ pub fn snapshot_txt(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);
@@ -58,14 +58,8 @@ pub fn syncback_txt<'sync>(
if !meta.is_empty() {
let parent = snapshot.path.parent_err()?;
let file_name = snapshot
.path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("");
let meta_stem = file_name.strip_suffix(".txt").unwrap_or(file_name);
fs_snapshot.add_file(
parent.join(format!("{meta_stem}.meta.json")),
parent.join(format!("{}.meta.json", new_inst.name)),
serde_json::to_vec_pretty(&meta).context("could not serialize metadata")?,
);
}

View File

@@ -37,7 +37,7 @@ pub fn snapshot_yaml(
.metadata(
InstanceMetadata::new()
.instigating_source(path)
.relevant_paths(vec![vfs.canonicalize(path)?])
.relevant_paths(vec![path.to_path_buf()])
.context(context),
);

View File

@@ -8,11 +8,11 @@ use rbx_dom_weak::Instance;
use crate::{snapshot::InstanceWithMeta, snapshot_middleware::Middleware};
pub fn name_for_inst<'a>(
pub fn name_for_inst<'old>(
middleware: Middleware,
new_inst: &'a Instance,
old_inst: Option<InstanceWithMeta<'a>>,
) -> anyhow::Result<Cow<'a, str>> {
new_inst: &Instance,
old_inst: Option<InstanceWithMeta<'old>>,
) -> anyhow::Result<Cow<'old, str>> {
if let Some(old_inst) = old_inst {
if let Some(source) = old_inst.metadata().relevant_paths.first() {
source
@@ -35,34 +35,14 @@ pub fn name_for_inst<'a>(
| Middleware::CsvDir
| Middleware::ServerScriptDir
| Middleware::ClientScriptDir
| Middleware::ModuleScriptDir => {
let name = if validate_file_name(&new_inst.name).is_err() {
Cow::Owned(slugify_name(&new_inst.name))
} else {
Cow::Borrowed(new_inst.name.as_str())
};
// Prefix "init" to avoid colliding with reserved init files.
if name.to_lowercase() == "init" {
Cow::Owned(format!("_{name}"))
} else {
name
}
}
| Middleware::ModuleScriptDir => Cow::Owned(new_inst.name.clone()),
_ => {
let extension = extension_for_middleware(middleware);
let slugified;
let stem: &str = if validate_file_name(&new_inst.name).is_err() {
slugified = slugify_name(&new_inst.name);
&slugified
} else {
&new_inst.name
};
// Prefix "init" stems to avoid colliding with reserved init files.
if stem.to_lowercase() == "init" {
Cow::Owned(format!("_{stem}.{extension}"))
} else {
Cow::Owned(format!("{stem}.{extension}"))
}
let name = &new_inst.name;
validate_file_name(name).with_context(|| {
format!("name '{name}' is not legal to write to the file system")
})?;
Cow::Owned(format!("{name}.{extension}"))
}
})
}
@@ -114,39 +94,6 @@ const INVALID_WINDOWS_NAMES: [&str; 22] = [
/// in a file's name.
const FORBIDDEN_CHARS: [char; 9] = ['<', '>', ':', '"', '/', '|', '?', '*', '\\'];
/// Slugifies a name by replacing forbidden characters with underscores
/// and ensuring the result is a valid file name.
///
/// In order, this:
/// 1. Replaces each character in `FORBIDDEN_CHARS` with `_`.
/// 2. Strips trailing spaces and dots, which Windows disallows.
/// 3. Appends `_` if the result matches a reserved Windows device name
///    (case-insensitively).
/// 4. Falls back to `"instance"` when the result is empty or consists
///    solely of underscores.
pub fn slugify_name(name: &str) -> String {
    let mut result = String::with_capacity(name.len());
    for ch in name.chars() {
        if FORBIDDEN_CHARS.contains(&ch) {
            result.push('_');
        } else {
            result.push(ch);
        }
    }

    // Trim trailing spaces/dots BEFORE the reserved-name check. Checking
    // first would let inputs like "CON." or "aux " trim down to a reserved
    // device name after the check had already passed.
    while result.ends_with(' ') || result.ends_with('.') {
        result.pop();
    }

    // Windows refuses files named after reserved devices (CON, NUL, ...);
    // a trailing underscore sidesteps the collision.
    let result_lower = result.to_lowercase();
    if INVALID_WINDOWS_NAMES
        .iter()
        .any(|forbidden| result_lower == forbidden.to_lowercase())
    {
        result.push('_');
    }

    // Guarantee a usable, non-degenerate name.
    if result.is_empty() || result.chars().all(|c| c == '_') {
        result = "instance".to_string();
    }

    result
}
/// Validates a provided file name to ensure it's allowed on the file system. An
/// error is returned if the name isn't allowed, indicating why.
/// This takes into account rules for Windows, MacOS, and Linux.

View File

@@ -31,25 +31,6 @@ pub struct SyncbackSnapshot<'sync> {
}
impl<'sync> SyncbackSnapshot<'sync> {
/// Determines the middleware and filesystem name that would be used for a
/// child instance, without materializing a full snapshot for it.
///
/// Selection mirrors the logic used by `with_joined_path`.
pub fn child_middleware_and_name(
    &self,
    new_ref: Ref,
    old_ref: Option<Ref>,
) -> anyhow::Result<(Middleware, String)> {
    // A throwaway snapshot with an empty path; only the refs matter for
    // middleware and name selection.
    let probe = Self {
        old: old_ref,
        new: new_ref,
        data: self.data,
        middleware: None,
        path: PathBuf::new(),
    };
    let middleware = get_best_middleware(&probe, self.data.force_json);
    let file_name = name_for_inst(middleware, probe.new_inst(), probe.old_inst())?;
    Ok((middleware, file_name.into_owned()))
}
/// Constructs a SyncbackSnapshot from the provided refs
/// while inheriting this snapshot's path and data. This should be used for
/// directories.
@@ -256,25 +237,6 @@ pub fn inst_path(dom: &WeakDom, referent: Ref) -> String {
path.join("/")
}
impl<'sync> SyncbackData<'sync> {
    /// Builds a `SyncbackData` from the given trees and project with
    /// `force_json` disabled. Intended solely for unit tests.
    #[cfg(test)]
    pub fn for_test(
        vfs: &'sync Vfs,
        old_tree: &'sync RojoTree,
        new_tree: &'sync WeakDom,
        project: &'sync Project,
    ) -> Self {
        SyncbackData {
            force_json: false,
            project,
            new_tree,
            old_tree,
            vfs,
        }
    }
}
#[cfg(test)]
mod test {
use rbx_dom_weak::{InstanceBuilder, WeakDom};

View File

@@ -1,7 +1,13 @@
//! Defines Rojo's HTTP API, all under /api. These endpoints generally return
//! JSON.
use std::{collections::HashMap, fs, path::PathBuf, str::FromStr, sync::Arc};
use std::{
collections::{HashMap, HashSet},
fs,
path::PathBuf,
str::FromStr,
sync::Arc,
};
use futures::{sink::SinkExt, stream::StreamExt};
use hyper::{body, Body, Method, Request, Response, StatusCode};
@@ -24,10 +30,7 @@ use crate::{
},
util::{json, json_ok},
},
web_api::{
BufferEncode, InstanceUpdate, RefPatchRequest, RefPatchResponse, SerializeRequest,
SerializeResponse,
},
web_api::{BufferEncode, InstanceUpdate, RefPatchResponse, SerializeResponse},
};
pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>) -> Response<Body> {
@@ -50,8 +53,12 @@ pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>)
)
}
}
(&Method::POST, "/api/serialize") => service.handle_api_serialize(request).await,
(&Method::POST, "/api/ref-patch") => service.handle_api_ref_patch(request).await,
(&Method::GET, path) if path.starts_with("/api/serialize/") => {
service.handle_api_serialize(request).await
}
(&Method::GET, path) if path.starts_with("/api/ref-patch/") => {
service.handle_api_ref_patch(request).await
}
(&Method::POST, path) if path.starts_with("/api/open/") => {
service.handle_api_open(request).await
@@ -222,30 +229,22 @@ impl ApiService {
/// that correspond to the requested Instances. These values have their
/// `Value` property set to point to the requested Instance.
async fn handle_api_serialize(&self, request: Request<Body>) -> Response<Body> {
let session_id = self.serve_session.session_id();
let body = body::to_bytes(request.into_body()).await.unwrap();
let argument = &request.uri().path()["/api/serialize/".len()..];
let requested_ids: Result<Vec<Ref>, _> = argument.split(',').map(Ref::from_str).collect();
let request: SerializeRequest = match json::from_slice(&body) {
Ok(request) => request,
Err(err) => {
let requested_ids = match requested_ids {
Ok(ids) => ids,
Err(_) => {
return json(
ErrorResponse::bad_request(format!("Invalid body: {}", err)),
ErrorResponse::bad_request("Malformed ID list"),
StatusCode::BAD_REQUEST,
);
}
};
if request.session_id != session_id {
return json(
ErrorResponse::bad_request("Wrong session ID"),
StatusCode::BAD_REQUEST,
);
}
let mut response_dom = WeakDom::new(InstanceBuilder::new("Folder"));
let tree = self.serve_session.tree();
for id in &request.ids {
for id in &requested_ids {
if let Some(instance) = tree.get_instance(*id) {
let clone = response_dom.insert(
Ref::none(),
@@ -291,26 +290,20 @@ impl ApiService {
/// and referent properties need to be updated after the serialize
/// endpoint is used.
async fn handle_api_ref_patch(self, request: Request<Body>) -> Response<Body> {
let session_id = self.serve_session.session_id();
let body = body::to_bytes(request.into_body()).await.unwrap();
let argument = &request.uri().path()["/api/ref-patch/".len()..];
let requested_ids: Result<HashSet<Ref>, _> =
argument.split(',').map(Ref::from_str).collect();
let request: RefPatchRequest = match json::from_slice(&body) {
Ok(request) => request,
Err(err) => {
let requested_ids = match requested_ids {
Ok(ids) => ids,
Err(_) => {
return json(
ErrorResponse::bad_request(format!("Invalid body: {}", err)),
ErrorResponse::bad_request("Malformed ID list"),
StatusCode::BAD_REQUEST,
);
}
};
if request.session_id != session_id {
return json(
ErrorResponse::bad_request("Wrong session ID"),
StatusCode::BAD_REQUEST,
);
}
let mut instance_updates: HashMap<Ref, InstanceUpdate> = HashMap::new();
let tree = self.serve_session.tree();
@@ -319,7 +312,7 @@ impl ApiService {
let Variant::Ref(prop_value) = prop_value else {
continue;
};
if let Some(target_id) = request.ids.get(prop_value) {
if let Some(target_id) = requested_ids.get(prop_value) {
let instance_id = instance.id();
let update =
instance_updates

View File

@@ -238,13 +238,6 @@ pub struct OpenResponse {
pub session_id: SessionId,
}
/// Request body for the serialize endpoint: names the instances to
/// serialize and the session they belong to.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SerializeRequest {
    // Must match the running serve session's ID; the handler rejects the
    // request with "Wrong session ID" otherwise.
    pub session_id: SessionId,
    // Referents of the instances to serialize.
    pub ids: Vec<Ref>,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SerializeResponse {
@@ -276,13 +269,6 @@ impl BufferEncode {
}
}
/// Request body for the ref-patch endpoint: the set of instance referents
/// whose referring properties need patching, plus the target session.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RefPatchRequest {
    // Must match the running serve session's ID; the handler rejects the
    // request with "Wrong session ID" otherwise.
    pub session_id: SessionId,
    // Referents looked up while scanning Ref-typed properties in the tree.
    pub ids: HashSet<Ref>,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RefPatchResponse<'a> {

View File

@@ -1,4 +1,5 @@
use std::{
fmt::Write as _,
fs,
path::{Path, PathBuf},
process::Command,
@@ -12,12 +13,8 @@ use rbx_dom_weak::types::Ref;
use tempfile::{tempdir, TempDir};
use librojo::{
web_api::{
ReadResponse, SerializeRequest, SerializeResponse, ServerInfoResponse, SocketPacket,
SocketPacketType,
},
SessionId,
use librojo::web_api::{
ReadResponse, SerializeResponse, ServerInfoResponse, SocketPacket, SocketPacketType,
};
use rojo_insta_ext::RedactionMap;
@@ -229,19 +226,16 @@ impl TestServeSession {
}
}
pub fn get_api_serialize(
&self,
ids: &[Ref],
session_id: SessionId,
) -> Result<SerializeResponse, reqwest::Error> {
let client = reqwest::blocking::Client::new();
let url = format!("http://localhost:{}/api/serialize", self.port);
let body = serde_json::to_string(&SerializeRequest {
session_id,
ids: ids.to_vec(),
});
pub fn get_api_serialize(&self, ids: &[Ref]) -> Result<SerializeResponse, reqwest::Error> {
let mut id_list = String::with_capacity(ids.len() * 33);
for id in ids {
write!(id_list, "{id},").unwrap();
}
id_list.pop();
client.post(url).body((body).unwrap()).send()?.json()
let url = format!("http://localhost:{}/api/serialize/{}", self.port, id_list);
reqwest::blocking::get(url)?.json()
}
}

View File

@@ -41,6 +41,7 @@ gen_build_tests! {
issue_546,
json_as_lua,
json_model_in_folder,
json_model_legacy_name,
module_in_folder,
module_init,
nested_runcontext,
@@ -54,8 +55,6 @@ gen_build_tests! {
script_meta_disabled,
server_in_folder,
server_init,
slugified_name_roundtrip,
model_json_name_input,
txt,
txt_in_folder,
unresolved_values,

View File

@@ -646,7 +646,7 @@ fn meshpart_with_id() {
.unwrap();
let serialize_response = session
.get_api_serialize(&[*meshpart, *objectvalue], info.session_id)
.get_api_serialize(&[*meshpart, *objectvalue])
.unwrap();
// We don't assert a snapshot on the SerializeResponse because the model includes the
@@ -673,9 +673,7 @@ fn forced_parent() {
read_response.intern_and_redact(&mut redactions, root_id)
);
let serialize_response = session
.get_api_serialize(&[root_id], info.session_id)
.unwrap();
let serialize_response = session.get_api_serialize(&[root_id]).unwrap();
assert_eq!(serialize_response.session_id, info.session_id);

View File

@@ -60,8 +60,8 @@ syncback_tests! {
// Ensures that projects can be reserialized by syncback and that
// default.project.json doesn't change unexpectedly.
project_reserialize => ["attribute_mismatch.luau", "property_mismatch.project.json"],
// Confirms that duplicate children are resolved with incrementing suffixes
rbxm_fallback => ["src/ChildWithDuplicates/DuplicateChild/.gitkeep", "src/ChildWithDuplicates/DuplicateChild1/.gitkeep"],
// Confirms that Instances that cannot serialize as directories serialize as rbxms
rbxm_fallback => ["src/ChildWithDuplicates.rbxm"],
// Ensures that ref properties are linked properly on the file system
ref_properties => ["src/pointer.model.json", "src/target.model.json"],
// Ensures that ref properties are linked when no attributes are manually
@@ -86,9 +86,4 @@ syncback_tests! {
sync_rules => ["src/module.modulescript", "src/text.text"],
// Ensures that the `syncUnscriptable` setting works
unscriptable_properties => ["default.project.json"],
// Ensures that instances with names containing illegal characters get slugified filenames
// and preserve their original names in meta.json without forcing directories for leaf scripts
slugified_name => ["src/_Script.meta.json", "src/_Script.server.luau", "src/_Folder/init.meta.json"],
// Ensures that .model.json files preserve the name property
model_json_name => ["src/foo.model.json"],
}