feat: resolve duplicate sibling names with incrementing suffixes

Instead of bailing when children have duplicate filesystem names,
syncback now resolves collisions by appending incrementing suffixes
(e.g. Foo, Foo1, Foo2). This handles both init-renamed children and
any other name collisions. Meta stem derivation is now path-based
to correctly handle collision suffixes and dotted names.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 14:30:46 +01:00
parent 917d17a738
commit 110b9f0df3
11 changed files with 271 additions and 189 deletions

View File

@@ -1,73 +0,0 @@
---
source: tests/rojo_test/syncback_util.rs
expression: src/ChildWithDuplicates.rbxm
---
num_types: 1
num_instances: 3
chunks:
- Inst:
type_id: 0
type_name: Folder
object_format: 0
referents:
- 0
- 1
- 2
- Prop:
type_id: 0
prop_name: AttributesSerialize
prop_type: String
values:
- ""
- ""
- ""
- Prop:
type_id: 0
prop_name: Capabilities
prop_type: SecurityCapabilities
values:
- 0
- 0
- 0
- Prop:
type_id: 0
prop_name: Name
prop_type: String
values:
- DuplicateChild
- DuplicateChild
- ChildWithDuplicates
- Prop:
type_id: 0
prop_name: DefinesCapabilities
prop_type: Bool
values:
- false
- false
- false
- Prop:
type_id: 0
prop_name: SourceAssetId
prop_type: Int64
values:
- -1
- -1
- -1
- Prop:
type_id: 0
prop_name: Tags
prop_type: String
values:
- ""
- ""
- ""
- Prnt:
version: 0
links:
- - 0
- 2
- - 1
- 2
- - 2
- -1
- End

View File

@@ -1,9 +1,12 @@
--- ---
source: tests/rojo_test/syncback_util.rs source: tests/rojo_test/syncback_util.rs
assertion_line: 101
expression: "String::from_utf8_lossy(&output.stdout)" expression: "String::from_utf8_lossy(&output.stdout)"
--- ---
Writing src/ChildWithDuplicates.rbxm Writing src/ChildWithDuplicates/DuplicateChild/.gitkeep
Writing src/ChildWithDuplicates/DuplicateChild1/.gitkeep
Writing src/ChildWithoutDuplicates/Child/.gitkeep Writing src/ChildWithoutDuplicates/Child/.gitkeep
Writing src/ChildWithDuplicates/DuplicateChild
Writing src/ChildWithDuplicates/DuplicateChild1
Writing src/ChildWithoutDuplicates Writing src/ChildWithoutDuplicates
Writing src/ChildWithoutDuplicates/Child Writing src/ChildWithoutDuplicates/Child
Removing src/ChildWithDuplicates

View File

@@ -0,0 +1,6 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/ChildWithDuplicates/DuplicateChild1/.gitkeep
---

View File

@@ -0,0 +1,6 @@
---
source: tests/tests/syncback.rs
assertion_line: 31
expression: src/ChildWithDuplicates/DuplicateChild/.gitkeep
---

View File

@@ -109,17 +109,12 @@ pub fn syncback_csv<'sync>(
if !meta.is_empty() { if !meta.is_empty() {
let parent = snapshot.path.parent_err()?; let parent = snapshot.path.parent_err()?;
let instance_name = &new_inst.name; let file_name = snapshot
let base = if crate::syncback::validate_file_name(instance_name).is_err() { .path
crate::syncback::slugify_name(instance_name) .file_name()
} else { .and_then(|n| n.to_str())
instance_name.clone() .unwrap_or("");
}; let meta_stem = file_name.strip_suffix(".csv").unwrap_or(file_name);
let meta_stem = if base.to_lowercase() == "init" {
format!("_{base}")
} else {
base
};
fs_snapshot.add_file( fs_snapshot.add_file(
parent.join(format!("{meta_stem}.meta.json")), parent.join(format!("{meta_stem}.meta.json")),
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?, serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,

View File

@@ -8,10 +8,13 @@ use memofs::{DirEntry, Vfs};
use crate::{ use crate::{
snapshot::{InstanceContext, InstanceMetadata, InstanceSnapshot, InstigatingSource}, snapshot::{InstanceContext, InstanceMetadata, InstanceSnapshot, InstigatingSource},
syncback::{hash_instance, slugify_name, FsSnapshot, SyncbackReturn, SyncbackSnapshot}, syncback::{
extension_for_middleware, hash_instance, FsSnapshot, SyncbackReturn,
SyncbackSnapshot,
},
}; };
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs}; use super::{meta_file::DirectoryMetadata, snapshot_from_vfs, Middleware};
const EMPTY_DIR_KEEP_NAME: &str = ".gitkeep"; const EMPTY_DIR_KEEP_NAME: &str = ".gitkeep";
@@ -91,6 +94,22 @@ pub fn snapshot_dir_no_meta(
Ok(Some(snapshot)) Ok(Some(snapshot))
} }
/// Splits a filesystem name into (stem, extension) based on middleware type.
/// For directory middleware, the extension is empty. For file middleware,
/// the extension comes from `extension_for_middleware`.
fn split_name_and_ext(name: &str, middleware: Middleware) -> (&str, &str) {
    // Directories carry no extension; the whole name is the stem.
    if middleware.is_dir() {
        return (name, "");
    }
    let ext = extension_for_middleware(middleware);
    let dotted = format!(".{ext}");
    match name.strip_suffix(&dotted) {
        Some(stem) => (stem, ext),
        // Name does not end with the expected extension; treat it as
        // extensionless, same as the directory case.
        None => (name, ""),
    }
}
pub fn syncback_dir<'sync>( pub fn syncback_dir<'sync>(
snapshot: &SyncbackSnapshot<'sync>, snapshot: &SyncbackSnapshot<'sync>,
) -> anyhow::Result<SyncbackReturn<'sync>> { ) -> anyhow::Result<SyncbackReturn<'sync>> {
@@ -143,77 +162,119 @@ pub fn syncback_dir_no_meta<'sync>(
} }
} }
// Enforce unique filesystem names. Uses actual on-disk names for existing // --- Two-pass collision resolution ---
// children and resolved names (with init-prefix) for new ones. //
let mut fs_child_names = HashSet::with_capacity(new_inst.children().len()); // Pass 1: Collect each child's base filesystem name and old ref, applying
let mut duplicate_set = HashSet::new(); // skip conditions. Track which names are used (lowercased) so we can
for child_ref in new_inst.children() { // detect collisions.
let child = snapshot.get_new_instance(*child_ref).unwrap(); struct ChildEntry {
let fs_name = old_child_map new_ref: rbx_dom_weak::types::Ref,
.get(child.name.as_str()) old_ref: Option<rbx_dom_weak::types::Ref>,
.and_then(|old| old.metadata().relevant_paths.first()) base_name: String,
.and_then(|p| p.file_name()) middleware: Middleware,
.and_then(|n| n.to_str()) skip: bool,
.map(|s| s.to_lowercase())
.unwrap_or_else(|| {
let slug = slugify_name(&child.name);
let slug_lower = slug.to_lowercase();
// Mirror name_for_inst's init-prefix.
if slug_lower == "init" {
format!("_{slug_lower}")
} else {
slug_lower
}
});
if !fs_child_names.insert(fs_name) {
duplicate_set.insert(child.name.as_str());
}
}
if !duplicate_set.is_empty() {
if duplicate_set.len() <= 25 {
anyhow::bail!(
"Instance has children with duplicate name (case may not exactly match):\n {}",
duplicate_set.into_iter().collect::<Vec<&str>>().join(", ")
);
}
anyhow::bail!("Instance has more than 25 children with duplicate names");
} }
if snapshot.old_inst().is_some() { let mut entries = Vec::with_capacity(new_inst.children().len());
for new_child_ref in new_inst.children() { let mut used_names: HashSet<String> = HashSet::with_capacity(new_inst.children().len());
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap(); let mut collision_indices: Vec<usize> = Vec::new();
if let Some(old_child) = old_child_map.remove(new_child.name.as_str()) {
if old_child.metadata().relevant_paths.is_empty() { for new_child_ref in new_inst.children() {
log::debug!( let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
"Skipping instance {} because it doesn't exist on the disk",
old_child.name() // Determine old_ref and apply skip conditions.
); let old_child = if snapshot.old_inst().is_some() {
continue; old_child_map.remove(new_child.name.as_str())
} else if matches!( } else {
old_child.metadata().instigating_source, None
Some(InstigatingSource::ProjectNode { .. }) };
) {
log::debug!( let mut skip = false;
"Skipping instance {} because it originates in a project file", if let Some(ref old) = old_child {
old_child.name() if old.metadata().relevant_paths.is_empty() {
); log::debug!(
continue; "Skipping instance {} because it doesn't exist on the disk",
} old.name()
// This child exists in both doms. Pass it on. );
children.push(snapshot.with_joined_path(*new_child_ref, Some(old_child.id()))?); skip = true;
} else { } else if matches!(
// The child only exists in the new dom old_child.metadata().instigating_source,
children.push(snapshot.with_joined_path(*new_child_ref, None)?); Some(InstigatingSource::ProjectNode { .. })
) {
log::debug!(
"Skipping instance {} because it originates in a project file",
old.name()
);
skip = true;
} }
} }
// Any children that are in the old dom but not the new one are removed.
removed_children.extend(old_child_map.into_values()); let old_ref = old_child.as_ref().map(|o| o.id());
} else {
// There is no old instance. Just add every child. if skip {
for new_child_ref in new_inst.children() { entries.push(ChildEntry {
children.push(snapshot.with_joined_path(*new_child_ref, None)?); new_ref: *new_child_ref,
old_ref,
base_name: String::new(),
middleware: Middleware::Dir,
skip: true,
});
continue;
} }
let (middleware, base_name) =
snapshot.child_middleware_and_name(*new_child_ref, old_ref)?;
let idx = entries.len();
let lower = base_name.to_lowercase();
if !used_names.insert(lower) {
// Name already claimed — needs resolution.
collision_indices.push(idx);
}
entries.push(ChildEntry {
new_ref: *new_child_ref,
old_ref,
base_name,
middleware,
skip: false,
});
}
// Pass 2: Resolve collisions by appending incrementing suffixes.
for idx in collision_indices {
let entry = &entries[idx];
let (stem, ext) = split_name_and_ext(&entry.base_name, entry.middleware);
let mut counter = 1u32;
loop {
let candidate = if ext.is_empty() {
format!("{stem}{counter}")
} else {
format!("{stem}{counter}.{ext}")
};
let lower = candidate.to_lowercase();
if used_names.insert(lower) {
// Safe to mutate — we only visit each collision index once.
let entry = &mut entries[idx];
entry.base_name = candidate;
break;
}
counter += 1;
}
}
// Create snapshots from resolved entries.
for entry in &entries {
if entry.skip {
continue;
}
let resolved_path = snapshot.path.join(&entry.base_name);
children.push(snapshot.with_new_path(resolved_path, entry.new_ref, entry.old_ref));
}
// Any children that are in the old dom but not the new one are removed.
if snapshot.old_inst().is_some() {
removed_children.extend(old_child_map.into_values());
} }
let mut fs_snapshot = FsSnapshot::new(); let mut fs_snapshot = FsSnapshot::new();
@@ -362,14 +423,15 @@ mod test {
assert!( assert!(
result.is_ok(), result.is_ok(),
"should not error when two children have the same lowercased Roblox \ "should not error when two children have the same lowercased Roblox \
name but map to distinct filesystem paths: {result:?}", name but map to distinct filesystem paths: {:?}",
result.as_ref().err(),
); );
} }
/// Two completely new children with the same non-init name would produce /// Two completely new children with the same name get resolved via
/// the same filesystem entry and must be detected as a duplicate. /// incrementing suffixes instead of erroring.
#[test] #[test]
fn syncback_detects_sibling_duplicate_names() { fn syncback_resolves_sibling_duplicate_names() {
use rbx_dom_weak::{InstanceBuilder, WeakDom}; use rbx_dom_weak::{InstanceBuilder, WeakDom};
let old_parent = InstanceSnapshot::new() let old_parent = InstanceSnapshot::new()
@@ -387,8 +449,6 @@ mod test {
new_tree.root_ref(), new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"), InstanceBuilder::new("Folder").with_name("Parent"),
); );
// "Foo" is not a reserved name but two siblings named "Foo" still
// collide on disk.
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo")); new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo")); new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
@@ -405,9 +465,17 @@ mod test {
let result = syncback_dir_no_meta(&snapshot); let result = syncback_dir_no_meta(&snapshot);
assert!( assert!(
result.is_err(), result.is_ok(),
"should error when two new children would produce the same filesystem name", "should resolve duplicate names with suffixes, not error: {:?}",
result.as_ref().err(),
); );
let children = result.unwrap().children;
let mut names: Vec<String> = children
.iter()
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
.collect();
names.sort();
assert_eq!(names, vec!["Foo", "Foo1"]);
} }
/// A new child named "Init" (as a ModuleScript) would naively become /// A new child named "Init" (as a ModuleScript) would naively become
@@ -452,7 +520,8 @@ mod test {
let result = syncback_dir_no_meta(&snapshot); let result = syncback_dir_no_meta(&snapshot);
assert!( assert!(
result.is_ok(), result.is_ok(),
"should resolve init-name conflict by prefixing '_', not error: {result:?}", "should resolve init-name conflict by prefixing '_', not error: {:?}",
result.as_ref().err(),
); );
// The child should have been placed at "_Init.luau", not "Init.luau". // The child should have been placed at "_Init.luau", not "Init.luau".
let child_file_name = result let child_file_name = result
@@ -518,7 +587,64 @@ mod test {
assert!( assert!(
result.is_ok(), result.is_ok(),
"should allow a child whose filesystem name is slugified away from \ "should allow a child whose filesystem name is slugified away from \
the reserved 'init' stem: {result:?}", the reserved 'init' stem: {:?}",
result.as_ref().err(),
); );
} }
/// Two new children both named "Init" (ModuleScripts) should get
/// "_Init.luau" and "_Init1.luau" respectively.
#[test]
fn syncback_resolves_multiple_init_conflicts() {
use rbx_dom_weak::{InstanceBuilder, WeakDom};
// Old dom: only the parent folder exists on disk, rooted at /root.
let old_parent = InstanceSnapshot::new()
.name("Parent")
.class_name("Folder")
.metadata(
InstanceMetadata::new()
.instigating_source(PathBuf::from("/root"))
.relevant_paths(vec![PathBuf::from("/root")]),
);
let old_tree = RojoTree::new(old_parent);
// New dom: the same parent, but with two colliding "Init" ModuleScripts.
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
let new_parent = new_tree.insert(
new_tree.root_ref(),
InstanceBuilder::new("Folder").with_name("Parent"),
);
new_tree.insert(
new_parent,
InstanceBuilder::new("ModuleScript").with_name("Init"),
);
new_tree.insert(
new_parent,
InstanceBuilder::new("ModuleScript").with_name("Init"),
);
let vfs = make_vfs();
let project = make_project();
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
// Snapshot pairs the old on-disk parent with the new in-memory parent.
let snapshot = SyncbackSnapshot {
data,
old: Some(old_tree.get_root_id()),
new: new_parent,
path: PathBuf::from("/root"),
middleware: None,
};
let result = syncback_dir_no_meta(&snapshot);
// Collisions must be resolved with suffixes, never reported as errors.
assert!(
result.is_ok(),
"should resolve multiple init conflicts with suffixes: {:?}",
result.as_ref().err(),
);
// Collect the resolved on-disk file names; sorted so the assertion is
// independent of child ordering.
let children = result.unwrap().children;
let mut names: Vec<String> = children
.iter()
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
.collect();
names.sort();
// Both children get the reserved-name "_" prefix; the second also gets
// the incrementing collision suffix "1" before the extension.
assert_eq!(names, vec!["_Init.luau", "_Init1.luau"]);
}
} }

View File

@@ -158,17 +158,21 @@ pub fn syncback_lua<'sync>(
if !meta.is_empty() { if !meta.is_empty() {
let parent_location = snapshot.path.parent_err()?; let parent_location = snapshot.path.parent_err()?;
let instance_name = &snapshot.new_inst().name; let file_name = snapshot
let base = if crate::syncback::validate_file_name(instance_name).is_err() { .path
crate::syncback::slugify_name(instance_name) .file_name()
} else { .and_then(|n| n.to_str())
instance_name.clone() .unwrap_or("");
}; let meta_stem = file_name
let meta_stem = if base.to_lowercase() == "init" { .strip_suffix(".server.luau")
format!("_{base}") .or_else(|| file_name.strip_suffix(".server.lua"))
} else { .or_else(|| file_name.strip_suffix(".client.luau"))
base .or_else(|| file_name.strip_suffix(".client.lua"))
}; .or_else(|| file_name.strip_suffix(".plugin.luau"))
.or_else(|| file_name.strip_suffix(".plugin.lua"))
.or_else(|| file_name.strip_suffix(".luau"))
.or_else(|| file_name.strip_suffix(".lua"))
.unwrap_or(file_name);
fs_snapshot.add_file( fs_snapshot.add_file(
parent_location.join(format!("{meta_stem}.meta.json")), parent_location.join(format!("{meta_stem}.meta.json")),
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?, serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,

View File

@@ -58,17 +58,12 @@ pub fn syncback_txt<'sync>(
if !meta.is_empty() { if !meta.is_empty() {
let parent = snapshot.path.parent_err()?; let parent = snapshot.path.parent_err()?;
let instance_name = &new_inst.name; let file_name = snapshot
let base = if crate::syncback::validate_file_name(instance_name).is_err() { .path
crate::syncback::slugify_name(instance_name) .file_name()
} else { .and_then(|n| n.to_str())
instance_name.clone() .unwrap_or("");
}; let meta_stem = file_name.strip_suffix(".txt").unwrap_or(file_name);
let meta_stem = if base.to_lowercase() == "init" {
format!("_{base}")
} else {
base
};
fs_snapshot.add_file( fs_snapshot.add_file(
parent.join(format!("{meta_stem}.meta.json")), parent.join(format!("{meta_stem}.meta.json")),
serde_json::to_vec_pretty(&meta).context("could not serialize metadata")?, serde_json::to_vec_pretty(&meta).context("could not serialize metadata")?,

View File

@@ -28,7 +28,7 @@ use crate::{
Project, Project,
}; };
pub use file_names::{extension_for_middleware, name_for_inst, slugify_name, validate_file_name}; pub use file_names::{extension_for_middleware, name_for_inst, validate_file_name};
pub use fs_snapshot::FsSnapshot; pub use fs_snapshot::FsSnapshot;
pub use hash::*; pub use hash::*;
pub use property_filter::{filter_properties, filter_properties_preallocated}; pub use property_filter::{filter_properties, filter_properties_preallocated};

View File

@@ -31,6 +31,25 @@ pub struct SyncbackSnapshot<'sync> {
} }
impl<'sync> SyncbackSnapshot<'sync> { impl<'sync> SyncbackSnapshot<'sync> {
/// Computes the middleware and filesystem name for a child without
/// creating a full snapshot. Uses the same logic as `with_joined_path`.
///
/// Returns the chosen `Middleware` plus the owned filesystem name.
/// Errors propagate from `name_for_inst` (e.g. un-representable names).
pub fn child_middleware_and_name(
&self,
new_ref: Ref,
old_ref: Option<Ref>,
) -> anyhow::Result<(Middleware, String)> {
// Build a throwaway snapshot so middleware/name resolution can inspect
// the child refs. The path is intentionally empty: only `data`, `old`,
// and `new` are consulted here — NOTE(review): confirm neither
// get_best_middleware nor name_for_inst reads `path`.
let temp = Self {
data: self.data,
old: old_ref,
new: new_ref,
path: PathBuf::new(),
middleware: None,
};
let middleware = get_best_middleware(&temp, self.data.force_json);
let name = name_for_inst(middleware, temp.new_inst(), temp.old_inst())?;
// Convert the (possibly borrowed) Cow name into an owned String.
Ok((middleware, name.into_owned()))
}
/// Constructs a SyncbackSnapshot from the provided refs /// Constructs a SyncbackSnapshot from the provided refs
/// while inheriting this snapshot's path and data. This should be used for /// while inheriting this snapshot's path and data. This should be used for
/// directories. /// directories.
@@ -251,6 +270,7 @@ impl<'sync> SyncbackData<'sync> {
old_tree, old_tree,
new_tree, new_tree,
project, project,
force_json: false,
} }
} }
} }

View File

@@ -60,8 +60,8 @@ syncback_tests! {
// Ensures that projects can be reserialized by syncback and that // Ensures that projects can be reserialized by syncback and that
// default.project.json doesn't change unexpectedly. // default.project.json doesn't change unexpectedly.
project_reserialize => ["attribute_mismatch.luau", "property_mismatch.project.json"], project_reserialize => ["attribute_mismatch.luau", "property_mismatch.project.json"],
// Confirms that Instances that cannot serialize as directories serialize as rbxms // Confirms that duplicate children are resolved with incrementing suffixes
rbxm_fallback => ["src/ChildWithDuplicates.rbxm"], rbxm_fallback => ["src/ChildWithDuplicates/DuplicateChild/.gitkeep", "src/ChildWithDuplicates/DuplicateChild1/.gitkeep"],
// Ensures that ref properties are linked properly on the file system // Ensures that ref properties are linked properly on the file system
ref_properties => ["src/pointer.model.json", "src/target.model.json"], ref_properties => ["src/pointer.model.json", "src/target.model.json"],
// Ensures that ref properties are linked when no attributes are manually // Ensures that ref properties are linked when no attributes are manually