forked from rojo-rbx/rojo
Compare commits
1 Commits
d7a9ce55db
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9bbb1edd79 |
7
.github/workflows/release.yml
vendored
7
.github/workflows/release.yml
vendored
@@ -44,6 +44,13 @@ jobs:
|
||||
with:
|
||||
name: Rojo.rbxm
|
||||
path: Rojo.rbxm
|
||||
|
||||
- name: Upload Plugin to Roblox
|
||||
env:
|
||||
RBX_API_KEY: ${{ secrets.PLUGIN_UPLOAD_TOKEN }}
|
||||
RBX_UNIVERSE_ID: ${{ vars.PLUGIN_CI_PLACE_ID }}
|
||||
RBX_PLACE_ID: ${{ vars.PLUGIN_CI_UNIVERSE_ID }}
|
||||
run: lune run upload-plugin Rojo.rbxm
|
||||
|
||||
build:
|
||||
needs: ["create-release"]
|
||||
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -19,3 +19,6 @@
|
||||
[submodule "plugin/Packages/msgpack-luau"]
|
||||
path = plugin/Packages/msgpack-luau
|
||||
url = https://github.com/cipharius/msgpack-luau/
|
||||
[submodule ".lune/opencloud-execute"]
|
||||
path = .lune/opencloud-execute
|
||||
url = https://github.com/Dekkonot/opencloud-luau-execute-lune.git
|
||||
|
||||
8
.lune/.config.luau
Normal file
8
.lune/.config.luau
Normal file
@@ -0,0 +1,8 @@
|
||||
return {
|
||||
luau = {
|
||||
languagemode = "strict",
|
||||
aliases = {
|
||||
lune = "~/.lune/.typedefs/0.10.4/",
|
||||
},
|
||||
},
|
||||
}
|
||||
1
.lune/opencloud-execute
Submodule
1
.lune/opencloud-execute
Submodule
Submodule .lune/opencloud-execute added at 8ae86dd3ad
51
.lune/scripts/plugin-upload.luau
Normal file
51
.lune/scripts/plugin-upload.luau
Normal file
@@ -0,0 +1,51 @@
|
||||
local args: any = ...
|
||||
assert(args, "no arguments passed to script")
|
||||
|
||||
local input: buffer = args.BinaryInput
|
||||
|
||||
local AssetService = game:GetService("AssetService")
|
||||
local SerializationService = game:GetService("SerializationService")
|
||||
local EncodingService = game:GetService("EncodingService")
|
||||
|
||||
local input_hash: buffer = EncodingService:ComputeBufferHash(input, Enum.HashAlgorithm.Sha256)
|
||||
local hex_hash: { string } = table.create(buffer.len(input_hash))
|
||||
for i = 0, buffer.len(input_hash) - 1 do
|
||||
table.insert(hex_hash, string.format("%02x", buffer.readu8(input_hash, i)))
|
||||
end
|
||||
|
||||
print(`Deserializing plugin file (size: {buffer.len(input)} bytes, hash: {table.concat(hex_hash, "")})`)
|
||||
local plugin = SerializationService:DeserializeInstancesAsync(input)[1]
|
||||
|
||||
local UploadDetails = require(plugin.UploadDetails) :: any
|
||||
local PLUGIN_ID = UploadDetails.assetId
|
||||
local PLUGIN_NAME = UploadDetails.name
|
||||
local PLUGIN_DESCRIPTION = UploadDetails.description
|
||||
local PLUGIN_CREATOR_ID = UploadDetails.creatorId
|
||||
local PLUGIN_CREATOR_TYPE = UploadDetails.creatorType
|
||||
|
||||
assert(typeof(PLUGIN_ID) == "number", "UploadDetails did not contain a number field 'assetId'")
|
||||
assert(typeof(PLUGIN_NAME) == "string", "UploadDetails did not contain a string field 'name'")
|
||||
assert(typeof(PLUGIN_DESCRIPTION) == "string", "UploadDetails did not contain a string field 'description'")
|
||||
assert(typeof(PLUGIN_CREATOR_ID) == "number", "UploadDetails did not contain a number field 'creatorId'")
|
||||
assert(typeof(PLUGIN_CREATOR_TYPE) == "string", "UploadDetails did not contain a string field 'creatorType'")
|
||||
assert(
|
||||
Enum.AssetCreatorType:FromName(PLUGIN_CREATOR_TYPE) ~= nil,
|
||||
"UploadDetails field 'creatorType' was not a valid member of Enum.AssetCreatorType"
|
||||
)
|
||||
|
||||
print(`Uploading to {PLUGIN_ID}`)
|
||||
print(`Plugin Name: {PLUGIN_NAME}`)
|
||||
print(`Plugin Description: {PLUGIN_DESCRIPTION}`)
|
||||
|
||||
local result, version_or_err = AssetService:CreateAssetVersionAsync(plugin, Enum.AssetType.Plugin, PLUGIN_ID, {
|
||||
["Name"] = PLUGIN_NAME,
|
||||
["Description"] = PLUGIN_DESCRIPTION,
|
||||
["CreatorId"] = PLUGIN_CREATOR_ID,
|
||||
["CreatorType"] = Enum.AssetCreatorType:FromName(PLUGIN_CREATOR_TYPE),
|
||||
})
|
||||
|
||||
if result ~= Enum.CreateAssetResult.Success then
|
||||
error(`Plugin failed to upload because: {result.Name} - {version_or_err}`)
|
||||
end
|
||||
|
||||
print(`Plugin uploaded successfully. New version is {version_or_err}.`)
|
||||
78
.lune/upload-plugin.luau
Normal file
78
.lune/upload-plugin.luau
Normal file
@@ -0,0 +1,78 @@
|
||||
local fs = require("@lune/fs")
|
||||
local process = require("@lune/process")
|
||||
local stdio = require("@lune/stdio")
|
||||
|
||||
local luau_execute = require("./opencloud-execute")
|
||||
|
||||
local UNIVERSE_ID = process.env["RBX_UNIVERSE_ID"]
|
||||
local PLACE_ID = process.env["RBX_PLACE_ID"]
|
||||
|
||||
local version_string = fs.readFile("plugin/Version.txt")
|
||||
local versions = { string.match(version_string, "^v?(%d+)%.(%d+)%.(%d+)(.*)$") }
|
||||
if versions[4] ~= "" then
|
||||
print("This release is a pre-release. Skipping uploading plugin.")
|
||||
process.exit(0)
|
||||
end
|
||||
|
||||
local plugin_path = process.args[1]
|
||||
assert(
|
||||
typeof(plugin_path) == "string",
|
||||
"no plugin path provided, expected usage is `lune run upload-plugin [PATH TO RBXM]`."
|
||||
)
|
||||
|
||||
-- For local testing
|
||||
if process.env["CI"] ~= "true" then
|
||||
local rojo = process.exec("rojo", { "build", "plugin.project.json", "--output", plugin_path })
|
||||
if not rojo.ok then
|
||||
stdio.ewrite("plugin upload failed because: could not build plugin.rbxm\n\n")
|
||||
stdio.ewrite(rojo.stderr)
|
||||
stdio.ewrite("\n")
|
||||
process.exit(1)
|
||||
end
|
||||
else
|
||||
assert(fs.isFile(plugin_path), `Plugin file did not exist at {plugin_path}`)
|
||||
end
|
||||
local plugin_content = fs.readFile(plugin_path)
|
||||
|
||||
local engine_script = fs.readFile(".lune/scripts/plugin-upload.luau")
|
||||
|
||||
print("Creating task to upload plugin")
|
||||
local task = luau_execute.create_task_latest(UNIVERSE_ID, PLACE_ID, engine_script, 300, false, plugin_content)
|
||||
|
||||
print("Waiting for task to finish")
|
||||
local success = luau_execute.await_finish(task)
|
||||
if not success then
|
||||
local error = luau_execute.get_error(task)
|
||||
assert(error, "could not fetch error from task")
|
||||
stdio.ewrite("plugin upload failed because: task did not finish successfully\n\n")
|
||||
stdio.ewrite(error.code)
|
||||
stdio.ewrite("\n")
|
||||
stdio.ewrite(error.message)
|
||||
stdio.ewrite("\n")
|
||||
process.exit(1)
|
||||
end
|
||||
|
||||
print("Output from task:\n")
|
||||
for _, log in luau_execute.get_structured_logs(task) do
|
||||
if log.messageType == "ERROR" then
|
||||
stdio.write(stdio.color("red"))
|
||||
stdio.write(log.message)
|
||||
stdio.write("\n")
|
||||
stdio.write(stdio.color("reset"))
|
||||
elseif log.messageType == "INFO" then
|
||||
stdio.write(stdio.color("cyan"))
|
||||
stdio.write(log.message)
|
||||
stdio.write("\n")
|
||||
stdio.write(stdio.color("reset"))
|
||||
elseif log.messageType == "WARNING" then
|
||||
stdio.write(stdio.color("yellow"))
|
||||
stdio.write(log.message)
|
||||
stdio.write("\n")
|
||||
stdio.write(stdio.color("reset"))
|
||||
else
|
||||
stdio.write(stdio.color("reset"))
|
||||
stdio.write(log.message)
|
||||
stdio.write("\n")
|
||||
stdio.write(stdio.color("reset"))
|
||||
end
|
||||
end
|
||||
@@ -33,7 +33,6 @@ Making a new release? Simply add the new header with the version and date undern
|
||||
|
||||
* `inf` and `nan` values in properties are now synced ([#1176])
|
||||
* Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179])
|
||||
* Implemented support for the "name" property in meta/model JSON files. ([#1187])
|
||||
* Fixed instance replacement fallback failing when too many instances needed to be replaced. ([#1192])
|
||||
* Added actors and bindable/remote event/function variants to be synced back as JSON files. ([#1199])
|
||||
* Fixed a bug where MacOS paths weren't being handled correctly. ([#1201])
|
||||
@@ -43,7 +42,6 @@ Making a new release? Simply add the new header with the version and date undern
|
||||
|
||||
[#1176]: https://github.com/rojo-rbx/rojo/pull/1176
|
||||
[#1179]: https://github.com/rojo-rbx/rojo/pull/1179
|
||||
[#1187]: https://github.com/rojo-rbx/rojo/pull/1187
|
||||
[#1192]: https://github.com/rojo-rbx/rojo/pull/1192
|
||||
[#1199]: https://github.com/rojo-rbx/rojo/pull/1199
|
||||
[#1201]: https://github.com/rojo-rbx/rojo/pull/1201
|
||||
|
||||
1
build.rs
1
build.rs
@@ -75,6 +75,7 @@ fn main() -> Result<(), anyhow::Error> {
|
||||
"src" => snapshot_from_fs_path(&plugin_dir.join("src"))?,
|
||||
"Packages" => snapshot_from_fs_path(&plugin_dir.join("Packages"))?,
|
||||
"Version.txt" => snapshot_from_fs_path(&plugin_dir.join("Version.txt"))?,
|
||||
"UploadDetails.json" => snapshot_from_fs_path(&plugin_dir.join("UploadDetails.json"))?,
|
||||
}),
|
||||
});
|
||||
|
||||
|
||||
@@ -22,6 +22,9 @@
|
||||
},
|
||||
"Version": {
|
||||
"$path": "plugin/Version.txt"
|
||||
},
|
||||
"UploadDetails": {
|
||||
"$path": "plugin/UploadDetails.json"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
7
plugin/UploadDetails.json
Normal file
7
plugin/UploadDetails.json
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"assetId": 13916111004,
|
||||
"name": "Rojo",
|
||||
"description": "The plugin portion of Rojo, a tool to enable professional tooling for Roblox developers.",
|
||||
"creatorId": 32644114,
|
||||
"creatorType": "Group"
|
||||
}
|
||||
@@ -41,41 +41,14 @@ function reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualIn
|
||||
invariant("Cannot reify an instance not present in virtualInstances\nID: {}", id)
|
||||
end
|
||||
|
||||
-- Before creating a new instance, check if the parent already has an
|
||||
-- untracked child with the same Name and ClassName. This enables "late
|
||||
-- adoption" of instances that exist in Studio but weren't in the initial
|
||||
-- Rojo tree (e.g., when using --git-since filtering). Without this,
|
||||
-- newly acknowledged files would create duplicate instances.
|
||||
local adoptedExisting = false
|
||||
local instance = nil
|
||||
-- Instance.new can fail if we're passing in something that can't be
|
||||
-- created, like a service, something enabled with a feature flag, or
|
||||
-- something that requires higher security than we have.
|
||||
local createSuccess, instance = pcall(Instance.new, virtualInstance.ClassName)
|
||||
|
||||
for _, child in ipairs(parentInstance:GetChildren()) do
|
||||
local accessSuccess, name, className = pcall(function()
|
||||
return child.Name, child.ClassName
|
||||
end)
|
||||
|
||||
if accessSuccess
|
||||
and name == virtualInstance.Name
|
||||
and className == virtualInstance.ClassName
|
||||
and instanceMap.fromInstances[child] == nil
|
||||
then
|
||||
instance = child
|
||||
adoptedExisting = true
|
||||
break
|
||||
end
|
||||
end
|
||||
|
||||
if not adoptedExisting then
|
||||
-- Instance.new can fail if we're passing in something that can't be
|
||||
-- created, like a service, something enabled with a feature flag, or
|
||||
-- something that requires higher security than we have.
|
||||
local createSuccess
|
||||
createSuccess, instance = pcall(Instance.new, virtualInstance.ClassName)
|
||||
|
||||
if not createSuccess then
|
||||
addAllToPatch(unappliedPatch, virtualInstances, id)
|
||||
return
|
||||
end
|
||||
if not createSuccess then
|
||||
addAllToPatch(unappliedPatch, virtualInstances, id)
|
||||
return
|
||||
end
|
||||
|
||||
-- TODO: Can this fail? Previous versions of Rojo guarded against this, but
|
||||
@@ -123,9 +96,7 @@ function reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualIn
|
||||
reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualInstances, childId, instance)
|
||||
end
|
||||
|
||||
if not adoptedExisting then
|
||||
instance.Parent = parentInstance
|
||||
end
|
||||
instance.Parent = parentInstance
|
||||
instanceMap:insert(id, instance)
|
||||
end
|
||||
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
---
|
||||
source: tests/tests/build.rs
|
||||
expression: contents
|
||||
---
|
||||
<roblox version="4">
|
||||
<Item class="Folder" referent="0">
|
||||
<Properties>
|
||||
<string name="Name">json_model_legacy_name</string>
|
||||
</Properties>
|
||||
<Item class="Folder" referent="1">
|
||||
<Properties>
|
||||
<string name="Name">Expected Name</string>
|
||||
</Properties>
|
||||
</Item>
|
||||
</Item>
|
||||
</roblox>
|
||||
@@ -1,23 +0,0 @@
|
||||
---
|
||||
source: tests/tests/build.rs
|
||||
assertion_line: 109
|
||||
expression: contents
|
||||
---
|
||||
<roblox version="4">
|
||||
<Item class="DataModel" referent="0">
|
||||
<Properties>
|
||||
<string name="Name">model_json_name_input</string>
|
||||
</Properties>
|
||||
<Item class="Workspace" referent="1">
|
||||
<Properties>
|
||||
<string name="Name">Workspace</string>
|
||||
<bool name="NeedsPivotMigration">false</bool>
|
||||
</Properties>
|
||||
<Item class="StringValue" referent="2">
|
||||
<Properties>
|
||||
<string name="Name">/Bar</string>
|
||||
</Properties>
|
||||
</Item>
|
||||
</Item>
|
||||
</Item>
|
||||
</roblox>
|
||||
@@ -1,20 +0,0 @@
|
||||
---
|
||||
source: tests/tests/build.rs
|
||||
assertion_line: 108
|
||||
expression: contents
|
||||
---
|
||||
<roblox version="4">
|
||||
<Item class="Folder" referent="0">
|
||||
<Properties>
|
||||
<string name="Name">slugified_name_roundtrip</string>
|
||||
</Properties>
|
||||
<Item class="Script" referent="1">
|
||||
<Properties>
|
||||
<string name="Name">/Script</string>
|
||||
<token name="RunContext">0</token>
|
||||
<string name="Source"><![CDATA[print("Hello world!")
|
||||
]]></string>
|
||||
</Properties>
|
||||
</Item>
|
||||
</Item>
|
||||
</roblox>
|
||||
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "json_model_legacy_name",
|
||||
"tree": {
|
||||
"$path": "folder"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"Name": "Overridden Name",
|
||||
"ClassName": "Folder"
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"name": "model_json_name_input",
|
||||
"tree": {
|
||||
"$className": "DataModel",
|
||||
"Workspace": {
|
||||
"$className": "Workspace",
|
||||
"$path": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"name": "/Bar",
|
||||
"className": "StringValue"
|
||||
}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"name": "/Script"
|
||||
}
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
print("Hello world!")
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"name": "slugified_name_roundtrip",
|
||||
"tree": {
|
||||
"$path": "src"
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"name": "/Script"
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
print("Hello world!")
|
||||
@@ -1,6 +0,0 @@
|
||||
---
|
||||
source: tests/rojo_test/syncback_util.rs
|
||||
assertion_line: 101
|
||||
expression: "String::from_utf8_lossy(&output.stdout)"
|
||||
---
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
source: tests/rojo_test/syncback_util.rs
|
||||
assertion_line: 101
|
||||
expression: "String::from_utf8_lossy(&output.stdout)"
|
||||
---
|
||||
Writing default.project.json
|
||||
Writing src/Camera.rbxm
|
||||
Writing src/Terrain.rbxm
|
||||
Writing src/_Folder/init.meta.json
|
||||
Writing src/_Script.meta.json
|
||||
Writing src/_Script.server.luau
|
||||
Writing src
|
||||
Writing src/_Folder
|
||||
@@ -1,9 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/foo.model.json
|
||||
---
|
||||
{
|
||||
"name": "/Bar",
|
||||
"className": "StringValue"
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Folder.model.json
|
||||
---
|
||||
{
|
||||
"className": "Folder"
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Folder/init.meta.json
|
||||
---
|
||||
{
|
||||
"name": "/Folder"
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Script.meta.json
|
||||
---
|
||||
{
|
||||
"name": "/Script"
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Script.server.luau
|
||||
---
|
||||
print("Hello world!")
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Script/init.meta.json
|
||||
---
|
||||
{
|
||||
"name": "/Script"
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/_Script/init.server.luau
|
||||
---
|
||||
print("Hello world!")
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"name": "model_json_name",
|
||||
"tree": {
|
||||
"$className": "DataModel",
|
||||
"Workspace": {
|
||||
"$className": "Workspace",
|
||||
"$path": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"name": "/Bar",
|
||||
"className": "StringValue"
|
||||
}
|
||||
|
||||
Binary file not shown.
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"name": "slugified_name",
|
||||
"tree": {
|
||||
"$className": "DataModel",
|
||||
"Workspace": {
|
||||
"$className": "Workspace",
|
||||
"$path": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
Binary file not shown.
@@ -3,3 +3,4 @@ rojo = "rojo-rbx/rojo@7.5.1"
|
||||
selene = "Kampfkarren/selene@0.29.0"
|
||||
stylua = "JohnnyMorganz/stylua@2.1.0"
|
||||
run-in-roblox = "rojo-rbx/run-in-roblox@0.3.0"
|
||||
lune = "lune-org/lune@0.10.4"
|
||||
|
||||
@@ -9,7 +9,6 @@ use std::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
git::SharedGitFilter,
|
||||
message_queue::MessageQueue,
|
||||
snapshot::{
|
||||
apply_patch_set, compute_patch_set, AppliedPatchSet, InstigatingSource, PatchSet, RojoTree,
|
||||
@@ -47,15 +46,11 @@ pub struct ChangeProcessor {
|
||||
impl ChangeProcessor {
|
||||
/// Spin up the ChangeProcessor, connecting it to the given tree, VFS, and
|
||||
/// outbound message queue.
|
||||
///
|
||||
/// If `git_filter` is provided, it will be refreshed on every VFS event
|
||||
/// to ensure newly changed files are acknowledged.
|
||||
pub fn start(
|
||||
tree: Arc<Mutex<RojoTree>>,
|
||||
vfs: Arc<Vfs>,
|
||||
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
|
||||
tree_mutation_receiver: Receiver<PatchSet>,
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
) -> Self {
|
||||
let (shutdown_sender, shutdown_receiver) = crossbeam_channel::bounded(1);
|
||||
let vfs_receiver = vfs.event_receiver();
|
||||
@@ -63,7 +58,6 @@ impl ChangeProcessor {
|
||||
tree,
|
||||
vfs,
|
||||
message_queue,
|
||||
git_filter,
|
||||
};
|
||||
|
||||
let job_thread = jod_thread::Builder::new()
|
||||
@@ -117,10 +111,6 @@ struct JobThreadContext {
|
||||
/// Whenever changes are applied to the DOM, we should push those changes
|
||||
/// into this message queue to inform any connected clients.
|
||||
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
|
||||
|
||||
/// Optional Git filter for --git-since mode. When set, will be refreshed
|
||||
/// on every VFS event to ensure newly changed files are acknowledged.
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
}
|
||||
|
||||
impl JobThreadContext {
|
||||
@@ -170,14 +160,6 @@ impl JobThreadContext {
|
||||
fn handle_vfs_event(&self, event: VfsEvent) {
|
||||
log::trace!("Vfs event: {:?}", event);
|
||||
|
||||
// If we have a git filter, refresh it to pick up any new changes.
|
||||
// This ensures that files modified during the session will be acknowledged.
|
||||
if let Some(ref git_filter) = self.git_filter {
|
||||
if let Err(err) = git_filter.refresh() {
|
||||
log::warn!("Failed to refresh git filter: {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
// Update the VFS immediately with the event.
|
||||
self.vfs
|
||||
.commit_event(&event)
|
||||
|
||||
@@ -81,7 +81,7 @@ impl BuildCommand {
|
||||
let vfs = Vfs::new_default();
|
||||
vfs.set_watch_enabled(self.watch);
|
||||
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
let mut cursor = session.message_queue().cursor();
|
||||
|
||||
write_model(&session, &output_path, output_kind)?;
|
||||
|
||||
@@ -54,7 +54,7 @@ fn initialize_plugin() -> anyhow::Result<ServeSession> {
|
||||
in_memory_fs.load_snapshot("/plugin", plugin_snapshot)?;
|
||||
|
||||
let vfs = Vfs::new(in_memory_fs);
|
||||
Ok(ServeSession::new(vfs, "/plugin", None)?)
|
||||
Ok(ServeSession::new(vfs, "/plugin")?)
|
||||
}
|
||||
|
||||
fn install_plugin() -> anyhow::Result<()> {
|
||||
@@ -98,5 +98,5 @@ fn uninstall_plugin() -> anyhow::Result<()> {
|
||||
|
||||
#[test]
|
||||
fn plugin_initialize() {
|
||||
assert!(initialize_plugin().is_ok())
|
||||
let _ = initialize_plugin().unwrap();
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use clap::Parser;
|
||||
use memofs::Vfs;
|
||||
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
|
||||
|
||||
use crate::{git::GitFilter, serve_session::ServeSession, web::LiveServer};
|
||||
use crate::{serve_session::ServeSession, web::LiveServer};
|
||||
|
||||
use super::{resolve_path, GlobalOptions};
|
||||
|
||||
@@ -31,19 +31,6 @@ pub struct ServeCommand {
|
||||
/// it has none.
|
||||
#[clap(long)]
|
||||
pub port: Option<u16>,
|
||||
|
||||
/// Only sync files that have changed since the given Git reference.
|
||||
///
|
||||
/// When this option is set, Rojo will only include files that have been
|
||||
/// modified, added, or are untracked since the specified Git reference
|
||||
/// (e.g., "HEAD", "main", a commit hash). This is useful for working with
|
||||
/// large projects where you only want to sync your local changes.
|
||||
///
|
||||
/// Scripts that have not changed will still be acknowledged if modified
|
||||
/// during the session, and all synced instances will have
|
||||
/// ignoreUnknownInstances set to true to preserve descendants in Studio.
|
||||
#[clap(long, value_name = "REF")]
|
||||
pub git_since: Option<String>,
|
||||
}
|
||||
|
||||
impl ServeCommand {
|
||||
@@ -52,19 +39,7 @@ impl ServeCommand {
|
||||
|
||||
let vfs = Vfs::new_default();
|
||||
|
||||
// Set up Git filter if --git-since was specified
|
||||
let git_filter = if let Some(ref base_ref) = self.git_since {
|
||||
let repo_root = GitFilter::find_repo_root(&project_path)?;
|
||||
log::info!(
|
||||
"Git filter enabled: only syncing files changed since '{}'",
|
||||
base_ref
|
||||
);
|
||||
Some(Arc::new(GitFilter::new(repo_root, base_ref.clone(), &project_path)?))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let session = Arc::new(ServeSession::new(vfs, project_path, git_filter)?);
|
||||
let session = Arc::new(ServeSession::new(vfs, project_path)?);
|
||||
|
||||
let ip = self
|
||||
.address
|
||||
@@ -78,25 +53,17 @@ impl ServeCommand {
|
||||
|
||||
let server = LiveServer::new(session);
|
||||
|
||||
let _ = show_start_message(ip, port, self.git_since.as_deref(), global.color.into());
|
||||
let _ = show_start_message(ip, port, global.color.into());
|
||||
server.start((ip, port).into());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn show_start_message(
|
||||
bind_address: IpAddr,
|
||||
port: u16,
|
||||
git_since: Option<&str>,
|
||||
color: ColorChoice,
|
||||
) -> io::Result<()> {
|
||||
fn show_start_message(bind_address: IpAddr, port: u16, color: ColorChoice) -> io::Result<()> {
|
||||
let mut green = ColorSpec::new();
|
||||
green.set_fg(Some(Color::Green)).set_bold(true);
|
||||
|
||||
let mut yellow = ColorSpec::new();
|
||||
yellow.set_fg(Some(Color::Yellow)).set_bold(true);
|
||||
|
||||
let writer = BufferWriter::stdout(color);
|
||||
let mut buffer = writer.buffer();
|
||||
|
||||
@@ -117,13 +84,6 @@ fn show_start_message(
|
||||
buffer.set_color(&green)?;
|
||||
writeln!(&mut buffer, "{}", port)?;
|
||||
|
||||
if let Some(base_ref) = git_since {
|
||||
buffer.set_color(&ColorSpec::new())?;
|
||||
write!(&mut buffer, " Mode: ")?;
|
||||
buffer.set_color(&yellow)?;
|
||||
writeln!(&mut buffer, "git-since ({})", base_ref)?;
|
||||
}
|
||||
|
||||
writeln!(&mut buffer)?;
|
||||
|
||||
buffer.set_color(&ColorSpec::new())?;
|
||||
|
||||
@@ -78,7 +78,7 @@ impl SourcemapCommand {
|
||||
vfs.set_watch_enabled(self.watch);
|
||||
|
||||
log::trace!("Setting up session for sourcemap generation");
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
let mut cursor = session.message_queue().cursor();
|
||||
|
||||
let filter = if self.include_non_scripts {
|
||||
|
||||
@@ -54,11 +54,6 @@ pub struct SyncbackCommand {
|
||||
/// If provided, the prompt for writing to the file system is skipped.
|
||||
#[clap(long, short = 'y')]
|
||||
pub non_interactive: bool,
|
||||
|
||||
/// If provided, forces syncback to use JSON model files instead of binary
|
||||
/// .rbxm files for instances that would otherwise serialize as binary.
|
||||
#[clap(long)]
|
||||
pub dangerously_force_json: bool,
|
||||
}
|
||||
|
||||
impl SyncbackCommand {
|
||||
@@ -78,7 +73,7 @@ impl SyncbackCommand {
|
||||
vfs.set_watch_enabled(false);
|
||||
|
||||
let project_start_timer = Instant::now();
|
||||
let session_old = ServeSession::new(vfs, path_old.clone(), None)?;
|
||||
let session_old = ServeSession::new(vfs, path_old.clone())?;
|
||||
log::debug!(
|
||||
"Finished opening project in {:0.02}s",
|
||||
project_start_timer.elapsed().as_secs_f32()
|
||||
@@ -109,7 +104,6 @@ impl SyncbackCommand {
|
||||
&mut dom_old,
|
||||
dom_new,
|
||||
session_old.root_project(),
|
||||
self.dangerously_force_json,
|
||||
)?;
|
||||
log::debug!(
|
||||
"Syncback finished in {:.02}s!",
|
||||
|
||||
@@ -42,7 +42,7 @@ impl UploadCommand {
|
||||
|
||||
let vfs = Vfs::new_default();
|
||||
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
|
||||
let tree = session.tree();
|
||||
let inner_tree = tree.inner();
|
||||
|
||||
380
src/git.rs
380
src/git.rs
@@ -1,380 +0,0 @@
|
||||
//! Git integration for filtering files based on changes since a reference.
|
||||
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
path::{Path, PathBuf},
|
||||
process::Command,
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
use anyhow::{bail, Context};
|
||||
|
||||
/// A filter that tracks which files have been changed since a Git reference.
|
||||
///
|
||||
/// When active, only files that have been modified, added, or deleted according
|
||||
/// to Git will be "acknowledged" and synced to Studio. This allows users to
|
||||
/// work with large projects where they only want to sync their local changes.
|
||||
///
|
||||
/// Once a file is acknowledged (either initially or during the session), it
|
||||
/// stays acknowledged for the entire session. This prevents files from being
|
||||
/// deleted in Studio if their content is reverted to match the git reference.
|
||||
#[derive(Debug)]
|
||||
pub struct GitFilter {
|
||||
/// The Git repository root directory.
|
||||
repo_root: PathBuf,
|
||||
|
||||
/// The Git reference to compare against (e.g., "HEAD", "main", a commit hash).
|
||||
base_ref: String,
|
||||
|
||||
/// Cache of paths that are currently different from the base ref according to git.
|
||||
/// This is refreshed on every VFS event.
|
||||
git_changed_paths: RwLock<HashSet<PathBuf>>,
|
||||
|
||||
/// Paths that have been acknowledged at any point during this session.
|
||||
/// Once a path is added here, it stays acknowledged forever (for this session).
|
||||
/// This prevents files from being deleted if their content is reverted.
|
||||
session_acknowledged_paths: RwLock<HashSet<PathBuf>>,
|
||||
}
|
||||
|
||||
impl GitFilter {
|
||||
/// Creates a new GitFilter for the given repository root and base reference.
|
||||
///
|
||||
/// The `repo_root` should be the root of the Git repository (where .git is located).
|
||||
/// The `base_ref` is the Git reference to compare against (e.g., "HEAD", "main").
|
||||
/// The `project_path` is the path to the project being served - it will always be
|
||||
/// acknowledged regardless of git status to ensure the project structure exists.
|
||||
pub fn new(repo_root: PathBuf, base_ref: String, project_path: &Path) -> anyhow::Result<Self> {
|
||||
let filter = Self {
|
||||
repo_root,
|
||||
base_ref,
|
||||
git_changed_paths: RwLock::new(HashSet::new()),
|
||||
session_acknowledged_paths: RwLock::new(HashSet::new()),
|
||||
};
|
||||
|
||||
// Always acknowledge the project path and its directory so the project
|
||||
// structure exists even when there are no git changes
|
||||
filter.acknowledge_project_path(project_path);
|
||||
|
||||
// Initial refresh to populate the cache with git changes
|
||||
filter.refresh()?;
|
||||
|
||||
Ok(filter)
|
||||
}
|
||||
|
||||
/// Acknowledges the project path and its containing directory.
|
||||
/// This ensures the project structure always exists regardless of git status.
|
||||
fn acknowledge_project_path(&self, project_path: &Path) {
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
|
||||
// Acknowledge the project path itself (might be a directory or .project.json file)
|
||||
let canonical = project_path.canonicalize().unwrap_or_else(|_| project_path.to_path_buf());
|
||||
session.insert(canonical.clone());
|
||||
|
||||
// Acknowledge all ancestor directories
|
||||
let mut current = canonical.parent();
|
||||
while let Some(parent) = current {
|
||||
session.insert(parent.to_path_buf());
|
||||
current = parent.parent();
|
||||
}
|
||||
|
||||
// If it's a directory, also acknowledge default.project.json inside it
|
||||
if project_path.is_dir() {
|
||||
for name in &["default.project.json", "default.project.jsonc"] {
|
||||
let project_file = project_path.join(name);
|
||||
if let Ok(canonical_file) = project_file.canonicalize() {
|
||||
session.insert(canonical_file);
|
||||
} else {
|
||||
session.insert(project_file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If it's a .project.json file, also acknowledge its parent directory
|
||||
if let Some(parent) = project_path.parent() {
|
||||
let parent_canonical = parent.canonicalize().unwrap_or_else(|_| parent.to_path_buf());
|
||||
session.insert(parent_canonical);
|
||||
}
|
||||
|
||||
log::debug!(
|
||||
"GitFilter: acknowledged project path {} ({} paths total)",
|
||||
project_path.display(),
|
||||
session.len()
|
||||
);
|
||||
}
|
||||
|
||||
/// Finds the Git repository root for the given path.
|
||||
pub fn find_repo_root(path: &Path) -> anyhow::Result<PathBuf> {
|
||||
let output = Command::new("git")
|
||||
.args(["rev-parse", "--show-toplevel"])
|
||||
.current_dir(path)
|
||||
.output()
|
||||
.context("Failed to execute git rev-parse")?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
bail!("Failed to find Git repository root: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let root = String::from_utf8_lossy(&output.stdout)
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
Ok(PathBuf::from(root))
|
||||
}
|
||||
|
||||
/// Refreshes the cache of acknowledged paths by querying Git.
|
||||
///
|
||||
/// This should be called when files change to ensure newly modified files
|
||||
/// are properly acknowledged. Once a path is acknowledged, it stays
|
||||
/// acknowledged for the entire session (even if the file is reverted).
|
||||
pub fn refresh(&self) -> anyhow::Result<()> {
|
||||
let mut git_changed = HashSet::new();
|
||||
|
||||
// Get files changed since the base ref (modified, added, deleted)
|
||||
let diff_output = Command::new("git")
|
||||
.args(["diff", "--name-only", &self.base_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git diff")?;
|
||||
|
||||
if !diff_output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&diff_output.stderr);
|
||||
bail!("git diff failed: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let diff_files = String::from_utf8_lossy(&diff_output.stdout);
|
||||
let diff_count = diff_files.lines().filter(|l| !l.is_empty()).count();
|
||||
if diff_count > 0 {
|
||||
log::debug!("git diff found {} changed files", diff_count);
|
||||
}
|
||||
for line in diff_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
log::trace!("git diff: acknowledging {}", path.display());
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
|
||||
// Get untracked files (new files not yet committed)
|
||||
let untracked_output = Command::new("git")
|
||||
.args(["ls-files", "--others", "--exclude-standard"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git ls-files")?;
|
||||
|
||||
if !untracked_output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&untracked_output.stderr);
|
||||
bail!("git ls-files failed: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let untracked_files = String::from_utf8_lossy(&untracked_output.stdout);
|
||||
for line in untracked_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
|
||||
// Get staged files (files added to index but not yet committed)
|
||||
let staged_output = Command::new("git")
|
||||
.args(["diff", "--name-only", "--cached", &self.base_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git diff --cached")?;
|
||||
|
||||
if staged_output.status.success() {
|
||||
let staged_files = String::from_utf8_lossy(&staged_output.stdout);
|
||||
for line in staged_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update the git changed paths cache
|
||||
{
|
||||
let mut cache = self.git_changed_paths.write().unwrap();
|
||||
*cache = git_changed.clone();
|
||||
}
|
||||
|
||||
// Merge newly changed paths into session acknowledged paths
|
||||
// Once acknowledged, a path stays acknowledged for the entire session
|
||||
{
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
for path in git_changed {
|
||||
session.insert(path);
|
||||
}
|
||||
log::debug!(
|
||||
"GitFilter refreshed: {} paths acknowledged in session",
|
||||
session.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Acknowledges a path and all its ancestors, plus associated meta files.
|
||||
fn acknowledge_path(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
|
||||
// Canonicalize the path if possible, otherwise use as-is
|
||||
let path = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
// Add the path itself
|
||||
acknowledged.insert(path.clone());
|
||||
|
||||
// Add all ancestor directories
|
||||
let mut current = path.parent();
|
||||
while let Some(parent) = current {
|
||||
acknowledged.insert(parent.to_path_buf());
|
||||
current = parent.parent();
|
||||
}
|
||||
|
||||
// Add associated meta files
|
||||
self.acknowledge_meta_files(&path, acknowledged);
|
||||
}
|
||||
|
||||
/// Acknowledges associated meta files for a given path.
|
||||
fn acknowledge_meta_files(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
|
||||
if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
|
||||
if let Some(parent) = path.parent() {
|
||||
// For a file like "foo.lua", also acknowledge "foo.meta.json"
|
||||
// Strip known extensions to get the base name
|
||||
let base_name = strip_lua_extension(file_name);
|
||||
|
||||
let meta_path = parent.join(format!("{}.meta.json", base_name));
|
||||
if let Ok(canonical) = meta_path.canonicalize() {
|
||||
acknowledged.insert(canonical);
|
||||
} else {
|
||||
acknowledged.insert(meta_path);
|
||||
}
|
||||
|
||||
// For init files, also acknowledge "init.meta.json" in the same directory
|
||||
if file_name.starts_with("init.") {
|
||||
let init_meta = parent.join("init.meta.json");
|
||||
if let Ok(canonical) = init_meta.canonicalize() {
|
||||
acknowledged.insert(canonical);
|
||||
} else {
|
||||
acknowledged.insert(init_meta);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if a path is acknowledged (should be synced).
|
||||
///
|
||||
/// Returns `true` if the path or any of its descendants have been changed
|
||||
/// at any point during this session. Once a file is acknowledged, it stays
|
||||
/// acknowledged even if its content is reverted to match the git reference.
|
||||
pub fn is_acknowledged(&self, path: &Path) -> bool {
|
||||
let session = self.session_acknowledged_paths.read().unwrap();
|
||||
|
||||
// Try to canonicalize the path
|
||||
let canonical = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
// Check if this exact path is acknowledged
|
||||
if session.contains(&canonical) {
|
||||
log::trace!("Path {} is directly acknowledged", path.display());
|
||||
return true;
|
||||
}
|
||||
|
||||
// Also check without canonicalization in case of path differences
|
||||
if session.contains(path) {
|
||||
log::trace!("Path {} is acknowledged (non-canonical)", path.display());
|
||||
return true;
|
||||
}
|
||||
|
||||
// For directories, check if any descendant is acknowledged
|
||||
// This is done by checking if any acknowledged path starts with this path
|
||||
for acknowledged in session.iter() {
|
||||
if acknowledged.starts_with(&canonical) {
|
||||
log::trace!(
|
||||
"Path {} has acknowledged descendant {}",
|
||||
path.display(),
|
||||
acknowledged.display()
|
||||
);
|
||||
return true;
|
||||
}
|
||||
// Also check non-canonical
|
||||
if acknowledged.starts_with(path) {
|
||||
log::trace!(
|
||||
"Path {} has acknowledged descendant {} (non-canonical)",
|
||||
path.display(),
|
||||
acknowledged.display()
|
||||
);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
log::trace!(
|
||||
"Path {} is NOT acknowledged (canonical: {})",
|
||||
path.display(),
|
||||
canonical.display()
|
||||
);
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns the base reference being compared against.
///
/// This is the Git revision (branch, tag, or commit) that `refresh`
/// diffs the working tree and index against.
pub fn base_ref(&self) -> &str {
    &self.base_ref
}
|
||||
|
||||
/// Returns the repository root path.
///
/// All repo-relative paths reported by git are joined onto this root
/// before being recorded.
pub fn repo_root(&self) -> &Path {
    &self.repo_root
}
|
||||
|
||||
/// Explicitly acknowledges a path and all its ancestors.
|
||||
/// This is useful for ensuring certain paths are always synced regardless of git status.
|
||||
pub fn force_acknowledge(&self, path: &Path) {
|
||||
let mut acknowledged = HashSet::new();
|
||||
self.acknowledge_path(path, &mut acknowledged);
|
||||
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
for p in acknowledged {
|
||||
session.insert(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Strips Lua-related extensions from a file name to get the base name.
///
/// Known multi-part script extensions (".server.lua", ".client.luau", …)
/// are removed as a unit; otherwise the final extension is stripped.
/// A name with no extension — or one whose stem would be empty, such as
/// the dotfile ".gitignore" — is returned unchanged, so callers never
/// build a companion path from an empty base (e.g. a bare ".meta.json").
fn strip_lua_extension(file_name: &str) -> &str {
    // Longest suffixes first so ".server.lua" wins over plain ".lua".
    const EXTENSIONS: &[&str] = &[
        ".server.luau",
        ".server.lua",
        ".client.luau",
        ".client.lua",
        ".luau",
        ".lua",
    ];

    for ext in EXTENSIONS {
        if let Some(base) = file_name.strip_suffix(ext) {
            // Reject an empty stem (the whole name was the extension).
            if !base.is_empty() {
                return base;
            }
        }
    }

    // Not a Lua file: strip the regular (last) extension instead.
    match file_name.rsplit_once('.') {
        // Guard against dotfiles: ".gitignore" splits into
        // ("", "gitignore") and an empty base would be meaningless.
        Some((base, _)) if !base.is_empty() => base,
        _ => file_name,
    }
}
|
||||
|
||||
/// A wrapper around GitFilter that can be shared across threads.
///
/// `Arc` provides shared ownership; the filter's internal path sets are
/// guarded by their own read/write locks, so no external locking is
/// needed by holders of this alias.
pub type SharedGitFilter = Arc<GitFilter>;
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Table-driven check of `strip_lua_extension` covering script
    /// extensions, plain files, and extension-less names.
    #[test]
    fn test_strip_lua_extension() {
        let cases = [
            ("foo.server.lua", "foo"),
            ("foo.client.luau", "foo"),
            ("foo.lua", "foo"),
            ("init.server.lua", "init"),
            ("bar.txt", "bar"),
            ("noextension", "noextension"),
        ];
        for &(input, expected) in cases.iter() {
            assert_eq!(strip_lua_extension(input), expected, "input: {}", input);
        }
    }
}
|
||||
@@ -9,7 +9,6 @@ mod tree_view;
|
||||
|
||||
mod auth_cookie;
|
||||
mod change_processor;
|
||||
mod git;
|
||||
mod glob;
|
||||
mod json;
|
||||
mod lua_ast;
|
||||
@@ -29,7 +28,6 @@ mod web;
|
||||
|
||||
// TODO: Work out what we should expose publicly
|
||||
|
||||
pub use git::{GitFilter, SharedGitFilter};
|
||||
pub use project::*;
|
||||
pub use rojo_ref::*;
|
||||
pub use session_id::SessionId;
|
||||
|
||||
@@ -13,7 +13,6 @@ use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
change_processor::ChangeProcessor,
|
||||
git::SharedGitFilter,
|
||||
message_queue::MessageQueue,
|
||||
project::{Project, ProjectError},
|
||||
session_id::SessionId,
|
||||
@@ -95,14 +94,7 @@ impl ServeSession {
|
||||
/// The project file is expected to be loaded out-of-band since it's
|
||||
/// currently loaded from the filesystem directly instead of through the
|
||||
/// in-memory filesystem layer.
|
||||
///
|
||||
/// If `git_filter` is provided, only files that have changed since the
|
||||
/// specified Git reference will be synced.
|
||||
pub fn new<P: AsRef<Path>>(
|
||||
vfs: Vfs,
|
||||
start_path: P,
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
) -> Result<Self, ServeSessionError> {
|
||||
pub fn new<P: AsRef<Path>>(vfs: Vfs, start_path: P) -> Result<Self, ServeSessionError> {
|
||||
let start_path = start_path.as_ref();
|
||||
let start_time = Instant::now();
|
||||
|
||||
@@ -110,28 +102,12 @@ impl ServeSession {
|
||||
|
||||
let root_project = Project::load_initial_project(&vfs, start_path)?;
|
||||
|
||||
// If git filter is active, ensure the project file location is acknowledged
|
||||
// This is necessary so the project structure exists even with no git changes
|
||||
if let Some(ref filter) = git_filter {
|
||||
filter.force_acknowledge(start_path);
|
||||
filter.force_acknowledge(&root_project.file_location);
|
||||
filter.force_acknowledge(root_project.folder_location());
|
||||
log::debug!(
|
||||
"Force acknowledged project at {}",
|
||||
root_project.file_location.display()
|
||||
);
|
||||
}
|
||||
|
||||
let mut tree = RojoTree::new(InstanceSnapshot::new());
|
||||
|
||||
let root_id = tree.get_root_id();
|
||||
|
||||
let instance_context = match &git_filter {
|
||||
Some(filter) => {
|
||||
InstanceContext::with_git_filter(root_project.emit_legacy_scripts, Arc::clone(filter))
|
||||
}
|
||||
None => InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts),
|
||||
};
|
||||
let instance_context =
|
||||
InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts);
|
||||
|
||||
log::trace!("Generating snapshot of instances from VFS");
|
||||
let snapshot = snapshot_from_vfs(&instance_context, &vfs, start_path)?;
|
||||
@@ -157,7 +133,6 @@ impl ServeSession {
|
||||
Arc::clone(&vfs),
|
||||
Arc::clone(&message_queue),
|
||||
tree_mutation_receiver,
|
||||
git_filter,
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
|
||||
@@ -8,7 +8,6 @@ use anyhow::Context;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
git::SharedGitFilter,
|
||||
glob::Glob,
|
||||
path_serializer,
|
||||
project::ProjectNode,
|
||||
@@ -71,12 +70,6 @@ pub struct InstanceMetadata {
|
||||
/// A schema provided via a JSON file, if one exists. Will be `None` for
|
||||
/// all non-JSON middleware.
|
||||
pub schema: Option<String>,
|
||||
|
||||
/// A custom name specified via meta.json or model.json files. If present,
|
||||
/// this name will be used for the instance while the filesystem name will
|
||||
/// be slugified to remove illegal characters.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub specified_name: Option<String>,
|
||||
}
|
||||
|
||||
impl InstanceMetadata {
|
||||
@@ -89,7 +82,6 @@ impl InstanceMetadata {
|
||||
specified_id: None,
|
||||
middleware: None,
|
||||
schema: None,
|
||||
specified_name: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,13 +130,6 @@ impl InstanceMetadata {
|
||||
pub fn schema(self, schema: Option<String>) -> Self {
|
||||
Self { schema, ..self }
|
||||
}
|
||||
|
||||
pub fn specified_name(self, specified_name: Option<String>) -> Self {
|
||||
Self {
|
||||
specified_name,
|
||||
..self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for InstanceMetadata {
|
||||
@@ -153,27 +138,13 @@ impl Default for InstanceMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct InstanceContext {
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub path_ignore_rules: Arc<Vec<PathIgnoreRule>>,
|
||||
pub emit_legacy_scripts: bool,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub sync_rules: Vec<SyncRule>,
|
||||
/// Optional Git filter for --git-since mode. When set, only files that have
|
||||
/// changed since the specified Git reference will be synced.
|
||||
#[serde(skip)]
|
||||
pub git_filter: Option<SharedGitFilter>,
|
||||
}
|
||||
|
||||
impl PartialEq for InstanceContext {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
// Note: git_filter is intentionally excluded from comparison
|
||||
// since it's runtime state, not configuration
|
||||
self.path_ignore_rules == other.path_ignore_rules
|
||||
&& self.emit_legacy_scripts == other.emit_legacy_scripts
|
||||
&& self.sync_rules == other.sync_rules
|
||||
}
|
||||
}
|
||||
|
||||
impl InstanceContext {
|
||||
@@ -182,7 +153,6 @@ impl InstanceContext {
|
||||
path_ignore_rules: Arc::new(Vec::new()),
|
||||
emit_legacy_scripts: emit_legacy_scripts_default().unwrap(),
|
||||
sync_rules: Vec::new(),
|
||||
git_filter: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,36 +165,6 @@ impl InstanceContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new InstanceContext with a Git filter for --git-since mode.
|
||||
pub fn with_git_filter(
|
||||
emit_legacy_scripts: Option<bool>,
|
||||
git_filter: SharedGitFilter,
|
||||
) -> Self {
|
||||
Self {
|
||||
git_filter: Some(git_filter),
|
||||
..Self::with_emit_legacy_scripts(emit_legacy_scripts)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the Git filter for this context.
|
||||
pub fn set_git_filter(&mut self, git_filter: Option<SharedGitFilter>) {
|
||||
self.git_filter = git_filter;
|
||||
}
|
||||
|
||||
/// Returns true if the given path should be acknowledged (synced).
|
||||
/// If no git filter is set, all paths are acknowledged.
|
||||
pub fn is_path_acknowledged(&self, path: &Path) -> bool {
|
||||
match &self.git_filter {
|
||||
Some(filter) => filter.is_acknowledged(path),
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if a git filter is active.
|
||||
pub fn has_git_filter(&self) -> bool {
|
||||
self.git_filter.is_some()
|
||||
}
|
||||
|
||||
/// Extend the list of ignore rules in the context with the given new rules.
|
||||
pub fn add_path_ignore_rules<I>(&mut self, new_rules: I)
|
||||
where
|
||||
|
||||
@@ -109,13 +109,8 @@ pub fn syncback_csv<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.split('.').next().unwrap_or(s))
|
||||
.unwrap_or_else(|| new_inst.name.as_str());
|
||||
fs_snapshot.add_file(
|
||||
parent.join(format!("{meta_stem}.meta.json")),
|
||||
parent.join(format!("{}.meta.json", new_inst.name)),
|
||||
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use memofs::{DirEntry, Vfs};
|
||||
|
||||
use crate::{
|
||||
snapshot::{InstanceContext, InstanceMetadata, InstanceSnapshot, InstigatingSource},
|
||||
syncback::{hash_instance, slugify_name, FsSnapshot, SyncbackReturn, SyncbackSnapshot},
|
||||
syncback::{hash_instance, FsSnapshot, SyncbackReturn, SyncbackSnapshot},
|
||||
};
|
||||
|
||||
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs};
|
||||
@@ -134,39 +134,12 @@ pub fn syncback_dir_no_meta<'sync>(
|
||||
let mut children = Vec::new();
|
||||
let mut removed_children = Vec::new();
|
||||
|
||||
// Build the old child map early so it can be used for deduplication below.
|
||||
let mut old_child_map = HashMap::new();
|
||||
if let Some(old_inst) = snapshot.old_inst() {
|
||||
for child in old_inst.children() {
|
||||
let inst = snapshot.get_old_instance(*child).unwrap();
|
||||
old_child_map.insert(inst.name(), inst);
|
||||
}
|
||||
}
|
||||
|
||||
// Enforce unique filesystem names. Uses actual on-disk names for existing
|
||||
// children and resolved names (with init-prefix) for new ones.
|
||||
let mut fs_child_names = HashSet::with_capacity(new_inst.children().len());
|
||||
// We have to enforce unique child names for the file system.
|
||||
let mut child_names = HashSet::with_capacity(new_inst.children().len());
|
||||
let mut duplicate_set = HashSet::new();
|
||||
for child_ref in new_inst.children() {
|
||||
let child = snapshot.get_new_instance(*child_ref).unwrap();
|
||||
let fs_name = old_child_map
|
||||
.get(child.name.as_str())
|
||||
.and_then(|old| old.metadata().relevant_paths.first())
|
||||
.and_then(|p| p.file_name())
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.to_lowercase())
|
||||
.unwrap_or_else(|| {
|
||||
let slug = slugify_name(&child.name);
|
||||
let slug_lower = slug.to_lowercase();
|
||||
// Mirror name_for_inst's init-prefix.
|
||||
if slug_lower == "init" {
|
||||
format!("_{slug_lower}")
|
||||
} else {
|
||||
slug_lower
|
||||
}
|
||||
});
|
||||
|
||||
if !fs_child_names.insert(fs_name) {
|
||||
if !child_names.insert(child.name.to_lowercase()) {
|
||||
duplicate_set.insert(child.name.as_str());
|
||||
}
|
||||
}
|
||||
@@ -180,7 +153,13 @@ pub fn syncback_dir_no_meta<'sync>(
|
||||
anyhow::bail!("Instance has more than 25 children with duplicate names");
|
||||
}
|
||||
|
||||
if snapshot.old_inst().is_some() {
|
||||
if let Some(old_inst) = snapshot.old_inst() {
|
||||
let mut old_child_map = HashMap::with_capacity(old_inst.children().len());
|
||||
for child in old_inst.children() {
|
||||
let inst = snapshot.get_old_instance(*child).unwrap();
|
||||
old_child_map.insert(inst.name(), inst);
|
||||
}
|
||||
|
||||
for new_child_ref in new_inst.children() {
|
||||
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
|
||||
if let Some(old_child) = old_child_map.remove(new_child.name.as_str()) {
|
||||
@@ -246,12 +225,6 @@ pub fn syncback_dir_no_meta<'sync>(
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::{
|
||||
snapshot::{InstanceMetadata, InstanceSnapshot},
|
||||
Project, RojoTree, SyncbackData, SyncbackSnapshot,
|
||||
};
|
||||
use memofs::{InMemoryFs, VfsSnapshot};
|
||||
|
||||
#[test]
|
||||
@@ -288,237 +261,4 @@ mod test {
|
||||
|
||||
insta::assert_yaml_snapshot!(instance_snapshot);
|
||||
}
|
||||
|
||||
fn make_project() -> Project {
|
||||
serde_json::from_str(r#"{"tree": {"$className": "DataModel"}}"#).unwrap()
|
||||
}
|
||||
|
||||
fn make_vfs() -> Vfs {
|
||||
let mut imfs = InMemoryFs::new();
|
||||
imfs.load_snapshot("/root", VfsSnapshot::empty_dir()).unwrap();
|
||||
Vfs::new(imfs)
|
||||
}
|
||||
|
||||
/// Two children whose Roblox names are identical when lowercased ("Alpha"
|
||||
/// and "alpha") but live at different filesystem paths because of the
|
||||
/// `name` property ("Beta/" and "Alpha/" respectively). The dedup check
|
||||
/// must use the actual filesystem paths, not the raw Roblox names, to
|
||||
/// avoid a false-positive duplicate error.
|
||||
#[test]
|
||||
fn syncback_no_false_duplicate_with_name_prop() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
// Old child A: Roblox name "Alpha", on disk at "/root/Beta"
|
||||
// (name property maps "Alpha" → "Beta" on the filesystem)
|
||||
let old_child_a = InstanceSnapshot::new()
|
||||
.name("Alpha")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root/Beta"))
|
||||
.relevant_paths(vec![PathBuf::from("/root/Beta")]),
|
||||
);
|
||||
// Old child B: Roblox name "alpha", on disk at "/root/Alpha"
|
||||
let old_child_b = InstanceSnapshot::new()
|
||||
.name("alpha")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root/Alpha"))
|
||||
.relevant_paths(vec![PathBuf::from("/root/Alpha")]),
|
||||
);
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
.name("Parent")
|
||||
.class_name("Folder")
|
||||
.children(vec![old_child_a, old_child_b])
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root"))
|
||||
.relevant_paths(vec![PathBuf::from("/root")]),
|
||||
);
|
||||
let old_tree = RojoTree::new(old_parent);
|
||||
|
||||
// New state: same two children in Roblox.
|
||||
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
|
||||
let new_parent = new_tree.insert(
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Alpha"));
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("alpha"));
|
||||
|
||||
let vfs = make_vfs();
|
||||
let project = make_project();
|
||||
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
|
||||
let snapshot = SyncbackSnapshot {
|
||||
data,
|
||||
old: Some(old_tree.get_root_id()),
|
||||
new: new_parent,
|
||||
path: PathBuf::from("/root"),
|
||||
middleware: None,
|
||||
};
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should not error when two children have the same lowercased Roblox \
|
||||
name but map to distinct filesystem paths: {result:?}",
|
||||
);
|
||||
}
|
||||
|
||||
/// Two completely new children with the same non-init name would produce
|
||||
/// the same filesystem entry and must be detected as a duplicate.
|
||||
#[test]
|
||||
fn syncback_detects_sibling_duplicate_names() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
.name("Parent")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root"))
|
||||
.relevant_paths(vec![PathBuf::from("/root")]),
|
||||
);
|
||||
let old_tree = RojoTree::new(old_parent);
|
||||
|
||||
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
|
||||
let new_parent = new_tree.insert(
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
// "Foo" is not a reserved name but two siblings named "Foo" still
|
||||
// collide on disk.
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
|
||||
|
||||
let vfs = make_vfs();
|
||||
let project = make_project();
|
||||
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
|
||||
let snapshot = SyncbackSnapshot {
|
||||
data,
|
||||
old: Some(old_tree.get_root_id()),
|
||||
new: new_parent,
|
||||
path: PathBuf::from("/root"),
|
||||
middleware: None,
|
||||
};
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"should error when two new children would produce the same filesystem name",
|
||||
);
|
||||
}
|
||||
|
||||
/// A new child named "Init" (as a ModuleScript) would naively become
|
||||
/// "Init.luau", which case-insensitively matches the parent's reserved
|
||||
/// "init.luau". Syncback must resolve this automatically by prefixing the
|
||||
/// filesystem name with '_' (→ "_Init.luau") rather than erroring.
|
||||
#[test]
|
||||
fn syncback_resolves_init_name_conflict() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
.name("Parent")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root"))
|
||||
.relevant_paths(vec![PathBuf::from("/root")]),
|
||||
);
|
||||
let old_tree = RojoTree::new(old_parent);
|
||||
|
||||
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
|
||||
let new_parent = new_tree.insert(
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
new_tree.insert(
|
||||
new_parent,
|
||||
InstanceBuilder::new("ModuleScript").with_name("Init"),
|
||||
);
|
||||
|
||||
let vfs = make_vfs();
|
||||
let project = make_project();
|
||||
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
|
||||
let snapshot = SyncbackSnapshot {
|
||||
data,
|
||||
old: Some(old_tree.get_root_id()),
|
||||
new: new_parent,
|
||||
path: PathBuf::from("/root"),
|
||||
middleware: None,
|
||||
};
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should resolve init-name conflict by prefixing '_', not error: {result:?}",
|
||||
);
|
||||
// The child should have been placed at "_Init.luau", not "Init.luau".
|
||||
let child_file_name = result
|
||||
.unwrap()
|
||||
.children
|
||||
.into_iter()
|
||||
.next()
|
||||
.and_then(|c| c.path.file_name().map(|n| n.to_string_lossy().into_owned()))
|
||||
.unwrap_or_default();
|
||||
assert!(
|
||||
child_file_name.starts_with('_'),
|
||||
"child filesystem name should start with '_' to avoid init collision, \
|
||||
got: {child_file_name}",
|
||||
);
|
||||
}
|
||||
|
||||
/// A child whose filesystem name is stored with a slugified prefix (e.g.
|
||||
/// "_Init") must NOT be blocked — only the bare "init" stem is reserved.
|
||||
#[test]
|
||||
fn syncback_allows_slugified_init_name() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
// Existing child: on disk as "_Init" (slugified from a name with an
|
||||
// illegal character), its stem is "_init" which is not reserved.
|
||||
let old_child = InstanceSnapshot::new()
|
||||
.name("Init")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root/_Init"))
|
||||
.relevant_paths(vec![PathBuf::from("/root/_Init")]),
|
||||
);
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
.name("Parent")
|
||||
.class_name("Folder")
|
||||
.children(vec![old_child])
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root"))
|
||||
.relevant_paths(vec![PathBuf::from("/root")]),
|
||||
);
|
||||
let old_tree = RojoTree::new(old_parent);
|
||||
|
||||
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
|
||||
let new_parent = new_tree.insert(
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Init"));
|
||||
|
||||
let vfs = make_vfs();
|
||||
let project = make_project();
|
||||
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
|
||||
let snapshot = SyncbackSnapshot {
|
||||
data,
|
||||
old: Some(old_tree.get_root_id()),
|
||||
new: new_parent,
|
||||
path: PathBuf::from("/root"),
|
||||
middleware: None,
|
||||
};
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should allow a child whose filesystem name is slugified away from \
|
||||
the reserved 'init' stem: {result:?}",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,14 +35,20 @@ pub fn snapshot_json_model(
|
||||
format!("File is not a valid JSON model: {}", path.display())
|
||||
})?;
|
||||
|
||||
// If the JSON has a name property, preserve it in metadata for syncback
|
||||
let specified_name = instance.name.clone();
|
||||
if let Some(top_level_name) = &instance.name {
|
||||
let new_name = format!("{}.model.json", top_level_name);
|
||||
|
||||
// Use the name from JSON if present, otherwise fall back to filename-derived name
|
||||
if instance.name.is_none() {
|
||||
instance.name = Some(name.to_owned());
|
||||
log::warn!(
|
||||
"Model at path {} had a top-level Name field. \
|
||||
This field has been ignored since Rojo 6.0.\n\
|
||||
Consider removing this field and renaming the file to {}.",
|
||||
new_name,
|
||||
path.display()
|
||||
);
|
||||
}
|
||||
|
||||
instance.name = Some(name.to_owned());
|
||||
|
||||
let id = instance.id.take().map(RojoRef::new);
|
||||
let schema = instance.schema.take();
|
||||
|
||||
@@ -56,8 +62,7 @@ pub fn snapshot_json_model(
|
||||
.relevant_paths(vec![vfs.canonicalize(path)?])
|
||||
.context(context)
|
||||
.specified_id(id)
|
||||
.schema(schema)
|
||||
.specified_name(specified_name);
|
||||
.schema(schema);
|
||||
|
||||
Ok(Some(snapshot))
|
||||
}
|
||||
@@ -76,7 +81,6 @@ pub fn syncback_json_model<'sync>(
|
||||
// schemas will ever exist in one project for it to matter, but it
|
||||
// could have a performance cost.
|
||||
model.schema = old_inst.metadata().schema.clone();
|
||||
model.name = old_inst.metadata().specified_name.clone();
|
||||
}
|
||||
|
||||
Ok(SyncbackReturn {
|
||||
|
||||
@@ -158,13 +158,8 @@ pub fn syncback_lua<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent_location = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.split('.').next().unwrap_or(s))
|
||||
.unwrap_or_else(|| snapshot.new_inst().name.as_str());
|
||||
fs_snapshot.add_file(
|
||||
parent_location.join(format!("{meta_stem}.meta.json")),
|
||||
parent_location.join(format!("{}.meta.json", new_inst.name)),
|
||||
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -10,10 +10,7 @@ use rbx_dom_weak::{
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
json,
|
||||
resolution::UnresolvedValue,
|
||||
snapshot::InstanceSnapshot,
|
||||
syncback::{validate_file_name, SyncbackSnapshot},
|
||||
json, resolution::UnresolvedValue, snapshot::InstanceSnapshot, syncback::SyncbackSnapshot,
|
||||
RojoRef,
|
||||
};
|
||||
|
||||
@@ -39,9 +36,6 @@ pub struct AdjacentMetadata {
|
||||
#[serde(default, skip_serializing_if = "IndexMap::is_empty")]
|
||||
pub attributes: IndexMap<String, UnresolvedValue>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
|
||||
#[serde(skip)]
|
||||
pub path: PathBuf,
|
||||
}
|
||||
@@ -150,31 +144,6 @@ impl AdjacentMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
let name = snapshot
|
||||
.old_inst()
|
||||
.and_then(|inst| inst.metadata().specified_name.clone())
|
||||
.or_else(|| {
|
||||
// Write name when the filesystem path doesn't match the
|
||||
// instance name (invalid chars or init-prefix).
|
||||
if snapshot.old_inst().is_none() {
|
||||
let instance_name = &snapshot.new_inst().name;
|
||||
let fs_stem = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.split('.').next().unwrap_or(s))
|
||||
.unwrap_or("");
|
||||
if validate_file_name(instance_name).is_err()
|
||||
|| fs_stem != instance_name.as_str()
|
||||
{
|
||||
Some(instance_name.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Some(Self {
|
||||
ignore_unknown_instances: if ignore_unknown_instances {
|
||||
Some(true)
|
||||
@@ -186,7 +155,6 @@ impl AdjacentMetadata {
|
||||
path,
|
||||
id: None,
|
||||
schema,
|
||||
name,
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -245,26 +213,11 @@ impl AdjacentMetadata {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_name(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
|
||||
if self.name.is_some() && snapshot.metadata.specified_name.is_some() {
|
||||
anyhow::bail!(
|
||||
"cannot specify a name using {} (instance has a name from somewhere else)",
|
||||
self.path.display()
|
||||
);
|
||||
}
|
||||
if let Some(name) = &self.name {
|
||||
snapshot.name = name.clone().into();
|
||||
}
|
||||
snapshot.metadata.specified_name = self.name.take();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn apply_all(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
|
||||
self.apply_ignore_unknown_instances(snapshot);
|
||||
self.apply_properties(snapshot)?;
|
||||
self.apply_id(snapshot)?;
|
||||
self.apply_schema(snapshot)?;
|
||||
self.apply_name(snapshot)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -273,13 +226,11 @@ impl AdjacentMetadata {
|
||||
///
|
||||
/// - The number of properties and attributes is 0
|
||||
/// - `ignore_unknown_instances` is None
|
||||
/// - `name` is None
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.attributes.is_empty()
|
||||
&& self.properties.is_empty()
|
||||
&& self.ignore_unknown_instances.is_none()
|
||||
&& self.name.is_none()
|
||||
}
|
||||
|
||||
// TODO: Add method to allow selectively applying parts of metadata and
|
||||
@@ -311,9 +262,6 @@ pub struct DirectoryMetadata {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub class_name: Option<Ustr>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
|
||||
#[serde(skip)]
|
||||
pub path: PathBuf,
|
||||
}
|
||||
@@ -424,30 +372,6 @@ impl DirectoryMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
let name = snapshot
|
||||
.old_inst()
|
||||
.and_then(|inst| inst.metadata().specified_name.clone())
|
||||
.or_else(|| {
|
||||
// Write name when the directory name doesn't match the
|
||||
// instance name (invalid chars or init-prefix).
|
||||
if snapshot.old_inst().is_none() {
|
||||
let instance_name = &snapshot.new_inst().name;
|
||||
let fs_name = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
if validate_file_name(instance_name).is_err()
|
||||
|| fs_name != instance_name.as_str()
|
||||
{
|
||||
Some(instance_name.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Some(Self {
|
||||
ignore_unknown_instances: if ignore_unknown_instances {
|
||||
Some(true)
|
||||
@@ -460,7 +384,6 @@ impl DirectoryMetadata {
|
||||
path,
|
||||
id: None,
|
||||
schema,
|
||||
name,
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -470,7 +393,6 @@ impl DirectoryMetadata {
|
||||
self.apply_properties(snapshot)?;
|
||||
self.apply_id(snapshot)?;
|
||||
self.apply_schema(snapshot)?;
|
||||
self.apply_name(snapshot)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -542,33 +464,17 @@ impl DirectoryMetadata {
|
||||
snapshot.metadata.schema = self.schema.take();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_name(&mut self, snapshot: &mut InstanceSnapshot) -> anyhow::Result<()> {
|
||||
if self.name.is_some() && snapshot.metadata.specified_name.is_some() {
|
||||
anyhow::bail!(
|
||||
"cannot specify a name using {} (instance has a name from somewhere else)",
|
||||
self.path.display()
|
||||
);
|
||||
}
|
||||
if let Some(name) = &self.name {
|
||||
snapshot.name = name.clone().into();
|
||||
}
|
||||
snapshot.metadata.specified_name = self.name.take();
|
||||
Ok(())
|
||||
}
|
||||
/// Returns whether the metadata is 'empty', meaning it doesn't have anything
|
||||
/// worth persisting in it. Specifically:
|
||||
///
|
||||
/// - The number of properties and attributes is 0
|
||||
/// - `ignore_unknown_instances` is None
|
||||
/// - `class_name` is either None or not Some("Folder")
|
||||
/// - `name` is None
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.attributes.is_empty()
|
||||
&& self.properties.is_empty()
|
||||
&& self.ignore_unknown_instances.is_none()
|
||||
&& self.name.is_none()
|
||||
&& if let Some(class) = &self.class_name {
|
||||
class == "Folder"
|
||||
} else {
|
||||
|
||||
@@ -61,10 +61,6 @@ pub use self::{
|
||||
/// This will inspect the path and find the appropriate middleware for it,
|
||||
/// taking user-written rules into account. Then, it will attempt to convert
|
||||
/// the path into an InstanceSnapshot using that middleware.
|
||||
///
|
||||
/// If a git filter is active in the context and the path is not acknowledged
|
||||
/// (i.e., the file hasn't changed since the base git reference), this function
|
||||
/// returns `Ok(None)` to skip syncing that file.
|
||||
#[profiling::function]
|
||||
pub fn snapshot_from_vfs(
|
||||
context: &InstanceContext,
|
||||
@@ -76,16 +72,6 @@ pub fn snapshot_from_vfs(
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
// Check if this path is acknowledged by the git filter.
|
||||
// If not, skip this path entirely.
|
||||
if !context.is_path_acknowledged(path) {
|
||||
log::trace!(
|
||||
"Skipping path {} (not acknowledged by git filter)",
|
||||
path.display()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if meta.is_dir() {
|
||||
let (middleware, dir_name, init_path) = get_dir_middleware(vfs, path)?;
|
||||
// TODO: Support user defined init paths
|
||||
@@ -227,10 +213,6 @@ pub enum Middleware {
|
||||
impl Middleware {
|
||||
/// Creates a snapshot for the given path from the Middleware with
|
||||
/// the provided name.
|
||||
///
|
||||
/// When a git filter is active in the context, `ignore_unknown_instances`
|
||||
/// will be set to `true` on all generated snapshots to preserve descendants
|
||||
/// in Studio that are not tracked by Rojo.
|
||||
fn snapshot(
|
||||
&self,
|
||||
context: &InstanceContext,
|
||||
@@ -280,14 +262,6 @@ impl Middleware {
|
||||
};
|
||||
if let Ok(Some(ref mut snapshot)) = output {
|
||||
snapshot.metadata.middleware = Some(*self);
|
||||
|
||||
// When git filter is active, force ignore_unknown_instances to true
|
||||
// so that we don't delete children in Studio that aren't tracked.
|
||||
if context.has_git_filter() {
|
||||
snapshot.metadata.ignore_unknown_instances = true;
|
||||
// Also apply this recursively to all children
|
||||
set_ignore_unknown_instances_recursive(&mut snapshot.children);
|
||||
}
|
||||
}
|
||||
output
|
||||
}
|
||||
@@ -391,16 +365,6 @@ impl Middleware {
|
||||
}
|
||||
}
|
||||
|
||||
/// Recursively sets `ignore_unknown_instances` to `true` on all children.
|
||||
/// This is used when git filter is active to ensure we don't delete
|
||||
/// children in Studio that aren't tracked by Rojo.
|
||||
fn set_ignore_unknown_instances_recursive(children: &mut [InstanceSnapshot]) {
|
||||
for child in children {
|
||||
child.metadata.ignore_unknown_instances = true;
|
||||
set_ignore_unknown_instances_recursive(&mut child.children);
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper for easily defining a SyncRule. Arguments are passed literally
|
||||
/// to this macro in the order `include`, `middleware`, `suffix`,
|
||||
/// and `exclude`. Both `suffix` and `exclude` are optional.
|
||||
|
||||
@@ -83,19 +83,6 @@ pub fn snapshot_project(
|
||||
// file being updated.
|
||||
snapshot.metadata.relevant_paths.push(path.to_path_buf());
|
||||
|
||||
// When git filter is active, also register the project folder as a
|
||||
// relevant path. This serves as a catch-all so that file changes
|
||||
// not under any specific $path node can still walk up the directory
|
||||
// tree and trigger a re-snapshot of the entire project.
|
||||
if context.has_git_filter() {
|
||||
if let Some(folder) = path.parent() {
|
||||
let normalized = vfs
|
||||
.canonicalize(folder)
|
||||
.unwrap_or_else(|_| folder.to_path_buf());
|
||||
snapshot.metadata.relevant_paths.push(normalized);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(snapshot))
|
||||
}
|
||||
None => Ok(None),
|
||||
@@ -150,26 +137,6 @@ pub fn snapshot_project_node(
|
||||
// Take the snapshot's metadata as-is, which will be mutated later
|
||||
// on.
|
||||
metadata = snapshot.metadata;
|
||||
} else if context.has_git_filter() {
|
||||
// When the git filter is active and the $path was filtered out
|
||||
// (no acknowledged files yet), we still need to register the path
|
||||
// in relevant_paths. This allows the change processor to map file
|
||||
// changes in this directory back to this project node instance,
|
||||
// triggering a re-snapshot that will pick up newly modified files.
|
||||
let normalized = vfs
|
||||
.canonicalize(full_path.as_ref())
|
||||
.unwrap_or_else(|_| full_path.to_path_buf());
|
||||
metadata.relevant_paths.push(normalized);
|
||||
|
||||
// The VFS only sets up file watches via read() and read_dir(),
|
||||
// not via metadata(). Since the git filter caused snapshot_from_vfs
|
||||
// to return early (before read_dir was called), the VFS is not
|
||||
// watching this path. We must read the directory here to ensure
|
||||
// the VFS sets up a recursive watch, otherwise file change events
|
||||
// will never fire and live sync won't detect modifications.
|
||||
if full_path.is_dir() {
|
||||
let _ = vfs.read_dir(&full_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,17 +192,6 @@ pub fn snapshot_project_node(
|
||||
}
|
||||
|
||||
(_, None, _, Some(PathNode::Required(path))) => {
|
||||
// If git filter is active and the path was filtered out, treat it
|
||||
// as if the path was optional and skip this node.
|
||||
if context.has_git_filter() {
|
||||
log::trace!(
|
||||
"Skipping project node '{}' because its path was filtered by git filter: {}",
|
||||
instance_name,
|
||||
path.display()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
anyhow::bail!(
|
||||
"Rojo project referred to a file using $path that could not be turned into a Roblox Instance by Rojo.\n\
|
||||
Check that the file exists and is a file type known by Rojo.\n\
|
||||
@@ -326,12 +282,7 @@ pub fn snapshot_project_node(
|
||||
// If the user didn't specify it AND $path was not specified (meaning
|
||||
// there's no existing value we'd be stepping on from a project file or meta
|
||||
// file), set it to true.
|
||||
//
|
||||
// When git filter is active, always set to true to preserve descendants
|
||||
// in Studio that are not tracked by Rojo.
|
||||
if context.has_git_filter() {
|
||||
metadata.ignore_unknown_instances = true;
|
||||
} else if let Some(ignore) = node.ignore_unknown_instances {
|
||||
if let Some(ignore) = node.ignore_unknown_instances {
|
||||
metadata.ignore_unknown_instances = ignore;
|
||||
} else if node.path.is_none() {
|
||||
// TODO: Introduce a strict mode where $ignoreUnknownInstances is never
|
||||
|
||||
@@ -58,13 +58,8 @@ pub fn syncback_txt<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.split('.').next().unwrap_or(s))
|
||||
.unwrap_or_else(|| new_inst.name.as_str());
|
||||
fs_snapshot.add_file(
|
||||
parent.join(format!("{meta_stem}.meta.json")),
|
||||
parent.join(format!("{}.meta.json", new_inst.name)),
|
||||
serde_json::to_vec_pretty(&meta).context("could not serialize metadata")?,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -8,11 +8,11 @@ use rbx_dom_weak::Instance;
|
||||
|
||||
use crate::{snapshot::InstanceWithMeta, snapshot_middleware::Middleware};
|
||||
|
||||
pub fn name_for_inst<'a>(
|
||||
pub fn name_for_inst<'old>(
|
||||
middleware: Middleware,
|
||||
new_inst: &'a Instance,
|
||||
old_inst: Option<InstanceWithMeta<'a>>,
|
||||
) -> anyhow::Result<Cow<'a, str>> {
|
||||
new_inst: &Instance,
|
||||
old_inst: Option<InstanceWithMeta<'old>>,
|
||||
) -> anyhow::Result<Cow<'old, str>> {
|
||||
if let Some(old_inst) = old_inst {
|
||||
if let Some(source) = old_inst.metadata().relevant_paths.first() {
|
||||
source
|
||||
@@ -35,34 +35,14 @@ pub fn name_for_inst<'a>(
|
||||
| Middleware::CsvDir
|
||||
| Middleware::ServerScriptDir
|
||||
| Middleware::ClientScriptDir
|
||||
| Middleware::ModuleScriptDir => {
|
||||
let name = if validate_file_name(&new_inst.name).is_err() {
|
||||
Cow::Owned(slugify_name(&new_inst.name))
|
||||
} else {
|
||||
Cow::Borrowed(new_inst.name.as_str())
|
||||
};
|
||||
// Prefix "init" to avoid colliding with reserved init files.
|
||||
if name.to_lowercase() == "init" {
|
||||
Cow::Owned(format!("_{name}"))
|
||||
} else {
|
||||
name
|
||||
}
|
||||
}
|
||||
| Middleware::ModuleScriptDir => Cow::Owned(new_inst.name.clone()),
|
||||
_ => {
|
||||
let extension = extension_for_middleware(middleware);
|
||||
let slugified;
|
||||
let stem: &str = if validate_file_name(&new_inst.name).is_err() {
|
||||
slugified = slugify_name(&new_inst.name);
|
||||
&slugified
|
||||
} else {
|
||||
&new_inst.name
|
||||
};
|
||||
// Prefix "init" stems to avoid colliding with reserved init files.
|
||||
if stem.to_lowercase() == "init" {
|
||||
Cow::Owned(format!("_{stem}.{extension}"))
|
||||
} else {
|
||||
Cow::Owned(format!("{stem}.{extension}"))
|
||||
}
|
||||
let name = &new_inst.name;
|
||||
validate_file_name(name).with_context(|| {
|
||||
format!("name '{name}' is not legal to write to the file system")
|
||||
})?;
|
||||
Cow::Owned(format!("{name}.{extension}"))
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -114,39 +94,6 @@ const INVALID_WINDOWS_NAMES: [&str; 22] = [
|
||||
/// in a file's name.
|
||||
const FORBIDDEN_CHARS: [char; 9] = ['<', '>', ':', '"', '/', '|', '?', '*', '\\'];
|
||||
|
||||
/// Slugifies a name by replacing forbidden characters with underscores
|
||||
/// and ensuring the result is a valid file name
|
||||
pub fn slugify_name(name: &str) -> String {
|
||||
let mut result = String::with_capacity(name.len());
|
||||
|
||||
for ch in name.chars() {
|
||||
if FORBIDDEN_CHARS.contains(&ch) {
|
||||
result.push('_');
|
||||
} else {
|
||||
result.push(ch);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle Windows reserved names by appending an underscore
|
||||
let result_lower = result.to_lowercase();
|
||||
for forbidden in INVALID_WINDOWS_NAMES {
|
||||
if result_lower == forbidden.to_lowercase() {
|
||||
result.push('_');
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while result.ends_with(' ') || result.ends_with('.') {
|
||||
result.pop();
|
||||
}
|
||||
|
||||
if result.is_empty() || result.chars().all(|c| c == '_') {
|
||||
result = "instance".to_string();
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Validates a provided file name to ensure it's allowed on the file system. An
|
||||
/// error is returned if the name isn't allowed, indicating why.
|
||||
/// This takes into account rules for Windows, MacOS, and Linux.
|
||||
|
||||
@@ -28,7 +28,7 @@ use crate::{
|
||||
Project,
|
||||
};
|
||||
|
||||
pub use file_names::{extension_for_middleware, name_for_inst, slugify_name, validate_file_name};
|
||||
pub use file_names::{extension_for_middleware, name_for_inst, validate_file_name};
|
||||
pub use fs_snapshot::FsSnapshot;
|
||||
pub use hash::*;
|
||||
pub use property_filter::{filter_properties, filter_properties_preallocated};
|
||||
@@ -52,7 +52,6 @@ pub fn syncback_loop(
|
||||
old_tree: &mut RojoTree,
|
||||
mut new_tree: WeakDom,
|
||||
project: &Project,
|
||||
force_json: bool,
|
||||
) -> anyhow::Result<FsSnapshot> {
|
||||
let ignore_patterns = project
|
||||
.syncback_rules
|
||||
@@ -154,7 +153,6 @@ pub fn syncback_loop(
|
||||
old_tree,
|
||||
new_tree: &new_tree,
|
||||
project,
|
||||
force_json,
|
||||
};
|
||||
|
||||
let mut snapshots = vec![SyncbackSnapshot {
|
||||
@@ -199,7 +197,7 @@ pub fn syncback_loop(
|
||||
}
|
||||
}
|
||||
|
||||
let middleware = get_best_middleware(&snapshot, force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
|
||||
log::trace!(
|
||||
"Middleware for {inst_path} is {:?} (path is {})",
|
||||
@@ -215,14 +213,10 @@ pub fn syncback_loop(
|
||||
let syncback = match middleware.syncback(&snapshot) {
|
||||
Ok(syncback) => syncback,
|
||||
Err(err) if middleware == Middleware::Dir => {
|
||||
let new_middleware = if force_json {
|
||||
Middleware::JsonModel
|
||||
} else {
|
||||
match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
let new_middleware = match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
};
|
||||
let file_name = snapshot
|
||||
.path
|
||||
@@ -301,7 +295,7 @@ pub struct SyncbackReturn<'sync> {
|
||||
pub removed_children: Vec<InstanceWithMeta<'sync>>,
|
||||
}
|
||||
|
||||
pub fn get_best_middleware(snapshot: &SyncbackSnapshot, force_json: bool) -> Middleware {
|
||||
pub fn get_best_middleware(snapshot: &SyncbackSnapshot) -> Middleware {
|
||||
// At some point, we're better off using an O(1) method for checking
|
||||
// equality for classes like this.
|
||||
static JSON_MODEL_CLASSES: OnceLock<HashSet<&str>> = OnceLock::new();
|
||||
@@ -373,18 +367,10 @@ pub fn get_best_middleware(snapshot: &SyncbackSnapshot, force_json: bool) -> Mid
|
||||
}
|
||||
|
||||
if middleware == Middleware::Rbxm {
|
||||
middleware = if force_json {
|
||||
if !inst.children().is_empty() {
|
||||
Middleware::Dir
|
||||
} else {
|
||||
Middleware::JsonModel
|
||||
}
|
||||
} else {
|
||||
match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
middleware = match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ pub struct SyncbackData<'sync> {
|
||||
pub(super) old_tree: &'sync RojoTree,
|
||||
pub(super) new_tree: &'sync WeakDom,
|
||||
pub(super) project: &'sync Project,
|
||||
pub(super) force_json: bool,
|
||||
}
|
||||
|
||||
pub struct SyncbackSnapshot<'sync> {
|
||||
@@ -44,7 +43,7 @@ impl<'sync> SyncbackSnapshot<'sync> {
|
||||
path: PathBuf::new(),
|
||||
middleware: None,
|
||||
};
|
||||
let middleware = get_best_middleware(&snapshot, self.data.force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
let name = name_for_inst(middleware, snapshot.new_inst(), snapshot.old_inst())?;
|
||||
snapshot.path = self.path.join(name.as_ref());
|
||||
|
||||
@@ -70,7 +69,7 @@ impl<'sync> SyncbackSnapshot<'sync> {
|
||||
path: PathBuf::new(),
|
||||
middleware: None,
|
||||
};
|
||||
let middleware = get_best_middleware(&snapshot, self.data.force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
let name = name_for_inst(middleware, snapshot.new_inst(), snapshot.old_inst())?;
|
||||
snapshot.path = base_path.join(name.as_ref());
|
||||
|
||||
@@ -238,24 +237,6 @@ pub fn inst_path(dom: &WeakDom, referent: Ref) -> String {
|
||||
path.join("/")
|
||||
}
|
||||
|
||||
impl<'sync> SyncbackData<'sync> {
|
||||
/// Constructs a `SyncbackData` for use in unit tests.
|
||||
#[cfg(test)]
|
||||
pub fn for_test(
|
||||
vfs: &'sync Vfs,
|
||||
old_tree: &'sync RojoTree,
|
||||
new_tree: &'sync WeakDom,
|
||||
project: &'sync Project,
|
||||
) -> Self {
|
||||
Self {
|
||||
vfs,
|
||||
old_tree,
|
||||
new_tree,
|
||||
project,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
@@ -41,6 +41,7 @@ gen_build_tests! {
|
||||
issue_546,
|
||||
json_as_lua,
|
||||
json_model_in_folder,
|
||||
json_model_legacy_name,
|
||||
module_in_folder,
|
||||
module_init,
|
||||
nested_runcontext,
|
||||
@@ -54,8 +55,6 @@ gen_build_tests! {
|
||||
script_meta_disabled,
|
||||
server_in_folder,
|
||||
server_init,
|
||||
slugified_name_roundtrip,
|
||||
model_json_name_input,
|
||||
txt,
|
||||
txt_in_folder,
|
||||
unresolved_values,
|
||||
|
||||
@@ -86,9 +86,4 @@ syncback_tests! {
|
||||
sync_rules => ["src/module.modulescript", "src/text.text"],
|
||||
// Ensures that the `syncUnscriptable` setting works
|
||||
unscriptable_properties => ["default.project.json"],
|
||||
// Ensures that instances with names containing illegal characters get slugified filenames
|
||||
// and preserve their original names in meta.json without forcing directories for leaf scripts
|
||||
slugified_name => ["src/_Script.meta.json", "src/_Script.server.luau", "src/_Folder/init.meta.json"],
|
||||
// Ensures that .model.json files preserve the name property
|
||||
model_json_name => ["src/foo.model.json"],
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user