forked from rojo-rbx/rojo
Compare commits
3 commits: 667683d3b3...feature/in

| Author | SHA1 | Date |
|---|---|---|
| | 110b9f0df3 | |
| | 917d17a738 | |
| | 14bbdaf560 | |

.gitmodules (vendored): 3 changed lines
@@ -16,6 +16,3 @@
[submodule "plugin/Packages/Highlighter"]
	path = plugin/Packages/Highlighter
	url = https://github.com/boatbomber/highlighter.git
[submodule "plugin/Packages/msgpack-luau"]
	path = plugin/Packages/msgpack-luau
	url = https://github.com/cipharius/msgpack-luau/
CHANGELOG.md: 11 changed lines

@@ -30,26 +30,15 @@ Making a new release? Simply add the new header with the version and date undern
-->

## Unreleased

* `inf` and `nan` values in properties are now synced ([#1176])
* Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179])
* Implemented support for the "name" property in meta/model JSON files. ([#1187])
* Fixed instance replacement fallback failing when too many instances needed to be replaced. ([#1192])
* Added actors and bindable/remote event/function variants to be synced back as JSON files. ([#1199])
* Fixed a bug where MacOS paths weren't being handled correctly. ([#1201])
* Fixed a bug where the notification timeout thread would fail to cancel on unmount ([#1211])
* Added a "Forget" option to the sync reminder notification to avoid being reminded for that place in the future ([#1215])
* Improves relative path calculation for sourcemap generation to avoid issues with Windows UNC paths. ([#1217])

[#1176]: https://github.com/rojo-rbx/rojo/pull/1176
[#1179]: https://github.com/rojo-rbx/rojo/pull/1179
[#1187]: https://github.com/rojo-rbx/rojo/pull/1187
[#1192]: https://github.com/rojo-rbx/rojo/pull/1192
[#1199]: https://github.com/rojo-rbx/rojo/pull/1199
[#1201]: https://github.com/rojo-rbx/rojo/pull/1201
[#1211]: https://github.com/rojo-rbx/rojo/pull/1211
[#1215]: https://github.com/rojo-rbx/rojo/pull/1215
[#1217]: https://github.com/rojo-rbx/rojo/pull/1217

## [7.7.0-rc.1] (November 27th, 2025)
@@ -14,7 +14,6 @@ Code contributions are welcome for features and bugs that have been reported in
You'll want these tools to work on Rojo:

* Latest stable Rust compiler
  * Rustfmt and Clippy are used for code formatting and linting.
* Latest stable [Rojo](https://github.com/rojo-rbx/rojo)
* [Rokit](https://github.com/rojo-rbx/rokit)
* [Luau Language Server](https://github.com/JohnnyMorganz/luau-lsp) (Only needed if working on the Studio plugin.)

Cargo.lock (generated): 19 changed lines
@@ -1520,12 +1520,6 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"

[[package]]
name = "pathdiff"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3"

[[package]]
name = "percent-encoding"
version = "2.3.2"
@@ -2074,7 +2068,6 @@ dependencies = [
 "num_cpus",
 "opener",
 "paste",
 "pathdiff",
 "pretty_assertions",
 "profiling",
 "rayon",
@@ -2085,12 +2078,10 @@ dependencies = [
 "rbx_xml",
 "reqwest",
 "ritz",
 "rmp-serde",
 "roblox_install",
 "rojo-insta-ext",
 "semver",
 "serde",
 "serde_bytes",
 "serde_json",
 "serde_yaml",
 "strum",
@@ -2231,16 +2222,6 @@ dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_bytes"
version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
dependencies = [
 "serde",
 "serde_core",
]

[[package]]
name = "serde_cbor"
version = "0.11.2"
@@ -100,13 +100,10 @@ clap = { version = "3.2.25", features = ["derive"] }
profiling = "1.0.15"
yaml-rust2 = "0.10.3"
data-encoding = "2.8.0"
pathdiff = "0.2.3"

blake3 = "1.5.0"
float-cmp = "0.9.0"
indexmap = { version = "2.10.0", features = ["serde"] }
rmp-serde = "1.3.0"
serde_bytes = "0.11.19"

[target.'cfg(windows)'.dependencies]
winreg = "0.10.1"
@@ -125,7 +122,7 @@ semver = "1.0.22"
rojo-insta-ext = { path = "crates/rojo-insta-ext" }

criterion = "0.3.6"
insta = { version = "1.36.1", features = ["redactions", "yaml", "json"] }
insta = { version = "1.36.1", features = ["redactions", "yaml"] }
paste = "1.0.14"
pretty_assertions = "1.4.0"
serde_yaml = "0.8.26"
build.rs: 5 changed lines

@@ -30,11 +30,6 @@ fn snapshot_from_fs_path(path: &Path) -> io::Result<VfsSnapshot> {
            continue;
        }

        // Ignore images in msgpack-luau because they aren't UTF-8 encoded.
        if file_name.ends_with(".png") {
            continue;
        }

        let child_snapshot = snapshot_from_fs_path(&entry.path())?;
        children.push((file_name, child_snapshot));
    }
Submodule plugin/Packages/msgpack-luau deleted from 40f67fc0f6
@@ -1,7 +1,5 @@
|
||||
local HttpService = game:GetService("HttpService")
|
||||
|
||||
local msgpack = require(script.Parent.Parent.msgpack)
|
||||
|
||||
local stringTemplate = [[
|
||||
Http.Response {
|
||||
code: %d
|
||||
@@ -33,8 +31,4 @@ function Response:json()
|
||||
return HttpService:JSONDecode(self.body)
|
||||
end
|
||||
|
||||
function Response:msgpack()
|
||||
return msgpack.decode(self.body)
|
||||
end
|
||||
|
||||
return Response
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
local HttpService = game:GetService("HttpService")
|
||||
|
||||
local Log = require(script.Parent.Log)
|
||||
local msgpack = require(script.Parent.msgpack)
|
||||
local Promise = require(script.Parent.Promise)
|
||||
local Log = require(script.Parent.Log)
|
||||
|
||||
local HttpError = require(script.Error)
|
||||
local HttpResponse = require(script.Response)
|
||||
@@ -69,12 +68,4 @@ function Http.jsonDecode(source)
|
||||
return HttpService:JSONDecode(source)
|
||||
end
|
||||
|
||||
function Http.msgpackEncode(object)
|
||||
return msgpack.encode(object)
|
||||
end
|
||||
|
||||
function Http.msgpackDecode(source)
|
||||
return msgpack.decode(source)
|
||||
end
|
||||
|
||||
return Http
|
||||
|
||||
@@ -145,7 +145,7 @@ function ApiContext:connect()
|
||||
|
||||
return Http.get(url)
|
||||
:andThen(rejectFailedRequests)
|
||||
:andThen(Http.Response.msgpack)
|
||||
:andThen(Http.Response.json)
|
||||
:andThen(rejectWrongProtocolVersion)
|
||||
:andThen(function(body)
|
||||
assert(validateApiInfo(body))
|
||||
@@ -163,7 +163,7 @@ end
|
||||
function ApiContext:read(ids)
|
||||
local url = ("%s/api/read/%s"):format(self.__baseUrl, table.concat(ids, ","))
|
||||
|
||||
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.msgpack):andThen(function(body)
|
||||
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body)
|
||||
if body.sessionId ~= self.__sessionId then
|
||||
return Promise.reject("Server changed ID")
|
||||
end
|
||||
@@ -191,9 +191,9 @@ function ApiContext:write(patch)
|
||||
table.insert(updated, fixedUpdate)
|
||||
end
|
||||
|
||||
-- Only add the 'added' field if the table is non-empty, or else the msgpack
|
||||
-- encode implementation will turn the table into an array instead of a map,
|
||||
-- causing API validation to fail.
|
||||
-- Only add the 'added' field if the table is non-empty, or else Roblox's
|
||||
-- JSON implementation will turn the table into an array instead of an
|
||||
-- object, causing API validation to fail.
|
||||
local added
|
||||
if next(patch.added) ~= nil then
|
||||
added = patch.added
|
||||
@@ -206,16 +206,13 @@ function ApiContext:write(patch)
|
||||
added = added,
|
||||
}
|
||||
|
||||
body = Http.msgpackEncode(body)
|
||||
body = Http.jsonEncode(body)
|
||||
|
||||
return Http.post(url, body)
|
||||
:andThen(rejectFailedRequests)
|
||||
:andThen(Http.Response.msgpack)
|
||||
:andThen(function(responseBody)
|
||||
Log.info("Write response: {:?}", responseBody)
|
||||
return Http.post(url, body):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(responseBody)
|
||||
Log.info("Write response: {:?}", responseBody)
|
||||
|
||||
return responseBody
|
||||
end)
|
||||
return responseBody
|
||||
end)
|
||||
end
|
||||
|
||||
function ApiContext:connectWebSocket(packetHandlers)
|
||||
@@ -237,7 +234,7 @@ function ApiContext:connectWebSocket(packetHandlers)
|
||||
local closed, errored, received
|
||||
|
||||
received = self.__wsClient.MessageReceived:Connect(function(msg)
|
||||
local data = Http.msgpackDecode(msg)
|
||||
local data = Http.jsonDecode(msg)
|
||||
if data.sessionId ~= self.__sessionId then
|
||||
Log.warn("Received message with wrong session ID; ignoring")
|
||||
return
|
||||
@@ -283,7 +280,7 @@ end
|
||||
function ApiContext:open(id)
|
||||
local url = ("%s/api/open/%s"):format(self.__baseUrl, id)
|
||||
|
||||
return Http.post(url, ""):andThen(rejectFailedRequests):andThen(Http.Response.msgpack):andThen(function(body)
|
||||
return Http.post(url, ""):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body)
|
||||
if body.sessionId ~= self.__sessionId then
|
||||
return Promise.reject("Server changed ID")
|
||||
end
|
||||
@@ -294,11 +291,11 @@ end
|
||||
|
||||
function ApiContext:serialize(ids: { string })
|
||||
local url = ("%s/api/serialize"):format(self.__baseUrl)
|
||||
local request_body = Http.msgpackEncode({ sessionId = self.__sessionId, ids = ids })
|
||||
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
|
||||
|
||||
return Http.post(url, request_body)
|
||||
:andThen(rejectFailedRequests)
|
||||
:andThen(Http.Response.msgpack)
|
||||
:andThen(Http.Response.json)
|
||||
:andThen(function(response_body)
|
||||
if response_body.sessionId ~= self.__sessionId then
|
||||
return Promise.reject("Server changed ID")
|
||||
@@ -312,11 +309,11 @@ end
|
||||
|
||||
function ApiContext:refPatch(ids: { string })
|
||||
local url = ("%s/api/ref-patch"):format(self.__baseUrl)
|
||||
local request_body = Http.msgpackEncode({ sessionId = self.__sessionId, ids = ids })
|
||||
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
|
||||
|
||||
return Http.post(url, request_body)
|
||||
:andThen(rejectFailedRequests)
|
||||
:andThen(Http.Response.msgpack)
|
||||
:andThen(Http.Response.json)
|
||||
:andThen(function(response_body)
|
||||
if response_body.sessionId ~= self.__sessionId then
|
||||
return Promise.reject("Server changed ID")
|
||||
|
||||
@@ -19,15 +19,9 @@ local FullscreenNotification = Roact.Component:extend("FullscreenNotification")
|
||||
function FullscreenNotification:init()
|
||||
self.transparency, self.setTransparency = Roact.createBinding(0)
|
||||
self.lifetime = self.props.timeout
|
||||
self.dismissed = false
|
||||
end
|
||||
|
||||
function FullscreenNotification:dismiss()
|
||||
if self.dismissed then
|
||||
return
|
||||
end
|
||||
self.dismissed = true
|
||||
|
||||
if self.props.onClose then
|
||||
self.props.onClose()
|
||||
end
|
||||
@@ -65,7 +59,7 @@ function FullscreenNotification:didMount()
|
||||
end
|
||||
|
||||
function FullscreenNotification:willUnmount()
|
||||
if self.timeout and coroutine.status(self.timeout) == "suspended" then
|
||||
if self.timeout and coroutine.status(self.timeout) ~= "dead" then
|
||||
task.cancel(self.timeout)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -25,7 +25,6 @@ function Notification:init()
|
||||
self.binding = bindingUtil.fromMotor(self.motor)
|
||||
|
||||
self.lifetime = self.props.timeout
|
||||
self.dismissed = false
|
||||
|
||||
self.motor:onStep(function(value)
|
||||
if value <= 0 and self.props.onClose then
|
||||
@@ -35,11 +34,6 @@ function Notification:init()
|
||||
end
|
||||
|
||||
function Notification:dismiss()
|
||||
if self.dismissed then
|
||||
return
|
||||
end
|
||||
self.dismissed = true
|
||||
|
||||
self.motor:setGoal(Flipper.Spring.new(0, {
|
||||
frequency = 5,
|
||||
dampingRatio = 1,
|
||||
@@ -81,7 +75,7 @@ function Notification:didMount()
|
||||
end
|
||||
|
||||
function Notification:willUnmount()
|
||||
if self.timeout and coroutine.status(self.timeout) == "suspended" then
|
||||
if self.timeout and coroutine.status(self.timeout) ~= "dead" then
|
||||
task.cancel(self.timeout)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -301,19 +301,6 @@ function App:setPriorSyncInfo(host: string, port: string, projectName: string)
|
||||
Settings:set("priorEndpoints", priorSyncInfos)
|
||||
end
|
||||
|
||||
function App:forgetPriorSyncInfo()
|
||||
local priorSyncInfos = Settings:get("priorEndpoints")
|
||||
if not priorSyncInfos then
|
||||
priorSyncInfos = {}
|
||||
end
|
||||
|
||||
local id = tostring(game.PlaceId)
|
||||
priorSyncInfos[id] = nil
|
||||
Log.trace("Erased last used endpoint for {}", game.PlaceId)
|
||||
|
||||
Settings:set("priorEndpoints", priorSyncInfos)
|
||||
end
|
||||
|
||||
function App:getHostAndPort()
|
||||
local host = self.host:getValue()
|
||||
local port = self.port:getValue()
|
||||
@@ -448,8 +435,7 @@ function App:checkSyncReminder()
|
||||
self:findActiveServer()
|
||||
:andThen(function(serverInfo, host, port)
|
||||
self:sendSyncReminder(
|
||||
`Project '{serverInfo.projectName}' is serving at {host}:{port}.\nWould you like to connect?`,
|
||||
{ "Connect", "Dismiss" }
|
||||
`Project '{serverInfo.projectName}' is serving at {host}:{port}.\nWould you like to connect?`
|
||||
)
|
||||
end)
|
||||
:catch(function()
|
||||
@@ -460,8 +446,7 @@ function App:checkSyncReminder()
|
||||
|
||||
local timeSinceSync = timeUtil.elapsedToText(os.time() - priorSyncInfo.timestamp)
|
||||
self:sendSyncReminder(
|
||||
`You synced project '{priorSyncInfo.projectName}' to this place {timeSinceSync}.\nDid you mean to run 'rojo serve' and then connect?`,
|
||||
{ "Connect", "Forget", "Dismiss" }
|
||||
`You synced project '{priorSyncInfo.projectName}' to this place {timeSinceSync}.\nDid you mean to run 'rojo serve' and then connect?`
|
||||
)
|
||||
end
|
||||
end)
|
||||
@@ -501,16 +486,12 @@ function App:stopSyncReminderPolling()
|
||||
end
|
||||
end
|
||||
|
||||
function App:sendSyncReminder(message: string, shownActions: { string })
|
||||
function App:sendSyncReminder(message: string)
|
||||
local syncReminderMode = Settings:get("syncReminderMode")
|
||||
if syncReminderMode == "None" then
|
||||
return
|
||||
end
|
||||
|
||||
local connectIndex = table.find(shownActions, "Connect")
|
||||
local forgetIndex = table.find(shownActions, "Forget")
|
||||
local dismissIndex = table.find(shownActions, "Dismiss")
|
||||
|
||||
self.dismissSyncReminder = self:addNotification({
|
||||
text = message,
|
||||
timeout = 120,
|
||||
@@ -519,39 +500,24 @@ function App:sendSyncReminder(message: string, shownActions: { string })
|
||||
self.dismissSyncReminder = nil
|
||||
end,
|
||||
actions = {
|
||||
Connect = if connectIndex
|
||||
then {
|
||||
text = "Connect",
|
||||
style = "Solid",
|
||||
layoutOrder = connectIndex,
|
||||
onClick = function()
|
||||
self:startSession()
|
||||
end,
|
||||
}
|
||||
else nil,
|
||||
Forget = if forgetIndex
|
||||
then {
|
||||
text = "Forget",
|
||||
style = "Bordered",
|
||||
layoutOrder = forgetIndex,
|
||||
onClick = function()
|
||||
-- The user doesn't want to be reminded again about this sync
|
||||
self:forgetPriorSyncInfo()
|
||||
end,
|
||||
}
|
||||
else nil,
|
||||
Dismiss = if dismissIndex
|
||||
then {
|
||||
text = "Dismiss",
|
||||
style = "Bordered",
|
||||
layoutOrder = dismissIndex,
|
||||
onClick = function()
|
||||
-- If the user dismisses the reminder,
|
||||
-- then we don't need to remind them again
|
||||
self:stopSyncReminderPolling()
|
||||
end,
|
||||
}
|
||||
else nil,
|
||||
Connect = {
|
||||
text = "Connect",
|
||||
style = "Solid",
|
||||
layoutOrder = 1,
|
||||
onClick = function()
|
||||
self:startSession()
|
||||
end,
|
||||
},
|
||||
Dismiss = {
|
||||
text = "Dismiss",
|
||||
style = "Bordered",
|
||||
layoutOrder = 2,
|
||||
onClick = function()
|
||||
-- If the user dismisses the reminder,
|
||||
-- then we don't need to remind them again
|
||||
self:stopSyncReminderPolling()
|
||||
end,
|
||||
},
|
||||
},
|
||||
})
|
||||
end
|
||||
|
||||
@@ -54,10 +54,6 @@ local function trueEquals(a, b): boolean
|
||||
end
|
||||
return true
|
||||
|
||||
-- For NaN, check if both values are not equal to themselves
|
||||
elseif a ~= a and b ~= b then
|
||||
return true
|
||||
|
||||
-- For numbers, compare with epsilon of 0.0001 to avoid floating point inequality
|
||||
elseif typeA == "number" and typeB == "number" then
|
||||
return fuzzyEq(a, b, 0.0001)
|
||||
|
||||
@@ -41,41 +41,14 @@ function reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualIn
|
||||
invariant("Cannot reify an instance not present in virtualInstances\nID: {}", id)
|
||||
end
|
||||
|
||||
-- Before creating a new instance, check if the parent already has an
|
||||
-- untracked child with the same Name and ClassName. This enables "late
|
||||
-- adoption" of instances that exist in Studio but weren't in the initial
|
||||
-- Rojo tree (e.g., when using --git-since filtering). Without this,
|
||||
-- newly acknowledged files would create duplicate instances.
|
||||
local adoptedExisting = false
|
||||
local instance = nil
|
||||
-- Instance.new can fail if we're passing in something that can't be
|
||||
-- created, like a service, something enabled with a feature flag, or
|
||||
-- something that requires higher security than we have.
|
||||
local createSuccess, instance = pcall(Instance.new, virtualInstance.ClassName)
|
||||
|
||||
for _, child in ipairs(parentInstance:GetChildren()) do
|
||||
local accessSuccess, name, className = pcall(function()
|
||||
return child.Name, child.ClassName
|
||||
end)
|
||||
|
||||
if accessSuccess
|
||||
and name == virtualInstance.Name
|
||||
and className == virtualInstance.ClassName
|
||||
and instanceMap.fromInstances[child] == nil
|
||||
then
|
||||
instance = child
|
||||
adoptedExisting = true
|
||||
break
|
||||
end
|
||||
end
|
||||
|
||||
if not adoptedExisting then
|
||||
-- Instance.new can fail if we're passing in something that can't be
|
||||
-- created, like a service, something enabled with a feature flag, or
|
||||
-- something that requires higher security than we have.
|
||||
local createSuccess
|
||||
createSuccess, instance = pcall(Instance.new, virtualInstance.ClassName)
|
||||
|
||||
if not createSuccess then
|
||||
addAllToPatch(unappliedPatch, virtualInstances, id)
|
||||
return
|
||||
end
|
||||
if not createSuccess then
|
||||
addAllToPatch(unappliedPatch, virtualInstances, id)
|
||||
return
|
||||
end
|
||||
|
||||
-- TODO: Can this fail? Previous versions of Rojo guarded against this, but
|
||||
@@ -123,9 +96,7 @@ function reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualIn
|
||||
reifyInstanceInner(unappliedPatch, deferredRefs, instanceMap, virtualInstances, childId, instance)
|
||||
end
|
||||
|
||||
if not adoptedExisting then
|
||||
instance.Parent = parentInstance
|
||||
end
|
||||
instance.Parent = parentInstance
|
||||
instanceMap:insert(id, instance)
|
||||
end
|
||||
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
---
|
||||
source: tests/rojo_test/syncback_util.rs
|
||||
expression: src/ChildWithDuplicates.rbxm
|
||||
---
|
||||
num_types: 1
|
||||
num_instances: 3
|
||||
chunks:
|
||||
- Inst:
|
||||
type_id: 0
|
||||
type_name: Folder
|
||||
object_format: 0
|
||||
referents:
|
||||
- 0
|
||||
- 1
|
||||
- 2
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: AttributesSerialize
|
||||
prop_type: String
|
||||
values:
|
||||
- ""
|
||||
- ""
|
||||
- ""
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: Capabilities
|
||||
prop_type: SecurityCapabilities
|
||||
values:
|
||||
- 0
|
||||
- 0
|
||||
- 0
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: Name
|
||||
prop_type: String
|
||||
values:
|
||||
- DuplicateChild
|
||||
- DuplicateChild
|
||||
- ChildWithDuplicates
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: DefinesCapabilities
|
||||
prop_type: Bool
|
||||
values:
|
||||
- false
|
||||
- false
|
||||
- false
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: SourceAssetId
|
||||
prop_type: Int64
|
||||
values:
|
||||
- -1
|
||||
- -1
|
||||
- -1
|
||||
- Prop:
|
||||
type_id: 0
|
||||
prop_name: Tags
|
||||
prop_type: String
|
||||
values:
|
||||
- ""
|
||||
- ""
|
||||
- ""
|
||||
- Prnt:
|
||||
version: 0
|
||||
links:
|
||||
- - 0
|
||||
- 2
|
||||
- - 1
|
||||
- 2
|
||||
- - 2
|
||||
- -1
|
||||
- End
|
||||
@@ -1,9 +1,12 @@
|
||||
---
|
||||
source: tests/rojo_test/syncback_util.rs
|
||||
assertion_line: 101
|
||||
expression: "String::from_utf8_lossy(&output.stdout)"
|
||||
---
|
||||
Writing src/ChildWithDuplicates.rbxm
|
||||
Writing src/ChildWithDuplicates/DuplicateChild/.gitkeep
|
||||
Writing src/ChildWithDuplicates/DuplicateChild1/.gitkeep
|
||||
Writing src/ChildWithoutDuplicates/Child/.gitkeep
|
||||
Writing src/ChildWithDuplicates/DuplicateChild
|
||||
Writing src/ChildWithDuplicates/DuplicateChild1
|
||||
Writing src/ChildWithoutDuplicates
|
||||
Writing src/ChildWithoutDuplicates/Child
|
||||
Removing src/ChildWithDuplicates
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/ChildWithDuplicates/DuplicateChild1/.gitkeep
|
||||
---
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
source: tests/tests/syncback.rs
|
||||
assertion_line: 31
|
||||
expression: src/ChildWithDuplicates/DuplicateChild/.gitkeep
|
||||
---
|
||||
|
||||
@@ -9,7 +9,6 @@ use std::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
git::SharedGitFilter,
|
||||
message_queue::MessageQueue,
|
||||
snapshot::{
|
||||
apply_patch_set, compute_patch_set, AppliedPatchSet, InstigatingSource, PatchSet, RojoTree,
|
||||
@@ -47,15 +46,11 @@ pub struct ChangeProcessor {
|
||||
impl ChangeProcessor {
|
||||
/// Spin up the ChangeProcessor, connecting it to the given tree, VFS, and
|
||||
/// outbound message queue.
|
||||
///
|
||||
/// If `git_filter` is provided, it will be refreshed on every VFS event
|
||||
/// to ensure newly changed files are acknowledged.
|
||||
pub fn start(
|
||||
tree: Arc<Mutex<RojoTree>>,
|
||||
vfs: Arc<Vfs>,
|
||||
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
|
||||
tree_mutation_receiver: Receiver<PatchSet>,
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
) -> Self {
|
||||
let (shutdown_sender, shutdown_receiver) = crossbeam_channel::bounded(1);
|
||||
let vfs_receiver = vfs.event_receiver();
|
||||
@@ -63,7 +58,6 @@ impl ChangeProcessor {
|
||||
tree,
|
||||
vfs,
|
||||
message_queue,
|
||||
git_filter,
|
||||
};
|
||||
|
||||
let job_thread = jod_thread::Builder::new()
|
||||
@@ -117,10 +111,6 @@ struct JobThreadContext {
|
||||
/// Whenever changes are applied to the DOM, we should push those changes
|
||||
/// into this message queue to inform any connected clients.
|
||||
message_queue: Arc<MessageQueue<AppliedPatchSet>>,
|
||||
|
||||
/// Optional Git filter for --git-since mode. When set, will be refreshed
|
||||
/// on every VFS event to ensure newly changed files are acknowledged.
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
}
|
||||
|
||||
impl JobThreadContext {
|
||||
@@ -170,14 +160,6 @@ impl JobThreadContext {
|
||||
fn handle_vfs_event(&self, event: VfsEvent) {
|
||||
log::trace!("Vfs event: {:?}", event);
|
||||
|
||||
// If we have a git filter, refresh it to pick up any new changes.
|
||||
// This ensures that files modified during the session will be acknowledged.
|
||||
if let Some(ref git_filter) = self.git_filter {
|
||||
if let Err(err) = git_filter.refresh() {
|
||||
log::warn!("Failed to refresh git filter: {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
// Update the VFS immediately with the event.
|
||||
self.vfs
|
||||
.commit_event(&event)
|
||||
|
||||
@@ -81,7 +81,7 @@ impl BuildCommand {
|
||||
let vfs = Vfs::new_default();
|
||||
vfs.set_watch_enabled(self.watch);
|
||||
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
let mut cursor = session.message_queue().cursor();
|
||||
|
||||
write_model(&session, &output_path, output_kind)?;
|
||||
|
||||
@@ -54,7 +54,7 @@ fn initialize_plugin() -> anyhow::Result<ServeSession> {
|
||||
in_memory_fs.load_snapshot("/plugin", plugin_snapshot)?;
|
||||
|
||||
let vfs = Vfs::new(in_memory_fs);
|
||||
Ok(ServeSession::new(vfs, "/plugin", None)?)
|
||||
Ok(ServeSession::new(vfs, "/plugin")?)
|
||||
}
|
||||
|
||||
fn install_plugin() -> anyhow::Result<()> {
|
||||
|
||||
@@ -9,7 +9,7 @@ use clap::Parser;
use memofs::Vfs;
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};

use crate::{git::GitFilter, serve_session::ServeSession, web::LiveServer};
use crate::{serve_session::ServeSession, web::LiveServer};

use super::{resolve_path, GlobalOptions};

@@ -31,19 +31,6 @@ pub struct ServeCommand {
    /// it has none.
    #[clap(long)]
    pub port: Option<u16>,

    /// Only sync files that have changed since the given Git reference.
    ///
    /// When this option is set, Rojo will only include files that have been
    /// modified, added, or are untracked since the specified Git reference
    /// (e.g., "HEAD", "main", a commit hash). This is useful for working with
    /// large projects where you only want to sync your local changes.
    ///
    /// Scripts that have not changed will still be acknowledged if modified
    /// during the session, and all synced instances will have
    /// ignoreUnknownInstances set to true to preserve descendants in Studio.
    #[clap(long, value_name = "REF")]
    pub git_since: Option<String>,
}
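For orientation, the option above would be invoked as, say, `rojo serve --git-since main`. The following condensed sketch summarizes the wiring it enables, using only names introduced in this branch (`GitFilter`, plus the extra parameter on `ServeSession::new`); it is an illustrative digest of the `run()` change shown further down, not a standalone program, since both types are crate-internal.

```rust
use std::path::Path;
use std::sync::Arc;

// Sketch only: `GitFilter` and `ServeSession` are this crate's internal types
// (crate::git::GitFilter, crate::serve_session::ServeSession).
fn open_session(
    vfs: memofs::Vfs,
    project_path: &Path,
    git_since: Option<&str>,
) -> anyhow::Result<ServeSession> {
    let git_filter = match git_since {
        Some(base_ref) => {
            // Locate the enclosing repository and build a filter against the given ref.
            let repo_root = GitFilter::find_repo_root(project_path)?;
            Some(Arc::new(GitFilter::new(
                repo_root,
                base_ref.to_string(),
                project_path,
            )?))
        }
        None => None,
    };

    // ServeSession::new gains an Option<SharedGitFilter> parameter in this branch.
    Ok(ServeSession::new(vfs, project_path, git_filter)?)
}
```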
impl ServeCommand {
@@ -52,19 +39,7 @@

        let vfs = Vfs::new_default();

        // Set up Git filter if --git-since was specified
        let git_filter = if let Some(ref base_ref) = self.git_since {
            let repo_root = GitFilter::find_repo_root(&project_path)?;
            log::info!(
                "Git filter enabled: only syncing files changed since '{}'",
                base_ref
            );
            Some(Arc::new(GitFilter::new(repo_root, base_ref.clone(), &project_path)?))
        } else {
            None
        };

        let session = Arc::new(ServeSession::new(vfs, project_path, git_filter)?);
        let session = Arc::new(ServeSession::new(vfs, project_path)?);

        let ip = self
            .address
@@ -78,25 +53,17 @@

        let server = LiveServer::new(session);

        let _ = show_start_message(ip, port, self.git_since.as_deref(), global.color.into());
        let _ = show_start_message(ip, port, global.color.into());
        server.start((ip, port).into());

        Ok(())
    }
}
fn show_start_message(
|
||||
bind_address: IpAddr,
|
||||
port: u16,
|
||||
git_since: Option<&str>,
|
||||
color: ColorChoice,
|
||||
) -> io::Result<()> {
|
||||
fn show_start_message(bind_address: IpAddr, port: u16, color: ColorChoice) -> io::Result<()> {
|
||||
let mut green = ColorSpec::new();
|
||||
green.set_fg(Some(Color::Green)).set_bold(true);
|
||||
|
||||
let mut yellow = ColorSpec::new();
|
||||
yellow.set_fg(Some(Color::Yellow)).set_bold(true);
|
||||
|
||||
let writer = BufferWriter::stdout(color);
|
||||
let mut buffer = writer.buffer();
|
||||
|
||||
@@ -117,13 +84,6 @@ fn show_start_message(
|
||||
buffer.set_color(&green)?;
|
||||
writeln!(&mut buffer, "{}", port)?;
|
||||
|
||||
if let Some(base_ref) = git_since {
|
||||
buffer.set_color(&ColorSpec::new())?;
|
||||
write!(&mut buffer, " Mode: ")?;
|
||||
buffer.set_color(&yellow)?;
|
||||
writeln!(&mut buffer, "git-since ({})", base_ref)?;
|
||||
}
|
||||
|
||||
writeln!(&mut buffer)?;
|
||||
|
||||
buffer.set_color(&ColorSpec::new())?;
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
---
|
||||
source: src/cli/sourcemap.rs
|
||||
expression: sourcemap_contents
|
||||
---
|
||||
{
|
||||
"name": "default",
|
||||
"className": "DataModel",
|
||||
"filePaths": "[...1 path omitted...]",
|
||||
"children": [
|
||||
{
|
||||
"name": "ReplicatedStorage",
|
||||
"className": "ReplicatedStorage",
|
||||
"children": [
|
||||
{
|
||||
"name": "Project",
|
||||
"className": "ModuleScript",
|
||||
"filePaths": "[...1 path omitted...]",
|
||||
"children": [
|
||||
{
|
||||
"name": "Module",
|
||||
"className": "Folder",
|
||||
"children": [
|
||||
{
|
||||
"name": "module",
|
||||
"className": "ModuleScript",
|
||||
"filePaths": "[...1 path omitted...]"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
source: src/cli/sourcemap.rs
|
||||
expression: sourcemap_contents
|
||||
---
|
||||
{
|
||||
"name": "default",
|
||||
"className": "DataModel",
|
||||
"filePaths": [
|
||||
"default.project.json"
|
||||
],
|
||||
"children": [
|
||||
{
|
||||
"name": "ReplicatedStorage",
|
||||
"className": "ReplicatedStorage",
|
||||
"children": [
|
||||
{
|
||||
"name": "Project",
|
||||
"className": "ModuleScript",
|
||||
"filePaths": [
|
||||
"src/init.luau"
|
||||
],
|
||||
"children": [
|
||||
{
|
||||
"name": "Module",
|
||||
"className": "Folder",
|
||||
"children": [
|
||||
{
|
||||
"name": "module",
|
||||
"className": "ModuleScript",
|
||||
"filePaths": [
|
||||
"../module/module.luau"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -10,7 +10,7 @@ use fs_err::File;
|
||||
use memofs::Vfs;
|
||||
use rayon::prelude::*;
|
||||
use rbx_dom_weak::{types::Ref, Ustr};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::Serialize;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
use crate::{
|
||||
@@ -24,20 +24,19 @@ const PATH_STRIP_FAILED_ERR: &str = "Failed to create relative paths for project
|
||||
const ABSOLUTE_PATH_FAILED_ERR: &str = "Failed to turn relative path into absolute path!";
|
||||
|
||||
/// Representation of a node in the generated sourcemap tree.
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct SourcemapNode<'a> {
|
||||
name: &'a str,
|
||||
class_name: Ustr,
|
||||
|
||||
#[serde(
|
||||
default,
|
||||
skip_serializing_if = "Vec::is_empty",
|
||||
serialize_with = "crate::path_serializer::serialize_vec_absolute"
|
||||
)]
|
||||
file_paths: Vec<Cow<'a, Path>>,
|
||||
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
children: Vec<SourcemapNode<'a>>,
|
||||
}
|
||||
|
||||
@@ -71,14 +70,13 @@ pub struct SourcemapCommand {
|
||||
|
||||
impl SourcemapCommand {
|
||||
pub fn run(self) -> anyhow::Result<()> {
|
||||
let project_path = fs_err::canonicalize(resolve_path(&self.project))?;
|
||||
let project_path = resolve_path(&self.project);
|
||||
|
||||
log::trace!("Constructing filesystem with StdBackend");
|
||||
log::trace!("Constructing in-memory filesystem");
|
||||
let vfs = Vfs::new_default();
|
||||
vfs.set_watch_enabled(self.watch);
|
||||
|
||||
log::trace!("Setting up session for sourcemap generation");
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
let mut cursor = session.message_queue().cursor();
|
||||
|
||||
let filter = if self.include_non_scripts {
|
||||
@@ -89,17 +87,14 @@ impl SourcemapCommand {
|
||||
|
||||
// Pre-build a rayon threadpool with a low number of threads to avoid
|
||||
// dynamic creation overhead on systems with a high number of cpus.
|
||||
log::trace!("Setting rayon global threadpool");
|
||||
rayon::ThreadPoolBuilder::new()
|
||||
.num_threads(num_cpus::get().min(6))
|
||||
.build_global()
|
||||
.ok();
|
||||
.unwrap();
|
||||
|
||||
log::trace!("Writing initial sourcemap");
|
||||
write_sourcemap(&session, self.output.as_deref(), filter, self.absolute)?;
|
||||
|
||||
if self.watch {
|
||||
log::trace!("Setting up runtime for watch mode");
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
loop {
|
||||
@@ -213,7 +208,7 @@ fn recurse_create_node<'a>(
    } else {
        for val in file_paths {
            output_file_paths.push(Cow::from(
                pathdiff::diff_paths(val, project_dir).expect(PATH_STRIP_FAILED_ERR),
                val.strip_prefix(project_dir).expect(PATH_STRIP_FAILED_ERR),
            ));
        }
    };
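The swap between `pathdiff::diff_paths` and `Path::strip_prefix` above matters when a file does not live under the project directory, as in the `relative_paths` test project whose sourcemap contains `../module/module.luau`. A small self-contained illustration, assuming the `pathdiff` crate listed in Cargo.toml and using made-up paths:

```rust
use std::path::Path;

fn main() {
    let project_dir = Path::new("/repo/project");
    let file = Path::new("/repo/module/module.luau"); // sibling of the project dir

    // strip_prefix only succeeds for paths inside project_dir:
    assert!(file.strip_prefix(project_dir).is_err());

    // pathdiff can still express the location relative to project_dir:
    let rel = pathdiff::diff_paths(file, project_dir).expect("comparable paths");
    assert_eq!(rel, Path::new("../module/module.luau"));
}
```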
@@ -255,80 +250,3 @@ fn write_sourcemap(
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::cli::sourcemap::SourcemapNode;
|
||||
use crate::cli::SourcemapCommand;
|
||||
use insta::internals::Content;
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn maps_relative_paths() {
|
||||
let sourcemap_dir = tempfile::tempdir().unwrap();
|
||||
let sourcemap_output = sourcemap_dir.path().join("sourcemap.json");
|
||||
let project_path = fs_err::canonicalize(
|
||||
Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("test-projects")
|
||||
.join("relative_paths")
|
||||
.join("project"),
|
||||
)
|
||||
.unwrap();
|
||||
let sourcemap_command = SourcemapCommand {
|
||||
project: project_path,
|
||||
output: Some(sourcemap_output.clone()),
|
||||
include_non_scripts: false,
|
||||
watch: false,
|
||||
absolute: false,
|
||||
};
|
||||
assert!(sourcemap_command.run().is_ok());
|
||||
|
||||
let raw_sourcemap_contents = fs_err::read_to_string(sourcemap_output.as_path()).unwrap();
|
||||
let sourcemap_contents =
|
||||
serde_json::from_str::<SourcemapNode>(&raw_sourcemap_contents).unwrap();
|
||||
insta::assert_json_snapshot!(sourcemap_contents);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn maps_absolute_paths() {
|
||||
let sourcemap_dir = tempfile::tempdir().unwrap();
|
||||
let sourcemap_output = sourcemap_dir.path().join("sourcemap.json");
|
||||
let project_path = fs_err::canonicalize(
|
||||
Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("test-projects")
|
||||
.join("relative_paths")
|
||||
.join("project"),
|
||||
)
|
||||
.unwrap();
|
||||
let sourcemap_command = SourcemapCommand {
|
||||
project: project_path,
|
||||
output: Some(sourcemap_output.clone()),
|
||||
include_non_scripts: false,
|
||||
watch: false,
|
||||
absolute: true,
|
||||
};
|
||||
assert!(sourcemap_command.run().is_ok());
|
||||
|
||||
let raw_sourcemap_contents = fs_err::read_to_string(sourcemap_output.as_path()).unwrap();
|
||||
let sourcemap_contents =
|
||||
serde_json::from_str::<SourcemapNode>(&raw_sourcemap_contents).unwrap();
|
||||
insta::assert_json_snapshot!(sourcemap_contents, {
|
||||
".**.filePaths" => insta::dynamic_redaction(|mut value, _path| {
|
||||
let mut paths_count = 0;
|
||||
|
||||
match value {
|
||||
Content::Seq(ref mut vec) => {
|
||||
for path in vec.iter().map(|i| i.as_str().unwrap()) {
|
||||
assert_eq!(fs_err::canonicalize(path).is_ok(), true, "path was not valid");
|
||||
assert_eq!(Path::new(path).is_absolute(), true, "path was not absolute");
|
||||
|
||||
paths_count += 1;
|
||||
}
|
||||
}
|
||||
_ => panic!("Expected filePaths to be a sequence"),
|
||||
}
|
||||
format!("[...{} path{} omitted...]", paths_count, if paths_count != 1 { "s" } else { "" } )
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,11 +54,6 @@ pub struct SyncbackCommand {
|
||||
/// If provided, the prompt for writing to the file system is skipped.
|
||||
#[clap(long, short = 'y')]
|
||||
pub non_interactive: bool,
|
||||
|
||||
/// If provided, forces syncback to use JSON model files instead of binary
|
||||
/// .rbxm files for instances that would otherwise serialize as binary.
|
||||
#[clap(long)]
|
||||
pub dangerously_force_json: bool,
|
||||
}
|
||||
|
||||
impl SyncbackCommand {
|
||||
@@ -78,7 +73,7 @@ impl SyncbackCommand {
|
||||
vfs.set_watch_enabled(false);
|
||||
|
||||
let project_start_timer = Instant::now();
|
||||
let session_old = ServeSession::new(vfs, path_old.clone(), None)?;
|
||||
let session_old = ServeSession::new(vfs, path_old.clone())?;
|
||||
log::debug!(
|
||||
"Finished opening project in {:0.02}s",
|
||||
project_start_timer.elapsed().as_secs_f32()
|
||||
@@ -109,7 +104,6 @@ impl SyncbackCommand {
|
||||
&mut dom_old,
|
||||
dom_new,
|
||||
session_old.root_project(),
|
||||
self.dangerously_force_json,
|
||||
)?;
|
||||
log::debug!(
|
||||
"Syncback finished in {:.02}s!",
|
||||
|
||||
@@ -42,7 +42,7 @@ impl UploadCommand {
|
||||
|
||||
let vfs = Vfs::new_default();
|
||||
|
||||
let session = ServeSession::new(vfs, project_path, None)?;
|
||||
let session = ServeSession::new(vfs, project_path)?;
|
||||
|
||||
let tree = session.tree();
|
||||
let inner_tree = tree.inner();
|
||||
|
||||
src/git.rs: 380 changed lines

@@ -1,380 +0,0 @@
//! Git integration for filtering files based on changes since a reference.

use std::{
    collections::HashSet,
    path::{Path, PathBuf},
    process::Command,
    sync::{Arc, RwLock},
};

use anyhow::{bail, Context};

/// A filter that tracks which files have been changed since a Git reference.
///
/// When active, only files that have been modified, added, or deleted according
/// to Git will be "acknowledged" and synced to Studio. This allows users to
/// work with large projects where they only want to sync their local changes.
///
/// Once a file is acknowledged (either initially or during the session), it
/// stays acknowledged for the entire session. This prevents files from being
/// deleted in Studio if their content is reverted to match the git reference.
#[derive(Debug)]
pub struct GitFilter {
    /// The Git repository root directory.
    repo_root: PathBuf,

    /// The Git reference to compare against (e.g., "HEAD", "main", a commit hash).
    base_ref: String,

    /// Cache of paths that are currently different from the base ref according to git.
    /// This is refreshed on every VFS event.
    git_changed_paths: RwLock<HashSet<PathBuf>>,

    /// Paths that have been acknowledged at any point during this session.
    /// Once a path is added here, it stays acknowledged forever (for this session).
    /// This prevents files from being deleted if their content is reverted.
    session_acknowledged_paths: RwLock<HashSet<PathBuf>>,
}
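To make the intended call pattern concrete, here is a minimal sketch using only the methods defined later in this file (`find_repo_root`, `new`, `refresh`, `is_acknowledged`); the branch name and file path are hypothetical:

```rust
use std::path::Path;

// Illustrative only: GitFilter is the type defined in this file.
fn example(project_path: &Path) -> anyhow::Result<()> {
    // Compare the working tree against `main`; any ref git understands would work.
    let repo_root = GitFilter::find_repo_root(project_path)?;
    let filter = GitFilter::new(repo_root, "main".to_string(), project_path)?;

    // Changed, staged, and untracked paths (plus ancestors and meta files) are
    // acknowledged; the project path itself is always acknowledged.
    let synced = filter.is_acknowledged(&project_path.join("src/shared/util.luau"));

    // After the working tree changes, refresh() re-queries git. Paths acknowledged
    // earlier in the session stay acknowledged even if their edits are reverted.
    filter.refresh()?;

    println!("util.luau acknowledged: {synced}");
    Ok(())
}
```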
impl GitFilter {
|
||||
/// Creates a new GitFilter for the given repository root and base reference.
|
||||
///
|
||||
/// The `repo_root` should be the root of the Git repository (where .git is located).
|
||||
/// The `base_ref` is the Git reference to compare against (e.g., "HEAD", "main").
|
||||
/// The `project_path` is the path to the project being served - it will always be
|
||||
/// acknowledged regardless of git status to ensure the project structure exists.
|
||||
pub fn new(repo_root: PathBuf, base_ref: String, project_path: &Path) -> anyhow::Result<Self> {
|
||||
let filter = Self {
|
||||
repo_root,
|
||||
base_ref,
|
||||
git_changed_paths: RwLock::new(HashSet::new()),
|
||||
session_acknowledged_paths: RwLock::new(HashSet::new()),
|
||||
};
|
||||
|
||||
// Always acknowledge the project path and its directory so the project
|
||||
// structure exists even when there are no git changes
|
||||
filter.acknowledge_project_path(project_path);
|
||||
|
||||
// Initial refresh to populate the cache with git changes
|
||||
filter.refresh()?;
|
||||
|
||||
Ok(filter)
|
||||
}
|
||||
|
||||
/// Acknowledges the project path and its containing directory.
|
||||
/// This ensures the project structure always exists regardless of git status.
|
||||
fn acknowledge_project_path(&self, project_path: &Path) {
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
|
||||
// Acknowledge the project path itself (might be a directory or .project.json file)
|
||||
let canonical = project_path.canonicalize().unwrap_or_else(|_| project_path.to_path_buf());
|
||||
session.insert(canonical.clone());
|
||||
|
||||
// Acknowledge all ancestor directories
|
||||
let mut current = canonical.parent();
|
||||
while let Some(parent) = current {
|
||||
session.insert(parent.to_path_buf());
|
||||
current = parent.parent();
|
||||
}
|
||||
|
||||
// If it's a directory, also acknowledge default.project.json inside it
|
||||
if project_path.is_dir() {
|
||||
for name in &["default.project.json", "default.project.jsonc"] {
|
||||
let project_file = project_path.join(name);
|
||||
if let Ok(canonical_file) = project_file.canonicalize() {
|
||||
session.insert(canonical_file);
|
||||
} else {
|
||||
session.insert(project_file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If it's a .project.json file, also acknowledge its parent directory
|
||||
if let Some(parent) = project_path.parent() {
|
||||
let parent_canonical = parent.canonicalize().unwrap_or_else(|_| parent.to_path_buf());
|
||||
session.insert(parent_canonical);
|
||||
}
|
||||
|
||||
log::debug!(
|
||||
"GitFilter: acknowledged project path {} ({} paths total)",
|
||||
project_path.display(),
|
||||
session.len()
|
||||
);
|
||||
}
|
||||
|
||||
/// Finds the Git repository root for the given path.
|
||||
pub fn find_repo_root(path: &Path) -> anyhow::Result<PathBuf> {
|
||||
let output = Command::new("git")
|
||||
.args(["rev-parse", "--show-toplevel"])
|
||||
.current_dir(path)
|
||||
.output()
|
||||
.context("Failed to execute git rev-parse")?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
bail!("Failed to find Git repository root: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let root = String::from_utf8_lossy(&output.stdout)
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
Ok(PathBuf::from(root))
|
||||
}
|
||||
|
||||
/// Refreshes the cache of acknowledged paths by querying Git.
|
||||
///
|
||||
/// This should be called when files change to ensure newly modified files
|
||||
/// are properly acknowledged. Once a path is acknowledged, it stays
|
||||
/// acknowledged for the entire session (even if the file is reverted).
|
||||
pub fn refresh(&self) -> anyhow::Result<()> {
|
||||
let mut git_changed = HashSet::new();
|
||||
|
||||
// Get files changed since the base ref (modified, added, deleted)
|
||||
let diff_output = Command::new("git")
|
||||
.args(["diff", "--name-only", &self.base_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git diff")?;
|
||||
|
||||
if !diff_output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&diff_output.stderr);
|
||||
bail!("git diff failed: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let diff_files = String::from_utf8_lossy(&diff_output.stdout);
|
||||
let diff_count = diff_files.lines().filter(|l| !l.is_empty()).count();
|
||||
if diff_count > 0 {
|
||||
log::debug!("git diff found {} changed files", diff_count);
|
||||
}
|
||||
for line in diff_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
log::trace!("git diff: acknowledging {}", path.display());
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
|
||||
// Get untracked files (new files not yet committed)
|
||||
let untracked_output = Command::new("git")
|
||||
.args(["ls-files", "--others", "--exclude-standard"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git ls-files")?;
|
||||
|
||||
if !untracked_output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&untracked_output.stderr);
|
||||
bail!("git ls-files failed: {}", stderr.trim());
|
||||
}
|
||||
|
||||
let untracked_files = String::from_utf8_lossy(&untracked_output.stdout);
|
||||
for line in untracked_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
|
||||
// Get staged files (files added to index but not yet committed)
|
||||
let staged_output = Command::new("git")
|
||||
.args(["diff", "--name-only", "--cached", &self.base_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.context("Failed to execute git diff --cached")?;
|
||||
|
||||
if staged_output.status.success() {
|
||||
let staged_files = String::from_utf8_lossy(&staged_output.stdout);
|
||||
for line in staged_files.lines() {
|
||||
if !line.is_empty() {
|
||||
let path = self.repo_root.join(line);
|
||||
self.acknowledge_path(&path, &mut git_changed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update the git changed paths cache
|
||||
{
|
||||
let mut cache = self.git_changed_paths.write().unwrap();
|
||||
*cache = git_changed.clone();
|
||||
}
|
||||
|
||||
// Merge newly changed paths into session acknowledged paths
|
||||
// Once acknowledged, a path stays acknowledged for the entire session
|
||||
{
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
for path in git_changed {
|
||||
session.insert(path);
|
||||
}
|
||||
log::debug!(
|
||||
"GitFilter refreshed: {} paths acknowledged in session",
|
||||
session.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Acknowledges a path and all its ancestors, plus associated meta files.
|
||||
fn acknowledge_path(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
|
||||
// Canonicalize the path if possible, otherwise use as-is
|
||||
let path = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
// Add the path itself
|
||||
acknowledged.insert(path.clone());
|
||||
|
||||
// Add all ancestor directories
|
||||
let mut current = path.parent();
|
||||
while let Some(parent) = current {
|
||||
acknowledged.insert(parent.to_path_buf());
|
||||
current = parent.parent();
|
||||
}
|
||||
|
||||
// Add associated meta files
|
||||
self.acknowledge_meta_files(&path, acknowledged);
|
||||
}
|
||||
|
||||
/// Acknowledges associated meta files for a given path.
|
||||
fn acknowledge_meta_files(&self, path: &Path, acknowledged: &mut HashSet<PathBuf>) {
|
||||
if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
|
||||
if let Some(parent) = path.parent() {
|
||||
// For a file like "foo.lua", also acknowledge "foo.meta.json"
|
||||
// Strip known extensions to get the base name
|
||||
let base_name = strip_lua_extension(file_name);
|
||||
|
||||
let meta_path = parent.join(format!("{}.meta.json", base_name));
|
||||
if let Ok(canonical) = meta_path.canonicalize() {
|
||||
acknowledged.insert(canonical);
|
||||
} else {
|
||||
acknowledged.insert(meta_path);
|
||||
}
|
||||
|
||||
// For init files, also acknowledge "init.meta.json" in the same directory
|
||||
if file_name.starts_with("init.") {
|
||||
let init_meta = parent.join("init.meta.json");
|
||||
if let Ok(canonical) = init_meta.canonicalize() {
|
||||
acknowledged.insert(canonical);
|
||||
} else {
|
||||
acknowledged.insert(init_meta);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if a path is acknowledged (should be synced).
|
||||
///
|
||||
/// Returns `true` if the path or any of its descendants have been changed
|
||||
/// at any point during this session. Once a file is acknowledged, it stays
|
||||
/// acknowledged even if its content is reverted to match the git reference.
|
||||
pub fn is_acknowledged(&self, path: &Path) -> bool {
|
||||
let session = self.session_acknowledged_paths.read().unwrap();
|
||||
|
||||
// Try to canonicalize the path
|
||||
let canonical = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
// Check if this exact path is acknowledged
|
||||
if session.contains(&canonical) {
|
||||
log::trace!("Path {} is directly acknowledged", path.display());
|
||||
return true;
|
||||
}
|
||||
|
||||
// Also check without canonicalization in case of path differences
|
||||
if session.contains(path) {
|
||||
log::trace!("Path {} is acknowledged (non-canonical)", path.display());
|
||||
return true;
|
||||
}
|
||||
|
||||
// For directories, check if any descendant is acknowledged
|
||||
// This is done by checking if any acknowledged path starts with this path
|
||||
for acknowledged in session.iter() {
|
||||
if acknowledged.starts_with(&canonical) {
|
||||
log::trace!(
|
||||
"Path {} has acknowledged descendant {}",
|
||||
path.display(),
|
||||
acknowledged.display()
|
||||
);
|
||||
return true;
|
||||
}
|
||||
// Also check non-canonical
|
||||
if acknowledged.starts_with(path) {
|
||||
log::trace!(
|
||||
"Path {} has acknowledged descendant {} (non-canonical)",
|
||||
path.display(),
|
||||
acknowledged.display()
|
||||
);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
log::trace!(
|
||||
"Path {} is NOT acknowledged (canonical: {})",
|
||||
path.display(),
|
||||
canonical.display()
|
||||
);
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns the base reference being compared against.
|
||||
pub fn base_ref(&self) -> &str {
|
||||
&self.base_ref
|
||||
}
|
||||
|
||||
/// Returns the repository root path.
|
||||
pub fn repo_root(&self) -> &Path {
|
||||
&self.repo_root
|
||||
}
|
||||
|
||||
/// Explicitly acknowledges a path and all its ancestors.
|
||||
/// This is useful for ensuring certain paths are always synced regardless of git status.
|
||||
pub fn force_acknowledge(&self, path: &Path) {
|
||||
let mut acknowledged = HashSet::new();
|
||||
self.acknowledge_path(path, &mut acknowledged);
|
||||
|
||||
let mut session = self.session_acknowledged_paths.write().unwrap();
|
||||
for p in acknowledged {
|
||||
session.insert(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Strips Lua-related extensions from a file name to get the base name.
|
||||
fn strip_lua_extension(file_name: &str) -> &str {
|
||||
const EXTENSIONS: &[&str] = &[
|
||||
".server.luau",
|
||||
".server.lua",
|
||||
".client.luau",
|
||||
".client.lua",
|
||||
".luau",
|
||||
".lua",
|
||||
];
|
||||
|
||||
for ext in EXTENSIONS {
|
||||
if let Some(base) = file_name.strip_suffix(ext) {
|
||||
return base;
|
||||
}
|
||||
}
|
||||
|
||||
// If no Lua extension, try to strip the regular extension
|
||||
file_name
|
||||
.rsplit_once('.')
|
||||
.map(|(base, _)| base)
|
||||
.unwrap_or(file_name)
|
||||
}
|
||||
|
||||
/// A wrapper around GitFilter that can be shared across threads.
|
||||
pub type SharedGitFilter = Arc<GitFilter>;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_strip_lua_extension() {
|
||||
assert_eq!(strip_lua_extension("foo.server.lua"), "foo");
|
||||
assert_eq!(strip_lua_extension("foo.client.luau"), "foo");
|
||||
assert_eq!(strip_lua_extension("foo.lua"), "foo");
|
||||
assert_eq!(strip_lua_extension("init.server.lua"), "init");
|
||||
assert_eq!(strip_lua_extension("bar.txt"), "bar");
|
||||
assert_eq!(strip_lua_extension("noextension"), "noextension");
|
||||
}
|
||||
}
|
||||
@@ -9,7 +9,6 @@ mod tree_view;
|
||||
|
||||
mod auth_cookie;
|
||||
mod change_processor;
|
||||
mod git;
|
||||
mod glob;
|
||||
mod json;
|
||||
mod lua_ast;
|
||||
@@ -29,7 +28,6 @@ mod web;
|
||||
|
||||
// TODO: Work out what we should expose publicly
|
||||
|
||||
pub use git::{GitFilter, SharedGitFilter};
|
||||
pub use project::*;
|
||||
pub use rojo_ref::*;
|
||||
pub use session_id::SessionId;
|
||||
|
||||
@@ -13,7 +13,6 @@ use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
change_processor::ChangeProcessor,
|
||||
git::SharedGitFilter,
|
||||
message_queue::MessageQueue,
|
||||
project::{Project, ProjectError},
|
||||
session_id::SessionId,
|
||||
@@ -95,14 +94,7 @@ impl ServeSession {
|
||||
/// The project file is expected to be loaded out-of-band since it's
|
||||
/// currently loaded from the filesystem directly instead of through the
|
||||
/// in-memory filesystem layer.
|
||||
///
|
||||
/// If `git_filter` is provided, only files that have changed since the
|
||||
/// specified Git reference will be synced.
|
||||
pub fn new<P: AsRef<Path>>(
|
||||
vfs: Vfs,
|
||||
start_path: P,
|
||||
git_filter: Option<SharedGitFilter>,
|
||||
) -> Result<Self, ServeSessionError> {
|
||||
pub fn new<P: AsRef<Path>>(vfs: Vfs, start_path: P) -> Result<Self, ServeSessionError> {
|
||||
let start_path = start_path.as_ref();
|
||||
let start_time = Instant::now();
|
||||
|
||||
@@ -110,28 +102,12 @@ impl ServeSession {
|
||||
|
||||
let root_project = Project::load_initial_project(&vfs, start_path)?;
|
||||
|
||||
// If git filter is active, ensure the project file location is acknowledged
|
||||
// This is necessary so the project structure exists even with no git changes
|
||||
if let Some(ref filter) = git_filter {
|
||||
filter.force_acknowledge(start_path);
|
||||
filter.force_acknowledge(&root_project.file_location);
|
||||
filter.force_acknowledge(root_project.folder_location());
|
||||
log::debug!(
|
||||
"Force acknowledged project at {}",
|
||||
root_project.file_location.display()
|
||||
);
|
||||
}
|
||||
|
||||
let mut tree = RojoTree::new(InstanceSnapshot::new());
|
||||
|
||||
let root_id = tree.get_root_id();
|
||||
|
||||
let instance_context = match &git_filter {
|
||||
Some(filter) => {
|
||||
InstanceContext::with_git_filter(root_project.emit_legacy_scripts, Arc::clone(filter))
|
||||
}
|
||||
None => InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts),
|
||||
};
|
||||
let instance_context =
|
||||
InstanceContext::with_emit_legacy_scripts(root_project.emit_legacy_scripts);
|
||||
|
||||
log::trace!("Generating snapshot of instances from VFS");
|
||||
let snapshot = snapshot_from_vfs(&instance_context, &vfs, start_path)?;
|
||||
@@ -157,7 +133,6 @@ impl ServeSession {
|
||||
Arc::clone(&vfs),
|
||||
Arc::clone(&message_queue),
|
||||
tree_mutation_receiver,
|
||||
git_filter,
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
|
||||
@@ -8,7 +8,6 @@ use anyhow::Context;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
git::SharedGitFilter,
|
||||
glob::Glob,
|
||||
path_serializer,
|
||||
project::ProjectNode,
|
||||
@@ -153,27 +152,13 @@ impl Default for InstanceMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct InstanceContext {
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub path_ignore_rules: Arc<Vec<PathIgnoreRule>>,
|
||||
pub emit_legacy_scripts: bool,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub sync_rules: Vec<SyncRule>,
|
||||
/// Optional Git filter for --git-since mode. When set, only files that have
|
||||
/// changed since the specified Git reference will be synced.
|
||||
#[serde(skip)]
|
||||
pub git_filter: Option<SharedGitFilter>,
|
||||
}
|
||||
|
||||
impl PartialEq for InstanceContext {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
// Note: git_filter is intentionally excluded from comparison
|
||||
// since it's runtime state, not configuration
|
||||
self.path_ignore_rules == other.path_ignore_rules
|
||||
&& self.emit_legacy_scripts == other.emit_legacy_scripts
|
||||
&& self.sync_rules == other.sync_rules
|
||||
}
|
||||
}
|
||||
|
||||
impl InstanceContext {
|
||||
@@ -182,7 +167,6 @@ impl InstanceContext {
|
||||
path_ignore_rules: Arc::new(Vec::new()),
|
||||
emit_legacy_scripts: emit_legacy_scripts_default().unwrap(),
|
||||
sync_rules: Vec::new(),
|
||||
git_filter: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,36 +179,6 @@ impl InstanceContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new InstanceContext with a Git filter for --git-since mode.
|
||||
pub fn with_git_filter(
|
||||
emit_legacy_scripts: Option<bool>,
|
||||
git_filter: SharedGitFilter,
|
||||
) -> Self {
|
||||
Self {
|
||||
git_filter: Some(git_filter),
|
||||
..Self::with_emit_legacy_scripts(emit_legacy_scripts)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the Git filter for this context.
|
||||
pub fn set_git_filter(&mut self, git_filter: Option<SharedGitFilter>) {
|
||||
self.git_filter = git_filter;
|
||||
}
|
||||
|
||||
/// Returns true if the given path should be acknowledged (synced).
|
||||
/// If no git filter is set, all paths are acknowledged.
|
||||
pub fn is_path_acknowledged(&self, path: &Path) -> bool {
|
||||
match &self.git_filter {
|
||||
Some(filter) => filter.is_acknowledged(path),
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if a git filter is active.
|
||||
pub fn has_git_filter(&self) -> bool {
|
||||
self.git_filter.is_some()
|
||||
}
|
||||
|
||||
/// Extend the list of ignore rules in the context with the given new rules.
|
||||
pub fn add_path_ignore_rules<I>(&mut self, new_rules: I)
|
||||
where
|
||||
|
||||
@@ -8,7 +8,7 @@ use rbx_dom_weak::{
|
||||
ustr, HashMapExt as _, UstrMap, UstrSet,
|
||||
};
|
||||
|
||||
use crate::{variant_eq::variant_eq, RojoRef, REF_POINTER_ATTRIBUTE_PREFIX};
|
||||
use crate::{RojoRef, REF_POINTER_ATTRIBUTE_PREFIX};
|
||||
|
||||
use super::{
|
||||
patch::{PatchAdd, PatchSet, PatchUpdate},
|
||||
@@ -127,7 +127,7 @@ fn compute_property_patches(
|
||||
|
||||
match instance.properties().get(&name) {
|
||||
Some(instance_value) => {
|
||||
if !variant_eq(&snapshot_value, instance_value) {
|
||||
if &snapshot_value != instance_value {
|
||||
changed_properties.insert(name, Some(snapshot_value));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,15 +109,12 @@ pub fn syncback_csv<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.middleware
|
||||
.and_then(|mw| {
|
||||
let ext = format!(".{}", crate::syncback::extension_for_middleware(mw));
|
||||
snapshot.path.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.and_then(|s| s.strip_suffix(ext.as_str()))
|
||||
.map(str::to_owned)
|
||||
})
|
||||
.unwrap_or_else(|| new_inst.name.clone());
|
||||
let file_name = snapshot
|
||||
.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
let meta_stem = file_name.strip_suffix(".csv").unwrap_or(file_name);
|
||||
fs_snapshot.add_file(
|
||||
parent.join(format!("{meta_stem}.meta.json")),
|
||||
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
|
||||
|
||||
@@ -8,10 +8,13 @@ use memofs::{DirEntry, Vfs};
|
||||
|
||||
use crate::{
|
||||
snapshot::{InstanceContext, InstanceMetadata, InstanceSnapshot, InstigatingSource},
|
||||
syncback::{hash_instance, slugify_name, FsSnapshot, SyncbackReturn, SyncbackSnapshot},
|
||||
syncback::{
|
||||
extension_for_middleware, hash_instance, FsSnapshot, SyncbackReturn,
|
||||
SyncbackSnapshot,
|
||||
},
|
||||
};
|
||||
|
||||
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs};
|
||||
use super::{meta_file::DirectoryMetadata, snapshot_from_vfs, Middleware};
|
||||
|
||||
const EMPTY_DIR_KEEP_NAME: &str = ".gitkeep";
|
||||
|
||||
@@ -91,6 +94,22 @@ pub fn snapshot_dir_no_meta(
|
||||
Ok(Some(snapshot))
|
||||
}
|
||||
|
||||
/// Splits a filesystem name into (stem, extension) based on middleware type.
|
||||
/// For directory middleware, the extension is empty. For file middleware,
|
||||
/// the extension comes from `extension_for_middleware`.
|
||||
fn split_name_and_ext(name: &str, middleware: Middleware) -> (&str, &str) {
|
||||
if middleware.is_dir() {
|
||||
(name, "")
|
||||
} else {
|
||||
let ext = extension_for_middleware(middleware);
|
||||
if let Some(stem) = name.strip_suffix(&format!(".{ext}")) {
|
||||
(stem, ext)
|
||||
} else {
|
||||
(name, "")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn syncback_dir<'sync>(
|
||||
snapshot: &SyncbackSnapshot<'sync>,
|
||||
) -> anyhow::Result<SyncbackReturn<'sync>> {
|
||||
@@ -143,77 +162,119 @@ pub fn syncback_dir_no_meta<'sync>(
|
||||
}
|
||||
}
|
||||
|
||||
// Enforce unique filesystem names. Uses actual on-disk names for existing
|
||||
// children and resolved names (with init-prefix) for new ones.
|
||||
let mut fs_child_names = HashSet::with_capacity(new_inst.children().len());
|
||||
let mut duplicate_set = HashSet::new();
|
||||
for child_ref in new_inst.children() {
|
||||
let child = snapshot.get_new_instance(*child_ref).unwrap();
|
||||
let fs_name = old_child_map
|
||||
.get(child.name.as_str())
|
||||
.and_then(|old| old.metadata().relevant_paths.first())
|
||||
.and_then(|p| p.file_name())
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.to_lowercase())
|
||||
.unwrap_or_else(|| {
|
||||
let slug = slugify_name(&child.name);
|
||||
let slug_lower = slug.to_lowercase();
|
||||
// Mirror name_for_inst's init-prefix.
|
||||
if slug_lower == "init" {
|
||||
format!("_{slug_lower}")
|
||||
} else {
|
||||
slug_lower
|
||||
}
|
||||
});
|
||||
|
||||
if !fs_child_names.insert(fs_name) {
|
||||
duplicate_set.insert(child.name.as_str());
|
||||
}
|
||||
}
|
||||
if !duplicate_set.is_empty() {
|
||||
if duplicate_set.len() <= 25 {
|
||||
anyhow::bail!(
|
||||
"Instance has children with duplicate name (case may not exactly match):\n {}",
|
||||
duplicate_set.into_iter().collect::<Vec<&str>>().join(", ")
|
||||
);
|
||||
}
|
||||
anyhow::bail!("Instance has more than 25 children with duplicate names");
|
||||
// --- Two-pass collision resolution ---
|
||||
//
|
||||
// Pass 1: Collect each child's base filesystem name and old ref, applying
|
||||
// skip conditions. Track which names are used (lowercased) so we can
|
||||
// detect collisions.
|
||||
struct ChildEntry {
|
||||
new_ref: rbx_dom_weak::types::Ref,
|
||||
old_ref: Option<rbx_dom_weak::types::Ref>,
|
||||
base_name: String,
|
||||
middleware: Middleware,
|
||||
skip: bool,
|
||||
}
|
||||
|
||||
if snapshot.old_inst().is_some() {
|
||||
for new_child_ref in new_inst.children() {
|
||||
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
|
||||
if let Some(old_child) = old_child_map.remove(new_child.name.as_str()) {
|
||||
if old_child.metadata().relevant_paths.is_empty() {
|
||||
log::debug!(
|
||||
"Skipping instance {} because it doesn't exist on the disk",
|
||||
old_child.name()
|
||||
);
|
||||
continue;
|
||||
} else if matches!(
|
||||
old_child.metadata().instigating_source,
|
||||
Some(InstigatingSource::ProjectNode { .. })
|
||||
) {
|
||||
log::debug!(
|
||||
"Skipping instance {} because it originates in a project file",
|
||||
old_child.name()
|
||||
);
|
||||
continue;
|
||||
}
|
||||
// This child exists in both doms. Pass it on.
|
||||
children.push(snapshot.with_joined_path(*new_child_ref, Some(old_child.id()))?);
|
||||
} else {
|
||||
// The child only exists in the the new dom
|
||||
children.push(snapshot.with_joined_path(*new_child_ref, None)?);
|
||||
let mut entries = Vec::with_capacity(new_inst.children().len());
|
||||
let mut used_names: HashSet<String> = HashSet::with_capacity(new_inst.children().len());
|
||||
let mut collision_indices: Vec<usize> = Vec::new();
|
||||
|
||||
for new_child_ref in new_inst.children() {
|
||||
let new_child = snapshot.get_new_instance(*new_child_ref).unwrap();
|
||||
|
||||
// Determine old_ref and apply skip conditions.
|
||||
let old_child = if snapshot.old_inst().is_some() {
|
||||
old_child_map.remove(new_child.name.as_str())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut skip = false;
|
||||
if let Some(ref old) = old_child {
|
||||
if old.metadata().relevant_paths.is_empty() {
|
||||
log::debug!(
|
||||
"Skipping instance {} because it doesn't exist on the disk",
|
||||
old.name()
|
||||
);
|
||||
skip = true;
|
||||
} else if matches!(
|
||||
old.metadata().instigating_source,
|
||||
Some(InstigatingSource::ProjectNode { .. })
|
||||
) {
|
||||
log::debug!(
|
||||
"Skipping instance {} because it originates in a project file",
|
||||
old.name()
|
||||
);
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
// Any children that are in the old dom but not the new one are removed.
|
||||
removed_children.extend(old_child_map.into_values());
|
||||
} else {
|
||||
// There is no old instance. Just add every child.
|
||||
for new_child_ref in new_inst.children() {
|
||||
children.push(snapshot.with_joined_path(*new_child_ref, None)?);
|
||||
|
||||
let old_ref = old_child.as_ref().map(|o| o.id());
|
||||
|
||||
if skip {
|
||||
entries.push(ChildEntry {
|
||||
new_ref: *new_child_ref,
|
||||
old_ref,
|
||||
base_name: String::new(),
|
||||
middleware: Middleware::Dir,
|
||||
skip: true,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
let (middleware, base_name) =
|
||||
snapshot.child_middleware_and_name(*new_child_ref, old_ref)?;
|
||||
|
||||
let idx = entries.len();
|
||||
let lower = base_name.to_lowercase();
|
||||
if !used_names.insert(lower) {
|
||||
// Name already claimed — needs resolution.
|
||||
collision_indices.push(idx);
|
||||
}
|
||||
|
||||
entries.push(ChildEntry {
|
||||
new_ref: *new_child_ref,
|
||||
old_ref,
|
||||
base_name,
|
||||
middleware,
|
||||
skip: false,
|
||||
});
|
||||
}
|
||||
|
||||
// Pass 2: Resolve collisions by appending incrementing suffixes.
|
||||
for idx in collision_indices {
|
||||
let entry = &entries[idx];
|
||||
let (stem, ext) = split_name_and_ext(&entry.base_name, entry.middleware);
|
||||
let mut counter = 1u32;
|
||||
loop {
|
||||
let candidate = if ext.is_empty() {
|
||||
format!("{stem}{counter}")
|
||||
} else {
|
||||
format!("{stem}{counter}.{ext}")
|
||||
};
|
||||
let lower = candidate.to_lowercase();
|
||||
if used_names.insert(lower) {
|
||||
// Safe to mutate — we only visit each collision index once.
|
||||
let entry = &mut entries[idx];
|
||||
entry.base_name = candidate;
|
||||
break;
|
||||
}
|
||||
counter += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Create snapshots from resolved entries.
|
||||
for entry in &entries {
|
||||
if entry.skip {
|
||||
continue;
|
||||
}
|
||||
let resolved_path = snapshot.path.join(&entry.base_name);
|
||||
children.push(snapshot.with_new_path(resolved_path, entry.new_ref, entry.old_ref));
|
||||
}
|
||||
|
||||
// Any children that are in the old dom but not the new one are removed.
|
||||
if snapshot.old_inst().is_some() {
|
||||
removed_children.extend(old_child_map.into_values());
|
||||
}
|
||||
let mut fs_snapshot = FsSnapshot::new();
|
||||
|
||||
@@ -362,14 +423,15 @@ mod test {
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should not error when two children have the same lowercased Roblox \
|
||||
name but map to distinct filesystem paths: {result:?}",
|
||||
name but map to distinct filesystem paths: {:?}",
|
||||
result.as_ref().err(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Two completely new children with the same non-init name would produce
|
||||
/// the same filesystem entry and must be detected as a duplicate.
|
||||
/// Two completely new children with the same name get resolved via
|
||||
/// incrementing suffixes instead of erroring.
|
||||
#[test]
|
||||
fn syncback_detects_sibling_duplicate_names() {
|
||||
fn syncback_resolves_sibling_duplicate_names() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
@@ -387,8 +449,6 @@ mod test {
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
// "Foo" is not a reserved name but two siblings named "Foo" still
|
||||
// collide on disk.
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
|
||||
new_tree.insert(new_parent, InstanceBuilder::new("Folder").with_name("Foo"));
|
||||
|
||||
@@ -405,9 +465,17 @@ mod test {
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"should error when two new children would produce the same filesystem name",
|
||||
result.is_ok(),
|
||||
"should resolve duplicate names with suffixes, not error: {:?}",
|
||||
result.as_ref().err(),
|
||||
);
|
||||
let children = result.unwrap().children;
|
||||
let mut names: Vec<String> = children
|
||||
.iter()
|
||||
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
|
||||
.collect();
|
||||
names.sort();
|
||||
assert_eq!(names, vec!["Foo", "Foo1"]);
|
||||
}
|
||||
|
||||
/// A new child named "Init" (as a ModuleScript) would naively become
|
||||
@@ -452,7 +520,8 @@ mod test {
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should resolve init-name conflict by prefixing '_', not error: {result:?}",
|
||||
"should resolve init-name conflict by prefixing '_', not error: {:?}",
|
||||
result.as_ref().err(),
|
||||
);
|
||||
// The child should have been placed at "_Init.luau", not "Init.luau".
|
||||
let child_file_name = result
|
||||
@@ -518,7 +587,64 @@ mod test {
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should allow a child whose filesystem name is slugified away from \
|
||||
the reserved 'init' stem: {result:?}",
|
||||
the reserved 'init' stem: {:?}",
|
||||
result.as_ref().err(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Two new children both named "Init" (ModuleScripts) should get
|
||||
/// "_Init.luau" and "_Init1.luau" respectively.
|
||||
#[test]
|
||||
fn syncback_resolves_multiple_init_conflicts() {
|
||||
use rbx_dom_weak::{InstanceBuilder, WeakDom};
|
||||
|
||||
let old_parent = InstanceSnapshot::new()
|
||||
.name("Parent")
|
||||
.class_name("Folder")
|
||||
.metadata(
|
||||
InstanceMetadata::new()
|
||||
.instigating_source(PathBuf::from("/root"))
|
||||
.relevant_paths(vec![PathBuf::from("/root")]),
|
||||
);
|
||||
let old_tree = RojoTree::new(old_parent);
|
||||
|
||||
let mut new_tree = WeakDom::new(InstanceBuilder::new("ROOT"));
|
||||
let new_parent = new_tree.insert(
|
||||
new_tree.root_ref(),
|
||||
InstanceBuilder::new("Folder").with_name("Parent"),
|
||||
);
|
||||
new_tree.insert(
|
||||
new_parent,
|
||||
InstanceBuilder::new("ModuleScript").with_name("Init"),
|
||||
);
|
||||
new_tree.insert(
|
||||
new_parent,
|
||||
InstanceBuilder::new("ModuleScript").with_name("Init"),
|
||||
);
|
||||
|
||||
let vfs = make_vfs();
|
||||
let project = make_project();
|
||||
let data = SyncbackData::for_test(&vfs, &old_tree, &new_tree, &project);
|
||||
let snapshot = SyncbackSnapshot {
|
||||
data,
|
||||
old: Some(old_tree.get_root_id()),
|
||||
new: new_parent,
|
||||
path: PathBuf::from("/root"),
|
||||
middleware: None,
|
||||
};
|
||||
|
||||
let result = syncback_dir_no_meta(&snapshot);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"should resolve multiple init conflicts with suffixes: {:?}",
|
||||
result.as_ref().err(),
|
||||
);
|
||||
let children = result.unwrap().children;
|
||||
let mut names: Vec<String> = children
|
||||
.iter()
|
||||
.map(|c| c.path.file_name().unwrap().to_string_lossy().into_owned())
|
||||
.collect();
|
||||
names.sort();
|
||||
assert_eq!(names, vec!["_Init.luau", "_Init1.luau"]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,15 +158,21 @@ pub fn syncback_lua<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent_location = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.middleware
|
||||
.and_then(|mw| {
|
||||
let ext = format!(".{}", crate::syncback::extension_for_middleware(mw));
|
||||
snapshot.path.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.and_then(|s| s.strip_suffix(ext.as_str()))
|
||||
.map(str::to_owned)
|
||||
})
|
||||
.unwrap_or_else(|| snapshot.new_inst().name.clone());
|
||||
let file_name = snapshot
|
||||
.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
let meta_stem = file_name
|
||||
.strip_suffix(".server.luau")
|
||||
.or_else(|| file_name.strip_suffix(".server.lua"))
|
||||
.or_else(|| file_name.strip_suffix(".client.luau"))
|
||||
.or_else(|| file_name.strip_suffix(".client.lua"))
|
||||
.or_else(|| file_name.strip_suffix(".plugin.luau"))
|
||||
.or_else(|| file_name.strip_suffix(".plugin.lua"))
|
||||
.or_else(|| file_name.strip_suffix(".luau"))
|
||||
.or_else(|| file_name.strip_suffix(".lua"))
|
||||
.unwrap_or(file_name);
|
||||
fs_snapshot.add_file(
|
||||
parent_location.join(format!("{meta_stem}.meta.json")),
|
||||
serde_json::to_vec_pretty(&meta).context("cannot serialize metadata")?,
|
||||
|
||||
@@ -154,17 +154,12 @@ impl AdjacentMetadata {
|
||||
.old_inst()
|
||||
.and_then(|inst| inst.metadata().specified_name.clone())
|
||||
.or_else(|| {
|
||||
// Write name when the filesystem path doesn't match the
|
||||
// instance name (invalid chars or init-prefix).
|
||||
// Write name when name_for_inst would produce a different
|
||||
// filesystem stem (slugification or init-prefix).
|
||||
if snapshot.old_inst().is_none() {
|
||||
let instance_name = &snapshot.new_inst().name;
|
||||
let fs_stem = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.map(|s| s.split('.').next().unwrap_or(s))
|
||||
.unwrap_or("");
|
||||
if validate_file_name(instance_name).is_err()
|
||||
|| fs_stem != instance_name.as_str()
|
||||
|| instance_name.to_lowercase() == "init"
|
||||
{
|
||||
Some(instance_name.clone())
|
||||
} else {
|
||||
@@ -428,16 +423,12 @@ impl DirectoryMetadata {
|
||||
.old_inst()
|
||||
.and_then(|inst| inst.metadata().specified_name.clone())
|
||||
.or_else(|| {
|
||||
// Write name when the directory name doesn't match the
|
||||
// instance name (invalid chars or init-prefix).
|
||||
// Write name when name_for_inst would produce a different
|
||||
// directory name (slugification or init-prefix).
|
||||
if snapshot.old_inst().is_none() {
|
||||
let instance_name = &snapshot.new_inst().name;
|
||||
let fs_name = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
if validate_file_name(instance_name).is_err()
|
||||
|| fs_name != instance_name.as_str()
|
||||
|| instance_name.to_lowercase() == "init"
|
||||
{
|
||||
Some(instance_name.clone())
|
||||
} else {
|
||||
|
||||
@@ -61,10 +61,6 @@ pub use self::{
|
||||
/// This will inspect the path and find the appropriate middleware for it,
|
||||
/// taking user-written rules into account. Then, it will attempt to convert
|
||||
/// the path into an InstanceSnapshot using that middleware.
|
||||
///
|
||||
/// If a git filter is active in the context and the path is not acknowledged
|
||||
/// (i.e., the file hasn't changed since the base git reference), this function
|
||||
/// returns `Ok(None)` to skip syncing that file.
|
||||
#[profiling::function]
|
||||
pub fn snapshot_from_vfs(
|
||||
context: &InstanceContext,
|
||||
@@ -76,16 +72,6 @@ pub fn snapshot_from_vfs(
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
// Check if this path is acknowledged by the git filter.
|
||||
// If not, skip this path entirely.
|
||||
if !context.is_path_acknowledged(path) {
|
||||
log::trace!(
|
||||
"Skipping path {} (not acknowledged by git filter)",
|
||||
path.display()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if meta.is_dir() {
|
||||
let (middleware, dir_name, init_path) = get_dir_middleware(vfs, path)?;
|
||||
// TODO: Support user defined init paths
|
||||
@@ -227,10 +213,6 @@ pub enum Middleware {
|
||||
impl Middleware {
|
||||
/// Creates a snapshot for the given path from the Middleware with
|
||||
/// the provided name.
|
||||
///
|
||||
/// When a git filter is active in the context, `ignore_unknown_instances`
|
||||
/// will be set to `true` on all generated snapshots to preserve descendants
|
||||
/// in Studio that are not tracked by Rojo.
|
||||
fn snapshot(
|
||||
&self,
|
||||
context: &InstanceContext,
|
||||
@@ -280,14 +262,6 @@ impl Middleware {
|
||||
};
|
||||
if let Ok(Some(ref mut snapshot)) = output {
|
||||
snapshot.metadata.middleware = Some(*self);
|
||||
|
||||
// When git filter is active, force ignore_unknown_instances to true
|
||||
// so that we don't delete children in Studio that aren't tracked.
|
||||
if context.has_git_filter() {
|
||||
snapshot.metadata.ignore_unknown_instances = true;
|
||||
// Also apply this recursively to all children
|
||||
set_ignore_unknown_instances_recursive(&mut snapshot.children);
|
||||
}
|
||||
}
|
||||
output
|
||||
}
|
||||
@@ -391,16 +365,6 @@ impl Middleware {
|
||||
}
|
||||
}
|
||||
|
||||
/// Recursively sets `ignore_unknown_instances` to `true` on all children.
|
||||
/// This is used when git filter is active to ensure we don't delete
|
||||
/// children in Studio that aren't tracked by Rojo.
|
||||
fn set_ignore_unknown_instances_recursive(children: &mut [InstanceSnapshot]) {
|
||||
for child in children {
|
||||
child.metadata.ignore_unknown_instances = true;
|
||||
set_ignore_unknown_instances_recursive(&mut child.children);
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper for easily defining a SyncRule. Arguments are passed literally
|
||||
/// to this macro in the order `include`, `middleware`, `suffix`,
|
||||
/// and `exclude`. Both `suffix` and `exclude` are optional.
|
||||
|
||||
@@ -83,19 +83,6 @@ pub fn snapshot_project(
|
||||
// file being updated.
|
||||
snapshot.metadata.relevant_paths.push(path.to_path_buf());
|
||||
|
||||
// When git filter is active, also register the project folder as a
|
||||
// relevant path. This serves as a catch-all so that file changes
|
||||
// not under any specific $path node can still walk up the directory
|
||||
// tree and trigger a re-snapshot of the entire project.
|
||||
if context.has_git_filter() {
|
||||
if let Some(folder) = path.parent() {
|
||||
let normalized = vfs
|
||||
.canonicalize(folder)
|
||||
.unwrap_or_else(|_| folder.to_path_buf());
|
||||
snapshot.metadata.relevant_paths.push(normalized);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(snapshot))
|
||||
}
|
||||
None => Ok(None),
|
||||
@@ -150,26 +137,6 @@ pub fn snapshot_project_node(
|
||||
// Take the snapshot's metadata as-is, which will be mutated later
|
||||
// on.
|
||||
metadata = snapshot.metadata;
|
||||
} else if context.has_git_filter() {
|
||||
// When the git filter is active and the $path was filtered out
|
||||
// (no acknowledged files yet), we still need to register the path
|
||||
// in relevant_paths. This allows the change processor to map file
|
||||
// changes in this directory back to this project node instance,
|
||||
// triggering a re-snapshot that will pick up newly modified files.
|
||||
let normalized = vfs
|
||||
.canonicalize(full_path.as_ref())
|
||||
.unwrap_or_else(|_| full_path.to_path_buf());
|
||||
metadata.relevant_paths.push(normalized);
|
||||
|
||||
// The VFS only sets up file watches via read() and read_dir(),
|
||||
// not via metadata(). Since the git filter caused snapshot_from_vfs
|
||||
// to return early (before read_dir was called), the VFS is not
|
||||
// watching this path. We must read the directory here to ensure
|
||||
// the VFS sets up a recursive watch, otherwise file change events
|
||||
// will never fire and live sync won't detect modifications.
|
||||
if full_path.is_dir() {
|
||||
let _ = vfs.read_dir(&full_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,17 +192,6 @@ pub fn snapshot_project_node(
|
||||
}
|
||||
|
||||
(_, None, _, Some(PathNode::Required(path))) => {
|
||||
// If git filter is active and the path was filtered out, treat it
|
||||
// as if the path was optional and skip this node.
|
||||
if context.has_git_filter() {
|
||||
log::trace!(
|
||||
"Skipping project node '{}' because its path was filtered by git filter: {}",
|
||||
instance_name,
|
||||
path.display()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
anyhow::bail!(
|
||||
"Rojo project referred to a file using $path that could not be turned into a Roblox Instance by Rojo.\n\
|
||||
Check that the file exists and is a file type known by Rojo.\n\
|
||||
@@ -326,12 +282,7 @@ pub fn snapshot_project_node(
|
||||
// If the user didn't specify it AND $path was not specified (meaning
|
||||
// there's no existing value we'd be stepping on from a project file or meta
|
||||
// file), set it to true.
|
||||
//
|
||||
// When git filter is active, always set to true to preserve descendants
|
||||
// in Studio that are not tracked by Rojo.
|
||||
if context.has_git_filter() {
|
||||
metadata.ignore_unknown_instances = true;
|
||||
} else if let Some(ignore) = node.ignore_unknown_instances {
|
||||
if let Some(ignore) = node.ignore_unknown_instances {
|
||||
metadata.ignore_unknown_instances = ignore;
|
||||
} else if node.path.is_none() {
|
||||
// TODO: Introduce a strict mode where $ignoreUnknownInstances is never
|
||||
|
||||
@@ -58,15 +58,12 @@ pub fn syncback_txt<'sync>(
|
||||
|
||||
if !meta.is_empty() {
|
||||
let parent = snapshot.path.parent_err()?;
|
||||
let meta_stem = snapshot.middleware
|
||||
.and_then(|mw| {
|
||||
let ext = format!(".{}", crate::syncback::extension_for_middleware(mw));
|
||||
snapshot.path.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.and_then(|s| s.strip_suffix(ext.as_str()))
|
||||
.map(str::to_owned)
|
||||
})
|
||||
.unwrap_or_else(|| new_inst.name.clone());
|
||||
let file_name = snapshot
|
||||
.path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
let meta_stem = file_name.strip_suffix(".txt").unwrap_or(file_name);
|
||||
fs_snapshot.add_file(
|
||||
parent.join(format!("{meta_stem}.meta.json")),
|
||||
serde_json::to_vec_pretty(&meta).context("could not serialize metadata")?,
|
||||
|
||||
@@ -28,7 +28,7 @@ use crate::{
|
||||
Project,
|
||||
};
|
||||
|
||||
pub use file_names::{extension_for_middleware, name_for_inst, slugify_name, validate_file_name};
|
||||
pub use file_names::{extension_for_middleware, name_for_inst, validate_file_name};
|
||||
pub use fs_snapshot::FsSnapshot;
|
||||
pub use hash::*;
|
||||
pub use property_filter::{filter_properties, filter_properties_preallocated};
|
||||
@@ -52,7 +52,6 @@ pub fn syncback_loop(
|
||||
old_tree: &mut RojoTree,
|
||||
mut new_tree: WeakDom,
|
||||
project: &Project,
|
||||
force_json: bool,
|
||||
) -> anyhow::Result<FsSnapshot> {
|
||||
let ignore_patterns = project
|
||||
.syncback_rules
|
||||
@@ -154,7 +153,6 @@ pub fn syncback_loop(
|
||||
old_tree,
|
||||
new_tree: &new_tree,
|
||||
project,
|
||||
force_json,
|
||||
};
|
||||
|
||||
let mut snapshots = vec![SyncbackSnapshot {
|
||||
@@ -199,7 +197,7 @@ pub fn syncback_loop(
|
||||
}
|
||||
}
|
||||
|
||||
let middleware = get_best_middleware(&snapshot, force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
|
||||
log::trace!(
|
||||
"Middleware for {inst_path} is {:?} (path is {})",
|
||||
@@ -215,14 +213,10 @@ pub fn syncback_loop(
|
||||
let syncback = match middleware.syncback(&snapshot) {
|
||||
Ok(syncback) => syncback,
|
||||
Err(err) if middleware == Middleware::Dir => {
|
||||
let new_middleware = if force_json {
|
||||
Middleware::JsonModel
|
||||
} else {
|
||||
match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
let new_middleware = match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
};
|
||||
let file_name = snapshot
|
||||
.path
|
||||
@@ -301,13 +295,12 @@ pub struct SyncbackReturn<'sync> {
|
||||
pub removed_children: Vec<InstanceWithMeta<'sync>>,
|
||||
}
|
||||
|
||||
pub fn get_best_middleware(snapshot: &SyncbackSnapshot, force_json: bool) -> Middleware {
|
||||
pub fn get_best_middleware(snapshot: &SyncbackSnapshot) -> Middleware {
|
||||
// At some point, we're better off using an O(1) method for checking
|
||||
// equality for classes like this.
|
||||
static JSON_MODEL_CLASSES: OnceLock<HashSet<&str>> = OnceLock::new();
|
||||
let json_model_classes = JSON_MODEL_CLASSES.get_or_init(|| {
|
||||
[
|
||||
"Actor",
|
||||
"Sound",
|
||||
"SoundGroup",
|
||||
"Sky",
|
||||
@@ -325,11 +318,6 @@ pub fn get_best_middleware(snapshot: &SyncbackSnapshot, force_json: bool) -> Mid
|
||||
"ChatInputBarConfiguration",
|
||||
"BubbleChatConfiguration",
|
||||
"ChannelTabsConfiguration",
|
||||
"RemoteEvent",
|
||||
"UnreliableRemoteEvent",
|
||||
"RemoteFunction",
|
||||
"BindableEvent",
|
||||
"BindableFunction",
|
||||
]
|
||||
.into()
|
||||
});
|
||||
@@ -373,18 +361,10 @@ pub fn get_best_middleware(snapshot: &SyncbackSnapshot, force_json: bool) -> Mid
|
||||
}
|
||||
|
||||
if middleware == Middleware::Rbxm {
|
||||
middleware = if force_json {
|
||||
if !inst.children().is_empty() {
|
||||
Middleware::Dir
|
||||
} else {
|
||||
Middleware::JsonModel
|
||||
}
|
||||
} else {
|
||||
match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
middleware = match env::var(DEBUG_MODEL_FORMAT_VAR) {
|
||||
Ok(value) if value == "1" => Middleware::Rbxmx,
|
||||
Ok(value) if value == "2" => Middleware::JsonModel,
|
||||
_ => Middleware::Rbxm,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ pub struct SyncbackData<'sync> {
|
||||
pub(super) old_tree: &'sync RojoTree,
|
||||
pub(super) new_tree: &'sync WeakDom,
|
||||
pub(super) project: &'sync Project,
|
||||
pub(super) force_json: bool,
|
||||
}
|
||||
|
||||
pub struct SyncbackSnapshot<'sync> {
|
||||
@@ -32,6 +31,25 @@ pub struct SyncbackSnapshot<'sync> {
|
||||
}
|
||||
|
||||
impl<'sync> SyncbackSnapshot<'sync> {
|
||||
/// Computes the middleware and filesystem name for a child without
|
||||
/// creating a full snapshot. Uses the same logic as `with_joined_path`.
|
||||
pub fn child_middleware_and_name(
|
||||
&self,
|
||||
new_ref: Ref,
|
||||
old_ref: Option<Ref>,
|
||||
) -> anyhow::Result<(Middleware, String)> {
|
||||
let temp = Self {
|
||||
data: self.data,
|
||||
old: old_ref,
|
||||
new: new_ref,
|
||||
path: PathBuf::new(),
|
||||
middleware: None,
|
||||
};
|
||||
let middleware = get_best_middleware(&temp, self.data.force_json);
|
||||
let name = name_for_inst(middleware, temp.new_inst(), temp.old_inst())?;
|
||||
Ok((middleware, name.into_owned()))
|
||||
}
|
||||
|
||||
/// Constructs a SyncbackSnapshot from the provided refs
|
||||
/// while inheriting this snapshot's path and data. This should be used for
|
||||
/// directories.
|
||||
@@ -44,7 +62,7 @@ impl<'sync> SyncbackSnapshot<'sync> {
|
||||
path: PathBuf::new(),
|
||||
middleware: None,
|
||||
};
|
||||
let middleware = get_best_middleware(&snapshot, self.data.force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
let name = name_for_inst(middleware, snapshot.new_inst(), snapshot.old_inst())?;
|
||||
snapshot.path = self.path.join(name.as_ref());
|
||||
|
||||
@@ -70,7 +88,7 @@ impl<'sync> SyncbackSnapshot<'sync> {
|
||||
path: PathBuf::new(),
|
||||
middleware: None,
|
||||
};
|
||||
let middleware = get_best_middleware(&snapshot, self.data.force_json);
|
||||
let middleware = get_best_middleware(&snapshot);
|
||||
let name = name_for_inst(middleware, snapshot.new_inst(), snapshot.old_inst())?;
|
||||
snapshot.path = base_path.join(name.as_ref());
|
||||
|
||||
@@ -252,6 +270,7 @@ impl<'sync> SyncbackData<'sync> {
|
||||
old_tree,
|
||||
new_tree,
|
||||
project,
|
||||
force_json: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ use rbx_dom_weak::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
json,
|
||||
serve_session::ServeSession,
|
||||
snapshot::{InstanceWithMeta, PatchSet, PatchUpdate},
|
||||
web::{
|
||||
@@ -21,10 +22,11 @@ use crate::{
|
||||
ServerInfoResponse, SocketPacket, SocketPacketBody, SocketPacketType, SubscribeMessage,
|
||||
WriteRequest, WriteResponse, PROTOCOL_VERSION, SERVER_VERSION,
|
||||
},
|
||||
util::{deserialize_msgpack, msgpack, msgpack_ok, serialize_msgpack},
|
||||
util::{json, json_ok},
|
||||
},
|
||||
web_api::{
|
||||
InstanceUpdate, RefPatchRequest, RefPatchResponse, SerializeRequest, SerializeResponse,
|
||||
BufferEncode, InstanceUpdate, RefPatchRequest, RefPatchResponse, SerializeRequest,
|
||||
SerializeResponse,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -40,7 +42,7 @@ pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>)
|
||||
if is_upgrade_request(&request) {
|
||||
service.handle_api_socket(&mut request).await
|
||||
} else {
|
||||
msgpack(
|
||||
json(
|
||||
ErrorResponse::bad_request(
|
||||
"/api/socket must be called as a websocket upgrade request",
|
||||
),
|
||||
@@ -56,7 +58,7 @@ pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>)
|
||||
}
|
||||
(&Method::POST, "/api/write") => service.handle_api_write(request).await,
|
||||
|
||||
(_method, path) => msgpack(
|
||||
(_method, path) => json(
|
||||
ErrorResponse::not_found(format!("Route not found: {}", path)),
|
||||
StatusCode::NOT_FOUND,
|
||||
),
|
||||
@@ -77,7 +79,7 @@ impl ApiService {
|
||||
let tree = self.serve_session.tree();
|
||||
let root_instance_id = tree.get_root_id();
|
||||
|
||||
msgpack_ok(&ServerInfoResponse {
|
||||
json_ok(&ServerInfoResponse {
|
||||
server_version: SERVER_VERSION.to_owned(),
|
||||
protocol_version: PROTOCOL_VERSION,
|
||||
session_id: self.serve_session.session_id(),
|
||||
@@ -96,7 +98,7 @@ impl ApiService {
|
||||
let input_cursor: u32 = match argument.parse() {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request(format!("Malformed message cursor: {}", err)),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -107,7 +109,7 @@ impl ApiService {
|
||||
let (response, websocket) = match upgrade(request, None) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::internal_error(format!("WebSocket upgrade failed: {}", err)),
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
);
|
||||
@@ -134,10 +136,10 @@ impl ApiService {
|
||||
|
||||
let body = body::to_bytes(request.into_body()).await.unwrap();
|
||||
|
||||
let request: WriteRequest = match deserialize_msgpack(&body) {
|
||||
let request: WriteRequest = match json::from_slice(&body) {
|
||||
Ok(request) => request,
|
||||
Err(err) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request(format!("Invalid body: {}", err)),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -145,7 +147,7 @@ impl ApiService {
|
||||
};
|
||||
|
||||
if request.session_id != session_id {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Wrong session ID"),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -171,7 +173,7 @@ impl ApiService {
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
msgpack_ok(WriteResponse { session_id })
|
||||
json_ok(WriteResponse { session_id })
|
||||
}
|
||||
|
||||
async fn handle_api_read(&self, request: Request<Body>) -> Response<Body> {
|
||||
@@ -181,7 +183,7 @@ impl ApiService {
|
||||
let requested_ids = match requested_ids {
|
||||
Ok(ids) => ids,
|
||||
Err(_) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Malformed ID list"),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -205,7 +207,7 @@ impl ApiService {
|
||||
}
|
||||
}
|
||||
|
||||
msgpack_ok(ReadResponse {
|
||||
json_ok(ReadResponse {
|
||||
session_id: self.serve_session.session_id(),
|
||||
message_cursor,
|
||||
instances,
|
||||
@@ -223,10 +225,10 @@ impl ApiService {
|
||||
let session_id = self.serve_session.session_id();
|
||||
let body = body::to_bytes(request.into_body()).await.unwrap();
|
||||
|
||||
let request: SerializeRequest = match deserialize_msgpack(&body) {
|
||||
let request: SerializeRequest = match json::from_slice(&body) {
|
||||
Ok(request) => request,
|
||||
Err(err) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request(format!("Invalid body: {}", err)),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -234,7 +236,7 @@ impl ApiService {
|
||||
};
|
||||
|
||||
if request.session_id != session_id {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Wrong session ID"),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -267,7 +269,7 @@ impl ApiService {
|
||||
|
||||
response_dom.transfer_within(child_ref, object_value);
|
||||
} else {
|
||||
msgpack(
|
||||
json(
|
||||
ErrorResponse::bad_request(format!("provided id {id} is not in the tree")),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -278,9 +280,9 @@ impl ApiService {
|
||||
let mut source = Vec::new();
|
||||
rbx_binary::to_writer(&mut source, &response_dom, &[response_dom.root_ref()]).unwrap();
|
||||
|
||||
msgpack_ok(SerializeResponse {
|
||||
json_ok(SerializeResponse {
|
||||
session_id: self.serve_session.session_id(),
|
||||
model_contents: source,
|
||||
model_contents: BufferEncode::new(source),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -292,10 +294,10 @@ impl ApiService {
|
||||
let session_id = self.serve_session.session_id();
|
||||
let body = body::to_bytes(request.into_body()).await.unwrap();
|
||||
|
||||
let request: RefPatchRequest = match deserialize_msgpack(&body) {
|
||||
let request: RefPatchRequest = match json::from_slice(&body) {
|
||||
Ok(request) => request,
|
||||
Err(err) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request(format!("Invalid body: {}", err)),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -303,7 +305,7 @@ impl ApiService {
|
||||
};
|
||||
|
||||
if request.session_id != session_id {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Wrong session ID"),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -336,7 +338,7 @@ impl ApiService {
|
||||
}
|
||||
}
|
||||
|
||||
msgpack_ok(RefPatchResponse {
|
||||
json_ok(RefPatchResponse {
|
||||
session_id: self.serve_session.session_id(),
|
||||
patch: SubscribeMessage {
|
||||
added: HashMap::new(),
|
||||
@@ -352,7 +354,7 @@ impl ApiService {
|
||||
let requested_id = match Ref::from_str(argument) {
|
||||
Ok(id) => id,
|
||||
Err(_) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Invalid instance ID"),
|
||||
StatusCode::BAD_REQUEST,
|
||||
);
|
||||
@@ -364,7 +366,7 @@ impl ApiService {
|
||||
let instance = match tree.get_instance(requested_id) {
|
||||
Some(instance) => instance,
|
||||
None => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request("Instance not found"),
|
||||
StatusCode::NOT_FOUND,
|
||||
);
|
||||
@@ -374,7 +376,7 @@ impl ApiService {
|
||||
let script_path = match pick_script_path(instance) {
|
||||
Some(path) => path,
|
||||
None => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::bad_request(
|
||||
"No appropriate file could be found to open this script",
|
||||
),
|
||||
@@ -387,7 +389,7 @@ impl ApiService {
|
||||
Ok(()) => {}
|
||||
Err(error) => match error {
|
||||
OpenError::Io(io_error) => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::internal_error(format!(
|
||||
"Attempting to open {} failed because of the following io error: {}",
|
||||
script_path.display(),
|
||||
@@ -401,7 +403,7 @@ impl ApiService {
|
||||
status,
|
||||
stderr,
|
||||
} => {
|
||||
return msgpack(
|
||||
return json(
|
||||
ErrorResponse::internal_error(format!(
|
||||
r#"The command '{}' to open '{}' failed with the error code '{}'.
|
||||
Error logs:
|
||||
@@ -417,7 +419,7 @@ impl ApiService {
|
||||
},
|
||||
};
|
||||
|
||||
msgpack_ok(OpenResponse {
|
||||
json_ok(OpenResponse {
|
||||
session_id: self.serve_session.session_id(),
|
||||
})
|
||||
}
|
||||
@@ -481,7 +483,7 @@ async fn handle_websocket_subscription(
|
||||
match result {
|
||||
Ok((new_cursor, messages)) => {
|
||||
if !messages.is_empty() {
|
||||
let msgpack_message = {
|
||||
let json_message = {
|
||||
let tree = tree_handle.lock().unwrap();
|
||||
let api_messages = messages
|
||||
.into_iter()
|
||||
@@ -497,12 +499,12 @@ async fn handle_websocket_subscription(
|
||||
}),
|
||||
};
|
||||
|
||||
serialize_msgpack(response)?
|
||||
serde_json::to_string(&response)?
|
||||
};
|
||||
|
||||
log::debug!("Sending batch of messages over WebSocket subscription");
|
||||
|
||||
if websocket.send(Message::Binary(msgpack_message)).await.is_err() {
|
||||
if websocket.send(Message::Text(json_message)).await.is_err() {
|
||||
// Client disconnected
|
||||
log::debug!("WebSocket subscription closed by client");
|
||||
break;
|
||||
|
||||
@@ -249,8 +249,31 @@ pub struct SerializeRequest {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SerializeResponse {
|
||||
pub session_id: SessionId,
|
||||
#[serde(with = "serde_bytes")]
|
||||
pub model_contents: Vec<u8>,
|
||||
pub model_contents: BufferEncode,
|
||||
}
|
||||
|
||||
/// Using this struct we can force Roblox to JSONDecode this as a buffer.
|
||||
/// This is what Roblox's serde APIs use, so it saves a step in the plugin.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct BufferEncode {
|
||||
m: (),
|
||||
t: Cow<'static, str>,
|
||||
base64: String,
|
||||
}
|
||||
|
||||
impl BufferEncode {
|
||||
pub fn new(content: Vec<u8>) -> Self {
|
||||
let base64 = data_encoding::BASE64.encode(&content);
|
||||
Self {
|
||||
m: (),
|
||||
t: Cow::Borrowed("buffer"),
|
||||
base64,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn model(&self) -> &str {
|
||||
&self.base64
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
|
||||
@@ -1,48 +1,8 @@
|
||||
use hyper::{header::CONTENT_TYPE, Body, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::Serialize;
|
||||
|
||||
pub fn msgpack_ok<T: Serialize>(value: T) -> Response<Body> {
|
||||
msgpack(value, StatusCode::OK)
|
||||
}
|
||||
|
||||
pub fn msgpack<T: Serialize>(value: T, code: StatusCode) -> Response<Body> {
|
||||
let mut serialized = Vec::new();
|
||||
let mut serializer = rmp_serde::Serializer::new(&mut serialized)
|
||||
.with_human_readable()
|
||||
.with_struct_map();
|
||||
|
||||
if let Err(err) = value.serialize(&mut serializer) {
|
||||
return Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.header(CONTENT_TYPE, "text/plain")
|
||||
.body(Body::from(err.to_string()))
|
||||
.unwrap();
|
||||
};
|
||||
|
||||
Response::builder()
|
||||
.status(code)
|
||||
.header(CONTENT_TYPE, "application/msgpack")
|
||||
.body(Body::from(serialized))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn serialize_msgpack<T: Serialize>(value: T) -> anyhow::Result<Vec<u8>> {
|
||||
let mut serialized = Vec::new();
|
||||
let mut serializer = rmp_serde::Serializer::new(&mut serialized)
|
||||
.with_human_readable()
|
||||
.with_struct_map();
|
||||
|
||||
value.serialize(&mut serializer)?;
|
||||
|
||||
Ok(serialized)
|
||||
}
|
||||
|
||||
pub fn deserialize_msgpack<'a, T: Deserialize<'a>>(
|
||||
input: &'a [u8],
|
||||
) -> Result<T, rmp_serde::decode::Error> {
|
||||
let mut deserializer = rmp_serde::Deserializer::new(input).with_human_readable();
|
||||
|
||||
T::deserialize(&mut deserializer)
|
||||
pub fn json_ok<T: Serialize>(value: T) -> Response<Body> {
|
||||
json(value, StatusCode::OK)
|
||||
}
|
||||
|
||||
pub fn json<T: Serialize>(value: T, code: StatusCode) -> Response<Body> {
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"name": "default",
|
||||
"tree": {
|
||||
"$className": "DataModel",
|
||||
"ReplicatedStorage": {
|
||||
"Project": {
|
||||
"$path": "project/src",
|
||||
"Module": {
|
||||
"$path": "module"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
return nil
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"name": "default",
|
||||
"tree": {
|
||||
"$className": "DataModel",
|
||||
"ReplicatedStorage": {
|
||||
"Project": {
|
||||
"$path": "src/",
|
||||
"Module": {
|
||||
"$path": "../module"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
return nil
|
||||
@@ -10,7 +10,6 @@ use std::{
|
||||
use hyper_tungstenite::tungstenite::{connect, Message};
|
||||
use rbx_dom_weak::types::Ref;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
|
||||
use librojo::{
|
||||
@@ -162,16 +161,22 @@ impl TestServeSession {
|
||||
|
||||
pub fn get_api_rojo(&self) -> Result<ServerInfoResponse, reqwest::Error> {
|
||||
let url = format!("http://localhost:{}/api/rojo", self.port);
|
||||
let body = reqwest::blocking::get(url)?.bytes()?;
|
||||
let body = reqwest::blocking::get(url)?.text()?;
|
||||
|
||||
Ok(deserialize_msgpack(&body).expect("Server returned malformed response"))
|
||||
let value = jsonc_parser::parse_to_serde_value(&body, &Default::default())
|
||||
.expect("Failed to parse JSON")
|
||||
.expect("No JSON value");
|
||||
Ok(serde_json::from_value(value).expect("Server returned malformed response"))
|
||||
}
|
||||
|
||||
pub fn get_api_read(&self, id: Ref) -> Result<ReadResponse<'_>, reqwest::Error> {
|
||||
let url = format!("http://localhost:{}/api/read/{}", self.port, id);
|
||||
let body = reqwest::blocking::get(url)?.bytes()?;
|
||||
let body = reqwest::blocking::get(url)?.text()?;
|
||||
|
||||
Ok(deserialize_msgpack(&body).expect("Server returned malformed response"))
|
||||
let value = jsonc_parser::parse_to_serde_value(&body, &Default::default())
|
||||
.expect("Failed to parse JSON")
|
||||
.expect("No JSON value");
|
||||
Ok(serde_json::from_value(value).expect("Server returned malformed response"))
|
||||
}
|
||||
|
||||
pub fn get_api_socket_packet(
|
||||
@@ -193,8 +198,8 @@ impl TestServeSession {
|
||||
}
|
||||
|
||||
match socket.read() {
|
||||
Ok(Message::Binary(binary)) => {
|
||||
let packet: SocketPacket = deserialize_msgpack(&binary)?;
|
||||
Ok(Message::Text(text)) => {
|
||||
let packet: SocketPacket = serde_json::from_str(&text)?;
|
||||
if packet.packet_type != packet_type {
|
||||
continue;
|
||||
}
|
||||
@@ -207,7 +212,7 @@ impl TestServeSession {
|
||||
return Err("WebSocket closed before receiving messages".into());
|
||||
}
|
||||
Ok(_) => {
|
||||
// Ignore other message types (ping, pong, text)
|
||||
// Ignore other message types (ping, pong, binary)
|
||||
continue;
|
||||
}
|
||||
Err(hyper_tungstenite::tungstenite::Error::Io(e))
|
||||
@@ -231,37 +236,15 @@ impl TestServeSession {
|
||||
) -> Result<SerializeResponse, reqwest::Error> {
|
||||
let client = reqwest::blocking::Client::new();
|
||||
let url = format!("http://localhost:{}/api/serialize", self.port);
|
||||
let body = serialize_msgpack(&SerializeRequest {
|
||||
let body = serde_json::to_string(&SerializeRequest {
|
||||
session_id,
|
||||
ids: ids.to_vec(),
|
||||
})
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let body = client.post(url).body(body).send()?.bytes()?;
|
||||
|
||||
Ok(deserialize_msgpack(&body).expect("Server returned malformed response"))
|
||||
client.post(url).body((body).unwrap()).send()?.json()
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_msgpack<T: Serialize>(value: T) -> Result<Vec<u8>, rmp_serde::encode::Error> {
|
||||
let mut serialized = Vec::new();
|
||||
let mut serializer = rmp_serde::Serializer::new(&mut serialized)
|
||||
.with_human_readable()
|
||||
.with_struct_map();
|
||||
|
||||
value.serialize(&mut serializer)?;
|
||||
|
||||
Ok(serialized)
|
||||
}
|
||||
|
||||
fn deserialize_msgpack<'a, T: Deserialize<'a>>(
|
||||
input: &'a [u8],
|
||||
) -> Result<T, rmp_serde::decode::Error> {
|
||||
let mut deserializer = rmp_serde::Deserializer::new(input).with_human_readable();
|
||||
|
||||
T::deserialize(&mut deserializer)
|
||||
}
|
||||
|
||||
/// Probably-okay way to generate random enough port numbers for running the
|
||||
/// Rojo live server.
|
||||
///
|
||||
@@ -279,7 +262,11 @@ fn get_port_number() -> usize {
|
||||
/// Since the provided structure intentionally includes unredacted referents,
|
||||
/// some post-processing is done to ensure they don't show up in the model.
|
||||
pub fn serialize_to_xml_model(response: &SerializeResponse, redactions: &RedactionMap) -> String {
|
||||
let mut dom = rbx_binary::from_reader(response.model_contents.as_slice()).unwrap();
|
||||
let model_content = data_encoding::BASE64
|
||||
.decode(response.model_contents.model().as_bytes())
|
||||
.unwrap();
|
||||
|
||||
let mut dom = rbx_binary::from_reader(model_content.as_slice()).unwrap();
|
||||
// This makes me realize that maybe we need a `descendants_mut` iter.
|
||||
let ref_list: Vec<Ref> = dom.descendants().map(|inst| inst.referent()).collect();
|
||||
for referent in ref_list {
|
||||
|
||||
@@ -60,8 +60,8 @@ syncback_tests! {
|
||||
// Ensures that projects can be reserialized by syncback and that
|
||||
// default.project.json doesn't change unexpectedly.
|
||||
project_reserialize => ["attribute_mismatch.luau", "property_mismatch.project.json"],
|
||||
// Confirms that Instances that cannot serialize as directories serialize as rbxms
|
||||
rbxm_fallback => ["src/ChildWithDuplicates.rbxm"],
|
||||
// Confirms that duplicate children are resolved with incrementing suffixes
|
||||
rbxm_fallback => ["src/ChildWithDuplicates/DuplicateChild/.gitkeep", "src/ChildWithDuplicates/DuplicateChild1/.gitkeep"],
|
||||
// Ensures that ref properties are linked properly on the file system
|
||||
ref_properties => ["src/pointer.model.json", "src/target.model.json"],
|
||||
// Ensures that ref properties are linked when no attributes are manually
|
||||
|
||||
Reference in New Issue
Block a user