diff --git a/.gitignore b/.gitignore index 61bdf0b2..a660919d 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,6 @@ # Macos file system junk ._* .DS_STORE + +# JetBrains IDEs +/.idea/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c876312..0dc94efc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,9 +32,13 @@ Making a new release? Simply add the new header with the version and date undern ## Unreleased * Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179]) * Implemented support for the "name" property in meta/model JSON files. ([#1187]) +* Fixed instance replacement fallback failing when too many instances needed to be replaced. ([#1192]) +* Fixed a bug where macOS paths weren't being handled correctly. ([#1201]) [#1179]: https://github.com/rojo-rbx/rojo/pull/1179 [#1187]: https://github.com/rojo-rbx/rojo/pull/1187 +[#1192]: https://github.com/rojo-rbx/rojo/pull/1192 +[#1201]: https://github.com/rojo-rbx/rojo/pull/1201 ## [7.7.0-rc.1] (November 27th, 2025) diff --git a/Cargo.lock b/Cargo.lock index 7f9a58a7..8ac0676b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1319,6 +1319,7 @@ dependencies = [ "fs-err", "notify", "serde", + "tempfile", ] [[package]] diff --git a/crates/memofs/CHANGELOG.md b/crates/memofs/CHANGELOG.md index b83bfeaf..c35b5399 100644 --- a/crates/memofs/CHANGELOG.md +++ b/crates/memofs/CHANGELOG.md @@ -1,6 +1,7 @@ # memofs Changelog ## Unreleased Changes +* Added `Vfs::canonicalize`. [#1201] ## 0.3.1 (2025-11-27) * Added `Vfs::exists`. 
[#1169] diff --git a/crates/memofs/Cargo.toml b/crates/memofs/Cargo.toml index ad0b2637..d1d8cc5e 100644 --- a/crates/memofs/Cargo.toml +++ b/crates/memofs/Cargo.toml @@ -19,3 +19,6 @@ crossbeam-channel = "0.5.12" fs-err = "2.11.0" notify = "4.0.17" serde = { version = "1.0.197", features = ["derive"] } + +[dev-dependencies] +tempfile = "3.10.1" diff --git a/crates/memofs/src/in_memory_fs.rs b/crates/memofs/src/in_memory_fs.rs index 3c825575..41006f1c 100644 --- a/crates/memofs/src/in_memory_fs.rs +++ b/crates/memofs/src/in_memory_fs.rs @@ -232,6 +232,33 @@ impl VfsBackend for InMemoryFs { } } + // TODO: We rely on Rojo to prepend cwd to any relative path before storing paths + // in MemoFS. The current implementation will error if no prepended absolute path + // is found. It really only normalizes paths within the provided path's context. + // Example: "/Users/username/project/../other/file.txt" -> + // "/Users/username/other/file.txt" + // Erroneous example: "/Users/../../other/file.txt" -> "/other/file.txt" + // This is not very robust. We should implement proper path normalization here or otherwise + // warn if we are missing context and can not fully canonicalize the path correctly. 
+ fn canonicalize(&mut self, path: &Path) -> io::Result { + let mut normalized = PathBuf::new(); + for component in path.components() { + match component { + std::path::Component::ParentDir => { + normalized.pop(); + } + std::path::Component::CurDir => {} + _ => normalized.push(component), + } + } + + let inner = self.inner.lock().unwrap(); + match inner.entries.get(&normalized) { + Some(_) => Ok(normalized), + None => not_found(&normalized), + } + } + fn event_receiver(&self) -> crossbeam_channel::Receiver { let inner = self.inner.lock().unwrap(); diff --git a/crates/memofs/src/lib.rs b/crates/memofs/src/lib.rs index bc7e329e..41d21100 100644 --- a/crates/memofs/src/lib.rs +++ b/crates/memofs/src/lib.rs @@ -77,6 +77,7 @@ pub trait VfsBackend: sealed::Sealed + Send + 'static { fn metadata(&mut self, path: &Path) -> io::Result; fn remove_file(&mut self, path: &Path) -> io::Result<()>; fn remove_dir_all(&mut self, path: &Path) -> io::Result<()>; + fn canonicalize(&mut self, path: &Path) -> io::Result; fn event_receiver(&self) -> crossbeam_channel::Receiver; fn watch(&mut self, path: &Path) -> io::Result<()>; @@ -225,6 +226,11 @@ impl VfsInner { self.backend.metadata(path) } + fn canonicalize>(&mut self, path: P) -> io::Result { + let path = path.as_ref(); + self.backend.canonicalize(path) + } + fn event_receiver(&self) -> crossbeam_channel::Receiver { self.backend.event_receiver() } @@ -413,6 +419,19 @@ impl Vfs { self.inner.lock().unwrap().metadata(path) } + /// Normalize a path via the underlying backend. + /// + /// Roughly equivalent to [`std::fs::canonicalize`][std::fs::canonicalize]. Relative paths are + /// resolved against the backend's current working directory (if applicable) and errors are + /// surfaced directly from the backend. 
+ /// + /// [std::fs::canonicalize]: https://doc.rust-lang.org/stable/std/fs/fn.canonicalize.html + #[inline] + pub fn canonicalize>(&self, path: P) -> io::Result { + let path = path.as_ref(); + self.inner.lock().unwrap().canonicalize(path) + } + /// Retrieve a handle to the event receiver for this `Vfs`. #[inline] pub fn event_receiver(&self) -> crossbeam_channel::Receiver { @@ -540,6 +559,13 @@ impl VfsLock<'_> { self.inner.metadata(path) } + /// Normalize a path via the underlying backend. + #[inline] + pub fn normalize>(&mut self, path: P) -> io::Result { + let path = path.as_ref(); + self.inner.canonicalize(path) + } + /// Retrieve a handle to the event receiver for this `Vfs`. #[inline] pub fn event_receiver(&self) -> crossbeam_channel::Receiver { @@ -555,7 +581,9 @@ impl VfsLock<'_> { #[cfg(test)] mod test { - use crate::{InMemoryFs, Vfs, VfsSnapshot}; + use crate::{InMemoryFs, StdBackend, Vfs, VfsSnapshot}; + use std::io; + use std::path::PathBuf; /// https://github.com/rojo-rbx/rojo/issues/899 #[test] @@ -571,4 +599,62 @@ mod test { "bar\nfoo\n\n" ); } + + /// https://github.com/rojo-rbx/rojo/issues/1200 + #[test] + fn canonicalize_in_memory_success() { + let mut imfs = InMemoryFs::new(); + let contents = "Lorem ipsum dolor sit amet.".to_string(); + + imfs.load_snapshot("/test/file.txt", VfsSnapshot::file(contents.to_string())) + .unwrap(); + + let vfs = Vfs::new(imfs); + + assert_eq!( + vfs.canonicalize("/test/nested/../file.txt").unwrap(), + PathBuf::from("/test/file.txt") + ); + assert_eq!( + vfs.read_to_string(vfs.canonicalize("/test/nested/../file.txt").unwrap()) + .unwrap() + .to_string(), + contents.to_string() + ); + } + + #[test] + fn canonicalize_in_memory_missing_errors() { + let imfs = InMemoryFs::new(); + let vfs = Vfs::new(imfs); + + let err = vfs.canonicalize("test").unwrap_err(); + assert_eq!(err.kind(), io::ErrorKind::NotFound); + } + + #[test] + fn canonicalize_std_backend_success() { + let contents = "Lorem ipsum dolor sit 
amet.".to_string(); + let dir = tempfile::tempdir().unwrap(); + let file_path = dir.path().join("file.txt"); + fs_err::write(&file_path, contents.to_string()).unwrap(); + + let vfs = Vfs::new(StdBackend::new()); + let canonicalized = vfs.canonicalize(&file_path).unwrap(); + assert_eq!(canonicalized, file_path.canonicalize().unwrap()); + assert_eq!( + vfs.read_to_string(&canonicalized).unwrap().to_string(), + contents.to_string() + ); + } + + #[test] + fn canonicalize_std_backend_missing_errors() { + let dir = tempfile::tempdir().unwrap(); + let file_path = dir.path().join("test"); + + let vfs = Vfs::new(StdBackend::new()); + let err = vfs.canonicalize(&file_path).unwrap_err(); + assert_eq!(err.kind(), io::ErrorKind::NotFound); + } } diff --git a/crates/memofs/src/noop_backend.rs b/crates/memofs/src/noop_backend.rs index 98b86d1a..b1ee301d 100644 --- a/crates/memofs/src/noop_backend.rs +++ b/crates/memofs/src/noop_backend.rs @@ -1,5 +1,5 @@ use std::io; -use std::path::Path; +use std::path::{Path, PathBuf}; use crate::{Metadata, ReadDir, VfsBackend, VfsEvent}; @@ -50,6 +50,10 @@ impl VfsBackend for NoopBackend { Err(io::Error::other("NoopBackend doesn't do anything")) } + fn canonicalize(&mut self, _path: &Path) -> io::Result { + Err(io::Error::other("NoopBackend doesn't do anything")) + } + fn event_receiver(&self) -> crossbeam_channel::Receiver { crossbeam_channel::never() } diff --git a/crates/memofs/src/std_backend.rs b/crates/memofs/src/std_backend.rs index f354d806..a25fa39d 100644 --- a/crates/memofs/src/std_backend.rs +++ b/crates/memofs/src/std_backend.rs @@ -106,6 +106,10 @@ impl VfsBackend for StdBackend { }) } + fn canonicalize(&mut self, path: &Path) -> io::Result { + fs_err::canonicalize(path) + } + fn event_receiver(&self) -> crossbeam_channel::Receiver { self.watcher_receiver.clone() } diff --git a/plugin/src/ApiContext.lua b/plugin/src/ApiContext.lua index cbbffcc1..2d2459ec 100644 --- a/plugin/src/ApiContext.lua +++ b/plugin/src/ApiContext.lua @@ 
-290,31 +290,39 @@ function ApiContext:open(id) end function ApiContext:serialize(ids: { string }) - local url = ("%s/api/serialize/%s"):format(self.__baseUrl, table.concat(ids, ",")) + local url = ("%s/api/serialize"):format(self.__baseUrl) + local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids }) - return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body) - if body.sessionId ~= self.__sessionId then - return Promise.reject("Server changed ID") - end + return Http.post(url, request_body) + :andThen(rejectFailedRequests) + :andThen(Http.Response.json) + :andThen(function(response_body) + if response_body.sessionId ~= self.__sessionId then + return Promise.reject("Server changed ID") + end - assert(validateApiSerialize(body)) + assert(validateApiSerialize(response_body)) - return body - end) + return response_body + end) end function ApiContext:refPatch(ids: { string }) - local url = ("%s/api/ref-patch/%s"):format(self.__baseUrl, table.concat(ids, ",")) + local url = ("%s/api/ref-patch"):format(self.__baseUrl) + local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids }) - return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body) - if body.sessionId ~= self.__sessionId then - return Promise.reject("Server changed ID") - end + return Http.post(url, request_body) + :andThen(rejectFailedRequests) + :andThen(Http.Response.json) + :andThen(function(response_body) + if response_body.sessionId ~= self.__sessionId then + return Promise.reject("Server changed ID") + end - assert(validateApiRefPatch(body)) + assert(validateApiRefPatch(response_body)) - return body - end) + return response_body + end) end return ApiContext diff --git a/src/change_processor.rs b/src/change_processor.rs index 78d45acc..6c5c8224 100644 --- a/src/change_processor.rs +++ b/src/change_processor.rs @@ -1,12 +1,12 @@ -use std::{ - fs, - sync::{Arc, Mutex}, -}; - use 
crossbeam_channel::{select, Receiver, RecvError, Sender}; use jod_thread::JoinHandle; use memofs::{IoResultExt, Vfs, VfsEvent}; use rbx_dom_weak::types::{Ref, Variant}; +use std::path::PathBuf; +use std::{ + fs, + sync::{Arc, Mutex}, +}; use crate::{ message_queue::MessageQueue, @@ -114,6 +114,49 @@ struct JobThreadContext { } impl JobThreadContext { + /// Computes and applies patches to the DOM for a given file path. + /// + /// This function finds the nearest ancestor to the given path that has associated instances + /// in the tree. + /// It then computes and applies changes for each affected instance ID and + /// returns a vector of applied patch sets. + fn apply_patches(&self, path: PathBuf) -> Vec { + let mut tree = self.tree.lock().unwrap(); + let mut applied_patches = Vec::new(); + + // Find the nearest ancestor to this path that has + // associated instances in the tree. This helps make sure + // that we handle additions correctly, especially if we + // receive events for descendants of a large tree being + // created all at once. + let mut current_path = path.as_path(); + let affected_ids = loop { + let ids = tree.get_ids_at_path(current_path); + + log::trace!("Path {} affects IDs {:?}", current_path.display(), ids); + + if !ids.is_empty() { + break ids.to_vec(); + } + + log::trace!("Trying parent path..."); + match current_path.parent() { + Some(parent) => current_path = parent, + None => break Vec::new(), + } + }; + + for id in affected_ids { + if let Some(patch) = compute_and_apply_changes(&mut tree, &self.vfs, id) { + if !patch.is_empty() { + applied_patches.push(patch); + } + } + } + + applied_patches + } + fn handle_vfs_event(&self, event: VfsEvent) { log::trace!("Vfs event: {:?}", event); @@ -125,41 +168,16 @@ impl JobThreadContext { // For a given VFS event, we might have many changes to different parts // of the tree. Calculate and apply all of these changes. 
let applied_patches = match event { - VfsEvent::Create(path) | VfsEvent::Remove(path) | VfsEvent::Write(path) => { - let mut tree = self.tree.lock().unwrap(); - let mut applied_patches = Vec::new(); - - // Find the nearest ancestor to this path that has - // associated instances in the tree. This helps make sure - // that we handle additions correctly, especially if we - // receive events for descendants of a large tree being - // created all at once. - let mut current_path = path.as_path(); - let affected_ids = loop { - let ids = tree.get_ids_at_path(current_path); - - log::trace!("Path {} affects IDs {:?}", current_path.display(), ids); - - if !ids.is_empty() { - break ids.to_vec(); - } - - log::trace!("Trying parent path..."); - match current_path.parent() { - Some(parent) => current_path = parent, - None => break Vec::new(), - } - }; - - for id in affected_ids { - if let Some(patch) = compute_and_apply_changes(&mut tree, &self.vfs, id) { - if !patch.is_empty() { - applied_patches.push(patch); - } - } - } - - applied_patches + VfsEvent::Create(path) | VfsEvent::Write(path) => { + self.apply_patches(self.vfs.canonicalize(&path).unwrap()) + } + VfsEvent::Remove(path) => { + // MemoFS does not track parent removals yet, so we can canonicalize + // the parent path safely and then append the removed path's file name. 
+ let parent = path.parent().unwrap(); + let file_name = path.file_name().unwrap(); + let parent_normalized = self.vfs.canonicalize(parent).unwrap(); + self.apply_patches(parent_normalized.join(file_name)) } _ => { log::warn!("Unhandled VFS event: {:?}", event); diff --git a/src/snapshot_middleware/csv.rs b/src/snapshot_middleware/csv.rs index 75a043d1..18fa1424 100644 --- a/src/snapshot_middleware/csv.rs +++ b/src/snapshot_middleware/csv.rs @@ -42,7 +42,7 @@ pub fn snapshot_csv( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]), + .relevant_paths(vec![vfs.canonicalize(path)?]), ); AdjacentMetadata::read_and_apply_all(vfs, path, name, &mut snapshot)?; diff --git a/src/snapshot_middleware/dir.rs b/src/snapshot_middleware/dir.rs index b825542d..fb444c37 100644 --- a/src/snapshot_middleware/dir.rs +++ b/src/snapshot_middleware/dir.rs @@ -62,18 +62,19 @@ pub fn snapshot_dir_no_meta( } } + let normalized_path = vfs.canonicalize(path)?; let relevant_paths = vec![ - path.to_path_buf(), + normalized_path.clone(), // TODO: We shouldn't need to know about Lua existing in this // middleware. Should we figure out a way for that function to add // relevant paths to this middleware? 
- path.join("init.lua"), - path.join("init.luau"), - path.join("init.server.lua"), - path.join("init.server.luau"), - path.join("init.client.lua"), - path.join("init.client.luau"), - path.join("init.csv"), + normalized_path.join("init.lua"), + normalized_path.join("init.luau"), + normalized_path.join("init.server.lua"), + normalized_path.join("init.server.luau"), + normalized_path.join("init.client.lua"), + normalized_path.join("init.client.luau"), + normalized_path.join("init.csv"), ]; let snapshot = InstanceSnapshot::new() diff --git a/src/snapshot_middleware/json.rs b/src/snapshot_middleware/json.rs index 8cd029f5..1a9537ad 100644 --- a/src/snapshot_middleware/json.rs +++ b/src/snapshot_middleware/json.rs @@ -32,7 +32,7 @@ pub fn snapshot_json( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/json_model.rs b/src/snapshot_middleware/json_model.rs index 122adf6a..b3b5c8f4 100644 --- a/src/snapshot_middleware/json_model.rs +++ b/src/snapshot_middleware/json_model.rs @@ -53,7 +53,7 @@ pub fn snapshot_json_model( snapshot.metadata = snapshot .metadata .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context) .specified_id(id) .schema(schema) diff --git a/src/snapshot_middleware/lua.rs b/src/snapshot_middleware/lua.rs index d738e122..b777fbfe 100644 --- a/src/snapshot_middleware/lua.rs +++ b/src/snapshot_middleware/lua.rs @@ -88,7 +88,7 @@ pub fn snapshot_lua( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/rbxm.rs b/src/snapshot_middleware/rbxm.rs index f3693d52..6518be6a 100644 --- a/src/snapshot_middleware/rbxm.rs +++ b/src/snapshot_middleware/rbxm.rs @@ -28,7 
+28,7 @@ pub fn snapshot_rbxm( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/rbxmx.rs b/src/snapshot_middleware/rbxmx.rs index 59aff679..8f122324 100644 --- a/src/snapshot_middleware/rbxmx.rs +++ b/src/snapshot_middleware/rbxmx.rs @@ -31,7 +31,7 @@ pub fn snapshot_rbxmx( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/toml.rs b/src/snapshot_middleware/toml.rs index 7262cfa9..3c42de6c 100644 --- a/src/snapshot_middleware/toml.rs +++ b/src/snapshot_middleware/toml.rs @@ -31,7 +31,7 @@ pub fn snapshot_toml( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/txt.rs b/src/snapshot_middleware/txt.rs index 715354e0..c56aacdd 100644 --- a/src/snapshot_middleware/txt.rs +++ b/src/snapshot_middleware/txt.rs @@ -28,7 +28,7 @@ pub fn snapshot_txt( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/snapshot_middleware/yaml.rs b/src/snapshot_middleware/yaml.rs index c8132d49..10f744bc 100644 --- a/src/snapshot_middleware/yaml.rs +++ b/src/snapshot_middleware/yaml.rs @@ -37,7 +37,7 @@ pub fn snapshot_yaml( .metadata( InstanceMetadata::new() .instigating_source(path) - .relevant_paths(vec![path.to_path_buf()]) + .relevant_paths(vec![vfs.canonicalize(path)?]) .context(context), ); diff --git a/src/web/api.rs b/src/web/api.rs index 8e7ce72f..5fbd7b6d 100644 --- a/src/web/api.rs +++ b/src/web/api.rs @@ -1,13 +1,7 @@ //! Defines Rojo's HTTP API, all under /api. 
These endpoints generally return //! JSON. -use std::{ - collections::{HashMap, HashSet}, - fs, - path::PathBuf, - str::FromStr, - sync::Arc, -}; +use std::{collections::HashMap, fs, path::PathBuf, str::FromStr, sync::Arc}; use futures::{sink::SinkExt, stream::StreamExt}; use hyper::{body, Body, Method, Request, Response, StatusCode}; @@ -30,7 +24,10 @@ use crate::{ }, util::{json, json_ok}, }, - web_api::{BufferEncode, InstanceUpdate, RefPatchResponse, SerializeResponse}, + web_api::{ + BufferEncode, InstanceUpdate, RefPatchRequest, RefPatchResponse, SerializeRequest, + SerializeResponse, + }, }; pub async fn call(serve_session: Arc, mut request: Request) -> Response { @@ -53,12 +50,8 @@ pub async fn call(serve_session: Arc, mut request: Request) ) } } - (&Method::GET, path) if path.starts_with("/api/serialize/") => { - service.handle_api_serialize(request).await - } - (&Method::GET, path) if path.starts_with("/api/ref-patch/") => { - service.handle_api_ref_patch(request).await - } + (&Method::POST, "/api/serialize") => service.handle_api_serialize(request).await, + (&Method::POST, "/api/ref-patch") => service.handle_api_ref_patch(request).await, (&Method::POST, path) if path.starts_with("/api/open/") => { service.handle_api_open(request).await @@ -229,22 +222,30 @@ impl ApiService { /// that correspond to the requested Instances. These values have their /// `Value` property set to point to the requested Instance. 
async fn handle_api_serialize(&self, request: Request) -> Response { - let argument = &request.uri().path()["/api/serialize/".len()..]; - let requested_ids: Result, _> = argument.split(',').map(Ref::from_str).collect(); + let session_id = self.serve_session.session_id(); + let body = body::to_bytes(request.into_body()).await.unwrap(); - let requested_ids = match requested_ids { - Ok(ids) => ids, - Err(_) => { + let request: SerializeRequest = match json::from_slice(&body) { + Ok(request) => request, + Err(err) => { return json( - ErrorResponse::bad_request("Malformed ID list"), + ErrorResponse::bad_request(format!("Invalid body: {}", err)), StatusCode::BAD_REQUEST, ); } }; + + if request.session_id != session_id { + return json( + ErrorResponse::bad_request("Wrong session ID"), + StatusCode::BAD_REQUEST, + ); + } + let mut response_dom = WeakDom::new(InstanceBuilder::new("Folder")); let tree = self.serve_session.tree(); - for id in &requested_ids { + for id in &request.ids { if let Some(instance) = tree.get_instance(*id) { let clone = response_dom.insert( Ref::none(), @@ -290,20 +291,26 @@ impl ApiService { /// and referent properties need to be updated after the serialize /// endpoint is used. 
async fn handle_api_ref_patch(self, request: Request) -> Response { - let argument = &request.uri().path()["/api/ref-patch/".len()..]; - let requested_ids: Result, _> = - argument.split(',').map(Ref::from_str).collect(); + let session_id = self.serve_session.session_id(); + let body = body::to_bytes(request.into_body()).await.unwrap(); - let requested_ids = match requested_ids { - Ok(ids) => ids, - Err(_) => { + let request: RefPatchRequest = match json::from_slice(&body) { + Ok(request) => request, + Err(err) => { return json( - ErrorResponse::bad_request("Malformed ID list"), + ErrorResponse::bad_request(format!("Invalid body: {}", err)), StatusCode::BAD_REQUEST, ); } }; + if request.session_id != session_id { + return json( + ErrorResponse::bad_request("Wrong session ID"), + StatusCode::BAD_REQUEST, + ); + } + let mut instance_updates: HashMap = HashMap::new(); let tree = self.serve_session.tree(); @@ -312,7 +319,7 @@ impl ApiService { let Variant::Ref(prop_value) = prop_value else { continue; }; - if let Some(target_id) = requested_ids.get(prop_value) { + if let Some(target_id) = request.ids.get(prop_value) { let instance_id = instance.id(); let update = instance_updates diff --git a/src/web/interface.rs b/src/web/interface.rs index 4871ec97..430f13ec 100644 --- a/src/web/interface.rs +++ b/src/web/interface.rs @@ -238,6 +238,13 @@ pub struct OpenResponse { pub session_id: SessionId, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SerializeRequest { + pub session_id: SessionId, + pub ids: Vec, +} + #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SerializeResponse { @@ -269,6 +276,13 @@ impl BufferEncode { } } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RefPatchRequest { + pub session_id: SessionId, + pub ids: HashSet, +} + #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RefPatchResponse<'a> { 
diff --git a/tests/rojo_test/serve_util.rs b/tests/rojo_test/serve_util.rs index 9036316f..a5decdbb 100644 --- a/tests/rojo_test/serve_util.rs +++ b/tests/rojo_test/serve_util.rs @@ -1,5 +1,4 @@ use std::{ - fmt::Write as _, fs, path::{Path, PathBuf}, process::Command, @@ -13,8 +12,12 @@ use rbx_dom_weak::types::Ref; use tempfile::{tempdir, TempDir}; -use librojo::web_api::{ - ReadResponse, SerializeResponse, ServerInfoResponse, SocketPacket, SocketPacketType, +use librojo::{ + web_api::{ + ReadResponse, SerializeRequest, SerializeResponse, ServerInfoResponse, SocketPacket, + SocketPacketType, + }, + SessionId, }; use rojo_insta_ext::RedactionMap; @@ -226,16 +229,19 @@ impl TestServeSession { } } - pub fn get_api_serialize(&self, ids: &[Ref]) -> Result { - let mut id_list = String::with_capacity(ids.len() * 33); - for id in ids { - write!(id_list, "{id},").unwrap(); - } - id_list.pop(); + pub fn get_api_serialize( + &self, + ids: &[Ref], + session_id: SessionId, + ) -> Result { + let client = reqwest::blocking::Client::new(); + let url = format!("http://localhost:{}/api/serialize", self.port); + let body = serde_json::to_string(&SerializeRequest { + session_id, + ids: ids.to_vec(), + }); - let url = format!("http://localhost:{}/api/serialize/{}", self.port, id_list); - - reqwest::blocking::get(url)?.json() + client.post(url).body((body).unwrap()).send()?.json() } } diff --git a/tests/tests/serve.rs b/tests/tests/serve.rs index ee0528e1..748f1687 100644 --- a/tests/tests/serve.rs +++ b/tests/tests/serve.rs @@ -646,7 +646,7 @@ fn meshpart_with_id() { .unwrap(); let serialize_response = session - .get_api_serialize(&[*meshpart, *objectvalue]) + .get_api_serialize(&[*meshpart, *objectvalue], info.session_id) .unwrap(); // We don't assert a snapshot on the SerializeResponse because the model includes the @@ -673,7 +673,9 @@ fn forced_parent() { read_response.intern_and_redact(&mut redactions, root_id) ); - let serialize_response = 
session.get_api_serialize(&[root_id]).unwrap(); + let serialize_response = session + .get_api_serialize(&[root_id], info.session_id) + .unwrap(); assert_eq!(serialize_response.session_id, info.session_id);