Merge branch 'master' into name-prop

This commit is contained in:
2026-01-20 01:10:20 +01:00
committed by GitHub
25 changed files with 307 additions and 118 deletions

3
.gitignore vendored
View File

@@ -23,3 +23,6 @@
# Macos file system junk # Macos file system junk
._* ._*
.DS_STORE .DS_STORE
# JetBrains IDEs
/.idea/

View File

@@ -32,9 +32,13 @@ Making a new release? Simply add the new header with the version and date undern
## Unreleased ## Unreleased
* Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179]) * Fixed a bug caused by having reference properties (such as `ObjectValue.Value`) that point to an Instance not included in syncback. ([#1179])
* Implemented support for the "name" property in meta/model JSON files. ([#1187]) * Implemented support for the "name" property in meta/model JSON files. ([#1187])
* Fixed instance replacement fallback failing when too many instances needed to be replaced. ([#1192])
* Fixed a bug where macOS paths weren't being handled correctly. ([#1201])
[#1179]: https://github.com/rojo-rbx/rojo/pull/1179 [#1179]: https://github.com/rojo-rbx/rojo/pull/1179
[#1187]: https://github.com/rojo-rbx/rojo/pull/1187 [#1187]: https://github.com/rojo-rbx/rojo/pull/1187
[#1192]: https://github.com/rojo-rbx/rojo/pull/1192
[#1201]: https://github.com/rojo-rbx/rojo/pull/1201
## [7.7.0-rc.1] (November 27th, 2025) ## [7.7.0-rc.1] (November 27th, 2025)

1
Cargo.lock generated
View File

@@ -1319,6 +1319,7 @@ dependencies = [
"fs-err", "fs-err",
"notify", "notify",
"serde", "serde",
"tempfile",
] ]
[[package]] [[package]]

View File

@@ -1,6 +1,7 @@
# memofs Changelog # memofs Changelog
## Unreleased Changes ## Unreleased Changes
* Added `Vfs::canonicalize`. [#1201]
## 0.3.1 (2025-11-27) ## 0.3.1 (2025-11-27)
* Added `Vfs::exists`. [#1169] * Added `Vfs::exists`. [#1169]

View File

@@ -19,3 +19,6 @@ crossbeam-channel = "0.5.12"
fs-err = "2.11.0" fs-err = "2.11.0"
notify = "4.0.17" notify = "4.0.17"
serde = { version = "1.0.197", features = ["derive"] } serde = { version = "1.0.197", features = ["derive"] }
[dev-dependencies]
tempfile = "3.10.1"

View File

@@ -232,6 +232,33 @@ impl VfsBackend for InMemoryFs {
} }
} }
// TODO: We rely on Rojo to prepend the cwd to any relative path before paths
// are stored in MemoFS; this implementation errors when no absolute prefix is
// present, and only normalizes a path lexically within its own context.
// Example: "/Users/username/project/../other/file.txt" ->
// "/Users/username/other/file.txt"
// Erroneous example: "/Users/../../other/file.txt" -> "/other/file.txt"
// This is not robust: we should either implement real path normalization or
// warn when missing context prevents fully canonicalizing the path.
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf> {
    use std::path::Component;

    // Resolve `.` and `..` purely lexically, without touching the filesystem.
    let normalized = path
        .components()
        .fold(PathBuf::new(), |mut acc, component| {
            match component {
                Component::CurDir => {}
                Component::ParentDir => {
                    acc.pop();
                }
                other => acc.push(other),
            }
            acc
        });

    // Only paths that actually exist in the snapshot canonicalize successfully.
    let inner = self.inner.lock().unwrap();
    if inner.entries.get(&normalized).is_some() {
        Ok(normalized)
    } else {
        not_found(&normalized)
    }
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
let inner = self.inner.lock().unwrap(); let inner = self.inner.lock().unwrap();

View File

@@ -77,6 +77,7 @@ pub trait VfsBackend: sealed::Sealed + Send + 'static {
fn metadata(&mut self, path: &Path) -> io::Result<Metadata>; fn metadata(&mut self, path: &Path) -> io::Result<Metadata>;
fn remove_file(&mut self, path: &Path) -> io::Result<()>; fn remove_file(&mut self, path: &Path) -> io::Result<()>;
fn remove_dir_all(&mut self, path: &Path) -> io::Result<()>; fn remove_dir_all(&mut self, path: &Path) -> io::Result<()>;
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf>;
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent>; fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent>;
fn watch(&mut self, path: &Path) -> io::Result<()>; fn watch(&mut self, path: &Path) -> io::Result<()>;
@@ -225,6 +226,11 @@ impl VfsInner {
self.backend.metadata(path) self.backend.metadata(path)
} }
// Forward canonicalization straight to the backend.
fn canonicalize<P: AsRef<Path>>(&mut self, path: P) -> io::Result<PathBuf> {
    self.backend.canonicalize(path.as_ref())
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
self.backend.event_receiver() self.backend.event_receiver()
} }
@@ -413,6 +419,19 @@ impl Vfs {
self.inner.lock().unwrap().metadata(path) self.inner.lock().unwrap().metadata(path)
} }
/// Canonicalize a path using this `Vfs`'s underlying backend.
///
/// Roughly the `Vfs` analogue of [`std::fs::canonicalize`][std::fs::canonicalize]:
/// relative paths are resolved against the backend's current working directory
/// (when the backend has one), and any backend error is returned unchanged.
///
/// [std::fs::canonicalize]: https://doc.rust-lang.org/stable/std/fs/fn.canonicalize.html
#[inline]
pub fn canonicalize<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf> {
    self.inner.lock().unwrap().canonicalize(path.as_ref())
}
/// Retrieve a handle to the event receiver for this `Vfs`. /// Retrieve a handle to the event receiver for this `Vfs`.
#[inline] #[inline]
pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
@@ -540,6 +559,13 @@ impl VfsLock<'_> {
self.inner.metadata(path) self.inner.metadata(path)
} }
/// Normalize a path via the underlying backend.
///
/// NOTE(review): this delegates to the backend's `canonicalize`, yet is named
/// `normalize` while the equivalent `Vfs` method is `canonicalize` — confirm
/// whether the differing name is intentional.
#[inline]
pub fn normalize<P: AsRef<Path>>(&mut self, path: P) -> io::Result<PathBuf> {
    self.inner.canonicalize(path.as_ref())
}
/// Retrieve a handle to the event receiver for this `Vfs`. /// Retrieve a handle to the event receiver for this `Vfs`.
#[inline] #[inline]
pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { pub fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
@@ -555,7 +581,9 @@ impl VfsLock<'_> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crate::{InMemoryFs, Vfs, VfsSnapshot}; use crate::{InMemoryFs, StdBackend, Vfs, VfsSnapshot};
use std::io;
use std::path::PathBuf;
/// https://github.com/rojo-rbx/rojo/issues/899 /// https://github.com/rojo-rbx/rojo/issues/899
#[test] #[test]
@@ -571,4 +599,62 @@ mod test {
"bar\nfoo\n\n" "bar\nfoo\n\n"
); );
} }
/// https://github.com/rojo-rbx/rojo/issues/1200
#[test]
fn canonicalize_in_memory_success() {
    let mut imfs = InMemoryFs::new();
    // &str avoids the redundant String clones the test previously made.
    let contents = "Lorem ipsum dolor sit amet.";
    imfs.load_snapshot("/test/file.txt", VfsSnapshot::file(contents))
        .unwrap();

    let vfs = Vfs::new(imfs);

    // `..` components are resolved lexically against the stored entries.
    let canonical = vfs.canonicalize("/test/nested/../file.txt").unwrap();
    assert_eq!(canonical, PathBuf::from("/test/file.txt"));

    // The canonicalized path must be readable and round-trip the contents.
    assert_eq!(vfs.read_to_string(canonical).unwrap().to_string(), contents);
}
#[test]
fn canonicalize_in_memory_missing_errors() {
    // An empty in-memory FS has no entries, so any lookup must fail.
    let vfs = Vfs::new(InMemoryFs::new());

    let result = vfs.canonicalize("test");
    assert_eq!(result.unwrap_err().kind(), io::ErrorKind::NotFound);
}
#[test]
fn canonicalize_std_backend_success() {
    // &str avoids the redundant String clones the test previously made.
    let contents = "Lorem ipsum dolor sit amet.";
    let dir = tempfile::tempdir().unwrap();
    let file_path = dir.path().join("file.txt");
    fs_err::write(&file_path, contents).unwrap();

    let vfs = Vfs::new(StdBackend::new());

    // The VFS must agree with std's canonicalization for a real file.
    let canonicalized = vfs.canonicalize(&file_path).unwrap();
    assert_eq!(canonicalized, file_path.canonicalize().unwrap());

    // The canonicalized path must be readable and round-trip the contents.
    assert_eq!(
        vfs.read_to_string(&canonicalized).unwrap().to_string(),
        contents
    );
}
#[test]
fn canonicalize_std_backend_missing_errors() {
    let dir = tempfile::tempdir().unwrap();
    let missing = dir.path().join("test");

    let vfs = Vfs::new(StdBackend::new());

    // Canonicalizing a nonexistent path surfaces the OS NotFound error.
    let err = vfs.canonicalize(&missing).unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::NotFound);
}
} }

View File

@@ -1,5 +1,5 @@
use std::io; use std::io;
use std::path::Path; use std::path::{Path, PathBuf};
use crate::{Metadata, ReadDir, VfsBackend, VfsEvent}; use crate::{Metadata, ReadDir, VfsBackend, VfsEvent};
@@ -50,6 +50,10 @@ impl VfsBackend for NoopBackend {
Err(io::Error::other("NoopBackend doesn't do anything")) Err(io::Error::other("NoopBackend doesn't do anything"))
} }
// Canonicalization is unsupported: like the other NoopBackend operations,
// this always returns an error.
fn canonicalize(&mut self, _path: &Path) -> io::Result<PathBuf> {
    Err(io::Error::other("NoopBackend doesn't do anything"))
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
crossbeam_channel::never() crossbeam_channel::never()
} }

View File

@@ -106,6 +106,10 @@ impl VfsBackend for StdBackend {
}) })
} }
// Delegate directly to fs-err's wrapper around the real filesystem's
// canonicalization.
fn canonicalize(&mut self, path: &Path) -> io::Result<PathBuf> {
    fs_err::canonicalize(path)
}
fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> { fn event_receiver(&self) -> crossbeam_channel::Receiver<VfsEvent> {
self.watcher_receiver.clone() self.watcher_receiver.clone()
} }

View File

@@ -290,30 +290,38 @@ function ApiContext:open(id)
end end
function ApiContext:serialize(ids: { string }) function ApiContext:serialize(ids: { string })
local url = ("%s/api/serialize/%s"):format(self.__baseUrl, table.concat(ids, ",")) local url = ("%s/api/serialize"):format(self.__baseUrl)
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body) return Http.post(url, request_body)
if body.sessionId ~= self.__sessionId then :andThen(rejectFailedRequests)
:andThen(Http.Response.json)
:andThen(function(response_body)
if response_body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID") return Promise.reject("Server changed ID")
end end
assert(validateApiSerialize(body)) assert(validateApiSerialize(response_body))
return body return response_body
end) end)
end end
function ApiContext:refPatch(ids: { string }) function ApiContext:refPatch(ids: { string })
local url = ("%s/api/ref-patch/%s"):format(self.__baseUrl, table.concat(ids, ",")) local url = ("%s/api/ref-patch"):format(self.__baseUrl)
local request_body = Http.jsonEncode({ sessionId = self.__sessionId, ids = ids })
return Http.get(url):andThen(rejectFailedRequests):andThen(Http.Response.json):andThen(function(body) return Http.post(url, request_body)
if body.sessionId ~= self.__sessionId then :andThen(rejectFailedRequests)
:andThen(Http.Response.json)
:andThen(function(response_body)
if response_body.sessionId ~= self.__sessionId then
return Promise.reject("Server changed ID") return Promise.reject("Server changed ID")
end end
assert(validateApiRefPatch(body)) assert(validateApiRefPatch(response_body))
return body return response_body
end) end)
end end

View File

@@ -1,12 +1,12 @@
use std::{
fs,
sync::{Arc, Mutex},
};
use crossbeam_channel::{select, Receiver, RecvError, Sender}; use crossbeam_channel::{select, Receiver, RecvError, Sender};
use jod_thread::JoinHandle; use jod_thread::JoinHandle;
use memofs::{IoResultExt, Vfs, VfsEvent}; use memofs::{IoResultExt, Vfs, VfsEvent};
use rbx_dom_weak::types::{Ref, Variant}; use rbx_dom_weak::types::{Ref, Variant};
use std::path::PathBuf;
use std::{
fs,
sync::{Arc, Mutex},
};
use crate::{ use crate::{
message_queue::MessageQueue, message_queue::MessageQueue,
@@ -114,18 +114,13 @@ struct JobThreadContext {
} }
impl JobThreadContext { impl JobThreadContext {
fn handle_vfs_event(&self, event: VfsEvent) { /// Computes and applies patches to the DOM for a given file path.
log::trace!("Vfs event: {:?}", event); ///
/// This function finds the nearest ancestor to the given path that has associated instances
// Update the VFS immediately with the event. /// in the tree.
self.vfs /// It then computes and applies changes for each affected instance ID and
.commit_event(&event) /// returns a vector of applied patch sets.
.expect("Error applying VFS change"); fn apply_patches(&self, path: PathBuf) -> Vec<AppliedPatchSet> {
// For a given VFS event, we might have many changes to different parts
// of the tree. Calculate and apply all of these changes.
let applied_patches = match event {
VfsEvent::Create(path) | VfsEvent::Remove(path) | VfsEvent::Write(path) => {
let mut tree = self.tree.lock().unwrap(); let mut tree = self.tree.lock().unwrap();
let mut applied_patches = Vec::new(); let mut applied_patches = Vec::new();
@@ -161,6 +156,29 @@ impl JobThreadContext {
applied_patches applied_patches
} }
fn handle_vfs_event(&self, event: VfsEvent) {
log::trace!("Vfs event: {:?}", event);
// Update the VFS immediately with the event.
self.vfs
.commit_event(&event)
.expect("Error applying VFS change");
// For a given VFS event, we might have many changes to different parts
// of the tree. Calculate and apply all of these changes.
let applied_patches = match event {
VfsEvent::Create(path) | VfsEvent::Write(path) => {
self.apply_patches(self.vfs.canonicalize(&path).unwrap())
}
VfsEvent::Remove(path) => {
// MemoFS does not track parent removals yet, so we can canonicalize
// the parent path safely and then append the removed path's file name.
let parent = path.parent().unwrap();
let file_name = path.file_name().unwrap();
let parent_normalized = self.vfs.canonicalize(parent).unwrap();
self.apply_patches(parent_normalized.join(file_name))
}
_ => { _ => {
log::warn!("Unhandled VFS event: {:?}", event); log::warn!("Unhandled VFS event: {:?}", event);
Vec::new() Vec::new()

View File

@@ -42,7 +42,7 @@ pub fn snapshot_csv(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]), .relevant_paths(vec![vfs.canonicalize(path)?]),
); );
AdjacentMetadata::read_and_apply_all(vfs, path, name, &mut snapshot)?; AdjacentMetadata::read_and_apply_all(vfs, path, name, &mut snapshot)?;

View File

@@ -62,18 +62,19 @@ pub fn snapshot_dir_no_meta(
} }
} }
let normalized_path = vfs.canonicalize(path)?;
let relevant_paths = vec![ let relevant_paths = vec![
path.to_path_buf(), normalized_path.clone(),
// TODO: We shouldn't need to know about Lua existing in this // TODO: We shouldn't need to know about Lua existing in this
// middleware. Should we figure out a way for that function to add // middleware. Should we figure out a way for that function to add
// relevant paths to this middleware? // relevant paths to this middleware?
path.join("init.lua"), normalized_path.join("init.lua"),
path.join("init.luau"), normalized_path.join("init.luau"),
path.join("init.server.lua"), normalized_path.join("init.server.lua"),
path.join("init.server.luau"), normalized_path.join("init.server.luau"),
path.join("init.client.lua"), normalized_path.join("init.client.lua"),
path.join("init.client.luau"), normalized_path.join("init.client.luau"),
path.join("init.csv"), normalized_path.join("init.csv"),
]; ];
let snapshot = InstanceSnapshot::new() let snapshot = InstanceSnapshot::new()

View File

@@ -32,7 +32,7 @@ pub fn snapshot_json(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -53,7 +53,7 @@ pub fn snapshot_json_model(
snapshot.metadata = snapshot snapshot.metadata = snapshot
.metadata .metadata
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context) .context(context)
.specified_id(id) .specified_id(id)
.schema(schema) .schema(schema)

View File

@@ -88,7 +88,7 @@ pub fn snapshot_lua(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -28,7 +28,7 @@ pub fn snapshot_rbxm(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -31,7 +31,7 @@ pub fn snapshot_rbxmx(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -31,7 +31,7 @@ pub fn snapshot_toml(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -28,7 +28,7 @@ pub fn snapshot_txt(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -37,7 +37,7 @@ pub fn snapshot_yaml(
.metadata( .metadata(
InstanceMetadata::new() InstanceMetadata::new()
.instigating_source(path) .instigating_source(path)
.relevant_paths(vec![path.to_path_buf()]) .relevant_paths(vec![vfs.canonicalize(path)?])
.context(context), .context(context),
); );

View File

@@ -1,13 +1,7 @@
//! Defines Rojo's HTTP API, all under /api. These endpoints generally return //! Defines Rojo's HTTP API, all under /api. These endpoints generally return
//! JSON. //! JSON.
use std::{ use std::{collections::HashMap, fs, path::PathBuf, str::FromStr, sync::Arc};
collections::{HashMap, HashSet},
fs,
path::PathBuf,
str::FromStr,
sync::Arc,
};
use futures::{sink::SinkExt, stream::StreamExt}; use futures::{sink::SinkExt, stream::StreamExt};
use hyper::{body, Body, Method, Request, Response, StatusCode}; use hyper::{body, Body, Method, Request, Response, StatusCode};
@@ -30,7 +24,10 @@ use crate::{
}, },
util::{json, json_ok}, util::{json, json_ok},
}, },
web_api::{BufferEncode, InstanceUpdate, RefPatchResponse, SerializeResponse}, web_api::{
BufferEncode, InstanceUpdate, RefPatchRequest, RefPatchResponse, SerializeRequest,
SerializeResponse,
},
}; };
pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>) -> Response<Body> { pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>) -> Response<Body> {
@@ -53,12 +50,8 @@ pub async fn call(serve_session: Arc<ServeSession>, mut request: Request<Body>)
) )
} }
} }
(&Method::GET, path) if path.starts_with("/api/serialize/") => { (&Method::POST, "/api/serialize") => service.handle_api_serialize(request).await,
service.handle_api_serialize(request).await (&Method::POST, "/api/ref-patch") => service.handle_api_ref_patch(request).await,
}
(&Method::GET, path) if path.starts_with("/api/ref-patch/") => {
service.handle_api_ref_patch(request).await
}
(&Method::POST, path) if path.starts_with("/api/open/") => { (&Method::POST, path) if path.starts_with("/api/open/") => {
service.handle_api_open(request).await service.handle_api_open(request).await
@@ -229,22 +222,30 @@ impl ApiService {
/// that correspond to the requested Instances. These values have their /// that correspond to the requested Instances. These values have their
/// `Value` property set to point to the requested Instance. /// `Value` property set to point to the requested Instance.
async fn handle_api_serialize(&self, request: Request<Body>) -> Response<Body> { async fn handle_api_serialize(&self, request: Request<Body>) -> Response<Body> {
let argument = &request.uri().path()["/api/serialize/".len()..]; let session_id = self.serve_session.session_id();
let requested_ids: Result<Vec<Ref>, _> = argument.split(',').map(Ref::from_str).collect(); let body = body::to_bytes(request.into_body()).await.unwrap();
let requested_ids = match requested_ids { let request: SerializeRequest = match json::from_slice(&body) {
Ok(ids) => ids, Ok(request) => request,
Err(_) => { Err(err) => {
return json( return json(
ErrorResponse::bad_request("Malformed ID list"), ErrorResponse::bad_request(format!("Invalid body: {}", err)),
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
); );
} }
}; };
if request.session_id != session_id {
return json(
ErrorResponse::bad_request("Wrong session ID"),
StatusCode::BAD_REQUEST,
);
}
let mut response_dom = WeakDom::new(InstanceBuilder::new("Folder")); let mut response_dom = WeakDom::new(InstanceBuilder::new("Folder"));
let tree = self.serve_session.tree(); let tree = self.serve_session.tree();
for id in &requested_ids { for id in &request.ids {
if let Some(instance) = tree.get_instance(*id) { if let Some(instance) = tree.get_instance(*id) {
let clone = response_dom.insert( let clone = response_dom.insert(
Ref::none(), Ref::none(),
@@ -290,20 +291,26 @@ impl ApiService {
/// and referent properties need to be updated after the serialize /// and referent properties need to be updated after the serialize
/// endpoint is used. /// endpoint is used.
async fn handle_api_ref_patch(self, request: Request<Body>) -> Response<Body> { async fn handle_api_ref_patch(self, request: Request<Body>) -> Response<Body> {
let argument = &request.uri().path()["/api/ref-patch/".len()..]; let session_id = self.serve_session.session_id();
let requested_ids: Result<HashSet<Ref>, _> = let body = body::to_bytes(request.into_body()).await.unwrap();
argument.split(',').map(Ref::from_str).collect();
let requested_ids = match requested_ids { let request: RefPatchRequest = match json::from_slice(&body) {
Ok(ids) => ids, Ok(request) => request,
Err(_) => { Err(err) => {
return json( return json(
ErrorResponse::bad_request("Malformed ID list"), ErrorResponse::bad_request(format!("Invalid body: {}", err)),
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
); );
} }
}; };
if request.session_id != session_id {
return json(
ErrorResponse::bad_request("Wrong session ID"),
StatusCode::BAD_REQUEST,
);
}
let mut instance_updates: HashMap<Ref, InstanceUpdate> = HashMap::new(); let mut instance_updates: HashMap<Ref, InstanceUpdate> = HashMap::new();
let tree = self.serve_session.tree(); let tree = self.serve_session.tree();
@@ -312,7 +319,7 @@ impl ApiService {
let Variant::Ref(prop_value) = prop_value else { let Variant::Ref(prop_value) = prop_value else {
continue; continue;
}; };
if let Some(target_id) = requested_ids.get(prop_value) { if let Some(target_id) = request.ids.get(prop_value) {
let instance_id = instance.id(); let instance_id = instance.id();
let update = let update =
instance_updates instance_updates

View File

@@ -238,6 +238,13 @@ pub struct OpenResponse {
pub session_id: SessionId, pub session_id: SessionId,
} }
/// Request body for the `POST /api/serialize` endpoint.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SerializeRequest {
    // Session the client believes it is talking to; the handler rejects
    // requests whose session ID does not match the serve session's.
    pub session_id: SessionId,
    // Referents of the instances the client wants serialized.
    pub ids: Vec<Ref>,
}
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct SerializeResponse { pub struct SerializeResponse {
@@ -269,6 +276,13 @@ impl BufferEncode {
} }
} }
/// Request body for the `POST /api/ref-patch` endpoint.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RefPatchRequest {
    // Session the client believes it is talking to; the handler rejects
    // requests whose session ID does not match the serve session's.
    pub session_id: SessionId,
    // Set of referents whose ref-valued properties should be patched.
    pub ids: HashSet<Ref>,
}
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct RefPatchResponse<'a> { pub struct RefPatchResponse<'a> {

View File

@@ -1,5 +1,4 @@
use std::{ use std::{
fmt::Write as _,
fs, fs,
path::{Path, PathBuf}, path::{Path, PathBuf},
process::Command, process::Command,
@@ -13,8 +12,12 @@ use rbx_dom_weak::types::Ref;
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use librojo::web_api::{ use librojo::{
ReadResponse, SerializeResponse, ServerInfoResponse, SocketPacket, SocketPacketType, web_api::{
ReadResponse, SerializeRequest, SerializeResponse, ServerInfoResponse, SocketPacket,
SocketPacketType,
},
SessionId,
}; };
use rojo_insta_ext::RedactionMap; use rojo_insta_ext::RedactionMap;
@@ -226,16 +229,19 @@ impl TestServeSession {
} }
} }
pub fn get_api_serialize(&self, ids: &[Ref]) -> Result<SerializeResponse, reqwest::Error> { pub fn get_api_serialize(
let mut id_list = String::with_capacity(ids.len() * 33); &self,
for id in ids { ids: &[Ref],
write!(id_list, "{id},").unwrap(); session_id: SessionId,
} ) -> Result<SerializeResponse, reqwest::Error> {
id_list.pop(); let client = reqwest::blocking::Client::new();
let url = format!("http://localhost:{}/api/serialize", self.port);
let body = serde_json::to_string(&SerializeRequest {
session_id,
ids: ids.to_vec(),
});
let url = format!("http://localhost:{}/api/serialize/{}", self.port, id_list); client.post(url).body((body).unwrap()).send()?.json()
reqwest::blocking::get(url)?.json()
} }
} }

View File

@@ -646,7 +646,7 @@ fn meshpart_with_id() {
.unwrap(); .unwrap();
let serialize_response = session let serialize_response = session
.get_api_serialize(&[*meshpart, *objectvalue]) .get_api_serialize(&[*meshpart, *objectvalue], info.session_id)
.unwrap(); .unwrap();
// We don't assert a snapshot on the SerializeResponse because the model includes the // We don't assert a snapshot on the SerializeResponse because the model includes the
@@ -673,7 +673,9 @@ fn forced_parent() {
read_response.intern_and_redact(&mut redactions, root_id) read_response.intern_and_redact(&mut redactions, root_id)
); );
let serialize_response = session.get_api_serialize(&[root_id]).unwrap(); let serialize_response = session
.get_api_serialize(&[root_id], info.session_id)
.unwrap();
assert_eq!(serialize_response.session_id, info.session_id); assert_eq!(serialize_response.session_id, info.session_id);