merge impl-v2: server

This commit is contained in:
Lucien Greathouse
2018-06-10 22:59:04 -07:00
parent e30545c132
commit ec1f9bd706
35 changed files with 1643 additions and 1207 deletions

View File

@@ -1,27 +1,11 @@
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate rouille;
#[macro_use] extern crate clap;
#[macro_use] extern crate lazy_static;
extern crate notify;
extern crate rand;
extern crate serde;
extern crate serde_json;
extern crate regex;
pub mod web;
pub mod core;
pub mod project;
pub mod pathext;
pub mod vfs;
pub mod rbx;
pub mod plugin;
pub mod plugins;
pub mod commands;
extern crate librojo;
use std::path::{Path, PathBuf};
use std::process;
use pathext::canonicalish;
use librojo::pathext::canonicalish;
fn main() {
let matches = clap_app!(rojo =>
@@ -40,26 +24,15 @@ fn main() {
(@arg port: --port +takes_value "The port to listen on. Defaults to 8000.")
)
(@subcommand pack =>
(about: "Packs the project into a GUI installer bundle. NOT YET IMPLEMENTED!")
(@arg PROJECT: "Path to the project to pack. Defaults to the current directory.")
)
(@arg verbose: --verbose "Enable extended logging.")
).get_matches();
let verbose = match matches.occurrences_of("verbose") {
0 => false,
_ => true,
};
match matches.subcommand() {
("init", sub_matches) => {
let sub_matches = sub_matches.unwrap();
let project_path = Path::new(sub_matches.value_of("PATH").unwrap_or("."));
let full_path = canonicalish(project_path);
commands::init(&full_path);
librojo::commands::init(&full_path);
},
("serve", sub_matches) => {
let sub_matches = sub_matches.unwrap();
@@ -82,11 +55,7 @@ fn main() {
}
};
commands::serve(&project_path, verbose, port);
},
("pack", _) => {
eprintln!("'rojo pack' is not yet implemented!");
process::exit(1);
librojo::commands::serve(&project_path, port);
},
_ => {
eprintln!("Please specify a subcommand!");

View File

@@ -1,98 +1,37 @@
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::process;
use std::sync::{Arc, Mutex};
use std::thread;
use std::fs;
use rand;
use project::{Project, ProjectLoadError};
use plugin::{PluginChain};
use plugins::{DefaultPlugin, JsonModelPlugin, ScriptPlugin};
use vfs::{VfsSession, VfsWatcher};
use web;
use project::Project;
use web::{self, WebConfig};
use session::Session;
pub fn serve(project_path: &PathBuf, verbose: bool, port: Option<u64>) {
pub fn serve(project_dir: &PathBuf, override_port: Option<u64>) {
let server_id = rand::random::<u64>();
let project = match Project::load(project_path) {
Ok(project) => {
println!("Using project \"{}\" from {}", project.name, project_path.display());
project
let project = match Project::load(project_dir) {
Ok(v) => {
println!("Using project from {}", fs::canonicalize(project_dir).unwrap().display());
v
},
Err(err) => {
match err {
ProjectLoadError::InvalidJson(serde_err) => {
eprintln!("Project contained invalid JSON!");
eprintln!("{}", project_path.display());
eprintln!("Error: {}", serde_err);
process::exit(1);
},
ProjectLoadError::FailedToOpen | ProjectLoadError::FailedToRead => {
eprintln!("Found project file, but failed to read it!");
eprintln!("Check the permissions of the project file at {}", project_path.display());
process::exit(1);
},
ProjectLoadError::DidNotExist => {
eprintln!("Found no project file! Create one using 'rojo init'");
eprintln!("Checked for a project at {}", project_path.display());
process::exit(1);
},
}
eprintln!("{}", err);
process::exit(1);
},
};
if project.partitions.len() == 0 {
println!("");
println!("This project has no partitions and will not do anything when served!");
println!("This is usually a mistake -- edit rojo.json!");
println!("");
}
let port = override_port.unwrap_or(project.serve_port);
lazy_static! {
static ref PLUGIN_CHAIN: PluginChain = PluginChain::new(vec![
Box::new(ScriptPlugin::new()),
Box::new(JsonModelPlugin::new()),
Box::new(DefaultPlugin::new()),
]);
}
println!("Using project {:#?}", project);
let vfs = {
let mut vfs = VfsSession::new(&PLUGIN_CHAIN);
let mut session = Session::new(project.clone());
session.start();
for (name, project_partition) in &project.partitions {
let path = {
let given_path = Path::new(&project_partition.path);
let web_config = WebConfig::from_session(server_id, port, &session);
if given_path.is_absolute() {
given_path.to_path_buf()
} else {
project_path.join(given_path)
}
};
println!("Server listening on port {}", port);
vfs.insert_partition(name, path);
}
Arc::new(Mutex::new(vfs))
};
{
let vfs = vfs.clone();
thread::spawn(move || {
VfsWatcher::new(vfs).start();
});
}
let web_config = web::WebConfig {
verbose,
port: port.unwrap_or(project.serve_port),
server_id,
};
println!("Server listening on port {}", web_config.port);
web::start(web_config, project.clone(), &PLUGIN_CHAIN, vfs.clone());
web::start(web_config);
}

View File

@@ -1 +0,0 @@
pub type Route = Vec<String>;

89
server/src/file_route.rs Normal file
View File

@@ -0,0 +1,89 @@
use std::path::{Path, PathBuf, Component};
use partition::Partition;
// TODO: Change backing data structure to use a single allocation with slices
// taken out of it for each portion

/// A location inside a partition: the partition's name plus the path
/// components below the partition's root directory.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FileRoute {
    pub partition: String,
    pub route: Vec<String>,
}

impl FileRoute {
    /// Builds a `FileRoute` for `path` relative to `partition`'s root.
    /// Returns `None` when `path` does not live inside the partition.
    ///
    /// # Panics
    /// Panics when `path` is not absolute, or when the relative portion
    /// contains a non-normal component.
    pub fn from_path(path: &Path, partition: &Partition) -> Option<FileRoute> {
        assert!(path.is_absolute());

        let relative_path = path.strip_prefix(&partition.path).ok()?;

        let route = relative_path
            .components()
            .map(|component| match component {
                Component::Normal(piece) => piece.to_string_lossy().into_owned(),
                other => panic!("Unexpected path component: {:?}", other),
            })
            .collect();

        Some(FileRoute {
            partition: partition.name.clone(),
            route,
        })
    }

    /// Returns the route one level up, or `None` when this route already
    /// refers to the partition root.
    pub fn parent(&self) -> Option<FileRoute> {
        match self.route.split_last() {
            None => None,
            Some((_leaf, init)) => Some(FileRoute {
                partition: self.partition.clone(),
                route: init.to_vec(),
            }),
        }
    }

    /// Creates a PathBuf out of the `FileRoute` based on the given partition
    /// `Path`.
    pub fn to_path_buf(&self, partition_path: &Path) -> PathBuf {
        let mut result = partition_path.to_path_buf();
        result.extend(&self.route);
        result
    }

    /// Creates a version of the FileRoute with the given extra pieces appended
    /// to the end.
    pub fn extended_with(&self, pieces: &[&str]) -> FileRoute {
        let mut route = self.route.clone();
        route.extend(pieces.iter().map(|piece| piece.to_string()));

        FileRoute {
            partition: self.partition.clone(),
            route,
        }
    }

    /// This function is totally wrong and should be handled by middleware, heh.
    pub fn name(&self, partition: &Partition) -> String { // I guess??
        match self.route.last() {
            // An item inside the partition is named after its last component.
            Some(leaf) => leaf.clone(),
            // An empty route refers to the partition itself.
            None => match partition.target.last() {
                Some(target_leaf) => target_leaf.clone(),
                // An empty target means the partition targets the game root.
                None => "game".to_string(),
            },
        }
    }
}

21
server/src/id.rs Normal file
View File

@@ -0,0 +1,21 @@
use std::sync::atomic::{AtomicUsize, Ordering};
/// A unique identifier, not guaranteed to be generated in any order.
pub type Id = usize;

// Process-wide counter backing `get_id`. `AtomicUsize::new` is a `const fn`,
// so a plain static suffices here; the previous `lazy_static!` wrapper added
// a macro dependency and a deref for no benefit.
static LAST_ID: AtomicUsize = AtomicUsize::new(0);

/// Generate a new ID, which has no defined ordering.
///
/// IDs are unique within a single process run (until the counter wraps),
/// and generation is safe to call from multiple threads.
pub fn get_id() -> Id {
    LAST_ID.fetch_add(1, Ordering::SeqCst)
}

#[test]
fn it_gives_unique_numbers() {
    let a = get_id();
    let b = get_id();

    assert!(a != b);
}

26
server/src/lib.rs Normal file
View File

@@ -0,0 +1,26 @@
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate rouille;
#[macro_use] extern crate lazy_static;
extern crate notify;
extern crate rand;
extern crate serde;
extern crate serde_json;
extern crate regex;
#[cfg(test)]
extern crate tempfile;
pub mod commands;
pub mod file_route;
pub mod id;
pub mod message_session;
pub mod partition;
pub mod partition_watcher;
pub mod pathext;
pub mod project;
pub mod rbx;
pub mod rbx_session;
pub mod session;
pub mod vfs_session;
pub mod web;
pub mod web_util;

View File

@@ -0,0 +1,64 @@
use std::collections::HashMap;
use std::sync::{mpsc, Arc, RwLock, Mutex};
use id::{Id, get_id};
/// A change notification recorded by `MessageSession` and delivered to its
/// subscribers.
///
/// Serialization is internally tagged with a `"type"` field, so consumers
/// can distinguish message kinds from the JSON alone.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum Message {
    /// The instance identified by `id` changed in some way.
    InstanceChanged {
        id: Id,
    },
}
/// A shared log of `Message` values plus the set of listeners to wake when
/// new messages arrive. Cloning is cheap: both fields are `Arc` handles to
/// the same underlying state.
#[derive(Clone)]
pub struct MessageSession {
    /// Every message pushed so far; positions in this Vec serve as cursors
    /// (see `get_message_cursor`).
    pub messages: Arc<RwLock<Vec<Message>>>,
    /// Wake-up channels for subscribers, keyed by the ID handed out by
    /// `subscribe`.
    pub message_listeners: Arc<Mutex<HashMap<Id, mpsc::Sender<()>>>>,
}
impl MessageSession {
    /// Creates an empty session with no messages and no listeners.
    pub fn new() -> MessageSession {
        MessageSession {
            messages: Arc::new(RwLock::new(Vec::new())),
            message_listeners: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Appends `new_messages` to the shared log and wakes every subscriber.
    ///
    /// Fix: the previous implementation `unwrap`ped every `send`, so a
    /// subscriber that dropped its receiver without calling `unsubscribe`
    /// would panic the pushing thread. A failed send simply means that
    /// listener is gone, so it is now ignored.
    pub fn push_messages(&self, new_messages: &[Message]) {
        // Take the listener lock first, preserving the original lock order
        // relative to the message lock.
        let message_listeners = self.message_listeners.lock().unwrap();

        {
            let mut messages = self.messages.write().unwrap();
            messages.extend_from_slice(new_messages);
        }

        for listener in message_listeners.values() {
            // A send error means the receiving end disconnected; there is
            // nothing useful to do except skip that listener.
            let _ = listener.send(());
        }
    }

    /// Registers a wake-up channel and returns the ID to later pass to
    /// `unsubscribe`.
    pub fn subscribe(&self, sender: mpsc::Sender<()>) -> Id {
        let id = get_id();

        {
            let mut message_listeners = self.message_listeners.lock().unwrap();
            message_listeners.insert(id, sender);
        }

        id
    }

    /// Removes the listener registered under `id`, if any.
    pub fn unsubscribe(&self, id: Id) {
        let mut message_listeners = self.message_listeners.lock().unwrap();
        message_listeners.remove(&id);
    }

    /// Returns the index of the most recent message, or -1 when no messages
    /// have been pushed yet.
    pub fn get_message_cursor(&self) -> i32 {
        self.messages.read().unwrap().len() as i32 - 1
    }
}

13
server/src/partition.rs Normal file
View File

@@ -0,0 +1,13 @@
use std::path::PathBuf;
/// A named mapping between a directory (or file) on disk and a location in
/// the Roblox instance tree, as configured in the user's project file.
#[derive(Debug, Clone, PartialEq)]
pub struct Partition {
    /// The unique name of this partition, used for debugging.
    pub name: String,
    /// The path on the filesystem that this partition maps to.
    pub path: PathBuf,
    /// The route to the Roblox instance that this partition maps to.
    pub target: Vec<String>,
}

View File

@@ -0,0 +1,65 @@
use std::sync::mpsc::{channel, Sender};
use std::time::Duration;
use std::thread;
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher, watcher};
use partition::Partition;
use vfs_session::FileChange;
use file_route::FileRoute;
/// Watches a partition's directory for filesystem events and forwards them,
/// translated into `FileChange` values, over a channel.
pub struct PartitionWatcher {
    /// The underlying notify watcher, kept here so it lives as long as this
    /// struct does. NOTE(review): presumably dropping it ends the watch —
    /// confirm against notify's documentation.
    pub watcher: RecommendedWatcher,
}
impl PartitionWatcher {
    /// Starts watching `partition`'s directory, spawning a background thread
    /// that converts debounced filesystem events into `FileChange` values
    /// and sends them over `tx`. The thread exits when either the event
    /// source or the receiving end of `tx` disconnects.
    pub fn start_new(partition: Partition, tx: Sender<FileChange>) -> PartitionWatcher {
        let (watch_tx, watch_rx) = channel();

        let mut watcher = watcher(watch_tx, Duration::from_millis(100)).unwrap();
        watcher.watch(&partition.path, RecursiveMode::Recursive).unwrap();

        thread::spawn(move || {
            while let Ok(event) = watch_rx.recv() {
                // Translate the notify event into our FileChange type;
                // events we don't care about become None and are skipped.
                let file_change = match event {
                    DebouncedEvent::Create(path) => {
                        Some(FileChange::Created(FileRoute::from_path(&path, &partition).unwrap()))
                    },
                    DebouncedEvent::Write(path) => {
                        Some(FileChange::Updated(FileRoute::from_path(&path, &partition).unwrap()))
                    },
                    DebouncedEvent::Remove(path) => {
                        Some(FileChange::Deleted(FileRoute::from_path(&path, &partition).unwrap()))
                    },
                    DebouncedEvent::Rename(from_path, to_path) => {
                        let from_route = FileRoute::from_path(&from_path, &partition).unwrap();
                        let to_route = FileRoute::from_path(&to_path, &partition).unwrap();
                        Some(FileChange::Moved(from_route, to_route))
                    },
                    _ => None,
                };

                if let Some(file_change) = file_change {
                    if tx.send(file_change).is_err() {
                        // The receiving end hung up; nothing left to notify.
                        break;
                    }
                }
            }
        });

        PartitionWatcher {
            watcher,
        }
    }

    /// Stops the watch by consuming and dropping this watcher.
    pub fn stop(self) {
    }
}

View File

@@ -1,60 +0,0 @@
use rbx::RbxInstance;
use vfs::VfsItem;
use core::Route;
pub enum TransformFileResult {
Value(Option<RbxInstance>),
Pass,
// TODO: Error case
}
pub enum FileChangeResult {
MarkChanged(Option<Vec<Route>>),
Pass,
}
pub trait Plugin {
/// Invoked when a file is read from the filesystem and needs to be turned
/// into a Roblox instance.
fn transform_file(&self, plugins: &PluginChain, vfs_item: &VfsItem) -> TransformFileResult;
/// Invoked when a file changes on the filesystem. The result defines what
/// routes are marked as needing to be refreshed.
fn handle_file_change(&self, route: &Route) -> FileChangeResult;
}
/// A set of plugins that are composed in order.
pub struct PluginChain {
plugins: Vec<Box<Plugin + Send + Sync>>,
}
impl PluginChain {
pub fn new(plugins: Vec<Box<Plugin + Send + Sync>>) -> PluginChain {
PluginChain {
plugins,
}
}
pub fn transform_file(&self, vfs_item: &VfsItem) -> Option<RbxInstance> {
for plugin in &self.plugins {
match plugin.transform_file(self, vfs_item) {
TransformFileResult::Value(rbx_item) => return rbx_item,
TransformFileResult::Pass => {},
}
}
None
}
pub fn handle_file_change(&self, route: &Route) -> Option<Vec<Route>> {
for plugin in &self.plugins {
match plugin.handle_file_change(route) {
FileChangeResult::MarkChanged(changes) => return changes,
FileChangeResult::Pass => {},
}
}
None
}
}

View File

@@ -1,63 +0,0 @@
use std::collections::HashMap;
use core::Route;
use plugin::{Plugin, PluginChain, TransformFileResult, FileChangeResult};
use rbx::{RbxInstance, RbxValue};
use vfs::VfsItem;
/// A plugin with simple transforms:
/// * Directories become Folder instances
/// * Files become StringValue objects with 'Value' as their contents
pub struct DefaultPlugin;
impl DefaultPlugin {
pub fn new() -> DefaultPlugin {
DefaultPlugin
}
}
impl Plugin for DefaultPlugin {
fn transform_file(&self, plugins: &PluginChain, vfs_item: &VfsItem) -> TransformFileResult {
match vfs_item {
&VfsItem::File { ref contents, .. } => {
let mut properties = HashMap::new();
properties.insert("Value".to_string(), RbxValue::String {
value: contents.clone(),
});
TransformFileResult::Value(Some(RbxInstance {
name: vfs_item.name().clone(),
class_name: "StringValue".to_string(),
children: Vec::new(),
properties,
route: Some(vfs_item.route().to_vec()),
}))
},
&VfsItem::Dir { ref children, .. } => {
let mut rbx_children = Vec::new();
for (_, child_item) in children {
match plugins.transform_file(child_item) {
Some(rbx_item) => {
rbx_children.push(rbx_item);
},
_ => {},
}
}
TransformFileResult::Value(Some(RbxInstance {
name: vfs_item.name().clone(),
class_name: "*".to_string(),
children: rbx_children,
properties: HashMap::new(),
route: Some(vfs_item.route().to_vec()),
}))
},
}
}
fn handle_file_change(&self, route: &Route) -> FileChangeResult {
FileChangeResult::MarkChanged(Some(vec![route.clone()]))
}
}

View File

@@ -1,99 +0,0 @@
use regex::Regex;
use serde_json;
use core::Route;
use plugin::{Plugin, PluginChain, TransformFileResult, FileChangeResult};
use rbx::RbxInstance;
use vfs::VfsItem;
lazy_static! {
static ref JSON_MODEL_PATTERN: Regex = Regex::new(r"^(.*?)\.model\.json$").unwrap();
}
static JSON_MODEL_INIT: &'static str = "init.model.json";
pub struct JsonModelPlugin;
impl JsonModelPlugin {
pub fn new() -> JsonModelPlugin {
JsonModelPlugin
}
}
impl Plugin for JsonModelPlugin {
fn transform_file(&self, plugins: &PluginChain, vfs_item: &VfsItem) -> TransformFileResult {
match vfs_item {
&VfsItem::File { ref contents, .. } => {
let rbx_name = match JSON_MODEL_PATTERN.captures(vfs_item.name()) {
Some(captures) => captures.get(1).unwrap().as_str().to_string(),
None => return TransformFileResult::Pass,
};
let mut rbx_item: RbxInstance = match serde_json::from_str(contents) {
Ok(v) => v,
Err(e) => {
eprintln!("Unable to parse JSON Model File named {}: {}", vfs_item.name(), e);
return TransformFileResult::Pass; // This should be an error in the future!
},
};
rbx_item.route = Some(vfs_item.route().to_vec());
rbx_item.name = rbx_name;
TransformFileResult::Value(Some(rbx_item))
},
&VfsItem::Dir { ref children, .. } => {
let init_item = match children.get(JSON_MODEL_INIT) {
Some(v) => v,
None => return TransformFileResult::Pass,
};
let mut rbx_item = match self.transform_file(plugins, init_item) {
TransformFileResult::Value(Some(item)) => item,
TransformFileResult::Value(None) | TransformFileResult::Pass => {
eprintln!("Inconsistency detected in JsonModelPlugin!");
return TransformFileResult::Pass;
},
};
rbx_item.name.clear();
rbx_item.name.push_str(vfs_item.name());
rbx_item.route = Some(vfs_item.route().to_vec());
for (child_name, child_item) in children {
if child_name == init_item.name() {
continue;
}
match plugins.transform_file(child_item) {
Some(child_rbx_item) => {
rbx_item.children.push(child_rbx_item);
},
_ => {},
}
}
TransformFileResult::Value(Some(rbx_item))
},
}
}
fn handle_file_change(&self, route: &Route) -> FileChangeResult {
let leaf = match route.last() {
Some(v) => v,
None => return FileChangeResult::Pass,
};
let is_init = leaf == JSON_MODEL_INIT;
if is_init {
let mut changed = route.clone();
changed.pop();
FileChangeResult::MarkChanged(Some(vec![changed]))
} else {
FileChangeResult::Pass
}
}
}

View File

@@ -1,7 +0,0 @@
mod default_plugin;
mod script_plugin;
mod json_model_plugin;
pub use self::default_plugin::*;
pub use self::script_plugin::*;
pub use self::json_model_plugin::*;

View File

@@ -1,121 +0,0 @@
use std::collections::HashMap;
use regex::Regex;
use core::Route;
use plugin::{Plugin, PluginChain, TransformFileResult, FileChangeResult};
use rbx::{RbxInstance, RbxValue};
use vfs::VfsItem;
lazy_static! {
static ref SERVER_PATTERN: Regex = Regex::new(r"^(.*?)\.server\.lua$").unwrap();
static ref CLIENT_PATTERN: Regex = Regex::new(r"^(.*?)\.client\.lua$").unwrap();
static ref MODULE_PATTERN: Regex = Regex::new(r"^(.*?)\.lua$").unwrap();
}
static SERVER_INIT: &'static str = "init.server.lua";
static CLIENT_INIT: &'static str = "init.client.lua";
static MODULE_INIT: &'static str = "init.lua";
pub struct ScriptPlugin;
impl ScriptPlugin {
pub fn new() -> ScriptPlugin {
ScriptPlugin
}
}
impl Plugin for ScriptPlugin {
fn transform_file(&self, plugins: &PluginChain, vfs_item: &VfsItem) -> TransformFileResult {
match vfs_item {
&VfsItem::File { ref contents, .. } => {
let name = vfs_item.name();
let (class_name, rbx_name) = {
if let Some(captures) = SERVER_PATTERN.captures(name) {
("Script".to_string(), captures.get(1).unwrap().as_str().to_string())
} else if let Some(captures) = CLIENT_PATTERN.captures(name) {
("LocalScript".to_string(), captures.get(1).unwrap().as_str().to_string())
} else if let Some(captures) = MODULE_PATTERN.captures(name) {
("ModuleScript".to_string(), captures.get(1).unwrap().as_str().to_string())
} else {
return TransformFileResult::Pass;
}
};
let mut properties = HashMap::new();
properties.insert("Source".to_string(), RbxValue::String {
value: contents.clone(),
});
TransformFileResult::Value(Some(RbxInstance {
name: rbx_name,
class_name: class_name,
children: Vec::new(),
properties,
route: Some(vfs_item.route().to_vec()),
}))
},
&VfsItem::Dir { ref children, .. } => {
let init_item = {
let maybe_item = children.get(SERVER_INIT)
.or(children.get(CLIENT_INIT))
.or(children.get(MODULE_INIT));
match maybe_item {
Some(v) => v,
None => return TransformFileResult::Pass,
}
};
let mut rbx_item = match self.transform_file(plugins, init_item) {
TransformFileResult::Value(Some(item)) => item,
_ => {
eprintln!("Inconsistency detected in ScriptPlugin!");
return TransformFileResult::Pass;
},
};
rbx_item.name.clear();
rbx_item.name.push_str(vfs_item.name());
rbx_item.route = Some(vfs_item.route().to_vec());
for (child_name, child_item) in children {
if child_name == init_item.name() {
continue;
}
match plugins.transform_file(child_item) {
Some(child_rbx_item) => {
rbx_item.children.push(child_rbx_item);
},
_ => {},
}
}
TransformFileResult::Value(Some(rbx_item))
},
}
}
fn handle_file_change(&self, route: &Route) -> FileChangeResult {
let leaf = match route.last() {
Some(v) => v,
None => return FileChangeResult::Pass,
};
let is_init = leaf == SERVER_INIT
|| leaf == CLIENT_INIT
|| leaf == MODULE_INIT;
if is_init {
let mut changed = route.clone();
changed.pop();
FileChangeResult::MarkChanged(Some(vec![changed]))
} else {
FileChangeResult::Pass
}
}
}

View File

@@ -2,18 +2,39 @@ use std::collections::HashMap;
use std::fmt;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::Path;
use std::path::{Path, PathBuf};
use rand::{self, Rng};
use serde_json;
use partition::Partition;
pub static PROJECT_FILENAME: &'static str = "rojo.json";
#[derive(Debug)]
pub enum ProjectLoadError {
DidNotExist,
FailedToOpen,
FailedToRead,
InvalidJson(serde_json::Error),
DidNotExist(PathBuf),
FailedToOpen(PathBuf),
FailedToRead(PathBuf),
InvalidJson(PathBuf, serde_json::Error),
}
impl fmt::Display for ProjectLoadError {
    /// Renders a user-facing description of why the project failed to load,
    /// always including the path that was checked.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ProjectLoadError::DidNotExist(ref project_path) => {
                write!(f, "Could not locate a project file at {}.\nUse 'rojo init' to create one.", project_path.display())
            },
            ProjectLoadError::FailedToOpen(ref project_path) |
            ProjectLoadError::FailedToRead(ref project_path) => {
                write!(f, "Found project file, but failed to read it: {}", project_path.display())
            },
            ProjectLoadError::InvalidJson(ref project_path, ref serde_err) => {
                write!(f, "Found invalid JSON reading project: {}\nError: {}", project_path.display(), serde_err)
            },
        }
    }
}
#[derive(Debug)]
@@ -34,16 +55,17 @@ impl fmt::Display for ProjectInitError {
&ProjectInitError::AlreadyExists => {
write!(f, "A project already exists at that location.")
},
&ProjectInitError::FailedToCreate | &ProjectInitError::FailedToWrite => {
&ProjectInitError::FailedToCreate |
&ProjectInitError::FailedToWrite => {
write!(f, "Failed to write to the given location.")
},
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectPartition {
pub struct SourceProjectPartition {
/// A slash-separated path to a file or folder, relative to the project's
/// directory.
pub path: String,
@@ -52,43 +74,114 @@ pub struct ProjectPartition {
pub target: String,
}
/// Represents a project configured by a user for use with Rojo. Holds anything
/// that can be configured with `rojo.json`.
///
/// In the future, this object will hold dependency information and other handy
/// configurables
#[derive(Clone, Debug, Serialize, Deserialize)]
/// Represents a Rojo project in the format that's most convenient for users to
/// edit. This should generally line up with `Project`, but can diverge when
/// there's either compatibility shims or when the data structures that Rojo
/// want are too verbose to write in JSON but easy to convert from something
/// else.
//
/// Holds anything that can be configured with `rojo.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, rename_all = "camelCase")]
pub struct Project {
pub struct SourceProject {
pub name: String,
pub serve_port: u64,
pub partitions: HashMap<String, ProjectPartition>,
pub partitions: HashMap<String, SourceProjectPartition>,
}
impl Default for SourceProject {
fn default() -> SourceProject {
SourceProject {
name: "new-project".to_string(),
serve_port: 8000,
partitions: HashMap::new(),
}
}
}
/// Represents a Rojo project in the format that's convenient for Rojo to work
/// with.
#[derive(Debug, Clone)]
pub struct Project {
/// The path to the project file that this project is associated with.
pub project_path: PathBuf,
/// The name of this project, used for user-facing labels.
pub name: String,
/// The port that this project will run a web server on.
pub serve_port: u64,
/// All of the project's partitions, laid out in an expanded way.
pub partitions: HashMap<String, Partition>,
}
impl Project {
/// Creates a new empty Project object with the given name.
pub fn new<T: Into<String>>(name: T) -> Project {
fn from_source_project(source_project: SourceProject, project_path: PathBuf) -> Project {
let mut partitions = HashMap::new();
{
let project_directory = project_path.parent().unwrap();
for (partition_name, partition) in source_project.partitions.into_iter() {
let path = project_directory.join(&partition.path);
let target = partition.target
.split(".")
.map(String::from)
.collect::<Vec<_>>();
partitions.insert(partition_name.clone(), Partition {
path,
target,
name: partition_name,
});
}
}
Project {
name: name.into(),
..Default::default()
project_path,
name: source_project.name,
serve_port: source_project.serve_port,
partitions,
}
}
fn as_source_project(&self) -> SourceProject {
let mut partitions = HashMap::new();
for partition in self.partitions.values() {
let path = partition.path.strip_prefix(&self.project_path)
.unwrap_or_else(|_| &partition.path)
.to_str()
.unwrap()
.to_string();
let target = partition.target.join(".");
partitions.insert(partition.name.clone(), SourceProjectPartition {
path,
target,
});
}
SourceProject {
partitions,
name: self.name.clone(),
serve_port: self.serve_port,
}
}
/// Initializes a new project inside the given folder path.
pub fn init<T: AsRef<Path>>(location: T) -> Result<Project, ProjectInitError> {
let location = location.as_ref();
let package_path = location.join(PROJECT_FILENAME);
let project_path = location.join(PROJECT_FILENAME);
// We abort if the project file already exists.
match fs::metadata(&package_path) {
Ok(_) => return Err(ProjectInitError::AlreadyExists),
Err(_) => {},
}
fs::metadata(&project_path)
.map_err(|_| ProjectInitError::AlreadyExists)?;
let mut file = match File::create(&package_path) {
Ok(f) => f,
Err(_) => return Err(ProjectInitError::FailedToCreate),
};
let mut file = File::create(&project_path)
.map_err(|_| ProjectInitError::FailedToCreate)?;
// Try to give the project a meaningful name.
// If we can't, we'll just fall back to a default.
@@ -97,69 +190,57 @@ impl Project {
None => "new-project".to_string(),
};
// Generate a random port to run the server on.
let serve_port = rand::thread_rng().gen_range(2000, 49151);
// Configure the project with all of the values we know so far.
let project = Project::new(name);
let serialized = serde_json::to_string_pretty(&project).unwrap();
let source_project = SourceProject {
name,
serve_port,
partitions: HashMap::new(),
};
let serialized = serde_json::to_string_pretty(&source_project).unwrap();
match file.write(serialized.as_bytes()) {
Ok(_) => {},
Err(_) => return Err(ProjectInitError::FailedToWrite),
}
file.write(serialized.as_bytes())
.map_err(|_| ProjectInitError::FailedToWrite)?;
Ok(project)
Ok(Project::from_source_project(source_project, project_path))
}
/// Attempts to load a project from the file named PROJECT_FILENAME from the
/// given folder.
pub fn load<T: AsRef<Path>>(location: T) -> Result<Project, ProjectLoadError> {
let package_path = location.as_ref().join(Path::new(PROJECT_FILENAME));
let project_path = location.as_ref().join(Path::new(PROJECT_FILENAME));
match fs::metadata(&package_path) {
Ok(_) => {},
Err(_) => return Err(ProjectLoadError::DidNotExist),
}
fs::metadata(&project_path)
.map_err(|_| ProjectLoadError::DidNotExist(project_path.clone()))?;
let mut file = match File::open(&package_path) {
Ok(f) => f,
Err(_) => return Err(ProjectLoadError::FailedToOpen),
};
let mut file = File::open(&project_path)
.map_err(|_| ProjectLoadError::FailedToOpen(project_path.clone()))?;
let mut contents = String::new();
match file.read_to_string(&mut contents) {
Ok(_) => {},
Err(_) => return Err(ProjectLoadError::FailedToRead),
}
file.read_to_string(&mut contents)
.map_err(|_| ProjectLoadError::FailedToRead(project_path.clone()))?;
match serde_json::from_str(&contents) {
Ok(v) => Ok(v),
Err(e) => return Err(ProjectLoadError::InvalidJson(e)),
}
let source_project = serde_json::from_str(&contents)
.map_err(|e| ProjectLoadError::InvalidJson(project_path.clone(), e))?;
Ok(Project::from_source_project(source_project, project_path))
}
/// Saves the given project file to the given folder with the appropriate name.
pub fn save<T: AsRef<Path>>(&self, location: T) -> Result<(), ProjectSaveError> {
let package_path = location.as_ref().join(Path::new(PROJECT_FILENAME));
let project_path = location.as_ref().join(Path::new(PROJECT_FILENAME));
let mut file = match File::create(&package_path) {
Ok(f) => f,
Err(_) => return Err(ProjectSaveError::FailedToCreate),
};
let mut file = File::create(&project_path)
.map_err(|_| ProjectSaveError::FailedToCreate)?;
let serialized = serde_json::to_string_pretty(self).unwrap();
let source_project = self.as_source_project();
let serialized = serde_json::to_string_pretty(&source_project).unwrap();
file.write(serialized.as_bytes()).unwrap();
Ok(())
}
}
impl Default for Project {
fn default() -> Project {
Project {
name: "new-project".to_string(),
serve_port: 8000,
partitions: HashMap::new(),
}
}
}

View File

@@ -1,38 +1,116 @@
use std::borrow::Cow;
use std::collections::HashMap;
/// Represents data about a Roblox instance
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
use id::Id;
// TODO: Switch to enum to represent more value types
pub type RbxValue = String;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RbxInstance {
/// Maps to the `Name` property on Instance.
pub name: String,
/// Maps to the `ClassName` property on Instance.
pub class_name: String,
#[serde(default = "Vec::new")]
pub children: Vec<RbxInstance>,
#[serde(default = "HashMap::new")]
/// Contains all other properties of an Instance.
pub properties: HashMap<String, RbxValue>,
/// The route that this instance was generated from, if there was one.
pub route: Option<Vec<String>>,
/// All of the children of this instance. Order is relevant to preserve!
pub children: Vec<Id>,
pub parent: Option<Id>,
}
/// Any kind value that can be used by Roblox
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase", tag = "Type")]
pub enum RbxValue {
#[serde(rename_all = "PascalCase")]
String {
value: String,
},
#[serde(rename_all = "PascalCase")]
Bool {
value: bool,
},
#[serde(rename_all = "PascalCase")]
Number {
value: f64,
},
// TODO: Compound types like Vector3
// This seems like a really bad idea?
// Why isn't there a blanket impl for this for all T?
impl<'a> From<&'a RbxInstance> for Cow<'a, RbxInstance> {
fn from(instance: &'a RbxInstance) -> Cow<'a, RbxInstance> {
Cow::Borrowed(instance)
}
}
/// A flat store of `RbxInstance` values keyed by ID, with the hierarchy
/// expressed through each instance's `children` and `parent` fields.
pub struct RbxTree {
    instances: HashMap<Id, RbxInstance>,
}

impl RbxTree {
    /// Creates an empty tree.
    pub fn new() -> RbxTree {
        RbxTree {
            instances: HashMap::new(),
        }
    }

    /// Borrows the full ID-to-instance table.
    pub fn get_all_instances(&self) -> &HashMap<Id, RbxInstance> {
        &self.instances
    }

    /// Inserts `instance` under `id`. If the instance names a parent that is
    /// already in the tree, the ID is linked into the parent's child list
    /// (unless it is already present).
    pub fn insert_instance(&mut self, id: Id, instance: RbxInstance) {
        if let Some(parent_id) = instance.parent {
            if let Some(parent) = self.instances.get_mut(&parent_id) {
                if !parent.children.contains(&id) {
                    parent.children.push(id);
                }
            }
        }

        self.instances.insert(id, instance);
    }

    /// Removes the instance under `id` and all of its descendants, returning
    /// the IDs that were actually deleted.
    pub fn delete_instance(&mut self, id: Id) -> Vec<Id> {
        // Unlink the subtree root from any parent that still references it.
        for instance in self.instances.values_mut() {
            if let Some(index) = instance.children.iter().position(|&v| v == id) {
                instance.children.remove(index);
            }
        }

        // Walk the subtree iteratively, deleting as we go; IDs that are not
        // present in the tree are simply skipped.
        let mut ids_to_visit = vec![id];
        let mut ids_deleted = Vec::new();

        while let Some(current_id) = ids_to_visit.pop() {
            if let Some(instance) = self.instances.remove(&current_id) {
                ids_to_visit.extend_from_slice(&instance.children);
                ids_deleted.push(current_id);
            }
        }

        ids_deleted
    }

    /// Copies the instance under `id` and all of its descendants into
    /// `output`, converting each one via `From<&RbxInstance>`.
    pub fn get_instance<'a, 'b, T>(&'a self, id: Id, output: &'b mut HashMap<Id, T>)
        where T: From<&'a RbxInstance>
    {
        let mut ids_to_visit = vec![id];

        while let Some(current_id) = ids_to_visit.pop() {
            if let Some(instance) = self.instances.get(&current_id) {
                output.insert(current_id, instance.into());
                ids_to_visit.extend_from_slice(&instance.children);
            }
        }
    }
}

231
server/src/rbx_session.rs Normal file
View File

@@ -0,0 +1,231 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use file_route::FileRoute;
use id::{Id, get_id};
use message_session::{Message, MessageSession};
use partition::Partition;
use project::Project;
use rbx::{RbxInstance, RbxTree};
use vfs_session::{VfsSession, FileItem, FileChange};
// TODO: Rethink data structure and insertion/update behavior. Maybe break some
// pieces off into a new object?
/// Converts a `FileItem` (recursively, including its children) into
/// `RbxInstance` values inserted into `tree`.
///
/// IDs are reused from `instances_by_route` when the route has been seen
/// before, so repeated conversions update existing instances instead of
/// creating duplicates; new routes get fresh IDs which are recorded in the
/// map.
///
/// Returns the ID assigned to `file_item` itself, plus the IDs of every
/// instance inserted by this call (the subtree rooted at `file_item`).
fn file_to_instances(
    file_item: &FileItem,
    partition: &Partition,
    tree: &mut RbxTree,
    instances_by_route: &mut HashMap<FileRoute, Id>,
    parent_id: Option<Id>,
) -> (Id, Vec<Id>) {
    match file_item {
        FileItem::File { contents, route } => {
            // Reuse the ID previously assigned to this route, if any.
            let primary_id = match instances_by_route.get(&file_item.get_route()) {
                Some(&id) => id,
                None => {
                    let id = get_id();
                    instances_by_route.insert(route.clone(), id);
                    id
                },
            };

            // This is placeholder logic; this whole function is!
            // Pick a class and the property that receives the file contents
            // based on the file name's suffix.
            let (class_name, property_key, name) = {
                // TODO: Root instances have an empty route
                let file_name = route.route.last().unwrap();

                fn strip_suffix<'a>(source: &'a str, suffix: &'static str) -> &'a str {
                    &source[..source.len() - suffix.len()]
                }

                if file_name.ends_with(".client.lua") {
                    ("LocalScript", "Source", strip_suffix(&file_name, ".client.lua"))
                } else if file_name.ends_with(".server.lua") {
                    ("Script", "Source", strip_suffix(&file_name, ".server.lua"))
                } else if file_name.ends_with(".lua") {
                    ("ModuleScript", "Source", strip_suffix(&file_name, ".lua"))
                } else {
                    // TODO: Error/warn/skip instead of falling back
                    ("StringValue", "Value", file_name.as_str())
                }
            };

            let mut properties = HashMap::new();
            properties.insert(property_key.to_string(), contents.clone());

            tree.insert_instance(primary_id, RbxInstance {
                name: name.to_string(),
                class_name: class_name.to_string(),
                properties,
                children: Vec::new(),
                parent: parent_id,
            });

            (primary_id, vec![primary_id])
        },
        FileItem::Directory { children, route } => {
            // Same ID-reuse scheme as the File arm above.
            let primary_id = match instances_by_route.get(&file_item.get_route()) {
                Some(&id) => id,
                None => {
                    let id = get_id();
                    instances_by_route.insert(route.clone(), id);
                    id
                },
            };

            let mut child_ids = Vec::new();
            let mut changed_ids = vec![primary_id];

            for child_file_item in children.values() {
                let (child_id, mut child_changed_ids) = file_to_instances(child_file_item, partition, tree, instances_by_route, Some(primary_id));
                child_ids.push(child_id);
                changed_ids.push(child_id);

                // NOTE(review): child_id is also the first element of
                // child_changed_ids, so each child's ID lands in changed_ids
                // twice — confirm duplicates are acceptable downstream.
                // TODO: Should I stop using drain on Vecs of Copyable types?
                for id in child_changed_ids.drain(..) {
                    changed_ids.push(id);
                }
            }

            tree.insert_instance(primary_id, RbxInstance {
                name: route.name(partition).to_string(),
                class_name: "Folder".to_string(),
                properties: HashMap::new(),
                children: child_ids,
                parent: parent_id,
            });

            (primary_id, changed_ids)
        },
    }
}
/// Maintains the Roblox instance tree that mirrors the files held by the
/// paired `VfsSession`, publishing change messages as it mutates.
pub struct RbxSession {
    project: Project,
    vfs_session: Arc<RwLock<VfsSession>>,
    message_session: MessageSession,

    /// The RbxInstance that represents each partition.
    // TODO: Can this be removed in favor of instances_by_route?
    pub partition_instances: HashMap<String, Id>,

    /// Keeps track of all of the instances in the tree
    pub tree: RbxTree,

    /// A map from files in the VFS to instances loaded in the session.
    instances_by_route: HashMap<FileRoute, Id>,
}
impl RbxSession {
pub fn new(project: Project, vfs_session: Arc<RwLock<VfsSession>>, message_session: MessageSession) -> RbxSession {
RbxSession {
project,
vfs_session,
message_session,
partition_instances: HashMap::new(),
tree: RbxTree::new(),
instances_by_route: HashMap::new(),
}
}
pub fn read_partitions(&mut self) {
let vfs_session_arc = self.vfs_session.clone();
let vfs_session = vfs_session_arc.read().unwrap();
for partition in self.project.partitions.values() {
let route = FileRoute {
partition: partition.name.clone(),
route: Vec::new(),
};
let file_item = vfs_session.get_by_route(&route).unwrap();
let parent_id = match route.parent() {
Some(parent_route) => match self.instances_by_route.get(&parent_route) {
Some(&parent_id) => Some(parent_id),
None => None,
},
None => None,
};
let (root_id, _) = file_to_instances(file_item, partition, &mut self.tree, &mut self.instances_by_route, parent_id);
self.partition_instances.insert(partition.name.clone(), root_id);
}
}
pub fn handle_change(&mut self, change: &FileChange) {
let vfs_session_arc = self.vfs_session.clone();
let vfs_session = vfs_session_arc.read().unwrap();
match change {
FileChange::Created(route) | FileChange::Updated(route) => {
let file_item = vfs_session.get_by_route(route).unwrap();
let partition = self.project.partitions.get(&route.partition).unwrap();
let parent_id = match route.parent() {
Some(parent_route) => match self.instances_by_route.get(&parent_route) {
Some(&parent_id) => Some(parent_id),
None => None,
},
None => None,
};
let (_, changed_ids) = file_to_instances(file_item, partition, &mut self.tree, &mut self.instances_by_route, parent_id);
let messages = changed_ids
.iter()
.map(|&id| Message::InstanceChanged { id })
.collect::<Vec<_>>();
self.message_session.push_messages(&messages);
},
FileChange::Deleted(route) => {
match self.instances_by_route.get(route) {
Some(&id) => {
self.tree.delete_instance(id);
self.instances_by_route.remove(route);
self.message_session.push_messages(&[Message::InstanceChanged { id }]);
},
None => (),
}
},
FileChange::Moved(from_route, to_route) => {
let mut messages = Vec::new();
match self.instances_by_route.get(from_route) {
Some(&id) => {
self.tree.delete_instance(id);
self.instances_by_route.remove(from_route);
messages.push(Message::InstanceChanged { id });
},
None => (),
}
let file_item = vfs_session.get_by_route(to_route).unwrap();
let partition = self.project.partitions.get(&to_route.partition).unwrap();
let parent_id = match to_route.parent() {
Some(parent_route) => match self.instances_by_route.get(&parent_route) {
Some(&parent_id) => Some(parent_id),
None => None,
},
None => None,
};
let (_, changed_ids) = file_to_instances(file_item, partition, &mut self.tree, &mut self.instances_by_route, parent_id);
for id in changed_ids {
messages.push(Message::InstanceChanged { id });
}
self.message_session.push_messages(&messages);
},
}
}
}

95
server/src/session.rs Normal file
View File

@@ -0,0 +1,95 @@
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use message_session::MessageSession;
use partition_watcher::PartitionWatcher;
use project::Project;
use rbx_session::RbxSession;
use vfs_session::VfsSession;
/// Stub trait for middleware
trait Middleware {
    // Intentionally empty for now: a placeholder until the middleware API is
    // designed.
}
/// Owns every per-project session object and wires them together: the VFS
/// mirror, the Roblox instance tree, the message bus, and the per-partition
/// filesystem watchers.
pub struct Session {
    pub project: Project,
    // Shared with the web server and the change-handling thread.
    vfs_session: Arc<RwLock<VfsSession>>,
    rbx_session: Arc<RwLock<RbxSession>>,
    message_session: MessageSession,
    // Kept alive for the lifetime of the session; dropping them stops events.
    watchers: Vec<PartitionWatcher>,
}
impl Session {
    /// Constructs the session's sub-objects; nothing runs until `start`.
    pub fn new(project: Project) -> Session {
        let message_session = MessageSession::new();
        let vfs_session = Arc::new(RwLock::new(VfsSession::new(project.clone())));
        let rbx_session = Arc::new(RwLock::new(RbxSession::new(
            project.clone(),
            vfs_session.clone(),
            message_session.clone(),
        )));

        Session {
            project,
            vfs_session,
            rbx_session,
            message_session,
            watchers: Vec::new(),
        }
    }

    /// Performs the initial read of every partition, then spawns watchers and
    /// a background thread that funnels file changes into both sessions.
    pub fn start(&mut self) {
        // Initial load; each write lock is released at the end of its
        // statement.
        self.vfs_session.write().unwrap().read_partitions();
        self.rbx_session.write().unwrap().read_partitions();

        let (tx, rx) = mpsc::channel();

        for partition in self.project.partitions.values() {
            self.watchers.push(PartitionWatcher::start_new(partition.clone(), tx.clone()));
        }

        let vfs_session = self.vfs_session.clone();
        let rbx_session = self.rbx_session.clone();

        // The thread exits when every watcher (sender) has been dropped.
        thread::spawn(move || {
            while let Ok(change) = rx.recv() {
                vfs_session.write().unwrap().handle_change(&change);
                rbx_session.write().unwrap().handle_change(&change);
            }
        });
    }

    /// Stops the session; currently a no-op beyond dropping `self`.
    pub fn stop(self) {
    }

    /// Shared handle to the VFS session.
    pub fn get_vfs_session(&self) -> Arc<RwLock<VfsSession>> {
        Arc::clone(&self.vfs_session)
    }

    /// Shared handle to the Roblox instance session.
    pub fn get_rbx_session(&self) -> Arc<RwLock<RbxSession>> {
        Arc::clone(&self.rbx_session)
    }

    /// Clone of the message bus handle.
    pub fn get_message_session(&self) -> MessageSession {
        self.message_session.clone()
    }
}

View File

@@ -1,7 +0,0 @@
mod vfs_session;
mod vfs_item;
mod vfs_watcher;
pub use self::vfs_session::*;
pub use self::vfs_item::*;
pub use self::vfs_watcher::*;

View File

@@ -1,36 +0,0 @@
use std::collections::HashMap;
/// A VfsItem represents either a file or directory as it came from the filesystem.
///
/// The interface here is intentionally simplified to make it easier to traverse
/// files that have been read into memory.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "type")]
pub enum VfsItem {
    /// A regular file with its full contents loaded into memory.
    File {
        // Path components relative to the VFS root; the first component is
        // the partition name.
        route: Vec<String>,
        file_name: String,
        contents: String,
    },
    /// A directory and all of its recursively-read children.
    Dir {
        route: Vec<String>,
        file_name: String,
        // Children keyed by file name.
        children: HashMap<String, VfsItem>,
    },
}
impl VfsItem {
    /// Returns the item's file name, for both files and directories.
    pub fn name(&self) -> &String {
        match self {
            VfsItem::File { file_name, .. } | VfsItem::Dir { file_name, .. } => file_name,
        }
    }

    /// Returns the item's route relative to the VFS root.
    pub fn route(&self) -> &[String] {
        match self {
            VfsItem::File { route, .. } | VfsItem::Dir { route, .. } => route,
        }
    }
}

View File

@@ -1,220 +0,0 @@
use std::collections::HashMap;
use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::time::Instant;
use plugin::PluginChain;
use vfs::VfsItem;
/// Represents a virtual layer over multiple parts of the filesystem.
///
/// Paths in this system are represented as slices of strings, and are always
/// relative to a partition, which is an absolute path into the real filesystem.
pub struct VfsSession {
    /// Contains all of the partitions mounted by the Vfs.
    ///
    /// These must be absolute paths!
    partitions: HashMap<String, PathBuf>,

    /// A chronologically-sorted list of routes that changed since the Vfs was
    /// created, along with a timestamp denoting when.
    change_history: Vec<VfsChange>,

    /// When the Vfs was initialized; used for change tracking.
    start_time: Instant,

    /// Consulted in `add_change` to map one changed route to zero or more
    /// routes that actually get recorded.
    plugin_chain: &'static PluginChain,
}
/// A single recorded filesystem change: which route changed and when, in
/// seconds since the session started (see `VfsSession::current_time`).
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VfsChange {
    timestamp: f64,
    route: Vec<String>,
}
impl VfsSession {
    /// Creates an empty session; partitions are mounted with
    /// `insert_partition`.
    pub fn new(plugin_chain: &'static PluginChain) -> VfsSession {
        VfsSession {
            partitions: HashMap::new(),
            start_time: Instant::now(),
            change_history: Vec::new(),
            plugin_chain,
        }
    }

    /// All mounted partitions, keyed by partition name.
    pub fn get_partitions(&self) -> &HashMap<String, PathBuf> {
        &self.partitions
    }

    /// Mounts `path` under `name`.
    ///
    /// Panics if `path` is not absolute.
    pub fn insert_partition<P: Into<PathBuf>>(&mut self, name: &str, path: P) {
        let path = path.into();
        assert!(path.is_absolute());

        self.partitions.insert(name.to_string(), path.into());
    }

    /// Resolves a VFS route (partition name followed by path components) to
    /// a real filesystem path, or `None` if the partition isn't mounted or
    /// the route is empty.
    fn route_to_path(&self, route: &[String]) -> Option<PathBuf> {
        let (partition_name, rest) = match route.split_first() {
            Some((first, rest)) => (first, rest),
            None => return None,
        };

        let partition = match self.partitions.get(partition_name) {
            Some(v) => v,
            None => return None,
        };

        // It's possible that the partition points to a file if `rest` is empty.
        // Joining "" onto a path will put a trailing slash on, which causes
        // file reads to fail.
        let full_path = if rest.is_empty() {
            partition.clone()
        } else {
            let joined = rest.join("/");
            let relative = Path::new(&joined);
            partition.join(relative)
        };

        Some(full_path)
    }

    /// Recursively reads the directory at `path` into a `VfsItem::Dir`.
    ///
    /// Children that fail to read are skipped rather than failing the whole
    /// directory.
    fn read_dir<P: AsRef<Path>>(&self, route: &[String], path: P) -> Result<VfsItem, ()> {
        let path = path.as_ref();
        let reader = match fs::read_dir(path) {
            Ok(v) => v,
            Err(_) => return Err(()),
        };

        let mut children = HashMap::new();

        for entry in reader {
            let entry = match entry {
                Ok(v) => v,
                Err(_) => return Err(()),
            };

            let path = entry.path();
            let name = path.file_name().unwrap().to_string_lossy().into_owned();

            // The child's route is this directory's route plus its name.
            let mut child_route = route.iter().cloned().collect::<Vec<_>>();
            child_route.push(name.clone());

            match self.read_path(&child_route, &path) {
                Ok(child_item) => {
                    children.insert(name, child_item);
                },
                Err(_) => {},
            }
        }

        let file_name = path.file_name().unwrap().to_string_lossy().into_owned();

        Ok(VfsItem::Dir {
            route: route.iter().cloned().collect::<Vec<_>>(),
            file_name,
            children,
        })
    }

    /// Reads the file at `path` into a `VfsItem::File`.
    ///
    /// Fails if the file can't be opened or isn't valid UTF-8.
    fn read_file<P: AsRef<Path>>(&self, route: &[String], path: P) -> Result<VfsItem, ()> {
        let path = path.as_ref();
        let mut file = match File::open(path) {
            Ok(v) => v,
            Err(_) => return Err(()),
        };

        let mut contents = String::new();

        match file.read_to_string(&mut contents) {
            Ok(_) => {},
            Err(_) => return Err(()),
        }

        let file_name = path.file_name().unwrap().to_string_lossy().into_owned();

        Ok(VfsItem::File {
            route: route.iter().cloned().collect::<Vec<_>>(),
            file_name,
            contents,
        })
    }

    /// Reads whatever is at `path`, dispatching on whether it's a file or a
    /// directory. Anything else (e.g. broken symlinks) is an error.
    fn read_path<P: AsRef<Path>>(&self, route: &[String], path: P) -> Result<VfsItem, ()> {
        let path = path.as_ref();
        let metadata = match fs::metadata(path) {
            Ok(v) => v,
            Err(_) => return Err(()),
        };

        if metadata.is_dir() {
            self.read_dir(route, path)
        } else if metadata.is_file() {
            self.read_file(route, path)
        } else {
            Err(())
        }
    }

    /// Get the current time, used for logging timestamps for file changes.
    pub fn current_time(&self) -> f64 {
        let elapsed = self.start_time.elapsed();

        elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 * 1e-9
    }

    /// Register a new change to the filesystem at the given timestamp and VFS
    /// route.
    pub fn add_change(&mut self, timestamp: f64, route: Vec<String>) {
        // The plugin chain may expand one changed route into several (or
        // swallow it entirely by returning None).
        match self.plugin_chain.handle_file_change(&route) {
            Some(routes) => {
                for route in routes {
                    self.change_history.push(VfsChange {
                        timestamp,
                        route,
                    });
                }
            },
            None => {}
        }
    }

    /// Collect a list of changes that occured since the given timestamp.
    pub fn changes_since(&self, timestamp: f64) -> &[VfsChange] {
        // Walk backwards from the newest change; `marker` lands on the oldest
        // change whose timestamp is still >= the requested one.
        let mut marker: Option<usize> = None;

        for (index, value) in self.change_history.iter().enumerate().rev() {
            if value.timestamp >= timestamp {
                marker = Some(index);
            } else {
                break;
            }
        }

        if let Some(index) = marker {
            &self.change_history[index..]
        } else {
            // No matching changes: return an empty slice.
            &self.change_history[..0]
        }
    }

    /// Read an item from the filesystem using the given VFS route.
    pub fn read(&self, route: &[String]) -> Result<VfsItem, ()> {
        match self.route_to_path(route) {
            Some(path) => self.read_path(route, &path),
            None => Err(()),
        }
    }

    // Writing through the VFS is not supported yet.
    pub fn write(&self, _route: &[String], _item: VfsItem) -> Result<(), ()> {
        unimplemented!()
    }

    // Deleting through the VFS is not supported yet.
    pub fn delete(&self, _route: &[String]) -> Result<(), ()> {
        unimplemented!()
    }
}

View File

@@ -1,108 +0,0 @@
use std::path::PathBuf;
use std::sync::{mpsc, Arc, Mutex};
use std::thread;
use std::time::Duration;
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher};
use pathext::path_to_route;
use vfs::VfsSession;
/// An object that registers watchers on the real filesystem and relays those
/// changes to the virtual filesystem layer.
pub struct VfsWatcher {
    // Shared handle to the VFS that observed changes are reported into.
    vfs: Arc<Mutex<VfsSession>>,
}
impl VfsWatcher {
    /// Wraps the given VFS; no watching happens until `start` is called.
    pub fn new(vfs: Arc<Mutex<VfsSession>>) -> VfsWatcher {
        VfsWatcher {
            vfs,
        }
    }

    /// Per-partition worker loop: receives debounced filesystem events and
    /// records each one in the VFS as a route prefixed with the partition
    /// name.
    fn start_watcher(
        vfs: Arc<Mutex<VfsSession>>,
        rx: mpsc::Receiver<DebouncedEvent>,
        partition_name: String,
        root_path: PathBuf,
    ) {
        loop {
            let event = rx.recv().unwrap();

            let mut vfs = vfs.lock().unwrap();
            let current_time = vfs.current_time();

            match event {
                DebouncedEvent::Write(ref change_path) |
                DebouncedEvent::Create(ref change_path) |
                DebouncedEvent::Remove(ref change_path) => {
                    if let Some(mut route) = path_to_route(&root_path, change_path) {
                        route.insert(0, partition_name.clone());
                        vfs.add_change(current_time, route);
                    } else {
                        eprintln!("Failed to get route from {}", change_path.display());
                    }
                },
                DebouncedEvent::Rename(ref from_change, ref to_change) => {
                    // A rename is recorded as two changes: one for the old
                    // location and one for the new.
                    if let Some(mut route) = path_to_route(&root_path, from_change) {
                        route.insert(0, partition_name.clone());
                        vfs.add_change(current_time, route);
                    } else {
                        eprintln!("Failed to get route from {}", from_change.display());
                    }

                    if let Some(mut route) = path_to_route(&root_path, to_change) {
                        route.insert(0, partition_name.clone());
                        vfs.add_change(current_time, route);
                    } else {
                        eprintln!("Failed to get route from {}", to_change.display());
                    }
                },
                // Other event kinds (chmod, notices, errors) are ignored.
                _ => {},
            }
        }
    }

    /// Spawns one filesystem watcher and worker thread per partition, then
    /// parks the current thread forever.
    pub fn start(self) {
        let mut watchers = Vec::new();

        // Create an extra scope so that `vfs` gets dropped and unlocked
        {
            let vfs = self.vfs.lock().unwrap();

            for (ref partition_name, ref root_path) in vfs.get_partitions() {
                let (tx, rx) = mpsc::channel();

                let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_millis(200))
                    .expect("Unable to create watcher! This is a bug in Rojo.");

                match watcher.watch(&root_path, RecursiveMode::Recursive) {
                    Ok(_) => (),
                    Err(_) => {
                        panic!("Unable to watch partition {}, with path {}! Make sure that it's a file or directory.", partition_name, root_path.display());
                    },
                }

                // Keep the watcher alive; dropping it would stop its events.
                watchers.push(watcher);

                {
                    let partition_name = partition_name.to_string();
                    let root_path = root_path.to_path_buf();
                    let vfs = self.vfs.clone();

                    thread::spawn(move || {
                        Self::start_watcher(vfs, rx, partition_name, root_path);
                    });
                }
            }
        }

        loop {
            thread::park();
        }
    }
}

242
server/src/vfs_session.rs Normal file
View File

@@ -0,0 +1,242 @@
use std::collections::HashMap;
use std::io::Read;
use std::fs::{self, File};
use std::mem;
use file_route::FileRoute;
use project::Project;
/// Represents a file or directory that has been read from the filesystem.
#[derive(Debug, Clone)]
pub enum FileItem {
    /// A regular file and its full text contents.
    File {
        contents: String,
        route: FileRoute,
    },
    /// A directory; children are keyed by file name.
    Directory {
        children: HashMap<String, FileItem>,
        route: FileRoute,
    },
}
impl FileItem {
    /// Returns the route identifying this item, regardless of variant.
    pub fn get_route(&self) -> &FileRoute {
        match self {
            FileItem::File { route, .. } | FileItem::Directory { route, .. } => route,
        }
    }
}
/// A change detected on the filesystem, expressed in terms of VFS routes.
#[derive(Debug, Clone)]
pub enum FileChange {
    Created(FileRoute),
    Deleted(FileRoute),
    Updated(FileRoute),
    /// A rename or move: (old route, new route).
    Moved(FileRoute, FileRoute),
}
/// In-memory mirror of the files in each of the project's partitions.
pub struct VfsSession {
    pub project: Project,

    /// The in-memory files associated with each partition.
    pub partition_files: HashMap<String, FileItem>,
}
impl VfsSession {
    /// Creates a session with no partitions loaded yet.
    pub fn new(project: Project) -> VfsSession {
        VfsSession {
            project,
            partition_files: HashMap::new(),
        }
    }

    /// Reads every partition named by the project from disk into memory.
    ///
    /// Panics if any partition fails to read, since the server can't operate
    /// without its partitions.
    pub fn read_partitions(&mut self) {
        for partition_name in self.project.partitions.keys() {
            let route = FileRoute {
                partition: partition_name.clone(),
                route: Vec::new(),
            };

            let file_item = self.read(&route).expect("Couldn't load partitions");
            self.partition_files.insert(partition_name.clone(), file_item);
        }
    }

    /// Applies a filesystem change to the in-memory tree.
    ///
    /// Returns `Some(())` once the change has been applied, or `None` if the
    /// affected item couldn't be read from disk.
    pub fn handle_change(&mut self, change: &FileChange) -> Option<()> {
        match change {
            FileChange::Created(route) | FileChange::Updated(route) => {
                let new_item = self.read(route).ok()?;
                self.set_file_item(new_item);
            },
            FileChange::Deleted(route) => {
                self.delete_route(route);
            },
            FileChange::Moved(from_route, to_route) => {
                // Read the new location first so a failed read doesn't
                // destroy the old item.
                let new_item = self.read(to_route).ok()?;
                self.delete_route(from_route);
                self.set_file_item(new_item);
            },
        }

        Some(())
    }

    /// Looks up the item at `route`, or `None` if any component is missing
    /// or passes through a file.
    pub fn get_by_route(&self, route: &FileRoute) -> Option<&FileItem> {
        let mut current = self.partition_files.get(&route.partition)?;

        for piece in &route.route {
            current = match current {
                FileItem::File { .. } => return None,
                FileItem::Directory { children, .. } => children.get(piece)?,
            };
        }

        Some(current)
    }

    /// Mutable variant of `get_by_route`.
    pub fn get_by_route_mut(&mut self, route: &FileRoute) -> Option<&mut FileItem> {
        let mut current = self.partition_files.get_mut(&route.partition)?;

        for piece in &route.route {
            current = match current {
                FileItem::File { .. } => return None,
                FileItem::Directory { children, .. } => children.get_mut(piece)?,
            };
        }

        Some(current)
    }

    /// Inserts `item` at its own route, replacing whatever was there.
    ///
    /// If the item's parent directory doesn't exist in memory yet, a
    /// directory containing just this item is synthesized and inserted
    /// recursively.
    pub fn set_file_item(&mut self, item: FileItem) {
        // Fast path: the route already exists, replace it in place.
        if let Some(existing) = self.get_by_route_mut(item.get_route()) {
            *existing = item;
            return;
        }

        if !item.get_route().route.is_empty() {
            // Build the missing parent directory around this item and insert
            // that instead; recursion fills in any further missing ancestors.
            let mut parent_route = item.get_route().clone();
            let child_name = parent_route.route.pop().unwrap();

            let mut parent_children = HashMap::new();
            parent_children.insert(child_name, item);

            self.set_file_item(FileItem::Directory {
                route: parent_route,
                children: parent_children,
            });
        } else {
            // An empty route denotes the partition root itself.
            self.partition_files.insert(item.get_route().partition.clone(), item);
        }
    }

    /// Removes the item at `route` from memory, if present.
    ///
    /// Returns `None` only when a component of the route's parent chain is
    /// missing or is a file.
    pub fn delete_route(&mut self, route: &FileRoute) -> Option<()> {
        // Deleting a partition root removes the whole partition entry.
        if route.route.is_empty() {
            self.partition_files.remove(&route.partition);
            return Some(());
        }

        // Walk to the parent directory of the item being deleted.
        let mut current = self.partition_files.get_mut(&route.partition)?;

        for piece in &route.route[..route.route.len() - 1] {
            current = match current {
                FileItem::File { .. } => return None,
                FileItem::Directory { children, .. } => children.get_mut(piece)?,
            };
        }

        if let FileItem::Directory { children, .. } = current {
            children.remove(route.route.last().unwrap().as_str());
        }

        Some(())
    }

    /// Reads the file or directory at `route` from disk, dispatching on its
    /// metadata. Anything that is neither file nor directory is an error.
    fn read(&self, route: &FileRoute) -> Result<FileItem, ()> {
        let partition_path = &self.project.partitions.get(&route.partition)
            .ok_or(())?.path;
        let path = route.to_path_buf(partition_path);

        let metadata = fs::metadata(path)
            .map_err(|_| ())?;

        if metadata.is_dir() {
            self.read_directory(route)
        } else if metadata.is_file() {
            self.read_file(route)
        } else {
            Err(())
        }
    }

    /// Reads the file at `route` into a `FileItem::File`; fails if it can't
    /// be opened or isn't valid UTF-8.
    fn read_file(&self, route: &FileRoute) -> Result<FileItem, ()> {
        let partition_path = &self.project.partitions.get(&route.partition)
            .ok_or(())?.path;
        let path = route.to_path_buf(partition_path);

        let mut file = File::open(path)
            .map_err(|_| ())?;

        let mut contents = String::new();
        file.read_to_string(&mut contents)
            .map_err(|_| ())?;

        Ok(FileItem::File {
            contents,
            route: route.clone(),
        })
    }

    /// Recursively reads the directory at `route` into a
    /// `FileItem::Directory`; any unreadable child fails the whole read.
    fn read_directory(&self, route: &FileRoute) -> Result<FileItem, ()> {
        let partition_path = &self.project.partitions.get(&route.partition)
            .ok_or(())?.path;
        let path = route.to_path_buf(partition_path);

        let reader = fs::read_dir(path)
            .map_err(|_| ())?;

        let mut children = HashMap::new();

        for entry in reader {
            let entry = entry
                .map_err(|_| ())?;

            let path = entry.path();
            let name = path.file_name().unwrap().to_string_lossy().into_owned();

            let child_route = route.extended_with(&[&name]);
            let child_item = self.read(&child_route)?;

            children.insert(name, child_item);
        }

        Ok(FileItem::Directory {
            children,
            route: route.clone(),
        })
    }
}

View File

@@ -1,225 +1,212 @@
use std::io::Read;
use std::sync::{Arc, Mutex};
use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::{mpsc, RwLock, Arc};
use rouille;
use serde;
use serde_json;
use rouille::{self, Request, Response};
use id::Id;
use message_session::{MessageSession, Message};
use project::Project;
use vfs::{VfsSession, VfsChange};
use rbx::RbxInstance;
use plugin::PluginChain;
static MAX_BODY_SIZE: usize = 25 * 1024 * 1024; // 25 MiB
use rbx_session::RbxSession;
use session::Session;
/// The set of configuration the web server needs to start.
pub struct WebConfig {
pub port: u64,
pub verbose: bool,
pub project: Project,
pub server_id: u64,
pub rbx_session: Arc<RwLock<RbxSession>>,
pub message_session: MessageSession,
}
#[derive(Debug, Serialize)]
impl WebConfig {
pub fn from_session(server_id: u64, port: u64, session: &Session) -> WebConfig {
WebConfig {
port,
server_id,
project: session.project.clone(),
rbx_session: session.get_rbx_session(),
message_session: session.get_message_session(),
}
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ServerInfo<'a> {
pub struct ServerInfoResponse<'a> {
pub server_id: &'a str,
pub server_version: &'a str,
pub protocol_version: u64,
pub partitions: HashMap<String, Vec<String>>,
}
/// Response body for `/api/read_all`: a full snapshot of the instance tree
/// plus the message cursor the client should resume subscribing from.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReadAllResponse<'a> {
    pub server_id: &'a str,
    pub message_cursor: i32,
    pub instances: Cow<'a, HashMap<Id, RbxInstance>>,
    pub partition_instances: Cow<'a, HashMap<String, Id>>,
}
/// Response body for `/api/read/{ids}`: the requested instances and their
/// descendants, keyed by instance ID.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReadResponse<'a> {
    pub server_id: &'a str,
    pub message_cursor: i32,
    pub instances: HashMap<Id, Cow<'a, RbxInstance>>,
}
/// Response body for `/api/subscribe/{cursor}`: messages newer than the
/// client's cursor, plus the new cursor to resume from.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeResponse<'a> {
    pub server_id: &'a str,
    pub message_cursor: i32,
    pub messages: Cow<'a, [Message]>,
}
pub struct Server {
config: WebConfig,
server_version: &'static str,
protocol_version: u64,
server_id: &'a str,
project: &'a Project,
current_time: f64,
server_id: String,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct ReadResult<'a> {
items: Vec<Option<RbxInstance>>,
server_id: &'a str,
current_time: f64,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct ChangesResult<'a> {
changes: &'a [VfsChange],
server_id: &'a str,
current_time: f64,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct WriteSpecifier {
route: String,
item: RbxInstance,
}
/// Serializes `value` as JSON and wraps it in an HTTP response with the
/// matching content type.
fn json<T: serde::Serialize>(value: T) -> rouille::Response {
    let serialized = serde_json::to_string(&value).unwrap();
    rouille::Response::from_data("application/json", serialized)
}
/// Pulls text that may be JSON out of a Rouille Request object.
///
/// Doesn't do any actual parsing -- all this method does is verify the content
/// type of the request and read the request's body.
fn read_json_text(request: &rouille::Request) -> Option<String> {
// Bail out if the request body isn't marked as JSON
match request.header("Content-Type") {
Some(header) => if !header.starts_with("application/json") {
return None;
},
None => return None,
impl Server {
pub fn new(config: WebConfig) -> Server {
Server {
server_version: env!("CARGO_PKG_VERSION"),
server_id: config.server_id.to_string(),
config,
}
}
let body = match request.data() {
Some(v) => v,
None => return None,
};
// Allocate a buffer and read up to MAX_BODY_SIZE+1 bytes into it.
let mut out = Vec::new();
match body.take(MAX_BODY_SIZE.saturating_add(1) as u64).read_to_end(&mut out) {
Ok(_) => {},
Err(_) => return None,
}
// If the body was too big (MAX_BODY_SIZE+1), we abort instead of trying to
// process it.
if out.len() > MAX_BODY_SIZE {
return None;
}
let parsed = match String::from_utf8(out) {
Ok(v) => v,
Err(_) => return None,
};
Some(parsed)
}
/// Reads the body out of a Rouille Request and attempts to turn it into JSON.
fn read_json<T>(request: &rouille::Request) -> Option<T>
where
T: serde::de::DeserializeOwned,
{
let body = match read_json_text(&request) {
Some(v) => v,
None => return None,
};
let parsed = match serde_json::from_str(&body) {
Ok(v) => v,
Err(_) => return None,
};
// TODO: Change return type to some sort of Result
Some(parsed)
}
/// Start the Rojo web server and park our current thread.
pub fn start(config: WebConfig, project: Project, plugin_chain: &'static PluginChain, vfs: Arc<Mutex<VfsSession>>) {
let address = format!("localhost:{}", config.port);
let server_id = config.server_id.to_string();
rouille::start_server(address, move |request| {
pub fn handle_request(&self, request: &Request) -> Response {
router!(request,
(GET) (/) => {
Response::text("Rojo up and running!")
},
(GET) (/api/rojo) => {
// Get a summary of information about the server.
let current_time = {
let vfs = vfs.lock().unwrap();
let mut partitions = HashMap::new();
vfs.current_time()
};
json(ServerInfo {
server_version: env!("CARGO_PKG_VERSION"),
protocol_version: 1,
server_id: &server_id,
project: &project,
current_time,
})
},
(GET) (/changes/{ last_time: f64 }) => {
// Get the list of changes since the given time.
let vfs = vfs.lock().unwrap();
let current_time = vfs.current_time();
let changes = vfs.changes_since(last_time);
json(ChangesResult {
changes,
server_id: &server_id,
current_time,
})
},
(POST) (/read) => {
// Read some instances from the server according to a JSON
// format body.
let read_request: Vec<Vec<String>> = match read_json(&request) {
Some(v) => v,
None => return rouille::Response::empty_400(),
};
// Read the files off of the filesystem that the client
// requested.
let (items, current_time) = {
let vfs = vfs.lock().unwrap();
let current_time = vfs.current_time();
let mut items = Vec::new();
for route in &read_request {
match vfs.read(&route) {
Ok(v) => items.push(Some(v)),
Err(_) => items.push(None),
}
}
(items, current_time)
};
// Transform all of our VfsItem objects into Roblox instances
// the client can use.
let rbx_items = items
.iter()
.map(|item| {
match *item {
Some(ref item) => plugin_chain.transform_file(item),
None => None,
}
})
.collect::<Vec<_>>();
if config.verbose {
println!("Got read request: {:?}", read_request);
println!("Responding with:\n\t{:?}", rbx_items);
for partition in self.config.project.partitions.values() {
partitions.insert(partition.name.clone(), partition.target.clone());
}
json(ReadResult {
server_id: &server_id,
items: rbx_items,
current_time,
Response::json(&ServerInfoResponse {
server_version: self.server_version,
protocol_version: 2,
server_id: &self.server_id,
partitions: partitions,
})
},
(POST) (/write) => {
// Not yet implemented.
(GET) (/api/subscribe/{ cursor: i32 }) => {
// Retrieve any messages past the given cursor index, and if
// there weren't any, subscribe to receive any new messages.
let _write_request: Vec<WriteSpecifier> = match read_json(&request) {
Some(v) => v,
None => return rouille::Response::empty_400(),
};
// Did the client miss any messages since the last subscribe?
{
let messages = self.config.message_session.messages.read().unwrap();
rouille::Response::empty_404()
if cursor > messages.len() as i32 {
return Response::json(&SubscribeResponse {
server_id: &self.server_id,
messages: Cow::Borrowed(&[]),
message_cursor: messages.len() as i32 - 1,
});
}
if cursor < messages.len() as i32 - 1 {
let new_messages = &messages[(cursor + 1) as usize..];
let new_cursor = cursor + new_messages.len() as i32;
return Response::json(&SubscribeResponse {
server_id: &self.server_id,
messages: Cow::Borrowed(new_messages),
message_cursor: new_cursor,
});
}
}
let (tx, rx) = mpsc::channel();
let sender_id = self.config.message_session.subscribe(tx);
match rx.recv() {
Ok(_) => (),
Err(_) => return Response::text("error!").with_status_code(500),
}
self.config.message_session.unsubscribe(sender_id);
{
let messages = self.config.message_session.messages.read().unwrap();
let new_messages = &messages[(cursor + 1) as usize..];
let new_cursor = cursor + new_messages.len() as i32;
Response::json(&SubscribeResponse {
server_id: &self.server_id,
messages: Cow::Borrowed(new_messages),
message_cursor: new_cursor,
})
}
},
_ => rouille::Response::empty_404()
(GET) (/api/read_all) => {
let rbx_session = self.config.rbx_session.read().unwrap();
let message_cursor = self.config.message_session.get_message_cursor();
Response::json(&ReadAllResponse {
server_id: &self.server_id,
message_cursor,
instances: Cow::Borrowed(rbx_session.tree.get_all_instances()),
partition_instances: Cow::Borrowed(&rbx_session.partition_instances),
})
},
(GET) (/api/read/{ id_list: String }) => {
let requested_ids = id_list
.split(",")
.map(str::parse::<Id>)
.collect::<Result<Vec<Id>, _>>();
let requested_ids = match requested_ids {
Ok(v) => v,
Err(_) => return rouille::Response::text("Malformed ID list").with_status_code(400),
};
let rbx_session = self.config.rbx_session.read().unwrap();
let message_cursor = self.config.message_session.get_message_cursor();
let mut instances = HashMap::new();
for requested_id in &requested_ids {
rbx_session.tree.get_instance(*requested_id, &mut instances);
}
Response::json(&ReadResponse {
server_id: &self.server_id,
message_cursor,
instances,
})
},
_ => Response::empty_404()
)
});
}
}
/// Start the Rojo web server, taking over the current thread.
// `rouille::start_server` never returns, hence the unreachable_code allow.
#[allow(unreachable_code)]
pub fn start(config: WebConfig) {
    let address = format!("localhost:{}", config.port);
    let server = Server::new(config);

    rouille::start_server(address, move |request| server.handle_request(request));
}

43
server/src/web_util.rs Normal file
View File

@@ -0,0 +1,43 @@
use std::io::Read;
use rouille;
use serde;
use serde_json;
static MAX_BODY_SIZE: usize = 100 * 1024 * 1024; // 100 MiB
/// Pulls text that may be JSON out of a Rouille Request object.
///
/// Doesn't do any actual parsing -- all this method does is verify the content
/// type of the request and read the request's body.
fn read_json_text(request: &rouille::Request) -> Option<String> {
    // Reject anything not explicitly marked as JSON.
    if !request.header("Content-Type")?.starts_with("application/json") {
        return None;
    }

    let body = request.data()?;

    // Read at most MAX_BODY_SIZE + 1 bytes so an oversized payload can be
    // detected without reading it in full.
    let mut buffer = Vec::new();
    body.take(MAX_BODY_SIZE.saturating_add(1) as u64)
        .read_to_end(&mut buffer)
        .ok()?;

    // If the body was too big (MAX_BODY_SIZE+1), we abort instead of trying
    // to process it.
    if buffer.len() > MAX_BODY_SIZE {
        return None;
    }

    String::from_utf8(buffer).ok()
}
/// Reads the body out of a Rouille Request and attempts to turn it into JSON.
///
/// Returns `None` if the body is missing, isn't marked as JSON, is too
/// large, or fails to deserialize into `T`.
pub fn read_json<T>(request: &rouille::Request) -> Option<T>
where
    T: serde::de::DeserializeOwned,
{
    let body = read_json_text(&request)?;

    // Note: no trailing `?` here -- the tail expression must itself be the
    // `Option<T>`; `.ok()?` would unwrap to `T` and fail to type-check.
    serde_json::from_str(&body).ok()
}