mirror of https://github.com/uqbar-dao/nectar.git
synced 2024-12-20 15:11:45 +03:00

Merge pull request #118 from uqbar-dao/dr/app-store-refresh (commit 6d4ad9fb7e)

Cargo.lock (generated): 1 change
@@ -4698,6 +4698,7 @@ dependencies = [
  "mio",
  "num_cpus",
  "pin-project-lite",
+ "signal-hook-registry",
  "socket2 0.5.4",
  "tokio-macros",
  "windows-sys 0.48.0",
Cargo.toml

@@ -55,7 +55,9 @@ rand = "0.8.4"
 reqwest = "0.11.18"
 ring = "0.16.20"
 rmp-serde = "1.1.2"
+rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
 route-recognizer = "0.3.1"
+rusqlite = { version = "0.30.0", features = ["bundled"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_urlencoded = "0.7"
@@ -63,7 +65,7 @@ sha2 = "0.10"
 snow = { version = "0.9.3", features = ["ring-resolver"] }
 static_dir = "0.2.0"
 thiserror = "1.0"
-tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "sync"] }
+tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "signal", "sync"] }
 tokio-tungstenite = "0.20.1"
 url = "2.4.1"
 uuid = { version = "1.1.2", features = ["serde", "v4"] }
@@ -71,5 +73,3 @@ warp = "0.3.5"
 wasmtime = "15.0.1"
 wasmtime-wasi = "15.0.1"
 zip = "0.6"
-rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
-rusqlite = { version = "0.30.0", features = ["bundled"] }
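
The root manifest change above enables tokio's `signal` feature, which is what pulls `signal-hook-registry` into Cargo.lock. A minimal sketch of the API this feature gates; the graceful-shutdown motivation is an assumption, not stated in the diff:

    use tokio::signal;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        // ... spawn the node's long-running tasks here ...

        // wait for Ctrl-C (SIGINT); tokio::signal is only compiled in
        // when the "signal" feature enabled above is on
        signal::ctrl_c().await?;
        println!("shutting down cleanly");
        Ok(())
    }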
modules/app_store/app_store/Cargo.lock (generated): 28 changes
@@ -206,6 +206,22 @@ version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
+[[package]]
+name = "mime_guess"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
+dependencies = [
+ "mime",
+ "unicase",
+]
 
 [[package]]
 name = "percent-encoding"
 version = "2.3.1"
@@ -387,6 +403,15 @@ version = "1.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "unicase"
+version = "2.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
+dependencies = [
+ "version_check",
+]
 
 [[package]]
 name = "unicode-bidi"
 version = "0.3.14"
@@ -423,11 +448,12 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
 [[package]]
 name = "uqbar_process_lib"
 version = "0.4.0"
-source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=8342b1a#8342b1a131401fb5d141dab8c90e79aa6d2bc909"
+source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=2d17d75#2d17d75152e55ef3ed417c79312e209ca45b8dbb"
 dependencies = [
  "anyhow",
  "bincode",
  "http",
+ "mime_guess",
  "rand",
  "serde",
  "serde_json",
modules/app_store/app_store/Cargo.toml

@@ -3,8 +3,6 @@ name = "app_store"
 version = "0.2.0"
 edition = "2021"
 
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [profile.release]
 panic = "abort"
 opt-level = "s"
@@ -17,7 +15,7 @@ rand = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sha2 = "0.10.8"
-uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "8342b1a" }
+uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "2d17d75" }
 wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "efcc759" }
 
 [lib]
@@ -1,11 +1,10 @@
 use serde::{Deserialize, Serialize};
+use serde_json::Value;
 use sha2::Digest;
 use std::collections::{HashMap, HashSet};
 use uqbar_process_lib::kernel_types as kt;
-use uqbar_process_lib::{
-    await_message, get_capability, get_payload, get_typed_state, grant_messaging, println,
-    set_state, share_capability, Address, Message, NodeId, PackageId, ProcessId, Request, Response,
-};
+use uqbar_process_lib::println;
+use uqbar_process_lib::*;
 
 wit_bindgen::generate!({
     path: "../../../wit",
@@ -15,14 +14,11 @@ wit_bindgen::generate!({
     },
 });
 
-#[allow(dead_code)]
 mod ft_worker_lib;
 use ft_worker_lib::{
     spawn_receive_transfer, spawn_transfer, FTWorkerCommand, FTWorkerResult, FileTransferContext,
 };
 
-struct Component;
-
 /// Uqbar App Store:
 /// acts as both a local package manager and a protocol to share packages across the network.
 /// packages are apps; apps are packages. we use an onchain app listing contract to determine
@@ -73,25 +69,20 @@ struct PackageListing {
 // app store API
 //
 
+/// Remote requests, those sent between instantiations of this process
+/// on different nodes, take this form. Will add more to enum in the future
 #[derive(Debug, Serialize, Deserialize)]
-#[serde(untagged)] // untagged as a meta-type for all requests
-pub enum Req {
-    LocalRequest(LocalRequest),
-    RemoteRequest(RemoteRequest),
-    FTWorkerCommand(FTWorkerCommand),
-    FTWorkerResult(FTWorkerResult),
+pub enum RemoteRequest {
+    /// no payload; request a package from a node
+    /// remote node must return RemoteResponse::DownloadApproved,
+    /// at which point requester can expect a FTWorkerRequest::Receive
+    Download(PackageId),
 }
 
 #[derive(Debug, Serialize, Deserialize)]
-#[serde(untagged)] // untagged as a meta-type for all responses
-pub enum Resp {
-    RemoteResponse(RemoteResponse),
-    FTWorkerResult(FTWorkerResult),
-    // note that we do not need to ourselves handle local responses, as
-    // those are given to others rather than received.
-    NewPackageResponse(NewPackageResponse),
-    DownloadResponse(DownloadResponse),
-    InstallResponse(InstallResponse),
+pub enum RemoteResponse {
+    DownloadApproved,
+    DownloadDenied, // TODO expand on why
 }
 
 /// Local requests take this form.
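
Since the reworked `RemoteRequest` and `RemoteResponse` use serde's default externally-tagged representation (the old `#[serde(untagged)]` is gone from the public enums), the wire shape can be read off directly. A sketch; the serialized field names of `PackageId` are inferred from the terminal examples later in this diff:

    // node A -> node B, serialized RemoteRequest::Download(package_id):
    //   {"Download": {"package_name": "sdapi", "publisher_node": "benjammin.uq"}}
    // node B -> node A, a RemoteResponse unit variant serializes as a bare string:
    //   "DownloadApproved"  or  "DownloadDenied"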
@@ -112,28 +103,23 @@ pub enum LocalRequest {
     /// no payload; select a downloaded package and install it
     /// if requested, will return an InstallResponse indicating success/failure
     Install(PackageId),
-    /// no payload; select an installed package and uninstall it
-    /// no response will be given
+    /// Takes no payload; Select an installed package and uninstall it.
+    /// This will kill the processes in the **manifest** of the package,
+    /// but not the processes that were spawned by those processes! Take
+    /// care to kill those processes yourself. This will also delete the drive
+    /// containing the source code for this package. This does not guarantee
+    /// that other data created by this package will be removed from places such
+    /// as the key-value store.
     Uninstall(PackageId),
-    /// no payload; select a downloaded package and delete it
-    /// no response will be given
-    Delete(PackageId),
 }
 
-/// Remote requests, those sent between instantiations of this process
-/// on different nodes, take this form.
+/// Local responses take this form.
 #[derive(Debug, Serialize, Deserialize)]
-pub enum RemoteRequest {
-    /// no payload; request a package from a node
-    /// remote node must return RemoteResponse::DownloadApproved,
-    /// at which point requester can expect a FTWorkerRequest::Receive
-    Download(PackageId),
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub enum RemoteResponse {
-    DownloadApproved,
-    DownloadDenied, // TODO expand on why
-}
+pub enum LocalResponse {
+    NewPackageResponse(NewPackageResponse),
+    DownloadResponse(DownloadResponse),
+    InstallResponse(InstallResponse),
+    UninstallResponse(UninstallResponse),
+}
 
 // TODO for all: expand these to elucidate why something failed
@@ -157,39 +143,59 @@ pub enum InstallResponse {
     Failure,
 }
 
-impl Guest for Component {
-    fn init(our: String) {
-        let our = Address::from_str(&our).unwrap();
-        // begin by granting messaging capabilities to http_server and terminal,
-        // so that they can send us requests.
-        grant_messaging(
-            &our,
-            vec![
-                ProcessId::new(Some("http_server"), "sys", "uqbar"),
-                ProcessId::new(Some("terminal"), "terminal", "uqbar"),
-                ProcessId::new(Some("vfs"), "sys", "uqbar"),
-            ],
-        );
-        println!("{}: start", our.process);
+#[derive(Debug, Serialize, Deserialize)]
+pub enum UninstallResponse {
+    Success,
+    Failure,
+}
+
+// internal types
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(untagged)] // untagged as a meta-type for all incoming requests
+pub enum Req {
+    LocalRequest(LocalRequest),
+    RemoteRequest(RemoteRequest),
+    FTWorkerCommand(FTWorkerCommand),
+    FTWorkerResult(FTWorkerResult),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(untagged)] // untagged as a meta-type for all incoming responses
+pub enum Resp {
+    RemoteResponse(RemoteResponse),
+    FTWorkerResult(FTWorkerResult),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ManifestCap {
+    process: String,
+    params: Value,
+}
+
+// /m our@main:app_store:ben.uq {"Download": {"package": {"package_name": "sdapi", "publisher_node": "benjammin.uq"}, "install_from": "testnode107.uq"}}
+// /m our@main:app_store:ben.uq {"Install": {"package_name": "sdapi", "publisher_node": "benjammin.uq"}}
+
+call_init!(init);
+fn init(our: Address) {
+    println!("{}: running", our.process);
 
     // load in our saved state or initalize a new one if none exists
-    let mut state =
-        get_typed_state(|bytes| Ok(bincode::deserialize(bytes)?)).unwrap_or(State {
-            packages: HashMap::new(),
-            requested_packages: HashSet::new(),
-        });
+    let mut state = get_typed_state(|bytes| Ok(bincode::deserialize(bytes)?)).unwrap_or(State {
+        packages: HashMap::new(),
+        requested_packages: HashSet::new(),
+    });
 
-    // active the main messaging loop: handle requests and responses
     loop {
         match await_message() {
             Err(send_error) => {
-                println!("{our}: got network error: {send_error:?}");
-                continue;
+                println!("app store: got network error: {send_error:?}");
             }
-            Ok(message) => match handle_message(&our, &mut state, &message) {
-                Ok(()) => {}
-                Err(e) => println!("app-store: error handling message: {:?}", e),
-            },
+            Ok(message) => {
+                if let Err(e) = handle_message(&our, &mut state, &message) {
+                    println!("app store: error handling message: {:?}", e)
+                }
+            }
         }
     }
 }
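
`call_init!` comes from `uqbar_process_lib` and replaces the wit-bindgen glue deleted above. A rough sketch of what the macro must generate, inferred from that deleted boilerplate rather than from the macro's actual source:

    struct Component;
    impl Guest for Component {
        fn init(our: String) {
            // parse the stringly-typed address once, then hand control to
            // the user's fn init(our: Address)
            let our = Address::from_str(&our).unwrap();
            init(our);
        }
    }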
@@ -203,36 +209,25 @@ fn handle_message(our: &Address, mut state: &mut State, message: &Message) -> an
             ipc,
             ..
         } => {
-            match &serde_json::from_slice::<Req>(&ipc) {
-                Ok(Req::LocalRequest(local_request)) => {
-                    match handle_local_request(&our, &source, local_request, &mut state) {
-                        Ok(None) => return Ok(()),
-                        Ok(Some(resp)) => {
-                            if expects_response.is_some() {
-                                Response::new().ipc(serde_json::to_vec(&resp)?).send()?;
-                            }
-                        }
-                        Err(err) => {
-                            println!("app-store: local request error: {:?}", err);
-                        }
-                    }
-                }
-                Ok(Req::RemoteRequest(remote_request)) => {
-                    match handle_remote_request(&our, &source, remote_request, &mut state) {
-                        Ok(None) => return Ok(()),
-                        Ok(Some(resp)) => {
-                            if expects_response.is_some() {
-                                Response::new().ipc(serde_json::to_vec(&resp)?).send()?;
-                            }
-                        }
-                        Err(err) => {
-                            println!("app-store: remote request error: {:?}", err);
-                        }
-                    }
-                }
-                Ok(Req::FTWorkerResult(FTWorkerResult::ReceiveSuccess(name))) => {
+            match &serde_json::from_slice::<Req>(&ipc)? {
+                Req::LocalRequest(local_request) => {
+                    if our.node != source.node {
+                        return Err(anyhow::anyhow!("local request from non-local node"));
+                    }
+                    let resp = handle_local_request(&our, local_request, &mut state);
+                    if expects_response.is_some() {
+                        Response::new().ipc(serde_json::to_vec(&resp)?).send()?;
+                    }
+                }
+                Req::RemoteRequest(remote_request) => {
+                    let resp = handle_remote_request(&our, &source, remote_request, &mut state);
+                    if expects_response.is_some() {
+                        Response::new().ipc(serde_json::to_vec(&resp)?).send()?;
+                    }
+                }
+                Req::FTWorkerResult(FTWorkerResult::ReceiveSuccess(name)) => {
                     // do with file what you'd like here
-                    println!("file_transfer: successfully received {:?}", name);
+                    println!("app store: successfully received {:?}", name);
                     // remove leading / and .zip from file name to get package ID
                     let package_id = match PackageId::from_str(name[1..].trim_end_matches(".zip")) {
                         Ok(package_id) => package_id,
@@ -241,6 +236,7 @@ fn handle_message(our: &Address, mut state: &mut State, message: &Message) -> an
                             return Err(anyhow::anyhow!(e));
                         }
                     };
+                    // only install the app if we actually requested it
                     if state.requested_packages.remove(&package_id) {
                         // auto-take zip from payload and request ourself with New
                         Request::new()
@@ -249,43 +245,37 @@ fn handle_message(our: &Address, mut state: &mut State, message: &Message) -> an
                             .ipc(serde_json::to_vec(&Req::LocalRequest(
                                 LocalRequest::NewPackage {
                                     package: package_id,
-                                    mirror: true,
+                                    mirror: true, // can turn off auto-mirroring
                                 },
                             ))?)
                             .send()?;
+                        crate::set_state(&bincode::serialize(state)?);
                     }
                 }
-                Ok(Req::FTWorkerCommand(_)) => {
-                    spawn_receive_transfer(&our, &ipc);
+                Req::FTWorkerResult(r) => {
+                    println!("app store: got ft_worker result: {r:?}");
                 }
-                e => {
-                    return Err(anyhow::anyhow!(
-                        "app store bad request: {:?}, error {:?}",
-                        ipc,
-                        e
-                    ))
+                Req::FTWorkerCommand(_) => {
+                    spawn_receive_transfer(&our, &ipc)?;
                 }
             }
         }
-        Message::Response { ipc, context, .. } => match &serde_json::from_slice::<Resp>(&ipc) {
-            Ok(Resp::RemoteResponse(remote_response)) => match remote_response {
+        Message::Response { ipc, context, .. } => match &serde_json::from_slice::<Resp>(&ipc)? {
+            Resp::RemoteResponse(remote_response) => match remote_response {
                 RemoteResponse::DownloadApproved => {
-                    println!("app store: download approved, should be starting");
+                    println!("app store: download approved");
                 }
                 RemoteResponse::DownloadDenied => {
                     println!("app store: could not download package from that node!");
                 }
             },
-            Ok(Resp::FTWorkerResult(ft_worker_result)) => {
-                let Ok(context) =
-                    serde_json::from_slice::<FileTransferContext>(&context.as_ref().unwrap())
-                else {
-                    return Err(anyhow::anyhow!("file_transfer: got weird local request"));
-                };
+            Resp::FTWorkerResult(ft_worker_result) => {
+                let context =
+                    serde_json::from_slice::<FileTransferContext>(&context.as_ref().unwrap())?;
                 match ft_worker_result {
                     FTWorkerResult::SendSuccess => {
                         println!(
-                            "file_transfer: successfully shared app {} in {:.4}s",
+                            "app store: successfully shared app {} in {:.4}s",
                             context.file_name,
                             std::time::SystemTime::now()
                                 .duration_since(context.start_time)
@@ -293,39 +283,76 @@ fn handle_message(our: &Address, mut state: &mut State, message: &Message) -> an
                                 .as_secs_f64(),
                         );
                     }
-                    e => return Err(anyhow::anyhow!("file_transfer: {:?}", e)),
+                    e => return Err(anyhow::anyhow!("app store: ft_worker gave us {e:?}")),
                 }
             }
-            _ => return Err(anyhow::anyhow!("bad response from file transfer worker")),
         },
     }
     Ok(())
 }
 
-fn handle_local_request(
-    our: &Address,
-    source: &Address,
-    request: &LocalRequest,
-    state: &mut State,
-) -> anyhow::Result<Option<Resp>> {
-    if our.node != source.node {
-        return Err(anyhow::anyhow!("local request from non-local node"));
-    }
+/// only `our.node` can call this
+fn handle_local_request(our: &Address, request: &LocalRequest, state: &mut State) -> LocalResponse {
     match request {
         LocalRequest::NewPackage { package, mirror } => {
+            match handle_new_package(our, package, *mirror, state) {
+                Ok(()) => LocalResponse::NewPackageResponse(NewPackageResponse::Success),
+                Err(_) => LocalResponse::NewPackageResponse(NewPackageResponse::Failure),
+            }
+        }
+        LocalRequest::Download {
+            package,
+            install_from,
+        } => LocalResponse::DownloadResponse(
+            match Request::new()
+                .target((install_from.as_str(), our.process.clone()))
+                .inherit(true)
+                .ipc(serde_json::to_vec(&RemoteRequest::Download(package.clone())).unwrap())
+                .send_and_await_response(5)
+            {
+                Ok(Ok(Message::Response { ipc, .. })) => {
+                    match serde_json::from_slice::<Resp>(&ipc) {
+                        Ok(Resp::RemoteResponse(RemoteResponse::DownloadApproved)) => {
+                            state.requested_packages.insert(package.clone());
+                            crate::set_state(&bincode::serialize(&state).unwrap());
+                            DownloadResponse::Started
+                        }
+                        _ => DownloadResponse::Failure,
+                    }
+                }
+                _ => DownloadResponse::Failure,
+            },
+        ),
+        LocalRequest::Install(package) => match handle_install(our, package) {
+            Ok(()) => LocalResponse::InstallResponse(InstallResponse::Success),
+            Err(_) => LocalResponse::InstallResponse(InstallResponse::Failure),
+        },
+        LocalRequest::Uninstall(package) => match handle_uninstall(package) {
+            Ok(()) => LocalResponse::UninstallResponse(UninstallResponse::Success),
+            Err(_) => LocalResponse::UninstallResponse(UninstallResponse::Failure),
+        },
+    }
+}
+
+fn handle_new_package(
+    our: &Address,
+    package: &PackageId,
+    mirror: bool,
+    state: &mut State,
+) -> anyhow::Result<()> {
     let Some(mut payload) = get_payload() else {
         return Err(anyhow::anyhow!("no payload"));
     };
     let drive = format!("/{}/pkg", package);
+
+    // create a new drive for this package in VFS
     Request::new()
-        .target(Address::from_str("our@vfs:sys:uqbar")?)
+        .target(("our", "vfs", "sys", "uqbar"))
         .ipc(serde_json::to_vec(&kt::VfsRequest {
             path: drive.clone(),
             action: kt::VfsAction::CreateDrive,
         })?)
-        .send_and_await_response(5)?
-        .unwrap();
+        .send_and_await_response(5)??;
+
     // produce the version hash for this new package
     let mut hasher = sha2::Sha256::new();
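
Note the shape change: handlers now return plain response values (`LocalResponse`) instead of `anyhow::Result<Option<Resp>>`, so a local caller always gets exactly one well-typed answer. A sketch of a local client of this API; the `main:app_store` target is taken from the terminal examples above, the publisher name is hypothetical, and error handling is elided:

    let message = Request::new()
        .target((our.node.as_ref(), "main", "app_store", "uqbar")) // hypothetical publisher
        .ipc(serde_json::to_vec(&Req::LocalRequest(LocalRequest::Install(
            package_id.clone(),
        )))?)
        .send_and_await_response(5)??;
    let Message::Response { ipc, .. } = message else { unreachable!() };
    match serde_json::from_slice::<LocalResponse>(&ipc)? {
        LocalResponse::InstallResponse(InstallResponse::Success) => println!("installed!"),
        other => println!("install did not succeed: {:?}", other),
    }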
@@ -335,44 +362,43 @@ fn handle_local_request(
     // add zip bytes
     payload.mime = Some("application/zip".to_string());
     let response = Request::new()
-        .target(Address::from_str("our@vfs:sys:uqbar")?)
+        .target(("our", "vfs", "sys", "uqbar"))
         .ipc(serde_json::to_vec(&kt::VfsRequest {
             path: drive.clone(),
             action: kt::VfsAction::AddZip,
         })?)
         .payload(payload.clone())
-        .send_and_await_response(5)?.unwrap();
-    let Message::Response { ipc: ref vfs_ipc, .. } = response else {
-        panic!("app_store: send_and_await_response must return Response");
-    };
-    let vfs_ipc = serde_json::from_slice::<serde_json::Value>(vfs_ipc)?;
+        .send_and_await_response(5)??;
+    let vfs_ipc = serde_json::from_slice::<serde_json::Value>(response.ipc())?;
     if vfs_ipc == serde_json::json!({"Err": "NoCap"}) {
-        return Err(anyhow::anyhow!("cannot add NewPackage: do not have capability to access vfs"));
+        return Err(anyhow::anyhow!(
+            "cannot add NewPackage: do not have capability to access vfs"
+        ));
     }
+
     // save the zip file itself in VFS for sharing with other nodes
     // call it <package>.zip
     let zip_path = format!("{}/{}.zip", drive.clone(), package);
     Request::new()
-        .target(Address::from_str("our@vfs:sys:uqbar")?)
+        .target(("our", "vfs", "sys", "uqbar"))
         .inherit(true)
         .ipc(serde_json::to_vec(&kt::VfsRequest {
             path: zip_path,
             action: kt::VfsAction::ReWrite,
         })?)
         .payload(payload)
-        .send_and_await_response(5)?
-        .unwrap();
+        .send_and_await_response(5)??;
     let metadata_path = format!("{}/metadata.json", drive.clone());
+
+    // now, read the pkg contents to create our own listing and state,
+    // such that we can mirror this package to others.
     Request::new()
-        .target(Address::from_str("our@vfs:sys:uqbar")?)
+        .target(("our", "vfs", "sys", "uqbar"))
        .ipc(serde_json::to_vec(&kt::VfsRequest {
             path: metadata_path,
             action: kt::VfsAction::Read,
         })?)
-        .send_and_await_response(5)?
-        .unwrap();
+        .send_and_await_response(5)??;
     let Some(payload) = get_payload() else {
         return Err(anyhow::anyhow!("no metadata found!"));
     };
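
The version hash set up in `handle_new_package` is a SHA-256 digest over the package's zip bytes. The same `sha2` API the file already imports, shown in isolation:

    use sha2::Digest;

    let mut hasher = sha2::Sha256::new();
    hasher.update(&payload.bytes); // the zip attached to the request
    let version_hash = hasher.finalize(); // 32-byte digest identifying this package version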
@@ -391,49 +417,23 @@ fn handle_local_request(
     let package_state = PackageState {
         mirrored_from: our.node.clone(),
         listing_data,
-        mirroring: *mirror,
+        mirroring: mirror,
         auto_update: true,
     };
     state.packages.insert(package.clone(), package_state);
-    crate::set_state(&bincode::serialize(state)?);
-    Ok(Some(Resp::NewPackageResponse(NewPackageResponse::Success)))
+    crate::set_state(&bincode::serialize(state).unwrap());
+    Ok(())
 }
-        LocalRequest::Download {
-            package,
-            install_from,
-        } => Ok(Some(Resp::DownloadResponse(
-            match Request::new()
-                .target(Address::new(install_from, our.process.clone()))
-                .inherit(true)
-                .ipc(serde_json::to_vec(&RemoteRequest::Download(
-                    package.clone(),
-                ))?)
-                .send_and_await_response(5)
-            {
-                Ok(Ok(Message::Response { ipc, .. })) => {
-                    let resp = serde_json::from_slice::<Resp>(&ipc)?;
-                    match resp {
-                        Resp::RemoteResponse(RemoteResponse::DownloadApproved) => {
-                            state.requested_packages.insert(package.clone());
-                            crate::set_state(&bincode::serialize(&state)?);
-                            DownloadResponse::Started
-                        }
-                        _ => DownloadResponse::Failure,
-                    }
-                }
-                _ => DownloadResponse::Failure,
-            },
-        ))),
-        LocalRequest::Install(package) => {
+
+fn handle_install(our: &Address, package: &PackageId) -> anyhow::Result<()> {
     let drive_path = format!("/{}/pkg", package);
     Request::new()
-        .target(Address::from_str("our@vfs:sys:uqbar")?)
+        .target(("our", "vfs", "sys", "uqbar"))
         .ipc(serde_json::to_vec(&kt::VfsRequest {
             path: format!("{}/manifest.json", drive_path),
             action: kt::VfsAction::Read,
         })?)
-        .send_and_await_response(5)?
-        .unwrap();
+        .send_and_await_response(5)??;
     let Some(payload) = get_payload() else {
         return Err(anyhow::anyhow!("no payload"));
     };
@@ -447,7 +447,7 @@ fn handle_local_request(
             "drive": drive_path,
         }))?,
     ) else {
-        return Err(anyhow::anyhow!("app-store: no read cap"));
+        return Err(anyhow::anyhow!("app store: no read cap"));
     };
     let Some(write_cap) = get_capability(
         &Address::new(&our.node, ("vfs", "sys", "uqbar")),
@@ -456,13 +456,13 @@ fn handle_local_request(
             "drive": drive_path,
         }))?,
     ) else {
-        return Err(anyhow::anyhow!("app-store: no write cap"));
+        return Err(anyhow::anyhow!("app store: no write cap"));
     };
     let Some(networking_cap) = get_capability(
         &Address::new(&our.node, ("kernel", "sys", "uqbar")),
         &"\"network\"".to_string(),
     ) else {
-        return Err(anyhow::anyhow!("app-store: no net cap"));
+        return Err(anyhow::anyhow!("app store: no net cap"));
     };
     // first, for each process in manifest, initialize it
     // then, once all have been initialized, grant them requested caps
@@ -477,33 +477,31 @@ fn handle_local_request(
         // build initial caps
         let mut initial_capabilities: HashSet<kt::SignedCapability> = HashSet::new();
         if entry.request_networking {
-            initial_capabilities
-                .insert(kt::de_wit_signed_capability(networking_cap.clone()));
+            initial_capabilities.insert(kt::de_wit_signed_capability(networking_cap.clone()));
         }
         initial_capabilities.insert(kt::de_wit_signed_capability(read_cap.clone()));
         initial_capabilities.insert(kt::de_wit_signed_capability(write_cap.clone()));
         let process_id = format!("{}:{}", entry.process_name, package);
         let Ok(parsed_new_process_id) = ProcessId::from_str(&process_id) else {
-            return Err(anyhow::anyhow!("app-store: invalid process id!"));
+            return Err(anyhow::anyhow!("app store: invalid process id!"));
         };
         // kill process if it already exists
         Request::new()
-            .target(Address::from_str("our@kernel:sys:uqbar")?)
+            .target(("our", "kernel", "sys", "uqbar"))
            .ipc(serde_json::to_vec(&kt::KernelCommand::KillProcess(
                 parsed_new_process_id.clone(),
             ))?)
             .send()?;
 
         let _bytes_response = Request::new()
-            .target(Address::from_str("our@vfs:sys:uqbar")?)
+            .target(("our", "vfs", "sys", "uqbar"))
             .ipc(serde_json::to_vec(&kt::VfsRequest {
                 path: wasm_path.clone(),
                 action: kt::VfsAction::Read,
             })?)
-            .send_and_await_response(5)?
-            .unwrap();
+            .send_and_await_response(5)??;
         Request::new()
-            .target(Address::from_str("our@kernel:sys:uqbar")?)
+            .target(("our", "kernel", "sys", "uqbar"))
             .ipc(serde_json::to_vec(&kt::KernelCommand::InitializeProcess {
                 id: parsed_new_process_id,
                 wasm_bytes_handle: wasm_path,
@@ -512,8 +510,7 @@ fn handle_local_request(
                 public: entry.public,
             })?)
             .inherit(true)
-            .send_and_await_response(5)?
-            .unwrap();
+            .send_and_await_response(5)??;
     }
     for entry in &manifest {
         let process_id = ProcessId::new(
@@ -524,8 +521,7 @@ fn handle_local_request(
         if let Some(to_request) = &entry.request_messaging {
             for value in to_request {
                 let mut capability = None;
-                match value {
-                    serde_json::Value::String(process_name) => {
+                if let serde_json::Value::String(process_name) = value {
                     if let Ok(parsed_process_id) = ProcessId::from_str(process_name) {
                         capability = get_capability(
                             &Address {
@@ -535,32 +531,28 @@ fn handle_local_request(
                             &"\"messaging\"".into(),
                         );
                     }
-                    }
-                    serde_json::Value::Object(map) => {
-                        if let Some(process_name) = map.get("process") {
-                            if let Ok(parsed_process_id) =
-                                ProcessId::from_str(&process_name.to_string())
-                            {
-                                if let Some(params) = map.get("params") {
+                } else {
+                    let Ok(parsed) = serde_json::from_value::<ManifestCap>(value.to_owned()) else {
+                        continue
+                    };
+                    if let Ok(parsed_process_id) = ProcessId::from_str(&parsed.process) {
                         capability = get_capability(
                             &Address {
                                 node: our.node.clone(),
                                 process: parsed_process_id.clone(),
                             },
-                            &params.to_string(),
+                            &parsed.params.to_string(),
                         );
                     }
                 }
-                            }
-                        }
-                    }
-                    _ => {
-                        continue;
-                    }
-                }
                 if let Some(cap) = capability {
                     share_capability(&process_id, &cap);
                 } else {
-                    println!("app-store: no cap: {}, for {} to request!", value.to_string(), process_id);
+                    println!(
+                        "app store: no cap {} for {} to request!",
+                        value.to_string(),
+                        process_id
+                    );
                 }
             }
         }
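
The rewritten branch above accepts two shapes for each `request_messaging` entry in a package manifest: a bare process-name string, or an object deserializable into the new `ManifestCap` type. A sketch of both, with illustrative values (this fragment is not from the repo):

    // "request_messaging": [
    //     "http_server:sys:uqbar",
    //     { "process": "vfs:sys:uqbar", "params": { "drive": "/my_app/pkg" } }
    // ]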
@@ -607,29 +599,60 @@ fn handle_local_request(
                 if let Some(cap) = capability {
                     share_capability(&to_process.unwrap(), &cap);
                 } else {
-                    println!("app-store: no cap: {}, for {} to grant!", value.to_string(), process_id);
+                    println!(
+                        "app store: no cap {} for {} to grant!",
+                        value.to_string(),
+                        process_id
+                    );
                 }
             }
         }
         Request::new()
-            .target(Address::from_str("our@kernel:sys:uqbar")?)
+            .target(("our", "kernel", "sys", "uqbar"))
             .ipc(serde_json::to_vec(&kt::KernelCommand::RunProcess(
                 process_id,
             ))?)
-            .send_and_await_response(5)?
-            .unwrap();
-        }
-        Ok(Some(Resp::InstallResponse(InstallResponse::Success)))
-        }
-        LocalRequest::Uninstall(_package) => {
-            // TODO
-            Ok(None)
-        }
-        LocalRequest::Delete(_package) => {
-            // TODO
-            Ok(None)
-        }
+            .send_and_await_response(5)??;
     }
+    Ok(())
+}
+
+fn handle_uninstall(package: &PackageId) -> anyhow::Result<()> {
+    let drive_path = format!("/{}/pkg", package);
+    Request::new()
+        .target(("our", "vfs", "sys", "uqbar"))
+        .ipc(serde_json::to_vec(&kt::VfsRequest {
+            path: format!("{}/manifest.json", drive_path),
+            action: kt::VfsAction::Read,
+        })?)
+        .send_and_await_response(5)??;
+    let Some(payload) = get_payload() else {
+        return Err(anyhow::anyhow!("no payload"));
+    };
+    let manifest = String::from_utf8(payload.bytes)?;
+    let manifest = serde_json::from_str::<Vec<kt::PackageManifestEntry>>(&manifest)?;
+    // reading from the package manifest, kill every process
+    for entry in &manifest {
+        let process_id = format!("{}:{}", entry.process_name, package);
+        let Ok(parsed_new_process_id) = ProcessId::from_str(&process_id) else {
+            continue
+        };
+        Request::new()
+            .target(("our", "kernel", "sys", "uqbar"))
+            .ipc(serde_json::to_vec(&kt::KernelCommand::KillProcess(
+                parsed_new_process_id,
+            ))?)
+            .send()?;
+    }
+    // then, delete the drive
+    Request::new()
+        .target(("our", "vfs", "sys", "uqbar"))
+        .ipc(serde_json::to_vec(&kt::VfsRequest {
+            path: drive_path,
+            action: kt::VfsAction::RemoveDirAll,
+        })?)
+        .send_and_await_response(5)??;
+    Ok(())
+}
 
 fn handle_remote_request(
@@ -637,30 +660,32 @@ fn handle_remote_request(
     source: &Address,
     request: &RemoteRequest,
     state: &mut State,
-) -> anyhow::Result<Option<Resp>> {
+) -> Resp {
     match request {
         RemoteRequest::Download(package) => {
             let Some(package_state) = state.packages.get(&package) else {
-                return Ok(Some(Resp::RemoteResponse(RemoteResponse::DownloadDenied)));
+                return Resp::RemoteResponse(RemoteResponse::DownloadDenied);
             };
             if !package_state.mirroring {
-                return Ok(Some(Resp::RemoteResponse(RemoteResponse::DownloadDenied)));
+                return Resp::RemoteResponse(RemoteResponse::DownloadDenied);
             }
             // get the .zip from VFS and attach as payload to response
-            let drive_name = format!("/{}/pkg", package);
-            let file_path = format!("/{}.zip", drive_name);
-            Request::new()
-                .target(Address::from_str("our@vfs:sys:uqbar")?)
+            let file_path = format!("/{}/pkg/{}.zip", package, package);
+            let Ok(Ok(_)) = Request::new()
+                .target(("our", "vfs", "sys", "uqbar"))
                 .ipc(serde_json::to_vec(&kt::VfsRequest {
                     path: file_path,
                     action: kt::VfsAction::Read,
-                })?)
-                .send_and_await_response(5)?
-                .unwrap();
-            // transfer will inherit the payload bytes we receive from VFS
+                }).unwrap())
+                .send_and_await_response(5) else {
+                return Resp::RemoteResponse(RemoteResponse::DownloadDenied);
+            };
+            // transfer will *inherit* the payload bytes we receive from VFS
             let file_name = format!("/{}.zip", package);
-            spawn_transfer(&our, &file_name, None, &source);
-            Ok(Some(Resp::RemoteResponse(RemoteResponse::DownloadApproved)))
+            match spawn_transfer(&our, &file_name, None, 60, &source) {
+                Ok(()) => Resp::RemoteResponse(RemoteResponse::DownloadApproved),
+                Err(_e) => Resp::RemoteResponse(RemoteResponse::DownloadDenied),
+            }
         }
     }
 }
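
Putting the reworked pieces together, the cross-node download path now runs as follows (all names from this diff):

    // 1. user -> local app store:     Req::LocalRequest(LocalRequest::Download { package, install_from })
    // 2. local -> mirror's app store: Req::RemoteRequest(RemoteRequest::Download(package))
    // 3. mirror: checks state.packages and the mirroring flag, reads <package>.zip
    //    from VFS, calls spawn_transfer(..), answers RemoteResponse::DownloadApproved
    //    (or DownloadDenied at any failure point)
    // 4. requester: records the package in requested_packages; when its ft_worker
    //    reports FTWorkerResult::ReceiveSuccess(name), installation proceeds via
    //    LocalRequest::NewPackage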
modules/app_store/ft_worker/Cargo.lock (generated): 36 changes
@@ -58,7 +58,7 @@ dependencies = [
 
 [[package]]
 name = "ft_worker"
-version = "0.1.0"
+version = "0.2.0"
 dependencies = [
  "anyhow",
  "bincode",
@@ -157,6 +157,22 @@ version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
+[[package]]
+name = "mime_guess"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
+dependencies = [
+ "mime",
+ "unicase",
+]
 
 [[package]]
 name = "percent-encoding"
 version = "2.3.1"
@@ -321,6 +337,15 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
+[[package]]
+name = "unicase"
+version = "2.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
+dependencies = [
+ "version_check",
+]
 
 [[package]]
 name = "unicode-bidi"
 version = "0.3.14"
@@ -357,11 +382,12 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
 [[package]]
 name = "uqbar_process_lib"
 version = "0.4.0"
-source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=b09d987#b09d9875edce1a230549cf56cf088f95e38d4abd"
+source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=2d17d75#2d17d75152e55ef3ed417c79312e209ca45b8dbb"
 dependencies = [
  "anyhow",
  "bincode",
  "http",
+ "mime_guess",
  "rand",
  "serde",
  "serde_json",
@@ -381,6 +407,12 @@ dependencies = [
  "percent-encoding",
 ]
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "wasi"
 version = "0.11.0+wasi-snapshot-preview1"
modules/app_store/ft_worker/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "ft_worker"
-version = "0.1.0"
+version = "0.2.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -16,7 +16,7 @@ bincode = "1.3.3"
 rand = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "b09d987" }
+uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "2d17d75" }
 wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "efcc759" }
 
 [lib]
@@ -1,5 +1,5 @@
 use serde::{Deserialize, Serialize};
-use uqbar_process_lib::uqbar::process::standard::*;
+use uqbar_process_lib::*;
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct FileTransferContext {
@@ -13,9 +13,9 @@ pub struct FileTransferContext {
 /// in order to prompt them to spawn a worker
 #[derive(Debug, Serialize, Deserialize)]
 pub enum FTWorkerCommand {
+    /// make sure to attach file itself as payload
     Send {
-        // make sure to attach file itself as payload
-        target: String, // annoying, but this is Address
+        target: Address,
         file_name: String,
         timeout: u64,
     },
@@ -32,10 +32,12 @@ pub enum FTWorkerCommand {
 #[derive(Debug, Serialize, Deserialize)]
 pub enum FTWorkerResult {
     SendSuccess,
-    ReceiveSuccess(String), // name of file, bytes in payload
+    /// string is name of file. bytes in payload
+    ReceiveSuccess(String),
     Err(TransferError),
 }
 
+/// the possible errors that can be returned to the parent inside `FTWorkerResult`
 #[derive(Debug, Serialize, Deserialize)]
 pub enum TransferError {
     TargetOffline,
@@ -44,47 +46,48 @@ pub enum TransferError {
     SourceFailed,
 }
 
+/// A helper function to spawn a worker and initialize a file transfer.
+/// The outcome will be sent as an [`FTWorkerResult`] to the caller process.
+///
+/// if `file_bytes` is None, expects to inherit payload!
+#[allow(dead_code)]
 pub fn spawn_transfer(
     our: &Address,
     file_name: &str,
-    file_bytes: Option<Vec<u8>>, // if None, expects to inherit payload!
+    file_bytes: Option<Vec<u8>>,
+    timeout: u64,
     to_addr: &Address,
-) {
+) -> anyhow::Result<()> {
     let transfer_id: u64 = rand::random();
     // spawn a worker and tell it to send the file
     let Ok(worker_process_id) = spawn(
-        Some(&transfer_id.to_string()),
-        "/ft_worker.wasm".into(),
-        &OnExit::None, // can set message-on-panic here
+        Some(transfer_id.to_string().as_str()),
+        &format!("{}/pkg/ft_worker.wasm", our.package_id()),
+        OnExit::None, // can set message-on-panic here
         &Capabilities::All,
         false, // not public
     ) else {
-        print_to_terminal(0, "file_transfer: failed to spawn worker!");
-        return;
+        return Err(anyhow::anyhow!("failed to spawn ft_worker!"));
     };
     // tell the worker what to do
     let payload_or_inherit = match file_bytes {
         Some(bytes) => Some(Payload { mime: None, bytes }),
         None => None,
     };
-    send_request(
-        &Address {
-            node: our.node.clone(),
-            process: worker_process_id,
-        },
-        &Request {
-            inherit: !payload_or_inherit.is_some(),
-            expects_response: Some(61),
-            ipc: serde_json::to_vec(&FTWorkerCommand::Send {
-                target: to_addr.to_string(),
+    let mut req = Request::new()
+        .target((our.node.as_ref(), worker_process_id))
+        .inherit(!payload_or_inherit.is_some())
+        .expects_response(timeout + 1) // don't call with 2^64 lol
+        .ipc(
+            serde_json::to_vec(&FTWorkerCommand::Send {
+                target: to_addr.clone(),
                 file_name: file_name.into(),
-                timeout: 60,
+                timeout,
             })
             .unwrap(),
-            metadata: None,
-        },
-        Some(
-            &serde_json::to_vec(&FileTransferContext {
+        )
+        .context(
+            serde_json::to_vec(&FileTransferContext {
                 file_name: file_name.into(),
                 file_size: match &payload_or_inherit {
                     Some(p) => Some(p.bytes.len() as u64),
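
The helper's new signature makes the timeout explicit and reports failure through a `Result` instead of printing and returning. The calling convention, as used by the app store earlier in this diff:

    // None for file_bytes means "inherit the payload already attached to the
    // current message"; 60 is the transfer timeout in seconds.
    spawn_transfer(&our, &file_name, None, 60, &source)?;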
@@ -93,39 +96,36 @@ pub fn spawn_transfer(
                 start_time: std::time::SystemTime::now(),
             })
             .unwrap(),
-        ),
-        payload_or_inherit.as_ref(),
-    );
+        );
+
+    if let Some(payload) = payload_or_inherit {
+        req = req.payload(payload);
+    }
+    req.send()
 }
 
-pub fn spawn_receive_transfer(our: &Address, ipc: &[u8]) {
+/// A helper function to allow a process to easily handle an incoming transfer
+/// from an ft_worker. Call this when you get the initial [`FTWorkerCommand::Receive`]
+/// and let it do the rest. The outcome will be sent as an [`FTWorkerResult`] inside
+/// a Response to the caller.
+#[allow(dead_code)]
+pub fn spawn_receive_transfer(our: &Address, ipc: &[u8]) -> anyhow::Result<()> {
     let Ok(FTWorkerCommand::Receive { transfer_id, .. }) = serde_json::from_slice(ipc) else {
-        print_to_terminal(0, "file_transfer: got weird request");
-        return;
+        return Err(anyhow::anyhow!("spawn_receive_transfer: got malformed request"));
     };
     let Ok(worker_process_id) = spawn(
-        Some(&transfer_id.to_string()),
-        "/ft_worker.wasm".into(),
-        &OnExit::None, // can set message-on-panic here
+        Some(transfer_id.to_string().as_str()),
+        &format!("{}/pkg/ft_worker.wasm", our.package_id()),
+        OnExit::None, // can set message-on-panic here
         &Capabilities::All,
         false, // not public
     ) else {
-        print_to_terminal(0, "file_transfer: failed to spawn worker!");
-        return;
+        return Err(anyhow::anyhow!("failed to spawn ft_worker!"));
     };
     // forward receive command to worker
-    send_request(
-        &Address {
-            node: our.node.clone(),
-            process: worker_process_id,
-        },
-        &Request {
-            inherit: true,
-            expects_response: None,
-            ipc: ipc.to_vec(),
-            metadata: None,
-        },
-        None,
-        None,
-    );
+    Request::new()
+        .target((our.node.as_ref(), worker_process_id))
+        .inherit(true)
+        .ipc(ipc)
+        .send()
 }
@@ -1,8 +1,6 @@
 use serde::{Deserialize, Serialize};
-//use uqbar_process_lib::uqbar::process::standard::*;
-use uqbar_process_lib::uqbar::process::standard::{Message as StdMessage, Request as StdRequest, Response as StdResponse, SendErrorKind};
-use uqbar_process_lib::{await_message, get_payload, print_to_terminal, send_and_await_response, send_request, send_response, Address, Message, Payload};
+use uqbar_process_lib::*;
+use uqbar_process_lib::println;
 
 mod ft_worker_lib;
 use ft_worker_lib::*;
@@ -22,12 +20,9 @@ pub enum FTWorkerProtocol {
     Finished,
 }
 
-struct Component;
-impl Guest for Component {
-    fn init(our: String) {
-        let our = Address::from_str(&our).unwrap();
-        print_to_terminal(1, &format!("{}: start", our.process));
+call_init!(init);
 
+fn init(our: Address) {
     let Ok(Message::Request { source: parent_process, ipc, .. }) = await_message() else {
         panic!("ft_worker: got bad init message");
     };
@@ -35,129 +30,106 @@ impl Guest for Component {
     let command = serde_json::from_slice::<FTWorkerCommand>(&ipc)
         .expect("ft_worker: got unparseable init message");
 
-    match command {
+    let Some(result) = (match command {
         FTWorkerCommand::Send {
             target,
             file_name,
             timeout,
-        } => {
-            let transfer_id: u64 = our.process.process().parse().unwrap();
+        } => Some(handle_send(&our, &target, &file_name, timeout)),
+        FTWorkerCommand::Receive {
+            file_name,
+            total_chunks,
+            timeout,
+            ..
+        } => handle_receive(parent_process, &file_name, total_chunks, timeout),
+    }) else { return };
+
+    Response::new()
+        .ipc(serde_json::to_vec(&result).unwrap())
+        .send()
+        .unwrap();
+
+    // job is done
+}
+
+fn handle_send(our: &Address, target: &Address, file_name: &str, timeout: u64) -> FTWorkerResult {
+    let transfer_id: u64 = our.process().parse().unwrap();
     let Some(payload) = get_payload() else {
-        print_to_terminal(0, "FTWorker wasn't given payload, exiting");
-        return
+        println!("ft_worker: wasn't given payload!");
+        return FTWorkerResult::Err(TransferError::SourceFailed)
     };
     let file_bytes = payload.bytes;
     let mut file_size = file_bytes.len() as u64;
     let mut offset: u64 = 0;
-    let mut chunk_size: u64 = 1048576; // 1MB
+    let chunk_size: u64 = 1048576; // 1MB, can be changed
     let total_chunks = (file_size as f64 / chunk_size as f64).ceil() as u64;
     // send a file to another worker
     // start by telling target to expect a file,
     // then upon reciving affirmative response,
     // send contents in chunks and wait for
     // acknowledgement.
-    match send_and_await_response(
-        &Address::from_str(&target).unwrap(),
-        &StdRequest {
-            inherit: false,
-            expects_response: Some(timeout),
-            ipc: serde_json::to_vec(&FTWorkerCommand::Receive {
+    let Ok(Ok(response)) = Request::to(target.clone())
+        .ipc(serde_json::to_vec(&FTWorkerCommand::Receive {
             transfer_id,
-            file_name,
+            file_name: file_name.to_string(),
             file_size,
             total_chunks,
             timeout,
-            })
-            .unwrap(),
-            metadata: None,
-        },
-        None,
-    ) {
-        Err(send_error) => {
-            respond_to_parent(FTWorkerResult::Err(match send_error.kind {
-                SendErrorKind::Offline => TransferError::TargetOffline,
-                SendErrorKind::Timeout => TransferError::TargetTimeout,
-            }))
-        }
-        Ok((opp_worker, StdMessage::Response((response, _)))) => {
+        }).unwrap())
+        .send_and_await_response(timeout) else {
+        return FTWorkerResult::Err(TransferError::TargetOffline)
+    };
+    let opp_worker = response.source();
+    let Ok(FTWorkerProtocol::Ready) = serde_json::from_slice(&response.ipc()) else {
+        return FTWorkerResult::Err(TransferError::TargetRejected)
+    };
|
|
||||||
let Ok(FTWorkerProtocol::Ready) = serde_json::from_slice(&response.ipc) else {
|
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
|
|
||||||
return;
|
|
||||||
};
|
};
|
||||||
// send file in chunks
|
// send file in chunks
|
||||||
loop {
|
loop {
|
||||||
if file_size < chunk_size {
|
if file_size < chunk_size {
|
||||||
// this is the last chunk, so we should expect a Finished response
|
// this is the last chunk, so we should expect a Finished response
|
||||||
chunk_size = file_size;
|
let _ = Request::to(opp_worker.clone())
|
||||||
let payload = Payload {
|
.ipc(vec![])
|
||||||
|
.payload(Payload {
|
||||||
mime: None,
|
mime: None,
|
||||||
bytes: file_bytes
|
bytes: file_bytes[offset as usize..offset as usize + file_size as usize]
|
||||||
[offset as usize..offset as usize + chunk_size as usize]
|
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
};
|
})
|
||||||
send_request(
|
.expects_response(timeout)
|
||||||
&opp_worker,
|
.send();
|
||||||
&StdRequest {
|
|
||||||
inherit: false,
|
|
||||||
expects_response: Some(timeout),
|
|
||||||
ipc: vec![],
|
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
Some(&payload),
|
|
||||||
);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
let payload = Payload {
|
let _ = Request::to(opp_worker.clone())
|
||||||
|
.ipc(vec![])
|
||||||
|
.payload(Payload {
|
||||||
mime: None,
|
mime: None,
|
||||||
bytes: file_bytes
|
bytes: file_bytes[offset as usize..offset as usize + chunk_size as usize].to_vec(),
|
||||||
[offset as usize..offset as usize + chunk_size as usize]
|
})
|
||||||
.to_vec(),
|
.send();
|
||||||
};
|
|
||||||
send_request(
|
|
||||||
&opp_worker,
|
|
||||||
&StdRequest {
|
|
||||||
inherit: false,
|
|
||||||
expects_response: None,
|
|
||||||
ipc: vec![],
|
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
Some(&payload),
|
|
||||||
);
|
|
||||||
file_size -= chunk_size;
|
file_size -= chunk_size;
|
||||||
offset += chunk_size;
|
offset += chunk_size;
|
||||||
}
|
}
|
||||||
// now wait for Finished response
|
// now wait for Finished response
|
||||||
let Ok(Message::Response { ipc, .. }) = await_message() else {
|
let Ok(Message::Response { ipc, .. }) = await_message() else {
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
|
return FTWorkerResult::Err(TransferError::TargetRejected)
|
||||||
return;
|
|
||||||
};
|
};
|
||||||
let Ok(FTWorkerProtocol::Finished) = serde_json::from_slice(&ipc) else {
|
let Ok(FTWorkerProtocol::Finished) = serde_json::from_slice(&ipc) else {
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
|
return FTWorkerResult::Err(TransferError::TargetRejected)
|
||||||
return;
|
|
||||||
};
|
};
|
||||||
// return success to parent
|
// return success to parent
|
||||||
respond_to_parent(FTWorkerResult::SendSuccess);
|
return FTWorkerResult::SendSuccess;
|
||||||
}
|
}
|
||||||
_ => respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected)),
|
|
||||||
}
|
fn handle_receive(
|
||||||
}
|
parent_process: Address,
|
||||||
FTWorkerCommand::Receive {
|
file_name: &str,
|
||||||
file_name,
|
total_chunks: u64,
|
||||||
total_chunks,
|
timeout: u64,
|
||||||
timeout,
|
) -> Option<FTWorkerResult> {
|
||||||
..
|
|
||||||
} => {
|
|
||||||
// send Ready response to counterparty
|
// send Ready response to counterparty
|
||||||
send_response(
|
Response::new()
|
||||||
&StdResponse {
|
.ipc(serde_json::to_vec(&FTWorkerProtocol::Ready).unwrap())
|
||||||
inherit: false,
|
.send()
|
||||||
ipc: serde_json::to_vec(&FTWorkerProtocol::Ready).unwrap(),
|
.unwrap();
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
);
|
|
||||||
// receive a file from a worker, then send it to parent
|
// receive a file from a worker, then send it to parent
|
||||||
// all messages will be chunks of file. when we receive the
|
// all messages will be chunks of file. when we receive the
|
||||||
// last chunk, send a Finished message to sender and Success to parent.
|
// last chunk, send a Finished message to sender and Success to parent.
|
||||||
@ -166,16 +138,13 @@ impl Guest for Component {
|
|||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
loop {
|
loop {
|
||||||
let Ok(Message::Request { .. }) = await_message() else {
|
let Ok(Message::Request { .. }) = await_message() else {
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
|
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
|
||||||
return;
|
|
||||||
};
|
};
|
||||||
if start_time.elapsed().as_secs() > timeout {
|
if start_time.elapsed().as_secs() > timeout {
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
|
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
let Some(payload) = get_payload() else {
|
let Some(payload) = get_payload() else {
|
||||||
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
|
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
|
||||||
return;
|
|
||||||
};
|
};
|
||||||
chunks_received += 1;
|
chunks_received += 1;
|
||||||
file_bytes.extend(payload.bytes);
|
file_bytes.extend(payload.bytes);
|
||||||
@ -184,42 +153,18 @@ impl Guest for Component {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// send Finished message to sender
|
// send Finished message to sender
|
||||||
send_response(
|
Response::new()
|
||||||
&StdResponse {
|
.ipc(serde_json::to_vec(&FTWorkerProtocol::Finished).unwrap())
|
||||||
inherit: false,
|
.send()
|
||||||
ipc: serde_json::to_vec(&FTWorkerProtocol::Finished).unwrap(),
|
.unwrap();
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
);
|
|
||||||
// send Success message to parent
|
// send Success message to parent
|
||||||
send_request(
|
Request::to(parent_process)
|
||||||
&parent_process,
|
.ipc(serde_json::to_vec(&FTWorkerResult::ReceiveSuccess(file_name.to_string())).unwrap())
|
||||||
&StdRequest {
|
.payload(Payload {
|
||||||
inherit: false,
|
|
||||||
expects_response: None,
|
|
||||||
ipc: serde_json::to_vec(&FTWorkerResult::ReceiveSuccess(file_name))
|
|
||||||
.unwrap(),
|
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
Some(&Payload {
|
|
||||||
mime: None,
|
mime: None,
|
||||||
bytes: file_bytes,
|
bytes: file_bytes,
|
||||||
}),
|
})
|
||||||
);
|
.send()
|
||||||
}
|
.unwrap();
|
||||||
}
|
None
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn respond_to_parent(result: FTWorkerResult) {
|
|
||||||
send_response(
|
|
||||||
&StdResponse {
|
|
||||||
inherit: false,
|
|
||||||
ipc: serde_json::to_vec(&result).unwrap(),
|
|
||||||
metadata: None,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
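The chunking in handle_send above is plain ceiling division plus a shrinking byte window. A standalone worked example of the same arithmetic (file size chosen for illustration):

    fn main() {
        let file_size: u64 = 2_621_440; // 2.5 MiB
        let chunk_size: u64 = 1_048_576; // 1 MiB, as in handle_send
        let total_chunks = (file_size as f64 / chunk_size as f64).ceil() as u64;
        assert_eq!(total_chunks, 3);
        // the loop sends [0, 1 MiB) and [1 MiB, 2 MiB) fire-and-forget, then the
        // 524_288-byte tail with expects_response(timeout) to get Finished back
    }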
@ -9,7 +9,6 @@
         "filesystem:sys:uqbar",
         "http_server:sys:uqbar",
         "http_client:sys:uqbar",
-        "encryptor:sys:uqbar",
         "net:sys:uqbar",
         "vfs:sys:uqbar",
         "kernel:sys:uqbar",
@ -21,6 +20,11 @@
                 }
             }
         ],
+        "grant_messaging": [
+            "http_server:sys:uqbar",
+            "terminal:terminal:uqbar",
+            "vfs:sys:uqbar"
+        ],
         "public": false
     }
 ]
@ -1,6 +1,6 @@
 {
     "package": "app_store",
     "publisher": "uqbar",
-    "version": [0, 1, 0],
-    "description": "A package manager + app store. This JSON field is optional and you can add whatever you want in addition to this."
+    "version": [0, 2, 0],
+    "description": "A package manager + app store."
 }
@ -6,8 +6,7 @@
     "request_networking": false,
     "request_messaging": [
         "http_bindings:http_bindings:uqbar",
-        "http_server:sys:uqbar",
-        "encryptor:sys:uqbar"
+        "http_server:sys:uqbar"
     ],
     "public": false
 }
@ -69,7 +69,7 @@ pub async fn send_and_await_response(
     }
     let id = process
         .process
-        .handle_request(source, target, request, None, payload)
+        .send_request(source, target, request, None, payload)
         .await;
     match id {
         Ok(id) => match process.process.get_specific_message_for_process(id).await {
@ -103,7 +103,7 @@ impl ProcessState {
     /// will only fail if process does not have capability to send to target.
     /// if the request has a timeout (expects response), start a task to track
     /// that timeout and return timeout error if it expires.
-    pub async fn handle_request(
+    pub async fn send_request(
         &mut self,
         fake_source: Option<t::Address>, // only used when kernel steps in to get/set state
         target: wit::Address,
@ -579,7 +579,7 @@ impl StandardHost for process::ProcessWasi {
     ) -> Result<()> {
         let id = self
             .process
-            .handle_request(None, target, request, context, payload)
+            .send_request(None, target, request, context, payload)
             .await;
         match id {
             Ok(_id) => Ok(()),
@ -599,7 +599,7 @@ impl StandardHost for process::ProcessWasi {
         for request in requests {
             let id = self
                 .process
-                .handle_request(None, request.0, request.1, request.2, request.3)
+                .send_request(None, request.0, request.1, request.2, request.3)
                 .await;
             match id {
                 Ok(_id) => continue,
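The rename from handle_request to send_request matches what the method does: it pushes an outbound request from a process into the kernel and returns an id that the eventual response is correlated against. A toy model of that id-correlation contract (deliberately not the kernel's real types):

    use std::collections::HashMap;

    struct Mailbox {
        next_id: u64,
        responses: HashMap<u64, Vec<u8>>, // response ipc keyed by request id
    }

    impl Mailbox {
        // analogous to ProcessState::send_request: allocate an id for the
        // outbound request; the caller keeps it to find the reply later
        fn send_request(&mut self) -> u64 {
            self.next_id += 1;
            self.next_id
        }
        // analogous to get_specific_message_for_process: fetch the response
        // carrying the matching id, if it has arrived
        fn get_specific_message(&mut self, id: u64) -> Option<Vec<u8>> {
            self.responses.remove(&id)
        }
    }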
24
src/kv.rs
@ -17,9 +17,9 @@ pub async fn kv(
     send_to_caps_oracle: CapMessageSender,
     home_directory_path: String,
 ) -> anyhow::Result<()> {
-    let vfs_path = format!("{}/vfs", &home_directory_path);
+    let kv_path = format!("{}/kv", &home_directory_path);
 
-    if let Err(e) = fs::create_dir_all(&vfs_path).await {
+    if let Err(e) = fs::create_dir_all(&kv_path).await {
         panic!("failed creating kv dir! {:?}", e);
     }
 
@ -59,7 +59,7 @@ pub async fn kv(
     let send_to_loop = send_to_loop.clone();
     let open_kvs = open_kvs.clone();
     let txs = txs.clone();
-    let vfs_path = vfs_path.clone();
+    let kv_path = kv_path.clone();
 
     tokio::spawn(async move {
         let mut queue_lock = queue.lock().await;
@ -72,7 +72,7 @@ pub async fn kv(
         send_to_loop.clone(),
         send_to_terminal.clone(),
         send_to_caps_oracle.clone(),
-        vfs_path.clone(),
+        kv_path.clone(),
     )
     .await
     {
@ -95,7 +95,7 @@ async fn handle_request(
     send_to_loop: MessageSender,
     send_to_terminal: PrintSender,
     send_to_caps_oracle: CapMessageSender,
-    vfs_path: String,
+    kv_path: String,
 ) -> Result<(), KvError> {
     let KernelMessage {
         id,
@ -132,7 +132,7 @@ async fn handle_request(
         open_kvs.clone(),
         send_to_caps_oracle.clone(),
         &request,
-        vfs_path.clone(),
+        kv_path.clone(),
     )
     .await?;
 
@ -328,7 +328,7 @@ async fn check_caps(
     open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
     mut send_to_caps_oracle: CapMessageSender,
     request: &KvRequest,
-    vfs_path: String,
+    kv_path: String,
 ) -> Result<(), KvError> {
     let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
     let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@ -418,8 +418,8 @@ async fn check_caps(
     }
 
     let db_path = format!(
-        "{}/{}/kv/{}",
-        vfs_path,
+        "{}/{}/{}",
+        kv_path,
         request.package_id.to_string(),
         request.db.to_string()
     );
@ -431,11 +431,7 @@ async fn check_caps(
             Ok(())
         }
         KvAction::Backup { .. } => {
-            if source.process != *STATE_PROCESS_ID {
-                return Err(KvError::NoCap {
-                    error: request.action.to_string(),
-                });
-            }
+            // caps
             Ok(())
         }
     }
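The net effect of the vfs_path-to-kv_path change is that key-value databases get their own directory tree instead of living under the VFS. A sketch of the resulting path construction (the home directory, package id, and db name here are hypothetical):

    fn main() {
        // before: {home}/vfs/{package_id}/kv/{db}
        // after:  {home}/kv/{package_id}/{db}
        let home_directory_path = "/tmp/uqbar-node"; // hypothetical
        let kv_path = format!("{}/kv", home_directory_path);
        let db_path = format!("{}/{}/{}", kv_path, "app_store:uqbar", "downloads");
        assert_eq!(db_path, "/tmp/uqbar-node/kv/app_store:uqbar/downloads");
    }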
@ -472,6 +472,7 @@ async fn main() {
     ));
     // if a runtime task exits, try to recover it,
     // unless it was terminal signaling a quit
+    // or a SIG* was intercepted
     let quit_msg: String = tokio::select! {
         Some(Ok(res)) = tasks.join_next() => {
             format!(
@ -521,6 +522,13 @@ async fn main() {
 
     // abort all remaining tasks
     tasks.shutdown().await;
+    let stdout = std::io::stdout();
+    let mut stdout = stdout.lock();
+    let _ = crossterm::execute!(
+        stdout,
+        crossterm::event::DisableBracketedPaste,
+        crossterm::terminal::SetTitle(""),
+    );
     let _ = crossterm::terminal::disable_raw_mode();
     println!("\r\n\x1b[38;5;196m{}\x1b[0m", quit_msg);
     return;
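The added crossterm calls undo terminal modes the runtime set at startup, so the user's shell is left usable whether the quit came from the terminal task or an intercepted signal. The same cleanup as a standalone sketch, using the crossterm API as it appears elsewhere in this codebase:

    use std::io::Write;

    fn restore_terminal() {
        let stdout = std::io::stdout();
        let mut stdout = stdout.lock();
        // leave bracketed-paste mode and reset the window title first;
        // errors are ignored because the process is exiting anyway
        let _ = crossterm::execute!(
            stdout,
            crossterm::event::DisableBracketedPaste,
            crossterm::terminal::SetTitle(""),
        );
        let _ = crossterm::terminal::disable_raw_mode();
    }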
@ -1,11 +1,11 @@
 {
   "files": {
     "main.css": "/static/css/main.115771e3.css",
-    "main.js": "/static/js/main.ca6eb8b1.js",
+    "main.js": "/static/js/main.87f3f3b4.js",
     "index.html": "/index.html"
   },
   "entrypoints": [
     "static/css/main.115771e3.css",
-    "static/js/main.ca6eb8b1.js"
+    "static/js/main.87f3f3b4.js"
   ]
 }
File diff suppressed because one or more lines are too long
@ -1,2 +0,0 @@
-:root{--uq-vlightpurple:#d9dcfc;--uq-lightpurple:#c7cafa;--uq-purple:#727bf2;--uq-darkpurple:#5761ef;--midnightpurp:#0a1170;--forgottenpurp:#45475e;--uq-lightpink:#f3ced2;--uq-pink:#dd7c8a;--uq-darkpink:#cd3c52;--blush:#d55d6f;--celeste:#adebe5;--lturq:#6bdbd0;--turq:#3acfc0;--celadon:#21897e;--deep-jungle:#14524c;--old-mint:#659792;--washed-gray:rgba(0,0,0,.03);--light-gray:#cbcbcb;--medium-gray:#656565;--dark-gray:rgba(0,0,0,.5);--charcoal:#333}body{background-color:#027;color:#fff;font-family:Press Start\ 2P;font-size:16px;margin:0;padding:0}h1,h2,h3,h4,h5,h6{line-height:1.5em}.col{flex-direction:column}.col,.row{align-items:center;display:flex}.row{flex-direction:row}input[type=password],input[type=text]{border:1px solid #ccc;border-radius:4px;padding:.5em .75em}button,input[type=password],input[type=text]{box-sizing:border-box;font-size:1em;margin-bottom:.5em;width:100%}button{background-color:#dd7c8a;background-color:var(--uq-pink);border:1px solid #dd7c8a;border:1px solid var(--uq-pink);border-radius:4px;box-shadow:0 1px 2px #cd3c52;box-shadow:0 1px 2px var(--uq-darkpink);color:#fff;cursor:pointer;font-family:Press Start\ 2P;margin-top:1em;padding:.75em 1em;transition:all .1s}button:hover{background-color:#cd3c52;background-color:var(--uq-darkpink);border:1px solid #cd3c52;border:1px solid var(--uq-darkpink)}button:disabled{background-color:#656565;background-color:var(--medium-gray);border:1px solid #656565;border:1px solid var(--medium-gray);cursor:not-allowed}#signup-page{display:flex;flex:1 1;height:100%;max-width:calc(100vw - 4em);padding:2em;width:100%}.key-err{color:red;font-size:12px;margin:1em}label{font-size:.8em}.login-row{align-self:flex-start;margin:.5em}.label-row{align-self:flex-start;margin:.5em 0}.tooltip-container{cursor:pointer;display:inline-block;position:relative}.tooltip-button{border:2px solid #fff;border-radius:50%;font-size:16px;height:1em;line-height:.5em;margin-left:.5em;text-align:center;width:1em}.tooltip-content{background-color:#555;border-radius:6px;color:#fff;font-family:sans-serif;left:50%;line-height:1.5em;margin-left:-60px;min-width:200px;opacity:0;padding:.5em;position:absolute;text-align:center;top:125%;transition:opacity .3s;visibility:hidden;z-index:1}.tooltip-content:after{border:5px solid transparent;border-bottom-color:#555;bottom:100%;content:"";left:30%;margin-left:-5px;position:absolute}.tooltip-container:hover .tooltip-content{opacity:1;visibility:visible}#signup-form{max-width:calc(100vw - 2em);padding:1em;width:calc(420px + 2em)}#signup-form-header{margin-bottom:1em}#signup-form-header button{max-width:calc(100vw - 2em);width:calc(420px + 2em)}#current-address{font-family:Courier New,Courier,monospace;font-size:1.25em;font-weight:600;margin-left:1em}.current-username{border:1px solid #fff;border-radius:4px;cursor:pointer;margin:1em 0;padding:.5em}.current-username:hover{background-color:#fff;border:1px solid #fff;color:#027}#connect-wallet{max-width:420px}#wallet-required-message{line-height:1.5em;max-width:500px;text-align:center}#loading h3{text-align:center}#loader{display:inline-block;height:48px;margin-top:16px;position:relative;width:48px}#loader div{-webkit-animation:loader 1.2s cubic-bezier(.5,0,.5,1) infinite;animation:loader 1.2s cubic-bezier(.5,0,.5,1) infinite;border:6px solid transparent;border-radius:50%;border-top-color:#fff;box-sizing:border-box;display:block;height:36px;margin:6px;position:absolute;width:36px}#loader div:first-child{-webkit-animation-delay:-.45s;animation-delay:-.45s}#loader div:nth-child(2){-webkit-animation-delay:-.3s;animation-delay:-.3s}#loader div:nth-child(3){-webkit-animation-delay:-.15s;animation-delay:-.15s}@-webkit-keyframes loader{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes loader{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.connect-modal{align-items:center;display:flex;justify-content:center}.connect-modal-content{background:#027;border-radius:15px;height:200px;padding:20px;position:fixed;top:120px;width:400px}.overlay-modal{background-color:hsla(0,0%,100%,.25);inset:0;position:fixed}.signup-form{max-width:400px;width:50vw}.direct-node-message{line-height:1.5em;margin-left:1em}.name-validity{color:red}.more-info-direct{cursor:pointer;margin:0 1em;text-decoration:underline}.more-info-direct:hover{color:#cd3c52;color:var(--uq-darkpink)}a{color:#dd7c8a;color:var(--uq-pink)}a:hover{color:#cd3c52;color:var(--uq-darkpink)}.reset-networking{cursor:pointer;font-size:.8em;margin-left:1em;margin-top:1em;text-decoration:underline;width:100%}.reset-networking:hover{color:#cd3c52;color:var(--uq-darkpink)}.name-err{color:red;font-size:.8em;line-height:1.5em;margin-bottom:1em}
-/*# sourceMappingURL=main.e62f8e3a.css.map*/
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,139 +0,0 @@
-/*!
-Copyright (c) 2015 Jed Watson.
-Based on code that is Copyright 2013-2015, Facebook, Inc.
-All rights reserved.
-*/
-
-/*!
- * Adapted from jQuery UI core
- *
- * http://jqueryui.com
- *
- * Copyright 2014 jQuery Foundation and other contributors
- * Released under the MIT license.
- * http://jquery.org/license
- *
- * http://api.jqueryui.com/category/ui-core/
- */
-
-/*!
- * The buffer module from node.js, for the browser.
- *
- * @author Feross Aboukhadijeh <https://feross.org>
- * @license MIT
- */
-
-/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh <https://feross.org/opensource> */
-
-/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
-
-/**
- * @license React
- * react-dom.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @license React
- * react-jsx-runtime.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @license React
- * react.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @license React
- * scheduler.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @license React
- * use-sync-external-store-shim.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @license React
- * use-sync-external-store-shim/with-selector.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/**
- * @remix-run/router v1.10.0
- *
- * Copyright (c) Remix Software Inc.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE.md file in the root directory of this source tree.
- *
- * @license MIT
- */
-
-/**
- * React Router DOM v6.17.0
- *
- * Copyright (c) Remix Software Inc.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE.md file in the root directory of this source tree.
- *
- * @license MIT
- */
-
-/**
- * React Router v6.17.0
- *
- * Copyright (c) Remix Software Inc.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE.md file in the root directory of this source tree.
- *
- * @license MIT
- */
-
-/**
- * [js-sha3]{@link https://github.com/emn178/js-sha3}
- *
- * @version 0.5.7
- * @author Chen, Yi-Cyuan [emn178@gmail.com]
- * @copyright Chen, Yi-Cyuan 2015-2016
- * @license MIT
- */
-
-/**
- * [js-sha3]{@link https://github.com/emn178/js-sha3}
- *
- * @version 0.8.0
- * @author Chen, Yi-Cyuan [emn178@gmail.com]
- * @copyright Chen, Yi-Cyuan 2015-2018
- * @license MIT
- */
@ -37,9 +37,9 @@ pub async fn sqlite(
     send_to_caps_oracle: CapMessageSender,
     home_directory_path: String,
 ) -> anyhow::Result<()> {
-    let vfs_path = format!("{}/vfs", &home_directory_path);
+    let sqlite_path = format!("{}/sqlite", &home_directory_path);
 
-    if let Err(e) = fs::create_dir_all(&vfs_path).await {
+    if let Err(e) = fs::create_dir_all(&sqlite_path).await {
         panic!("failed creating sqlite dir! {:?}", e);
     }
 
@ -79,7 +79,7 @@ pub async fn sqlite(
     let open_dbs = open_dbs.clone();
 
     let txs = txs.clone();
-    let vfs_path = vfs_path.clone();
+    let sqlite_path = sqlite_path.clone();
 
     tokio::spawn(async move {
         let mut queue_lock = queue.lock().await;
@ -92,7 +92,7 @@ pub async fn sqlite(
         send_to_loop.clone(),
         send_to_terminal.clone(),
         send_to_caps_oracle.clone(),
-        vfs_path.clone(),
+        sqlite_path.clone(),
     )
     .await
     {
@ -115,7 +115,7 @@ async fn handle_request(
     send_to_loop: MessageSender,
     send_to_terminal: PrintSender,
     send_to_caps_oracle: CapMessageSender,
-    vfs_path: String,
+    sqlite_path: String,
 ) -> Result<(), SqliteError> {
     let KernelMessage {
         id,
@ -152,7 +152,7 @@ async fn handle_request(
         open_dbs.clone(),
         send_to_caps_oracle.clone(),
         &request,
-        vfs_path.clone(),
+        sqlite_path.clone(),
     )
     .await?;
 
@ -284,8 +284,17 @@ async fn handle_request(
             (serde_json::to_vec(&SqliteResponse::Ok).unwrap(), None)
         }
         SqliteAction::Backup => {
-            // execute WAL flush.
-            //
+            for db_ref in open_dbs.iter() {
+                let db = db_ref.value().lock().await;
+                let result: rusqlite::Result<()> = db
+                    .query_row("PRAGMA wal_checkpoint(TRUNCATE)", [], |_| Ok(()))
+                    .map(|_| ());
+                if let Err(e) = result {
+                    return Err(SqliteError::RusqliteError {
+                        error: e.to_string(),
+                    });
+                }
+            }
             (serde_json::to_vec(&SqliteResponse::Ok).unwrap(), None)
         }
     };
@ -342,7 +351,7 @@ async fn check_caps(
     open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
     mut send_to_caps_oracle: CapMessageSender,
     request: &SqliteRequest,
-    vfs_path: String,
+    sqlite_path: String,
 ) -> Result<(), SqliteError> {
     let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
     let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@ -429,8 +438,8 @@ async fn check_caps(
     }
 
     let db_path = format!(
-        "{}/{}/sqlite/{}",
-        vfs_path,
+        "{}/{}/{}",
+        sqlite_path,
         request.package_id.to_string(),
         request.db.to_string()
     );
@ -449,11 +458,6 @@ async fn check_caps(
         }
         SqliteAction::Backup => {
             // flushing WALs for backup
-            // check caps.
-            for db_ref in open_dbs.iter() {
-                let db = db_ref.value().lock().await;
-                db.execute("pragma wal_checkpoint", [])?;
-            }
             Ok(())
         }
     }
@ -532,7 +536,7 @@ fn make_error_message(our_name: String, km: &KernelMessage, error: SqliteError)
     id: km.id,
     source: Address {
         node: our_name.clone(),
-        process: KV_PROCESS_ID.clone(),
+        process: SQLITE_PROCESS_ID.clone(),
     },
     target: match &km.rsvp {
         None => km.source.clone(),
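The Backup action now does the WAL flush in the request handler rather than the capability check, and switches to query_row, presumably because PRAGMA wal_checkpoint returns a result row, which rusqlite's execute rejects. The checkpoint call in isolation, as a hedged rusqlite sketch:

    use rusqlite::Connection;

    fn checkpoint(db: &Connection) -> rusqlite::Result<()> {
        // TRUNCATE moves every frame in the write-ahead log into the main
        // database file and resets the log, leaving one file to back up
        db.query_row("PRAGMA wal_checkpoint(TRUNCATE)", [], |_row| Ok(()))
    }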
164
src/state.rs
@ -218,7 +218,7 @@ async fn handle_request(
         }
     }
     StateAction::Backup => {
-        let checkpoint_dir = format!("{}/vfs/kernel_backup", &home_directory_path);
+        let checkpoint_dir = format!("{}/kernel/backup", &home_directory_path);
 
         if Path::new(&checkpoint_dir).exists() {
             fs::remove_dir_all(&checkpoint_dir).await?;
@ -356,7 +356,7 @@ async fn bootstrap(
 
     let packages = get_zipped_packages().await;
 
-    for (package_name, mut package) in packages {
+    for (package_name, mut package) in packages.clone() {
         // special case tester: only load it in if in simulation mode
         if package_name == "tester" {
             #[cfg(not(feature = "simulation-mode"))]
@ -496,58 +496,6 @@ async fn bootstrap(
         }
     }
 
-        // grant capabilities to other initially spawned processes, distro
-        if let Some(to_grant) = &entry.grant_messaging {
-            for value in to_grant {
-                let mut capability = None;
-                let mut to_process = None;
-                match value {
-                    serde_json::Value::String(process_name) => {
-                        if let Ok(parsed_process_id) = ProcessId::from_str(process_name) {
-                            capability = Some(Capability {
-                                issuer: Address {
-                                    node: our_name.to_string(),
-                                    process: ProcessId::from_str(process_name).unwrap(),
-                                },
-                                params: "\"messaging\"".into(),
-                            });
-                            to_process = Some(parsed_process_id);
-                        }
-                    }
-                    serde_json::Value::Object(map) => {
-                        if let Some(process_name) = map.get("process") {
-                            if let Ok(parsed_process_id) =
-                                ProcessId::from_str(&process_name.as_str().unwrap())
-                            {
-                                if let Some(params) = map.get("params") {
-                                    capability = Some(Capability {
-                                        issuer: Address {
-                                            node: our_name.to_string(),
-                                            process: ProcessId::from_str(
-                                                process_name.as_str().unwrap(),
-                                            )
-                                            .unwrap(),
-                                        },
-                                        params: params.to_string(),
-                                    });
-                                    to_process = Some(parsed_process_id);
-                                }
-                            }
-                        }
-                    }
-                    _ => {
-                        continue;
-                    }
-                }
-
-                if let Some(cap) = capability {
-                    if let Some(process) = process_map.get_mut(&to_process.unwrap()) {
-                        process.capabilities.insert(cap);
-                    }
-                }
-            }
-        }
-
     if entry.request_networking {
         requested_caps.insert(Capability {
             issuer: Address {
@ -597,6 +545,114 @@ async fn bootstrap(
         );
     }
     }
+    // second loop: go and grant_capabilities to processes
+    // can't do this in first loop because we need to have all processes in the map first
+    for (package_name, mut package) in packages {
+        // special case tester: only load it in if in simulation mode
+        if package_name == "tester" {
+            #[cfg(not(feature = "simulation-mode"))]
+            continue;
+            #[cfg(feature = "simulation-mode")]
+            {}
+        }
+
+        // get and read manifest.json
+        let Ok(mut package_manifest_zip) = package.by_name("manifest.json") else {
+            println!(
+                "fs: missing manifest for package {}, skipping",
+                package_name
+            );
+            continue;
+        };
+        let mut manifest_content = Vec::new();
+        package_manifest_zip
+            .read_to_end(&mut manifest_content)
+            .unwrap();
+        drop(package_manifest_zip);
+        let package_manifest = String::from_utf8(manifest_content)?;
+        let package_manifest = serde_json::from_str::<Vec<PackageManifestEntry>>(&package_manifest)
+            .expect("fs: manifest parse error");
+
+        // get and read metadata.json
+        let Ok(mut package_metadata_zip) = package.by_name("metadata.json") else {
+            println!(
+                "fs: missing metadata for package {}, skipping",
+                package_name
+            );
+            continue;
+        };
+        let mut metadata_content = Vec::new();
+        package_metadata_zip
+            .read_to_end(&mut metadata_content)
+            .unwrap();
+        drop(package_metadata_zip);
+        let package_metadata: serde_json::Value =
+            serde_json::from_slice(&metadata_content).expect("fs: metadata parse error");
+
+        println!("fs: found package metadata: {:?}\r", package_metadata);
+
+        let package_name = package_metadata["package"]
+            .as_str()
+            .expect("fs: metadata parse error: bad package name");
+
+        let package_publisher = package_metadata["publisher"]
+            .as_str()
+            .expect("fs: metadata parse error: bad publisher name");
+
+        // for each process-entry in manifest.json:
+        for entry in package_manifest {
+            let our_process_id = format!(
+                "{}:{}:{}",
+                entry.process_name, package_name, package_publisher
+            );
+
+            // grant capabilities to other initially spawned processes, distro
+            if let Some(to_grant) = &entry.grant_messaging {
+                for value in to_grant {
+                    match value {
+                        serde_json::Value::String(process_name) => {
+                            if let Ok(parsed_process_id) = ProcessId::from_str(process_name) {
+                                if let Some(process) = process_map.get_mut(&parsed_process_id) {
+                                    process.capabilities.insert(Capability {
+                                        issuer: Address {
+                                            node: our_name.to_string(),
+                                            process: ProcessId::from_str(&our_process_id).unwrap(),
+                                        },
+                                        params: "\"messaging\"".into(),
+                                    });
+                                }
+                            }
+                        }
+                        serde_json::Value::Object(map) => {
+                            if let Some(process_name) = map.get("process") {
+                                if let Ok(parsed_process_id) =
+                                    ProcessId::from_str(&process_name.as_str().unwrap())
+                                {
+                                    if let Some(params) = map.get("params") {
+                                        if let Some(process) =
+                                            process_map.get_mut(&parsed_process_id)
+                                        {
+                                            process.capabilities.insert(Capability {
+                                                issuer: Address {
+                                                    node: our_name.to_string(),
+                                                    process: ProcessId::from_str(&our_process_id)
+                                                        .unwrap(),
+                                                },
+                                                params: params.to_string(),
+                                            });
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        _ => {
+                            continue;
+                        }
+                    }
+                }
+            }
+        }
+    }
     Ok(())
 }
 
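The bootstrap refactor above is a two-phase build: the first loop registers every process in process_map, and the second loop, run only once the map is complete, resolves grant_messaging entries, so a grant can name a process from a package that is unpacked later. A toy model of why one pass is not enough (plain strings standing in for the kernel's real types):

    use std::collections::HashMap;

    fn grant_two_phase(entries: &[(String, Vec<String>)]) -> HashMap<String, Vec<String>> {
        // phase 1: insert every process before resolving any grants
        let mut caps: HashMap<String, Vec<String>> =
            entries.iter().map(|(p, _)| (p.clone(), Vec::new())).collect();
        // phase 2: every lookup now succeeds regardless of package order;
        // in a single pass, a grant to a not-yet-inserted process is lost
        for (granter, grantees) in entries {
            for grantee in grantees {
                if let Some(c) = caps.get_mut(grantee) {
                    c.push(format!("\"messaging\" issued by {granter}"));
                }
            }
        }
        caps
    }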
@ -14,6 +14,7 @@ use crossterm::{
 use futures::{future::FutureExt, StreamExt};
 use std::fs::{read_to_string, OpenOptions};
 use std::io::{stdout, BufWriter, Write};
+use tokio::signal::unix::{signal, SignalKind};
 
 mod utils;
 
@ -129,6 +130,20 @@ pub async fn terminal(
         .unwrap();
     let mut log_writer = BufWriter::new(log_handle);
 
+    // use to trigger cleanup if receive signal to kill process
+    let mut sigalrm = signal(SignalKind::alarm()).expect("uqbar: failed to set up SIGALRM handler");
+    let mut sighup = signal(SignalKind::hangup()).expect("uqbar: failed to set up SIGHUP handler");
+    let mut sigint =
+        signal(SignalKind::interrupt()).expect("uqbar: failed to set up SIGINT handler");
+    let mut sigpipe = signal(SignalKind::pipe()).expect("uqbar: failed to set up SIGPIPE handler");
+    let mut sigquit = signal(SignalKind::quit()).expect("uqbar: failed to set up SIGQUIT handler");
+    let mut sigterm =
+        signal(SignalKind::terminate()).expect("uqbar: failed to set up SIGTERM handler");
+    let mut sigusr1 =
+        signal(SignalKind::user_defined1()).expect("uqbar: failed to set up SIGUSR1 handler");
+    let mut sigusr2 =
+        signal(SignalKind::user_defined2()).expect("uqbar: failed to set up SIGUSR2 handler");
+
     loop {
         let event = reader.next().fuse();
 
@ -172,7 +187,7 @@ pub async fn terminal(
             Print(utils::truncate_in_place(&current_line, prompt_len, win_cols, (line_col, cursor_col))),
             cursor::MoveTo(cursor_col, win_rows),
         )?;
-        },
+        }
         Some(Ok(event)) = event => {
             let mut stdout = stdout.lock();
             match event {
@ -600,6 +615,14 @@ pub async fn terminal(
                 _ => {},
             }
         }
+        _ = sigalrm.recv() => return Err(anyhow::anyhow!("exiting due to SIGALRM")),
+        _ = sighup.recv() => return Err(anyhow::anyhow!("exiting due to SIGHUP")),
+        _ = sigint.recv() => return Err(anyhow::anyhow!("exiting due to SIGINT")),
+        _ = sigpipe.recv() => return Err(anyhow::anyhow!("exiting due to SIGPIPE")),
+        _ = sigquit.recv() => return Err(anyhow::anyhow!("exiting due to SIGQUIT")),
+        _ = sigterm.recv() => return Err(anyhow::anyhow!("exiting due to SIGTERM")),
+        _ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
+        _ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
     }
     }
     execute!(stdout.lock(), DisableBracketedPaste, terminal::SetTitle(""))?;
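With the tokio "signal" feature enabled in Cargo.toml above, the terminal loop races its crossterm event stream against unix signal streams and turns any signal into an error that unwinds to the cleanup path. The same pattern reduced to two signals, as a standalone sketch:

    use tokio::signal::unix::{signal, SignalKind};

    async fn run_until_signal() -> anyhow::Result<()> {
        let mut sigint = signal(SignalKind::interrupt())?;
        let mut sigterm = signal(SignalKind::terminate())?;
        loop {
            tokio::select! {
                // ...terminal events would be polled in other arms here...
                _ = sigint.recv() => return Err(anyhow::anyhow!("exiting due to SIGINT")),
                _ = sigterm.recv() => return Err(anyhow::anyhow!("exiting due to SIGTERM")),
            }
        }
    }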
@ -5,7 +5,6 @@ use std::collections::{HashMap, HashSet};
 use thiserror::Error;
 
 lazy_static::lazy_static! {
-    pub static ref ENCRYPTOR_PROCESS_ID: ProcessId = ProcessId::new(Some("encryptor"), "sys", "uqbar");
     pub static ref ETH_RPC_PROCESS_ID: ProcessId = ProcessId::new(Some("eth_rpc"), "sys", "uqbar");
     pub static ref HTTP_CLIENT_PROCESS_ID: ProcessId = ProcessId::new(Some("http_client"), "sys", "uqbar");
     pub static ref HTTP_SERVER_PROCESS_ID: ProcessId = ProcessId::new(Some("http_server"), "sys", "uqbar");
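Each of these statics is a ProcessId built from a (name, package, publisher) triple; the manifests above spell the same ids as "name:package:publisher" strings, which state.rs parses with ProcessId::from_str. A hedged sketch of the round-trip, assuming ProcessId implements FromStr and PartialEq as its uses in this diff suggest:

    use std::str::FromStr;

    // hypothetical usage; ProcessId comes from src/types.rs
    let built = ProcessId::new(Some("http_server"), "sys", "uqbar");
    let parsed = ProcessId::from_str("http_server:sys:uqbar").unwrap();
    assert_eq!(built, parsed);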