Merge pull request #118 from uqbar-dao/dr/app-store-refresh

Dr/app store refresh
dr-frmr 2023-12-27 00:53:11 -05:00 committed by GitHub
commit 6d4ad9fb7e
29 changed files with 896 additions and 926 deletions

Cargo.lock generated
View File

@@ -4698,6 +4698,7 @@ dependencies = [
"mio",
"num_cpus",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.5.4",
"tokio-macros",
"windows-sys 0.48.0",

View File

@@ -55,7 +55,9 @@ rand = "0.8.4"
reqwest = "0.11.18"
ring = "0.16.20"
rmp-serde = "1.1.2"
rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
route-recognizer = "0.3.1"
rusqlite = { version = "0.30.0", features = ["bundled"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
@@ -63,7 +65,7 @@ sha2 = "0.10"
snow = { version = "0.9.3", features = ["ring-resolver"] }
static_dir = "0.2.0"
thiserror = "1.0"
tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "sync"] }
tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "signal", "sync"] }
tokio-tungstenite = "0.20.1"
url = "2.4.1"
uuid = { version = "1.1.2", features = ["serde", "v4"] }
@@ -71,5 +73,3 @@ warp = "0.3.5"
wasmtime = "15.0.1"
wasmtime-wasi = "15.0.1"
zip = "0.6"
rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
rusqlite = { version = "0.30.0", features = ["bundled"] }

View File

@@ -206,6 +206,22 @@ version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "mime_guess"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
dependencies = [
"mime",
"unicase",
]
[[package]]
name = "percent-encoding"
version = "2.3.1"
@@ -387,6 +403,15 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "unicase"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
dependencies = [
"version_check",
]
[[package]]
name = "unicode-bidi"
version = "0.3.14"
@@ -423,11 +448,12 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "uqbar_process_lib"
version = "0.4.0"
source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=8342b1a#8342b1a131401fb5d141dab8c90e79aa6d2bc909"
source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=2d17d75#2d17d75152e55ef3ed417c79312e209ca45b8dbb"
dependencies = [
"anyhow",
"bincode",
"http",
"mime_guess",
"rand",
"serde",
"serde_json",

View File

@@ -3,8 +3,6 @@ name = "app_store"
version = "0.2.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[profile.release]
panic = "abort"
opt-level = "s"
@@ -17,7 +15,7 @@ rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha2 = "0.10.8"
uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "8342b1a" }
uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "2d17d75" }
wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "efcc759" }
[lib]

File diff suppressed because it is too large

View File

@@ -58,7 +58,7 @@ dependencies = [
[[package]]
name = "ft_worker"
version = "0.1.0"
version = "0.2.0"
dependencies = [
"anyhow",
"bincode",
@@ -157,6 +157,22 @@ version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "mime_guess"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
dependencies = [
"mime",
"unicase",
]
[[package]]
name = "percent-encoding"
version = "2.3.1"
@@ -321,6 +337,15 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "unicase"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
dependencies = [
"version_check",
]
[[package]]
name = "unicode-bidi"
version = "0.3.14"
@@ -357,11 +382,12 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "uqbar_process_lib"
version = "0.4.0"
source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=b09d987#b09d9875edce1a230549cf56cf088f95e38d4abd"
source = "git+ssh://git@github.com/uqbar-dao/process_lib.git?rev=2d17d75#2d17d75152e55ef3ed417c79312e209ca45b8dbb"
dependencies = [
"anyhow",
"bincode",
"http",
"mime_guess",
"rand",
"serde",
"serde_json",
@@ -381,6 +407,12 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"

View File

@@ -1,6 +1,6 @@
[package]
name = "ft_worker"
version = "0.1.0"
version = "0.2.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -16,7 +16,7 @@ bincode = "1.3.3"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "b09d987" }
uqbar_process_lib = { git = "ssh://git@github.com/uqbar-dao/process_lib.git", rev = "2d17d75" }
wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "efcc759" }
[lib]

View File

@@ -1,5 +1,5 @@
use serde::{Deserialize, Serialize};
use uqbar_process_lib::uqbar::process::standard::*;
use uqbar_process_lib::*;
#[derive(Debug, Serialize, Deserialize)]
pub struct FileTransferContext {
@@ -13,9 +13,9 @@ pub struct FileTransferContext
/// in order to prompt them to spawn a worker
#[derive(Debug, Serialize, Deserialize)]
pub enum FTWorkerCommand {
/// make sure to attach file itself as payload
Send {
// make sure to attach file itself as payload
target: String, // annoying, but this is Address
target: Address,
file_name: String,
timeout: u64,
},
@@ -32,10 +32,12 @@ pub enum FTWorkerCommand {
#[derive(Debug, Serialize, Deserialize)]
pub enum FTWorkerResult {
SendSuccess,
ReceiveSuccess(String), // name of file, bytes in payload
/// string is name of file. bytes in payload
ReceiveSuccess(String),
Err(TransferError),
}
/// the possible errors that can be returned to the parent inside `FTWorkerResult`
#[derive(Debug, Serialize, Deserialize)]
pub enum TransferError {
TargetOffline,
@@ -44,47 +46,48 @@ pub enum TransferError {
SourceFailed,
}
/// A helper function to spawn a worker and initialize a file transfer.
/// The outcome will be sent as an [`FTWorkerResult`] to the caller process.
///
/// if `file_bytes` is None, expects to inherit payload!
#[allow(dead_code)]
pub fn spawn_transfer(
our: &Address,
file_name: &str,
file_bytes: Option<Vec<u8>>, // if None, expects to inherit payload!
file_bytes: Option<Vec<u8>>,
timeout: u64,
to_addr: &Address,
) {
) -> anyhow::Result<()> {
let transfer_id: u64 = rand::random();
// spawn a worker and tell it to send the file
let Ok(worker_process_id) = spawn(
Some(&transfer_id.to_string()),
"/ft_worker.wasm".into(),
&OnExit::None, // can set message-on-panic here
Some(transfer_id.to_string().as_str()),
&format!("{}/pkg/ft_worker.wasm", our.package_id()),
OnExit::None, // can set message-on-panic here
&Capabilities::All,
false, // not public
) else {
print_to_terminal(0, "file_transfer: failed to spawn worker!");
return;
return Err(anyhow::anyhow!("failed to spawn ft_worker!"));
};
// tell the worker what to do
let payload_or_inherit = match file_bytes {
Some(bytes) => Some(Payload { mime: None, bytes }),
None => None,
};
send_request(
&Address {
node: our.node.clone(),
process: worker_process_id,
},
&Request {
inherit: !payload_or_inherit.is_some(),
expects_response: Some(61),
ipc: serde_json::to_vec(&FTWorkerCommand::Send {
target: to_addr.to_string(),
let mut req = Request::new()
.target((our.node.as_ref(), worker_process_id))
.inherit(!payload_or_inherit.is_some())
.expects_response(timeout + 1) // don't call with 2^64 lol
.ipc(
serde_json::to_vec(&FTWorkerCommand::Send {
target: to_addr.clone(),
file_name: file_name.into(),
timeout: 60,
timeout,
})
.unwrap(),
metadata: None,
},
Some(
&serde_json::to_vec(&FileTransferContext {
)
.context(
serde_json::to_vec(&FileTransferContext {
file_name: file_name.into(),
file_size: match &payload_or_inherit {
Some(p) => Some(p.bytes.len() as u64),
@@ -93,39 +96,36 @@ pub fn spawn_transfer(
start_time: std::time::SystemTime::now(),
})
.unwrap(),
),
payload_or_inherit.as_ref(),
);
);
if let Some(payload) = payload_or_inherit {
req = req.payload(payload);
}
req.send()
}
pub fn spawn_receive_transfer(our: &Address, ipc: &[u8]) {
/// A helper function to allow a process to easily handle an incoming transfer
/// from an ft_worker. Call this when you get the initial [`FTWorkerCommand::Receive`]
/// and let it do the rest. The outcome will be sent as an [`FTWorkerResult`] inside
/// a Response to the caller.
#[allow(dead_code)]
pub fn spawn_receive_transfer(our: &Address, ipc: &[u8]) -> anyhow::Result<()> {
let Ok(FTWorkerCommand::Receive { transfer_id, .. }) = serde_json::from_slice(ipc) else {
print_to_terminal(0, "file_transfer: got weird request");
return;
return Err(anyhow::anyhow!("spawn_receive_transfer: got malformed request"));
};
let Ok(worker_process_id) = spawn(
Some(&transfer_id.to_string()),
"/ft_worker.wasm".into(),
&OnExit::None, // can set message-on-panic here
Some(transfer_id.to_string().as_str()),
&format!("{}/pkg/ft_worker.wasm", our.package_id()),
OnExit::None, // can set message-on-panic here
&Capabilities::All,
false, // not public
) else {
print_to_terminal(0, "file_transfer: failed to spawn worker!");
return;
return Err(anyhow::anyhow!("failed to spawn ft_worker!"));
};
// forward receive command to worker
send_request(
&Address {
node: our.node.clone(),
process: worker_process_id,
},
&Request {
inherit: true,
expects_response: None,
ipc: ipc.to_vec(),
metadata: None,
},
None,
None,
);
Request::new()
.target((our.node.as_ref(), worker_process_id))
.inherit(true)
.ipc(ipc)
.send()
}
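To see how these helpers fit together: a parent process calls spawn_transfer, then later receives the outcome as a Response whose ipc is an FTWorkerResult. A minimal sketch under that assumption; the function name, file name, and peer address below are illustrative, not part of this commit:

    // sketch: hypothetical parent-side caller of spawn_transfer
    fn send_file_to_peer(our: &Address, peer: &Address, bytes: Vec<u8>) -> anyhow::Result<()> {
        // spawn the worker with a 60-second timeout, handing it the payload
        spawn_transfer(our, "my_file.txt", Some(bytes), 60, peer)?;
        // the worker reports back with an FTWorkerResult in the response ipc
        let Ok(Message::Response { ipc, .. }) = await_message() else {
            return Err(anyhow::anyhow!("no response from ft_worker"));
        };
        match serde_json::from_slice::<FTWorkerResult>(&ipc)? {
            FTWorkerResult::SendSuccess => Ok(()),
            other => Err(anyhow::anyhow!("transfer failed: {:?}", other)),
        }
    }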

View File

@@ -1,8 +1,6 @@
use serde::{Deserialize, Serialize};
//use uqbar_process_lib::uqbar::process::standard::*;
use uqbar_process_lib::uqbar::process::standard::{Message as StdMessage, Request as StdRequest, Response as StdResponse, SendErrorKind};
use uqbar_process_lib::{await_message, get_payload, print_to_terminal, send_and_await_response, send_request, send_response, Address, Message, Payload};
use uqbar_process_lib::*;
use uqbar_process_lib::println;
mod ft_worker_lib;
use ft_worker_lib::*;
@@ -22,204 +20,151 @@ pub enum FTWorkerProtocol {
Finished,
}
struct Component;
impl Guest for Component {
fn init(our: String) {
let our = Address::from_str(&our).unwrap();
print_to_terminal(1, &format!("{}: start", our.process));
call_init!(init);
let Ok(Message::Request { source: parent_process, ipc, .. }) = await_message() else {
fn init(our: Address) {
let Ok(Message::Request { source: parent_process, ipc, .. }) = await_message() else {
panic!("ft_worker: got bad init message");
};
let command = serde_json::from_slice::<FTWorkerCommand>(&ipc)
.expect("ft_worker: got unparseable init message");
let command = serde_json::from_slice::<FTWorkerCommand>(&ipc)
.expect("ft_worker: got unparseable init message");
match command {
FTWorkerCommand::Send {
target,
file_name,
timeout,
} => {
let transfer_id: u64 = our.process.process().parse().unwrap();
let Some(payload) = get_payload() else {
print_to_terminal(0, "FTWorker wasn't given payload, exiting");
return
};
let file_bytes = payload.bytes;
let mut file_size = file_bytes.len() as u64;
let mut offset: u64 = 0;
let mut chunk_size: u64 = 1048576; // 1MB
let total_chunks = (file_size as f64 / chunk_size as f64).ceil() as u64;
// send a file to another worker
// start by telling target to expect a file,
// then upon receiving affirmative response,
// send contents in chunks and wait for
// acknowledgement.
match send_and_await_response(
&Address::from_str(&target).unwrap(),
&StdRequest {
inherit: false,
expects_response: Some(timeout),
ipc: serde_json::to_vec(&FTWorkerCommand::Receive {
transfer_id,
file_name,
file_size,
total_chunks,
timeout,
})
.unwrap(),
metadata: None,
},
None,
) {
Err(send_error) => {
respond_to_parent(FTWorkerResult::Err(match send_error.kind {
SendErrorKind::Offline => TransferError::TargetOffline,
SendErrorKind::Timeout => TransferError::TargetTimeout,
}))
}
Ok((opp_worker, StdMessage::Response((response, _)))) => {
let Ok(FTWorkerProtocol::Ready) = serde_json::from_slice(&response.ipc) else {
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
return;
};
// send file in chunks
loop {
if file_size < chunk_size {
// this is the last chunk, so we should expect a Finished response
chunk_size = file_size;
let payload = Payload {
mime: None,
bytes: file_bytes
[offset as usize..offset as usize + chunk_size as usize]
.to_vec(),
};
send_request(
&opp_worker,
&StdRequest {
inherit: false,
expects_response: Some(timeout),
ipc: vec![],
metadata: None,
},
None,
Some(&payload),
);
break;
}
let payload = Payload {
mime: None,
bytes: file_bytes
[offset as usize..offset as usize + chunk_size as usize]
.to_vec(),
};
send_request(
&opp_worker,
&StdRequest {
inherit: false,
expects_response: None,
ipc: vec![],
metadata: None,
},
None,
Some(&payload),
);
file_size -= chunk_size;
offset += chunk_size;
}
// now wait for Finished response
let Ok(Message::Response { ipc, .. }) = await_message() else {
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
return;
};
let Ok(FTWorkerProtocol::Finished) = serde_json::from_slice(&ipc) else {
respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected));
return;
};
// return success to parent
respond_to_parent(FTWorkerResult::SendSuccess);
}
_ => respond_to_parent(FTWorkerResult::Err(TransferError::TargetRejected)),
}
}
FTWorkerCommand::Receive {
file_name,
total_chunks,
timeout,
..
} => {
// send Ready response to counterparty
send_response(
&StdResponse {
inherit: false,
ipc: serde_json::to_vec(&FTWorkerProtocol::Ready).unwrap(),
metadata: None,
},
None,
);
// receive a file from a worker, then send it to parent
// all messages will be chunks of file. when we receive the
// last chunk, send a Finished message to sender and Success to parent.
let mut file_bytes = Vec::new();
let mut chunks_received = 0;
let start_time = std::time::Instant::now();
loop {
let Ok(Message::Request { .. }) = await_message() else {
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
return;
};
if start_time.elapsed().as_secs() > timeout {
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
return;
}
let Some(payload) = get_payload() else {
respond_to_parent(FTWorkerResult::Err(TransferError::SourceFailed));
return;
};
chunks_received += 1;
file_bytes.extend(payload.bytes);
if chunks_received == total_chunks {
break;
}
}
// send Finished message to sender
send_response(
&StdResponse {
inherit: false,
ipc: serde_json::to_vec(&FTWorkerProtocol::Finished).unwrap(),
metadata: None,
},
None,
);
// send Success message to parent
send_request(
&parent_process,
&StdRequest {
inherit: false,
expects_response: None,
ipc: serde_json::to_vec(&FTWorkerResult::ReceiveSuccess(file_name))
.unwrap(),
metadata: None,
},
None,
Some(&Payload {
mime: None,
bytes: file_bytes,
}),
);
}
let Some(result) = (match command {
FTWorkerCommand::Send {
target,
file_name,
timeout,
} => Some(handle_send(&our, &target, &file_name, timeout)),
FTWorkerCommand::Receive {
file_name,
total_chunks,
timeout,
..
} => handle_receive(parent_process, &file_name, total_chunks, timeout),
}) else { return };
Response::new()
.ipc(serde_json::to_vec(&result).unwrap())
.send()
.unwrap();
// job is done
}
fn handle_send(our: &Address, target: &Address, file_name: &str, timeout: u64) -> FTWorkerResult {
let transfer_id: u64 = our.process().parse().unwrap();
let Some(payload) = get_payload() else {
println!("ft_worker: wasn't given payload!");
return FTWorkerResult::Err(TransferError::SourceFailed)
};
let file_bytes = payload.bytes;
let mut file_size = file_bytes.len() as u64;
let mut offset: u64 = 0;
let chunk_size: u64 = 1048576; // 1MB, can be changed
let total_chunks = (file_size as f64 / chunk_size as f64).ceil() as u64;
// send a file to another worker
// start by telling target to expect a file,
// then upon receiving affirmative response,
// send contents in chunks and wait for
// acknowledgement.
let Ok(Ok(response)) = Request::to(target.clone())
.ipc(serde_json::to_vec(&FTWorkerCommand::Receive {
transfer_id,
file_name: file_name.to_string(),
file_size,
total_chunks,
timeout,
}).unwrap())
.send_and_await_response(timeout) else {
return FTWorkerResult::Err(TransferError::TargetOffline)
};
let opp_worker = response.source();
let Ok(FTWorkerProtocol::Ready) = serde_json::from_slice(&response.ipc()) else {
return FTWorkerResult::Err(TransferError::TargetRejected)
};
// send file in chunks
loop {
if file_size < chunk_size {
// this is the last chunk, so we should expect a Finished response
let _ = Request::to(opp_worker.clone())
.ipc(vec![])
.payload(Payload {
mime: None,
bytes: file_bytes[offset as usize..offset as usize + file_size as usize]
.to_vec(),
})
.expects_response(timeout)
.send();
break;
}
let _ = Request::to(opp_worker.clone())
.ipc(vec![])
.payload(Payload {
mime: None,
bytes: file_bytes[offset as usize..offset as usize + chunk_size as usize].to_vec(),
})
.send();
file_size -= chunk_size;
offset += chunk_size;
}
// now wait for Finished response
let Ok(Message::Response { ipc, .. }) = await_message() else {
return FTWorkerResult::Err(TransferError::TargetRejected)
};
let Ok(FTWorkerProtocol::Finished) = serde_json::from_slice(&ipc) else {
return FTWorkerResult::Err(TransferError::TargetRejected)
};
// return success to parent
return FTWorkerResult::SendSuccess;
}
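The chunk accounting in handle_send is easy to gloss over: full 1 MiB chunks go out without expecting a response, and once the remainder is smaller than chunk_size the final short chunk is sent with expects_response set, so a single Finished round-trip closes the transfer. A worked example of the total_chunks arithmetic (values illustrative):

    fn main() {
        // mirrors the math at the top of handle_send
        let file_size: u64 = 2_621_440; // a 2.5 MiB file
        let chunk_size: u64 = 1_048_576; // 1 MiB, as hard-coded above
        let total_chunks = (file_size as f64 / chunk_size as f64).ceil() as u64;
        assert_eq!(total_chunks, 3); // two full chunks plus one 512 KiB tail
    }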
fn handle_receive(
parent_process: Address,
file_name: &str,
total_chunks: u64,
timeout: u64,
) -> Option<FTWorkerResult> {
// send Ready response to counterparty
Response::new()
.ipc(serde_json::to_vec(&FTWorkerProtocol::Ready).unwrap())
.send()
.unwrap();
// receive a file from a worker, then send it to parent
// all messages will be chunks of file. when we receive the
// last chunk, send a Finished message to sender and Success to parent.
let mut file_bytes = Vec::new();
let mut chunks_received = 0;
let start_time = std::time::Instant::now();
loop {
let Ok(Message::Request { .. }) = await_message() else {
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
};
if start_time.elapsed().as_secs() > timeout {
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
}
let Some(payload) = get_payload() else {
return Some(FTWorkerResult::Err(TransferError::SourceFailed))
};
chunks_received += 1;
file_bytes.extend(payload.bytes);
if chunks_received == total_chunks {
break;
}
}
}
fn respond_to_parent(result: FTWorkerResult) {
send_response(
&StdResponse {
inherit: false,
ipc: serde_json::to_vec(&result).unwrap(),
metadata: None,
},
None,
);
// send Finished message to sender
Response::new()
.ipc(serde_json::to_vec(&FTWorkerProtocol::Finished).unwrap())
.send()
.unwrap();
// send Success message to parent
Request::to(parent_process)
.ipc(serde_json::to_vec(&FTWorkerResult::ReceiveSuccess(file_name.to_string())).unwrap())
.payload(Payload {
mime: None,
bytes: file_bytes,
})
.send()
.unwrap();
None
}

View File

@@ -9,7 +9,6 @@
"filesystem:sys:uqbar",
"http_server:sys:uqbar",
"http_client:sys:uqbar",
"encryptor:sys:uqbar",
"net:sys:uqbar",
"vfs:sys:uqbar",
"kernel:sys:uqbar",
@@ -21,6 +20,11 @@
}
}
],
"grant_messaging": [
"http_server:sys:uqbar",
"terminal:terminal:uqbar",
"vfs:sys:uqbar"
],
"public": false
}
]
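The new grant_messaging field accepts either a bare process ID string or, per the bootstrap second pass later in this diff, an object carrying explicit capability params. A rough sketch of how one entry resolves to a (process, params) pair, with illustrative names:

    use serde_json::Value;

    // sketch: interpret one grant_messaging entry, mirroring the
    // bootstrap loop added further down in this diff
    fn grant_entry(value: &Value) -> Option<(String, String)> {
        match value {
            // a bare string grants the standard "messaging" capability
            Value::String(process) => Some((process.clone(), "\"messaging\"".into())),
            // object form: { "process": "...", "params": ... }
            Value::Object(map) => {
                let process = map.get("process")?.as_str()?.to_string();
                let params = map.get("params")?.to_string();
                Some((process, params))
            }
            _ => None,
        }
    }

Under that reading, each of the three string entries above grants plain messaging capability to the listed process.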

View File

@@ -1,6 +1,6 @@
{
"package": "app_store",
"publisher": "uqbar",
"version": [0, 1, 0],
"description": "A package manager + app store. This JSON field is optional and you can add whatever you want in addition to this."
"version": [0, 2, 0],
"description": "A package manager + app store."
}

View File

@@ -6,8 +6,7 @@
"request_networking": false,
"request_messaging": [
"http_bindings:http_bindings:uqbar",
"http_server:sys:uqbar",
"encryptor:sys:uqbar"
"http_server:sys:uqbar"
],
"public": false
}

View File

@@ -69,7 +69,7 @@ pub async fn send_and_await_response(
}
let id = process
.process
.handle_request(source, target, request, None, payload)
.send_request(source, target, request, None, payload)
.await;
match id {
Ok(id) => match process.process.get_specific_message_for_process(id).await {
@@ -103,7 +103,7 @@ impl ProcessState {
/// will only fail if process does not have capability to send to target.
/// if the request has a timeout (expects response), start a task to track
/// that timeout and return timeout error if it expires.
pub async fn handle_request(
pub async fn send_request(
&mut self,
fake_source: Option<t::Address>, // only used when kernel steps in to get/set state
target: wit::Address,

View File

@@ -579,7 +579,7 @@ impl StandardHost for process::ProcessWasi {
) -> Result<()> {
let id = self
.process
.handle_request(None, target, request, context, payload)
.send_request(None, target, request, context, payload)
.await;
match id {
Ok(_id) => Ok(()),
@@ -599,7 +599,7 @@ impl StandardHost for process::ProcessWasi {
for request in requests {
let id = self
.process
.handle_request(None, request.0, request.1, request.2, request.3)
.send_request(None, request.0, request.1, request.2, request.3)
.await;
match id {
Ok(_id) => continue,

View File

@@ -17,9 +17,9 @@ pub async fn kv(
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
) -> anyhow::Result<()> {
let vfs_path = format!("{}/vfs", &home_directory_path);
let kv_path = format!("{}/kv", &home_directory_path);
if let Err(e) = fs::create_dir_all(&vfs_path).await {
if let Err(e) = fs::create_dir_all(&kv_path).await {
panic!("failed creating kv dir! {:?}", e);
}
@@ -59,7 +59,7 @@ pub async fn kv(
let send_to_loop = send_to_loop.clone();
let open_kvs = open_kvs.clone();
let txs = txs.clone();
let vfs_path = vfs_path.clone();
let kv_path = kv_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
@@ -72,7 +72,7 @@
send_to_loop.clone(),
send_to_terminal.clone(),
send_to_caps_oracle.clone(),
vfs_path.clone(),
kv_path.clone(),
)
.await
{
@@ -95,7 +95,7 @@ async fn handle_request(
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
send_to_caps_oracle: CapMessageSender,
vfs_path: String,
kv_path: String,
) -> Result<(), KvError> {
let KernelMessage {
id,
@@ -132,7 +132,7 @@ async fn handle_request(
open_kvs.clone(),
send_to_caps_oracle.clone(),
&request,
vfs_path.clone(),
kv_path.clone(),
)
.await?;
@@ -328,7 +328,7 @@ async fn check_caps(
open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
mut send_to_caps_oracle: CapMessageSender,
request: &KvRequest,
vfs_path: String,
kv_path: String,
) -> Result<(), KvError> {
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@@ -418,8 +418,8 @@
}
let db_path = format!(
"{}/{}/kv/{}",
vfs_path,
"{}/{}/{}",
kv_path,
request.package_id.to_string(),
request.db.to_string()
);
@@ -431,11 +431,7 @@
Ok(())
}
KvAction::Backup { .. } => {
if source.process != *STATE_PROCESS_ID {
return Err(KvError::NoCap {
error: request.action.to_string(),
});
}
// caps
Ok(())
}
}
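Net effect of the renames in this file: key-value databases move out of the vfs tree into a dedicated kv directory. A sketch of the resulting layout, with illustrative package and db names:

    // before this commit: {home}/vfs/{package_id}/kv/{db}
    // after this commit:  {home}/kv/{package_id}/{db}
    fn kv_db_path(home_directory_path: &str, package_id: &str, db: &str) -> String {
        let kv_path = format!("{}/kv", home_directory_path);
        format!("{}/{}/{}", kv_path, package_id, db)
    }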

View File

@@ -472,6 +472,7 @@ async fn main() {
));
// if a runtime task exits, try to recover it,
// unless it was terminal signaling a quit
// or a SIG* was intercepted
let quit_msg: String = tokio::select! {
Some(Ok(res)) = tasks.join_next() => {
format!(
@@ -521,6 +522,13 @@
// abort all remaining tasks
tasks.shutdown().await;
let stdout = std::io::stdout();
let mut stdout = stdout.lock();
let _ = crossterm::execute!(
stdout,
crossterm::event::DisableBracketedPaste,
crossterm::terminal::SetTitle(""),
);
let _ = crossterm::terminal::disable_raw_mode();
println!("\r\n\x1b[38;5;196m{}\x1b[0m", quit_msg);
return;

View File

@@ -1,11 +1,11 @@
{
"files": {
"main.css": "/static/css/main.115771e3.css",
"main.js": "/static/js/main.ca6eb8b1.js",
"main.js": "/static/js/main.87f3f3b4.js",
"index.html": "/index.html"
},
"entrypoints": [
"static/css/main.115771e3.css",
"static/js/main.ca6eb8b1.js"
"static/js/main.87f3f3b4.js"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -1,2 +0,0 @@
:root{--uq-vlightpurple:#d9dcfc;--uq-lightpurple:#c7cafa;--uq-purple:#727bf2;--uq-darkpurple:#5761ef;--midnightpurp:#0a1170;--forgottenpurp:#45475e;--uq-lightpink:#f3ced2;--uq-pink:#dd7c8a;--uq-darkpink:#cd3c52;--blush:#d55d6f;--celeste:#adebe5;--lturq:#6bdbd0;--turq:#3acfc0;--celadon:#21897e;--deep-jungle:#14524c;--old-mint:#659792;--washed-gray:rgba(0,0,0,.03);--light-gray:#cbcbcb;--medium-gray:#656565;--dark-gray:rgba(0,0,0,.5);--charcoal:#333}body{background-color:#027;color:#fff;font-family:Press Start\ 2P;font-size:16px;margin:0;padding:0}h1,h2,h3,h4,h5,h6{line-height:1.5em}.col{flex-direction:column}.col,.row{align-items:center;display:flex}.row{flex-direction:row}input[type=password],input[type=text]{border:1px solid #ccc;border-radius:4px;padding:.5em .75em}button,input[type=password],input[type=text]{box-sizing:border-box;font-size:1em;margin-bottom:.5em;width:100%}button{background-color:#dd7c8a;background-color:var(--uq-pink);border:1px solid #dd7c8a;border:1px solid var(--uq-pink);border-radius:4px;box-shadow:0 1px 2px #cd3c52;box-shadow:0 1px 2px var(--uq-darkpink);color:#fff;cursor:pointer;font-family:Press Start\ 2P;margin-top:1em;padding:.75em 1em;transition:all .1s}button:hover{background-color:#cd3c52;background-color:var(--uq-darkpink);border:1px solid #cd3c52;border:1px solid var(--uq-darkpink)}button:disabled{background-color:#656565;background-color:var(--medium-gray);border:1px solid #656565;border:1px solid var(--medium-gray);cursor:not-allowed}#signup-page{display:flex;flex:1 1;height:100%;max-width:calc(100vw - 4em);padding:2em;width:100%}.key-err{color:red;font-size:12px;margin:1em}label{font-size:.8em}.login-row{align-self:flex-start;margin:.5em}.label-row{align-self:flex-start;margin:.5em 0}.tooltip-container{cursor:pointer;display:inline-block;position:relative}.tooltip-button{border:2px solid #fff;border-radius:50%;font-size:16px;height:1em;line-height:.5em;margin-left:.5em;text-align:center;width:1em}.tooltip-content{background-color:#555;border-radius:6px;color:#fff;font-family:sans-serif;left:50%;line-height:1.5em;margin-left:-60px;min-width:200px;opacity:0;padding:.5em;position:absolute;text-align:center;top:125%;transition:opacity .3s;visibility:hidden;z-index:1}.tooltip-content:after{border:5px solid transparent;border-bottom-color:#555;bottom:100%;content:"";left:30%;margin-left:-5px;position:absolute}.tooltip-container:hover .tooltip-content{opacity:1;visibility:visible}#signup-form{max-width:calc(100vw - 2em);padding:1em;width:calc(420px + 2em)}#signup-form-header{margin-bottom:1em}#signup-form-header button{max-width:calc(100vw - 2em);width:calc(420px + 2em)}#current-address{font-family:Courier New,Courier,monospace;font-size:1.25em;font-weight:600;margin-left:1em}.current-username{border:1px solid #fff;border-radius:4px;cursor:pointer;margin:1em 0;padding:.5em}.current-username:hover{background-color:#fff;border:1px solid #fff;color:#027}#connect-wallet{max-width:420px}#wallet-required-message{line-height:1.5em;max-width:500px;text-align:center}#loading h3{text-align:center}#loader{display:inline-block;height:48px;margin-top:16px;position:relative;width:48px}#loader div{-webkit-animation:loader 1.2s cubic-bezier(.5,0,.5,1) infinite;animation:loader 1.2s cubic-bezier(.5,0,.5,1) infinite;border:6px solid transparent;border-radius:50%;border-top-color:#fff;box-sizing:border-box;display:block;height:36px;margin:6px;position:absolute;width:36px}#loader div:first-child{-webkit-animation-delay:-.45s;animation-delay:-.45s}#loader 
div:nth-child(2){-webkit-animation-delay:-.3s;animation-delay:-.3s}#loader div:nth-child(3){-webkit-animation-delay:-.15s;animation-delay:-.15s}@-webkit-keyframes loader{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes loader{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.connect-modal{align-items:center;display:flex;justify-content:center}.connect-modal-content{background:#027;border-radius:15px;height:200px;padding:20px;position:fixed;top:120px;width:400px}.overlay-modal{background-color:hsla(0,0%,100%,.25);inset:0;position:fixed}.signup-form{max-width:400px;width:50vw}.direct-node-message{line-height:1.5em;margin-left:1em}.name-validity{color:red}.more-info-direct{cursor:pointer;margin:0 1em;text-decoration:underline}.more-info-direct:hover{color:#cd3c52;color:var(--uq-darkpink)}a{color:#dd7c8a;color:var(--uq-pink)}a:hover{color:#cd3c52;color:var(--uq-darkpink)}.reset-networking{cursor:pointer;font-size:.8em;margin-left:1em;margin-top:1em;text-decoration:underline;width:100%}.reset-networking:hover{color:#cd3c52;color:var(--uq-darkpink)}.name-err{color:red;font-size:.8em;line-height:1.5em;margin-bottom:1em}
/*# sourceMappingURL=main.e62f8e3a.css.map*/

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,139 +0,0 @@
/*!
Copyright (c) 2015 Jed Watson.
Based on code that is Copyright 2013-2015, Facebook, Inc.
All rights reserved.
*/
/*!
* Adapted from jQuery UI core
*
* http://jqueryui.com
*
* Copyright 2014 jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*
* http://api.jqueryui.com/category/ui-core/
*/
/*!
* The buffer module from node.js, for the browser.
*
* @author Feross Aboukhadijeh <https://feross.org>
* @license MIT
*/
/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh <https://feross.org/opensource> */
/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
/**
* @license React
* react-dom.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @license React
* react-jsx-runtime.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @license React
* react.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @license React
* scheduler.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @license React
* use-sync-external-store-shim.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @license React
* use-sync-external-store-shim/with-selector.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
/**
* @remix-run/router v1.10.0
*
* Copyright (c) Remix Software Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE.md file in the root directory of this source tree.
*
* @license MIT
*/
/**
* React Router DOM v6.17.0
*
* Copyright (c) Remix Software Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE.md file in the root directory of this source tree.
*
* @license MIT
*/
/**
* React Router v6.17.0
*
* Copyright (c) Remix Software Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE.md file in the root directory of this source tree.
*
* @license MIT
*/
/**
* [js-sha3]{@link https://github.com/emn178/js-sha3}
*
* @version 0.5.7
* @author Chen, Yi-Cyuan [emn178@gmail.com]
* @copyright Chen, Yi-Cyuan 2015-2016
* @license MIT
*/
/**
* [js-sha3]{@link https://github.com/emn178/js-sha3}
*
* @version 0.8.0
* @author Chen, Yi-Cyuan [emn178@gmail.com]
* @copyright Chen, Yi-Cyuan 2015-2018
* @license MIT
*/

View File

@@ -37,9 +37,9 @@ pub async fn sqlite(
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
) -> anyhow::Result<()> {
let vfs_path = format!("{}/vfs", &home_directory_path);
let sqlite_path = format!("{}/sqlite", &home_directory_path);
if let Err(e) = fs::create_dir_all(&vfs_path).await {
if let Err(e) = fs::create_dir_all(&sqlite_path).await {
panic!("failed creating sqlite dir! {:?}", e);
}
@@ -79,7 +79,7 @@ pub async fn sqlite(
let open_dbs = open_dbs.clone();
let txs = txs.clone();
let vfs_path = vfs_path.clone();
let sqlite_path = sqlite_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
@@ -92,7 +92,7 @@ pub async fn sqlite(
send_to_loop.clone(),
send_to_terminal.clone(),
send_to_caps_oracle.clone(),
vfs_path.clone(),
sqlite_path.clone(),
)
.await
{
@@ -115,7 +115,7 @@ async fn handle_request(
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
send_to_caps_oracle: CapMessageSender,
vfs_path: String,
sqlite_path: String,
) -> Result<(), SqliteError> {
let KernelMessage {
id,
@@ -152,7 +152,7 @@ async fn handle_request(
open_dbs.clone(),
send_to_caps_oracle.clone(),
&request,
vfs_path.clone(),
sqlite_path.clone(),
)
.await?;
@@ -284,8 +284,17 @@ async fn handle_request(
(serde_json::to_vec(&SqliteResponse::Ok).unwrap(), None)
}
SqliteAction::Backup => {
// execute a WAL checkpoint (flush) on every open database
for db_ref in open_dbs.iter() {
let db = db_ref.value().lock().await;
let result: rusqlite::Result<()> = db
.query_row("PRAGMA wal_checkpoint(TRUNCATE)", [], |_| Ok(()))
.map(|_| ());
if let Err(e) = result {
return Err(SqliteError::RusqliteError {
error: e.to_string(),
});
}
}
(serde_json::to_vec(&SqliteResponse::Ok).unwrap(), None)
}
};
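PRAGMA wal_checkpoint(TRUNCATE) is the strongest of SQLite's checkpoint modes: it blocks until the whole write-ahead log has been copied back into the main database file, then truncates the WAL to zero bytes, leaving the database file alone as a consistent snapshot to back up. The same call in isolation, as a minimal sketch with an illustrative path:

    use rusqlite::Connection;

    fn checkpoint_before_backup(db_path: &str) -> rusqlite::Result<()> {
        let conn = Connection::open(db_path)?;
        // the pragma returns a (busy, log, checkpointed) row; we only
        // care that it succeeds, so the row contents are discarded
        conn.query_row("PRAGMA wal_checkpoint(TRUNCATE)", [], |_row| Ok(()))?;
        Ok(())
    }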
@@ -342,7 +351,7 @@ async fn check_caps(
open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
mut send_to_caps_oracle: CapMessageSender,
request: &SqliteRequest,
vfs_path: String,
sqlite_path: String,
) -> Result<(), SqliteError> {
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@@ -429,8 +438,8 @@
}
let db_path = format!(
"{}/{}/sqlite/{}",
vfs_path,
"{}/{}/{}",
sqlite_path,
request.package_id.to_string(),
request.db.to_string()
);
@@ -449,11 +458,6 @@
}
SqliteAction::Backup => {
// flushing WALs for backup
// check caps.
for db_ref in open_dbs.iter() {
let db = db_ref.value().lock().await;
db.execute("pragma wal_checkpoint", [])?;
}
Ok(())
}
}
@@ -532,7 +536,7 @@ fn make_error_message(our_name: String, km: &KernelMessage, error: SqliteError)
id: km.id,
source: Address {
node: our_name.clone(),
process: KV_PROCESS_ID.clone(),
process: SQLITE_PROCESS_ID.clone(),
},
target: match &km.rsvp {
None => km.source.clone(),

View File

@@ -218,7 +218,7 @@ async fn handle_request(
}
}
StateAction::Backup => {
let checkpoint_dir = format!("{}/vfs/kernel_backup", &home_directory_path);
let checkpoint_dir = format!("{}/kernel/backup", &home_directory_path);
if Path::new(&checkpoint_dir).exists() {
fs::remove_dir_all(&checkpoint_dir).await?;
@@ -356,7 +356,7 @@ async fn bootstrap(
let packages = get_zipped_packages().await;
for (package_name, mut package) in packages {
for (package_name, mut package) in packages.clone() {
// special case tester: only load it in if in simulation mode
if package_name == "tester" {
#[cfg(not(feature = "simulation-mode"))]
@@ -496,58 +496,6 @@ async fn bootstrap(
}
}
// grant capabilities to other initially spawned processes, distro
if let Some(to_grant) = &entry.grant_messaging {
for value in to_grant {
let mut capability = None;
let mut to_process = None;
match value {
serde_json::Value::String(process_name) => {
if let Ok(parsed_process_id) = ProcessId::from_str(process_name) {
capability = Some(Capability {
issuer: Address {
node: our_name.to_string(),
process: ProcessId::from_str(process_name).unwrap(),
},
params: "\"messaging\"".into(),
});
to_process = Some(parsed_process_id);
}
}
serde_json::Value::Object(map) => {
if let Some(process_name) = map.get("process") {
if let Ok(parsed_process_id) =
ProcessId::from_str(&process_name.as_str().unwrap())
{
if let Some(params) = map.get("params") {
capability = Some(Capability {
issuer: Address {
node: our_name.to_string(),
process: ProcessId::from_str(
process_name.as_str().unwrap(),
)
.unwrap(),
},
params: params.to_string(),
});
to_process = Some(parsed_process_id);
}
}
}
}
_ => {
continue;
}
}
if let Some(cap) = capability {
if let Some(process) = process_map.get_mut(&to_process.unwrap()) {
process.capabilities.insert(cap);
}
}
}
}
if entry.request_networking {
requested_caps.insert(Capability {
issuer: Address {
@@ -597,6 +545,114 @@
);
}
}
// second loop: go and grant_capabilities to processes
// can't do this in first loop because we need to have all processes in the map first
for (package_name, mut package) in packages {
// special case tester: only load it in if in simulation mode
if package_name == "tester" {
#[cfg(not(feature = "simulation-mode"))]
continue;
#[cfg(feature = "simulation-mode")]
{}
}
// get and read manifest.json
let Ok(mut package_manifest_zip) = package.by_name("manifest.json") else {
println!(
"fs: missing manifest for package {}, skipping",
package_name
);
continue;
};
let mut manifest_content = Vec::new();
package_manifest_zip
.read_to_end(&mut manifest_content)
.unwrap();
drop(package_manifest_zip);
let package_manifest = String::from_utf8(manifest_content)?;
let package_manifest = serde_json::from_str::<Vec<PackageManifestEntry>>(&package_manifest)
.expect("fs: manifest parse error");
// get and read metadata.json
let Ok(mut package_metadata_zip) = package.by_name("metadata.json") else {
println!(
"fs: missing metadata for package {}, skipping",
package_name
);
continue;
};
let mut metadata_content = Vec::new();
package_metadata_zip
.read_to_end(&mut metadata_content)
.unwrap();
drop(package_metadata_zip);
let package_metadata: serde_json::Value =
serde_json::from_slice(&metadata_content).expect("fs: metadata parse error");
println!("fs: found package metadata: {:?}\r", package_metadata);
let package_name = package_metadata["package"]
.as_str()
.expect("fs: metadata parse error: bad package name");
let package_publisher = package_metadata["publisher"]
.as_str()
.expect("fs: metadata parse error: bad publisher name");
// for each process-entry in manifest.json:
for entry in package_manifest {
let our_process_id = format!(
"{}:{}:{}",
entry.process_name, package_name, package_publisher
);
// grant capabilities to other initially spawned processes, distro
if let Some(to_grant) = &entry.grant_messaging {
for value in to_grant {
match value {
serde_json::Value::String(process_name) => {
if let Ok(parsed_process_id) = ProcessId::from_str(process_name) {
if let Some(process) = process_map.get_mut(&parsed_process_id) {
process.capabilities.insert(Capability {
issuer: Address {
node: our_name.to_string(),
process: ProcessId::from_str(&our_process_id).unwrap(),
},
params: "\"messaging\"".into(),
});
}
}
}
serde_json::Value::Object(map) => {
if let Some(process_name) = map.get("process") {
if let Ok(parsed_process_id) =
ProcessId::from_str(&process_name.as_str().unwrap())
{
if let Some(params) = map.get("params") {
if let Some(process) =
process_map.get_mut(&parsed_process_id)
{
process.capabilities.insert(Capability {
issuer: Address {
node: our_name.to_string(),
process: ProcessId::from_str(&our_process_id)
.unwrap(),
},
params: params.to_string(),
});
}
}
}
}
}
_ => {
continue;
}
}
}
}
}
}
Ok(())
}

View File

@@ -14,6 +14,7 @@ use crossterm::{
use futures::{future::FutureExt, StreamExt};
use std::fs::{read_to_string, OpenOptions};
use std::io::{stdout, BufWriter, Write};
use tokio::signal::unix::{signal, SignalKind};
mod utils;
@@ -129,6 +130,20 @@ pub async fn terminal(
.unwrap();
let mut log_writer = BufWriter::new(log_handle);
// use to trigger cleanup if receive signal to kill process
let mut sigalrm = signal(SignalKind::alarm()).expect("uqbar: failed to set up SIGALRM handler");
let mut sighup = signal(SignalKind::hangup()).expect("uqbar: failed to set up SIGHUP handler");
let mut sigint =
signal(SignalKind::interrupt()).expect("uqbar: failed to set up SIGINT handler");
let mut sigpipe = signal(SignalKind::pipe()).expect("uqbar: failed to set up SIGPIPE handler");
let mut sigquit = signal(SignalKind::quit()).expect("uqbar: failed to set up SIGQUIT handler");
let mut sigterm =
signal(SignalKind::terminate()).expect("uqbar: failed to set up SIGTERM handler");
let mut sigusr1 =
signal(SignalKind::user_defined1()).expect("uqbar: failed to set up SIGUSR1 handler");
let mut sigusr2 =
signal(SignalKind::user_defined2()).expect("uqbar: failed to set up SIGUSR2 handler");
loop {
let event = reader.next().fuse();
@@ -172,7 +187,7 @@ pub async fn terminal(
Print(utils::truncate_in_place(&current_line, prompt_len, win_cols, (line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
},
}
Some(Ok(event)) = event => {
let mut stdout = stdout.lock();
match event {
@@ -599,7 +614,15 @@ pub async fn terminal(
},
_ => {},
}
}
}
_ = sigalrm.recv() => return Err(anyhow::anyhow!("exiting due to SIGALRM")),
_ = sighup.recv() => return Err(anyhow::anyhow!("exiting due to SIGHUP")),
_ = sigint.recv() => return Err(anyhow::anyhow!("exiting due to SIGINT")),
_ = sigpipe.recv() => return Err(anyhow::anyhow!("exiting due to SIGPIPE")),
_ = sigquit.recv() => return Err(anyhow::anyhow!("exiting due to SIGQUIT")),
_ = sigterm.recv() => return Err(anyhow::anyhow!("exiting due to SIGTERM")),
_ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
_ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
}
}
execute!(stdout.lock(), DisableBracketedPaste, terminal::SetTitle(""))?;
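These select! arms are the consumer of the tokio "signal" feature added in Cargo.toml at the top of this diff: each signal(SignalKind::...) call registers a stream of signal deliveries, and returning an error from an arm breaks out of the event loop so the cleanup added in main.rs above can run. The bare shape of the pattern, as a minimal sketch separate from the terminal code:

    use tokio::signal::unix::{signal, SignalKind};

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut sigint = signal(SignalKind::interrupt())?;
        let mut sigterm = signal(SignalKind::terminate())?;
        loop {
            tokio::select! {
                // real code would also poll terminal events here
                _ = sigint.recv() => break,
                _ = sigterm.recv() => break,
            }
        }
        // restore terminal state here before exiting
        Ok(())
    }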

View File

@@ -5,7 +5,6 @@ use std::collections::{HashMap, HashSet};
use thiserror::Error;
lazy_static::lazy_static! {
pub static ref ENCRYPTOR_PROCESS_ID: ProcessId = ProcessId::new(Some("encryptor"), "sys", "uqbar");
pub static ref ETH_RPC_PROCESS_ID: ProcessId = ProcessId::new(Some("eth_rpc"), "sys", "uqbar");
pub static ref HTTP_CLIENT_PROCESS_ID: ProcessId = ProcessId::new(Some("http_client"), "sys", "uqbar");
pub static ref HTTP_SERVER_PROCESS_ID: ProcessId = ProcessId::new(Some("http_server"), "sys", "uqbar");