Mirror of https://github.com/uqbar-dao/nectar.git
Synced 2024-12-23 16:43:24 +03:00

Commit 77a1acb434: peacefully merge in develop

Cargo.lock (generated): 1772 changed lines; file diff suppressed because it is too large.
@@ -1,7 +1,7 @@
 [package]
 name = "kinode_lib"
 authors = ["KinodeDAO"]
-version = "0.7.0"
+version = "0.7.1"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -1,7 +1,7 @@
 [package]
 name = "kinode"
 authors = ["KinodeDAO"]
-version = "0.7.0"
+version = "0.7.1"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -25,7 +25,7 @@ zip = "0.6"
 simulation-mode = []
 
 [dependencies]
-aes-gcm = "0.10.2"
+aes-gcm = "0.10.3"
 alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
 alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
 alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4", features = ["ws"]}
@@ -37,27 +37,26 @@ alloy-sol-types = "0.6.2"
 alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
 anyhow = "1.0.71"
 async-trait = "0.1.71"
-base64 = "0.13"
+base64 = "0.22.0"
 bincode = "1.3.3"
 blake3 = "1.4.1"
 bytes = "1.4.0"
-cap-std = "2.0.0"
 chacha20poly1305 = "0.10.1"
 chrono = "0.4.31"
 clap = { version = "4.4", features = ["derive"] }
-crossterm = { version = "0.26.1", features = ["event-stream", "bracketed-paste"] }
+crossterm = { version = "0.27.0", features = ["event-stream", "bracketed-paste"] }
 curve25519-dalek = "^4.1.2"
 dashmap = "5.5.3"
 digest = "0.10"
 elliptic-curve = { version = "0.13.8", features = ["ecdh"] }
 flate2 = "1.0"
 futures = "0.3"
-generic-array = "0.14"
+generic-array = "1.0.0"
 getrandom = "0.2.10"
 hex = "0.4.3"
 hkdf = "0.12.3"
 hmac = "0.12"
-http = "0.2.9"
+http = "1.1.0"
 jwt = "0.16"
 lib = { path = "../lib" }
 lazy_static = "1.4.0"
@@ -67,26 +66,25 @@ num-traits = "0.2"
 open = "5.0.0"
 public-ip = "0.2.2"
 rand = "0.8.4"
-reqwest = "0.11.18"
-ring = "0.16.20"
+reqwest = "0.12.4"
+ring = "0.17.8"
 rmp-serde = "1.1.2"
-rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
+rocksdb = { version = "0.22.0", features = ["multi-threaded-cf"] }
 route-recognizer = "0.3.1"
-rusqlite = { version = "0.30.0", features = ["bundled"] }
+rusqlite = { version = "0.31.0", features = ["bundled"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_urlencoded = "0.7"
 sha2 = "0.10"
-snow = { version = "0.9.3", features = ["ring-resolver"] }
+snow = { version = "0.9.5", features = ["ring-resolver"] }
 static_dir = "0.2.0"
 thiserror = "1.0"
 tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "signal", "sync"] }
-tokio-stream = "0.1.14"
-tokio-tungstenite = "0.20.1"
+tokio-tungstenite = "0.21.0"
 url = "2.4.1"
 uuid = { version = "1.1.2", features = ["serde", "v4"] }
 warp = "0.3.5"
 wasi-common = "19.0.1"
 wasmtime = "19.0.1"
 wasmtime-wasi = "19.0.1"
-zip = "0.6"
+zip = "1.1.1"
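Of the dependency bumps above, the base64 move from 0.13 to 0.22.0 is the one that is API-breaking for call sites: the old free functions were replaced by the `Engine` trait. The exact call sites are not part of this diff, so the snippet below is only an illustrative sketch of the usual migration shape.

```rust
// Illustrative only: porting code written against base64 0.13's free functions
// (base64::encode / base64::decode) to the Engine API used by base64 0.21+/0.22.
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn main() {
    let encoded = STANDARD.encode(b"hello kinode"); // was: base64::encode(...)
    let decoded = STANDARD.decode(&encoded).unwrap(); // was: base64::decode(...)
    assert_eq!(decoded, b"hello kinode".to_vec());
    println!("{encoded}");
}
```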
@@ -1,17 +1,17 @@
 [package]
 name = "app_store"
-version = "0.3.0"
+version = "0.3.1"
 edition = "2021"
 
 [features]
 simulation-mode = []
 
 [dependencies]
-alloy-primitives = "0.6.2"
-alloy-sol-types = "0.6.2"
+alloy-primitives = "0.7.0"
+alloy-sol-types = "0.7.0"
 anyhow = "1.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rand = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
@@ -20,7 +20,7 @@ sha3 = "0.10.8"
 url = "2.4.1"
 urlencoding = "2.1.0"
 wit-bindgen = "0.24.0"
-zip = { version = "0.6.6", default-features = false }
+zip = { version = "1.1.1", default-features = false }
 
 [lib]
 crate-type = ["cdylib"]
@@ -41,6 +41,7 @@ const ICON: &str = include_str!("icon");
 
 const CHAIN_ID: u64 = 10; // optimism
 const CONTRACT_ADDRESS: &str = "0x52185B6a6017E6f079B994452F234f7C2533787B"; // optimism
+const CONTRACT_FIRST_BLOCK: u64 = 118_590_088;
 
 const EVENTS: [&str; 3] = [
     "AppRegistered(uint256,string,bytes,string,bytes32)",
@@ -111,7 +111,7 @@ impl State {
         crate::print_to_terminal(1, "producing new state");
         let mut state = State {
             contract_address,
-            last_saved_block: 1,
+            last_saved_block: crate::CONTRACT_FIRST_BLOCK,
             package_hashes: HashMap::new(),
             listed_packages: HashMap::new(),
             downloaded_packages: HashMap::new(),
@@ -371,14 +371,16 @@ impl State {
            .ok_or(anyhow::anyhow!("got log with no block number"))?
            .try_into()?;
 
-        // let package_hash: alloy_primitives::U256 = log.topics[1].into();
-        // let package_hash = package_hash.to_string();
-
-        match log.topics[0] {
+        match log.topics()[0] {
             AppRegistered::SIGNATURE_HASH => {
-                let package_hash = log.topics[1];
-                let (package_name, publisher_dnswire, metadata_url, metadata_hash) =
-                    AppRegistered::abi_decode_data(&log.data, true)?;
+                let package_hash = log.topics()[1];
+                let app = AppRegistered::decode_log_data(log.data(), false)?;
+                let package_name = app.packageName;
+                let publisher_dnswire = app.publisherName;
+                let metadata_url = app.metadataUrl;
+                let metadata_hash = app.metadataHash;
 
                 let package_hash = package_hash.to_string();
                 let metadata_hash = metadata_hash.to_string();
@@ -390,13 +392,14 @@ impl State {
                     ),
                 );
 
-                if generate_package_hash(&package_name, publisher_dnswire.as_slice())
+                if generate_package_hash(&package_name, publisher_dnswire.to_vec().as_slice())
                     != package_hash
                 {
                     return Err(anyhow::anyhow!("got log with mismatched package hash"));
                 }
 
-                let Ok(publisher_name) = dnswire_decode(publisher_dnswire.as_slice()) else {
+                let Ok(publisher_name) = dnswire_decode(publisher_dnswire.to_vec().as_slice())
+                else {
                     return Err(anyhow::anyhow!("got log with invalid publisher name"));
                 };
 
@@ -430,9 +433,12 @@ impl State {
                 self.insert_listing(package_hash, listing);
             }
             AppMetadataUpdated::SIGNATURE_HASH => {
-                let package_hash = log.topics[1].to_string();
-                let (metadata_url, metadata_hash) =
-                    AppMetadataUpdated::abi_decode_data(&log.data, false)?;
+                let package_hash = log.topics()[1].to_string();
+                let upd = AppMetadataUpdated::decode_log_data(log.data(), false)?;
+                let metadata_url = upd.metadataUrl;
+                let metadata_hash = upd.metadataHash;
 
                 let metadata_hash = metadata_hash.to_string();
 
                 let current_listing = self
@@ -484,9 +490,9 @@ impl State {
                 }
             }
             Transfer::SIGNATURE_HASH => {
-                let from = alloy_primitives::Address::from_word(log.topics[1]);
-                let to = alloy_primitives::Address::from_word(log.topics[2]);
-                let package_hash = log.topics[3].to_string();
+                let from = alloy_primitives::Address::from_word(log.topics()[1]);
+                let to = alloy_primitives::Address::from_word(log.topics()[2]);
+                let package_hash = log.topics()[3].to_string();
 
                 if from == alloy_primitives::Address::ZERO {
                     match self.get_listing_with_hash_mut(&package_hash) {
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -9,7 +9,7 @@ simulation-mode = []
 [dependencies]
 anyhow = "1.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rand = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
     "image": "",
     "properties": {
         "package_name": "app_store",
-        "current_version": "0.3.0",
+        "current_version": "0.3.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.3.0": ""
+            "0.3.1": ""
         }
     },
     "external_url": "https://kinode.org",
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
 [package]
 name = "chess"
-version = "0.2.0"
+version = "0.2.1"
 edition = "2021"
 
 [features]
@@ -8,9 +8,9 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-base64 = "0.13"
+base64 = "0.22.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 pleco = "0.5"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
@@ -4,11 +4,11 @@
     "image": "",
     "properties": {
         "package_name": "chess",
-        "current_version": "0.2.0",
+        "current_version": "0.2.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.2.0": ""
+            "0.2.1": ""
         }
     },
     "external_url": "https://kinode.org",
@@ -1,6 +1,6 @@
 [package]
 name = "homepage"
-version = "0.1.0"
+version = "0.1.1"
 edition = "2021"
 
 [features]
@@ -9,7 +9,7 @@ simulation-mode = []
 [dependencies]
 anyhow = "1.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
     "image": "",
     "properties": {
         "package_name": "homepage",
-        "current_version": "0.1.0",
+        "current_version": "0.1.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.1.0": ""
+            "0.1.1": ""
        }
    },
    "external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
 [package]
 name = "kns_indexer"
-version = "0.2.0"
+version = "0.2.1"
 edition = "2021"
 
 [features]
@@ -8,11 +8,11 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-alloy-primitives = "0.6.2"
-alloy-sol-types = "0.6.2"
+alloy-primitives = "0.7.0"
+alloy-sol-types = "0.7.0"
 bincode = "1.3.3"
 hex = "0.4.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rmp-serde = "1.1.2"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
@@ -18,6 +18,7 @@ wit_bindgen::generate!({
 // perhaps a constant in process_lib?
 const KNS_OPTIMISM_ADDRESS: &'static str = "0xca5b5811c0c40aab3295f932b1b5112eb7bb4bd6";
 const KNS_LOCAL_ADDRESS: &'static str = "0x5FbDB2315678afecb367f032d93F642f64180aa3";
+const KNS_FIRST_BLOCK: u64 = 114_923_786;
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 struct State {
@@ -133,7 +134,7 @@ fn init(our: Address) {
             contract_address,
             names: HashMap::new(),
             nodes: HashMap::new(),
-            block: 1,
+            block: KNS_FIRST_BLOCK,
         }
     } else {
         println!("loading in {} persisted PKI entries", s.nodes.len());
@@ -145,7 +146,7 @@ fn init(our: Address) {
             contract_address: contract_address.clone(),
             names: HashMap::new(),
             nodes: HashMap::new(),
-            block: 1,
+            block: KNS_FIRST_BLOCK,
         },
    };
 
@@ -188,7 +189,12 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> {
     match eth_provider.get_logs(&filter) {
         Ok(logs) => {
             for log in logs {
-                handle_log(&our, &mut state, &log)?;
+                match handle_log(&our, &mut state, &log) {
+                    Ok(_) => {}
+                    Err(e) => {
+                        println!("log-handling error! {e:?}");
+                    }
+                }
             }
             break;
         }
@@ -295,7 +301,12 @@ fn handle_eth_message(
         match eth_result {
             Ok(eth::EthSub { result, .. }) => {
                 if let eth::SubscriptionResult::Log(log) = result {
-                    handle_log(our, state, &log)?;
+                    match handle_log(our, state, &log) {
+                        Ok(_) => {}
+                        Err(e) => {
+                            println!("log-handling error! {e:?}");
+                        }
+                    }
                 }
             }
             Err(_e) => {
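Both hunks above replace the `?` on `handle_log` with an explicit match, so one bad log no longer aborts the whole backfill loop or subscription handler. A minimal standalone sketch of the pattern, using a hypothetical `handle_log` rather than the indexer's real one:

```rust
// Standalone sketch of the change: keep iterating even when a single log fails,
// printing the error instead of propagating it with `?`.
fn handle_log(log: &str) -> Result<(), String> {
    if log.is_empty() {
        return Err("empty log".to_string());
    }
    println!("indexed {log}");
    Ok(())
}

fn main() {
    for log in ["node-a", "", "node-b"] {
        match handle_log(log) {
            Ok(_) => {}
            Err(e) => println!("log-handling error! {e:?}"),
        }
    }
    // the empty entry fails, but "node-b" is still processed afterwards
}
```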
@@ -345,11 +356,11 @@ fn handle_eth_message(
 }
 
 fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Result<()> {
-    let node_id = log.topics[1];
+    let node_id = log.topics()[1];
 
     let name = match state.names.entry(node_id.to_string()) {
         Entry::Occupied(o) => o.into_mut(),
-        Entry::Vacant(v) => v.insert(get_name(&log)),
+        Entry::Vacant(v) => v.insert(get_name(&log)?),
     };
 
     let node = state
@@ -359,15 +370,15 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
 
     let mut send = true;
 
-    match log.topics[0] {
+    match log.topics()[0] {
         KeyUpdate::SIGNATURE_HASH => {
-            node.public_key = KeyUpdate::abi_decode_data(&log.data, true)
+            node.public_key = KeyUpdate::decode_log_data(log.data(), true)
                 .unwrap()
-                .0
+                .key
                 .to_string();
         }
         IpUpdate::SIGNATURE_HASH => {
-            let ip = IpUpdate::abi_decode_data(&log.data, true).unwrap().0;
+            let ip = IpUpdate::decode_log_data(log.data(), true).unwrap().ip;
             node.ip = format!(
                 "{}.{}.{}.{}",
                 (ip >> 24) & 0xFF,
@@ -380,15 +391,15 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
             node.routers = vec![];
         }
         WsUpdate::SIGNATURE_HASH => {
-            node.port = WsUpdate::abi_decode_data(&log.data, true).unwrap().0;
+            node.port = WsUpdate::decode_log_data(log.data(), true).unwrap().port;
             // when we get port data, we should delete any router data,
             // since the assignment of port indicates an direct node
             node.routers = vec![];
         }
         RoutingUpdate::SIGNATURE_HASH => {
-            node.routers = RoutingUpdate::abi_decode_data(&log.data, true)
+            node.routers = RoutingUpdate::decode_log_data(log.data(), true)
                 .unwrap()
-                .0
+                .routers
                 .iter()
                 .map(|r| r.to_string())
                 .collect::<Vec<String>>();
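The `IpUpdate` arm shown across the previous two hunks stores the on-chain IP as a single unsigned integer and formats it into dotted-quad form with bit shifts. A self-contained sketch of just that conversion, with none of the indexer's types:

```rust
// Sketch of the dotted-quad conversion applied when an IpUpdate log is handled:
// the chain stores the address as one u32, highest octet first.
fn u32_to_ip(ip: u32) -> String {
    format!(
        "{}.{}.{}.{}",
        (ip >> 24) & 0xFF,
        (ip >> 16) & 0xFF,
        (ip >> 8) & 0xFF,
        ip & 0xFF
    )
}

fn main() {
    assert_eq!(u32_to_ip(0x7F000001), "127.0.0.1");
    println!("{}", u32_to_ip(0xC0A80101)); // 192.168.1.1
}
```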
@@ -413,7 +424,7 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
     }
 
     // if new block is > 100 from last block, save state
-    let block = log.block_number.expect("expect").to::<u64>();
+    let block = log.block_number.expect("expect");
     if block > state.block + 100 {
         kinode_process_lib::print_to_terminal(
             1,
@@ -429,16 +440,13 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
     Ok(())
 }
 
-fn get_name(log: &eth::Log) -> String {
-    let decoded = NodeRegistered::abi_decode_data(&log.data, true).unwrap();
-    let name = match dnswire_decode(decoded.0.clone()) {
-        Ok(n) => n,
-        Err(_) => {
-            println!("failed to decode name: {:?}", decoded.0);
-            panic!("")
-        }
-    };
-    name
+fn get_name(log: &eth::Log) -> anyhow::Result<String> {
+    let decoded = NodeRegistered::decode_log_data(log.data(), false).map_err(|_e| {
+        anyhow::anyhow!(
+            "got event other than NodeRegistered without knowing about existing node name"
+        )
+    })?;
+    dnswire_decode(decoded.name.to_vec()).map_err(|e| anyhow::anyhow!(e))
 }
 
 fn dnswire_decode(wire_format_bytes: Vec<u8>) -> Result<String, FromUtf8Error> {
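`get_name` above goes from panicking on a decode failure to returning `anyhow::Result<String>`, which is what lets the callers shown earlier log the error and skip the offending log. A small sketch of the same refactor shape, using a stand-in decoder rather than the real `NodeRegistered` event type:

```rust
// Sketch of the refactor: a helper that used to unwrap/panic now surfaces a
// descriptive error with anyhow, and the caller decides how to react.
use anyhow::anyhow;

fn dnswire_decode(bytes: &[u8]) -> Result<String, std::string::FromUtf8Error> {
    // simplified stand-in for the real dnswire decoding
    String::from_utf8(bytes.to_vec())
}

fn get_name(raw: Option<&[u8]>) -> anyhow::Result<String> {
    let decoded = raw.ok_or_else(|| {
        anyhow!("got event other than NodeRegistered without knowing about existing node name")
    })?;
    dnswire_decode(decoded).map_err(|e| anyhow!(e))
}

fn main() {
    println!("{:?}", get_name(Some(b"mynode.os".as_slice()))); // Ok("mynode.os")
    println!("{:?}", get_name(None)); // Err(...)
}
```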
@@ -1,14 +1,14 @@
 {
     "name": "KNS Indexer",
-    "description": "Kinode OS pki indexer",
+    "description": "Kinode OS PKI indexer",
     "image": "",
     "properties": {
         "package_name": "kns_indexer",
-        "current_version": "0.1.0",
+        "current_version": "0.2.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.1.0": ""
+            "0.2.1": ""
         }
     },
     "external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -9,7 +9,7 @@ simulation-mode = []
 [dependencies]
 anyhow = "1.0"
 clap = "4.4.18"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 regex = "1.10.3"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
@@ -4,11 +4,11 @@
     "image": "",
     "properties": {
         "package_name": "terminal",
-        "current_version": "0.1.0",
+        "current_version": "0.1.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.1.0": ""
+            "0.1.1": ""
         }
     },
     "external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rmp-serde = "1.1.2"
 serde = { version = "1.0", features = ["derive"] }
 wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rmp-serde = "1.1.2"
 serde = { version = "1.0", features = ["derive"] }
 wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rmp-serde = "1.1.2"
 serde = { version = "1.0", features = ["derive"] }
 wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
 simulation-mode = []
 
 [dependencies]
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rmp-serde = "1.1.2"
 serde = { version = "1.0", features = ["derive"] }
 wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
 [package]
 name = "terminal"
-version = "0.1.0"
+version = "0.1.1"
 edition = "2021"
 
 [features]
@@ -9,7 +9,7 @@ simulation-mode = []
 [dependencies]
 anyhow = "1.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 rand = "0.8"
 regex = "1.10.3"
 serde = { version = "1.0", features = ["derive"] }
@@ -295,7 +295,7 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
         }
     }
     print_to_terminal(
-        2,
+        3,
         &format!(
             "{}: Process {{\n wasm_bytes_handle: {},\n wit_version: {},\n on_exit: {:?},\n public: {}\n capabilities: {}\n}}",
             parsed_new_process_id.clone(),
@@ -8,7 +8,7 @@ simulation-mode = []
 
 [dependencies]
 anyhow = "1.0"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
     "image": "",
     "properties": {
         "package_name": "tester",
-        "current_version": "0.1.0",
+        "current_version": "0.1.1",
         "publisher": "sys",
         "mirrors": [],
         "code_hashes": {
-            "0.1.0": ""
+            "0.1.1": ""
         }
     },
     "external_url": "https://kinode.org",
@@ -9,7 +9,7 @@ simulation-mode = []
 [dependencies]
 anyhow = "1.0"
 bincode = "1.3.3"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 thiserror = "1.0"
@@ -1,6 +1,6 @@
 [package]
 name = "tester"
-version = "0.1.0"
+version = "0.1.1"
 edition = "2021"
 
 [features]
@@ -10,7 +10,7 @@ simulation-mode = []
 anyhow = "1.0"
 bincode = "1.3.3"
 indexmap = "2.1"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 thiserror = "1.0"
@@ -1,11 +1,4 @@
 [
-    {
-        "chain_id": 1,
-        "trusted": false,
-        "provider": {
-            "RpcUrl": "wss://ethereum.publicnode.com"
-        }
-    },
     {
         "chain_id": 31337,
         "trusted": true,
@@ -13,6 +6,13 @@
             "RpcUrl": "wss://localhost:8545"
         }
     },
+    {
+        "chain_id": 1,
+        "trusted": false,
+        "provider": {
+            "RpcUrl": "wss://ethereum.publicnode.com"
+        }
+    },
     {
         "chain_id": 10,
         "trusted": false,
@@ -21,18 +21,18 @@
             }
         },
     {
-        "chain_id": 10,
+        "chain_id": 1,
         "trusted": false,
         "provider": {
             "Node": {
                 "use_as_provider": true,
                 "kns_update": {
-                    "name": "default-router-1.os",
+                    "name": "providerfren.os",
                     "owner": "",
-                    "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d",
-                    "public_key": "0xe1f7a266eafe46c79494d4dcce4222d81e5767511b295f1ed26c37221aecb80b",
+                    "node": "",
+                    "public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
                     "ip": "147.135.114.167",
-                    "port": 9000,
+                    "port": 9999,
                     "routers": []
                 }
             }
@@ -45,30 +45,12 @@
             "Node": {
                 "use_as_provider": true,
                 "kns_update": {
-                    "name": "default-router-2.os",
+                    "name": "providerfren.os",
                     "owner": "",
-                    "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458",
-                    "public_key": "0xcc2752e30ba865ab6baa1b819b0291b71f1136245234274b274df5950c3abcc4",
+                    "node": "",
+                    "public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
                     "ip": "147.135.114.167",
-                    "port": 9001,
-                    "routers": []
-                }
-            }
-        }
-    },
-    {
-        "chain_id": 10,
-        "trusted": false,
-        "provider": {
-            "Node": {
-                "use_as_provider": true,
-                "kns_update": {
-                    "name": "default-router-3.os",
-                    "owner": "",
-                    "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a",
-                    "public_key": "0xe992841b17212206a3929b4cc065ad2854875d48fc7177e260005b9eb8d4f123",
-                    "ip": "147.135.114.167",
-                    "port": 9005,
+                    "port": 9999,
                     "routers": []
                 }
             }
@@ -81,48 +63,12 @@
             "Node": {
                 "use_as_provider": true,
                 "kns_update": {
-                    "name": "default-router-3.os",
+                    "name": "providerfren.os",
                     "owner": "",
-                    "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a",
-                    "public_key": "0xe992841b17212206a3929b4cc065ad2854875d48fc7177e260005b9eb8d4f123",
+                    "node": "",
+                    "public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
                     "ip": "147.135.114.167",
-                    "port": 9005,
-                    "routers": []
-                }
-            }
-        }
-    },
-    {
-        "chain_id": 11155111,
-        "trusted": false,
-        "provider": {
-            "Node": {
-                "use_as_provider": true,
-                "kns_update": {
-                    "name": "default-router-2.os",
-                    "owner": "",
-                    "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458",
-                    "public_key": "0xcc2752e30ba865ab6baa1b819b0291b71f1136245234274b274df5950c3abcc4",
-                    "ip": "147.135.114.167",
-                    "port": 9001,
-                    "routers": []
-                }
-            }
-        }
-    },
-    {
-        "chain_id": 11155111,
-        "trusted": false,
-        "provider": {
-            "Node": {
-                "use_as_provider": true,
-                "kns_update": {
-                    "name": "default-router-1.os",
-                    "owner": "",
-                    "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d",
-                    "public_key": "0xe1f7a266eafe46c79494d4dcce4222d81e5767511b295f1ed26c37221aecb80b",
-                    "ip": "147.135.114.167",
-                    "port": 9000,
+                    "port": 9999,
                     "routers": []
                 }
             }
|
@ -18,9 +18,13 @@ mod subscription;
|
|||||||
#[derive(Debug, Serialize, Deserialize)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
#[serde(untagged)]
|
#[serde(untagged)]
|
||||||
enum IncomingReq {
|
enum IncomingReq {
|
||||||
|
/// requests for an RPC action that can come from processes on this node or others
|
||||||
EthAction(EthAction),
|
EthAction(EthAction),
|
||||||
|
/// requests that must come from this node to modify provider settings / fetch them
|
||||||
EthConfigAction(EthConfigAction),
|
EthConfigAction(EthConfigAction),
|
||||||
|
/// subscription updates coming in from a remote provider
|
||||||
EthSubResult(EthSubResult),
|
EthSubResult(EthSubResult),
|
||||||
|
/// a remote node who uses our provider keeping their subscription alive
|
||||||
SubKeepalive(u64),
|
SubKeepalive(u64),
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -42,11 +46,14 @@ struct UrlProvider {
|
|||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct NodeProvider {
|
struct NodeProvider {
|
||||||
|
/// NOT CURRENTLY USED
|
||||||
pub trusted: bool,
|
pub trusted: bool,
|
||||||
/// semi-temporary flag to mark if this provider is currently usable
|
/// semi-temporary flag to mark if this provider is currently usable
|
||||||
/// future updates will make this more dynamic
|
/// future updates will make this more dynamic
|
||||||
pub usable: bool,
|
pub usable: bool,
|
||||||
pub name: String,
|
/// the KNS update that describes this node provider
|
||||||
|
/// kept so we can re-serialize to SavedConfigs
|
||||||
|
pub kns_update: KnsUpdate,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ActiveProviders {
|
impl ActiveProviders {
|
||||||
@ -59,7 +66,7 @@ impl ActiveProviders {
|
|||||||
self.nodes.push(NodeProvider {
|
self.nodes.push(NodeProvider {
|
||||||
trusted: new.trusted,
|
trusted: new.trusted,
|
||||||
usable: use_as_provider,
|
usable: use_as_provider,
|
||||||
name: kns_update.name,
|
kns_update,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
NodeOrRpcUrl::RpcUrl(url) => {
|
NodeOrRpcUrl::RpcUrl(url) => {
|
||||||
@ -74,7 +81,7 @@ impl ActiveProviders {
|
|||||||
|
|
||||||
fn remove_provider(&mut self, remove: &str) {
|
fn remove_provider(&mut self, remove: &str) {
|
||||||
self.urls.retain(|x| x.url != remove);
|
self.urls.retain(|x| x.url != remove);
|
||||||
self.nodes.retain(|x| x.name != remove);
|
self.nodes.retain(|x| x.kns_update.name != remove);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,7 +164,9 @@ pub async fn provider(
     caps_oracle: CapMessageSender,
     print_tx: PrintSender,
 ) -> Result<()> {
-    // load access settings if they've been saved
+    // load access settings if they've been persisted to disk
+    // this merely describes whether our provider is available to other nodes
+    // and if so, which nodes are allowed to access it (public/whitelist/blacklist)
     let access_settings: AccessSettings =
         match tokio::fs::read_to_string(format!("{}/.eth_access_settings", home_directory_path))
             .await
@@ -169,11 +178,6 @@ pub async fn provider(
                 allow: HashSet::new(),
                 deny: HashSet::new(),
             };
-            let _ = tokio::fs::write(
-                format!("{}/.eth_access_settings", home_directory_path),
-                serde_json::to_string(&access_settings).unwrap(),
-            )
-            .await;
             access_settings
         }
     };
@@ -183,6 +187,9 @@ pub async fn provider(
     )
     .await;
 
+    // initialize module state
+    // fill out providers based on saved configs (possibly persisted, given to us)
+    // this can be a mix of node providers and rpc providers
     let mut state = ModuleState {
         our: Arc::new(our),
         home_directory_path,
@@ -208,14 +215,13 @@ pub async fn provider(
 
     verbose_print(&state.print_tx, "eth: provider initialized").await;
 
+    // main loop: handle incoming network errors and incoming kernel messages
     loop {
         tokio::select! {
             Some(wrapped_error) = net_error_recv.recv() => {
                 handle_network_error(
                     wrapped_error,
-                    &state.active_subscriptions,
-                    &state.response_channels,
-                    &state.print_tx
+                    &state,
                 ).await;
             }
             Some(km) = recv_in_client.recv() => {
@@ -241,40 +247,54 @@ pub async fn provider(
     }
 }
 
-async fn handle_network_error(
-    wrapped_error: WrappedSendError,
-    active_subscriptions: &ActiveSubscriptions,
-    response_channels: &ResponseChannels,
-    print_tx: &PrintSender,
-) {
-    verbose_print(&print_tx, "eth: got network error").await;
-    // if we hold active subscriptions for the remote node that this error refers to,
-    // close them here -- they will need to resubscribe
-    // TODO is this necessary?
-    if let Some((_who, sub_map)) = active_subscriptions.remove(&wrapped_error.error.target) {
-        for (_sub_id, sub) in sub_map.iter() {
-            if let ActiveSub::Local(handle) = sub {
-                verbose_print(
-                    &print_tx,
-                    "eth: closing local sub in response to network error",
-                )
-                .await;
-                handle.abort();
-            }
+/// network errors only come from remote provider nodes we tried to access,
+/// or from remote nodes that are using us as a provider.
+///
+/// if we tried to access them, we will have a response channel to send the error to.
+/// if they are using us as a provider, close the subscription associated with the target.
+async fn handle_network_error(wrapped_error: WrappedSendError, state: &ModuleState) {
+    verbose_print(
+        &state.print_tx,
+        &format!(
+            "eth: got network error from {}",
+            &wrapped_error.error.target
+        ),
+    )
+    .await;
+    // close all subscriptions held by the process that we (possibly) tried to send an update to
+    if let Some((_who, sub_map)) = state
+        .active_subscriptions
+        .remove(&wrapped_error.error.target)
+    {
+        for (sub_id, sub) in sub_map.iter() {
+            verbose_print(
+                &state.print_tx,
+                &format!(
+                    "eth: closed subscription {} in response to network error",
+                    sub_id
+                ),
+            )
+            .await;
+            sub.close(*sub_id, state).await;
         }
     }
-    // we got an error from a remote node provider --
-    // forward it to response channel if it exists
-    if let Some(chan) = response_channels.get(&wrapped_error.id) {
-        // can't close channel here, as response may be an error
-        // and fulfill_request may wish to try other providers.
-        verbose_print(&print_tx, "eth: sent network error to response channel").await;
+    // forward error to response channel if it exists
+    if let Some(chan) = state.response_channels.get(&wrapped_error.id) {
+        // don't close channel here, as channel holder will wish to try other providers.
+        verbose_print(
+            &state.print_tx,
+            "eth: forwarded network error to response channel",
+        )
+        .await;
         let _ = chan.send(Err(wrapped_error)).await;
     }
 }
 
-/// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`].
-/// also handle responses that are passthroughs from remote provider nodes.
+/// handle incoming requests and responses.
+/// requests must be one of types in [`IncomingReq`].
+/// responses are passthroughs from remote provider nodes.
 async fn handle_message(
     state: &mut ModuleState,
     km: KernelMessage,
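The rewritten `handle_network_error` above now takes the whole module state, closes every subscription held for the unreachable target, and only then forwards the error to any waiting response channel. A dependency-free sketch of that cleanup shape, where the state struct, the "close", and the channels are all simplified stand-ins:

```rust
use std::collections::HashMap;

// Simplified sketch: subscriptions keyed by remote node; on a network error for
// that node, remove and "close" each of its subscriptions, then notify whoever
// was waiting on the original request.
struct State {
    active_subscriptions: HashMap<String, Vec<u64>>, // node -> subscription ids
    response_channels: HashMap<u64, String>,         // request id -> waiting party
}

fn handle_network_error(state: &mut State, target: &str, request_id: u64) {
    if let Some(sub_ids) = state.active_subscriptions.remove(target) {
        for sub_id in sub_ids {
            println!("closed subscription {sub_id} in response to network error");
        }
    }
    if let Some(waiter) = state.response_channels.get(&request_id) {
        println!("forwarded network error to {waiter}");
    }
}

fn main() {
    let mut state = State {
        active_subscriptions: HashMap::from([("node.os".to_string(), vec![1, 2])]),
        response_channels: HashMap::from([(42, "local process".to_string())]),
    };
    handle_network_error(&mut state, "node.os", 42);
}
```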
@@ -335,6 +355,8 @@ async fn handle_message(
                 {
                     if provider_node == &km.source.node {
                         if let Ok(()) = sender.send(eth_sub_result).await {
+                            // successfully sent a subscription update from a
+                            // remote provider to one of our processes
                             return Ok(());
                         }
                     }
@@ -344,7 +366,7 @@ async fn handle_message(
                 // so they can stop sending us updates
                 verbose_print(
                     &state.print_tx,
-                    "eth: got eth_sub_result but no matching sub found",
+                    "eth: got eth_sub_result but no matching sub found, unsubscribing",
                 )
                 .await;
                 kernel_message(
@@ -367,6 +389,26 @@ async fn handle_message(
                         return Ok(());
                     }
                 }
+                verbose_print(
+                    &state.print_tx,
+                    "eth: got sub_keepalive but no matching sub found",
+                )
+                .await;
+                // send a response with an EthSubError
+                kernel_message(
+                    &state.our.clone(),
+                    km.id,
+                    km.source.clone(),
+                    None,
+                    false,
+                    None,
+                    EthSubResult::Err(EthSubError {
+                        id: sub_id,
+                        error: "Subscription not found".to_string(),
+                    }),
+                    &state.send_to_loop,
+                )
+                .await;
             }
         }
     }
@@ -382,7 +424,10 @@ async fn handle_eth_action(
 ) -> Result<(), EthError> {
     // check our access settings if the request is from a remote node
     if km.source.node != *state.our {
-        if state.access_settings.deny.contains(&km.source.node) {
+        if state.access_settings.deny.contains(&km.source.node)
+            || (!state.access_settings.public
+                && !state.access_settings.allow.contains(&km.source.node))
+        {
             verbose_print(
                 &state.print_tx,
                 "eth: got eth_action from unauthorized remote source",
@@ -390,21 +435,19 @@ async fn handle_eth_action(
             .await;
             return Err(EthError::PermissionDenied);
         }
-        if !state.access_settings.public {
-            if !state.access_settings.allow.contains(&km.source.node) {
-                verbose_print(
-                    &state.print_tx,
-                    "eth: got eth_action from unauthorized remote source",
-                )
-                .await;
-                return Err(EthError::PermissionDenied);
-            }
-        }
     }
 
     verbose_print(
         &state.print_tx,
-        &format!("eth: handling eth_action {eth_action:?}"),
+        &format!(
+            "eth: handling {} from {}",
+            match &eth_action {
+                EthAction::SubscribeLogs { .. } => "subscribe",
+                EthAction::UnsubscribeLogs(_) => "unsubscribe",
+                EthAction::Request { .. } => "request",
+            },
+            km.source
+        ),
     )
     .await;
 
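The two hunks above fold the old nested `if !public { if !allow.contains(...) }` check into a single condition: a remote node is rejected if it is denied outright, or if the provider is not public and the node is not on the allow list. A standalone sketch of that predicate, outside the module's types:

```rust
use std::collections::HashSet;

// Sketch of the consolidated access check from handle_eth_action:
// deny always wins; otherwise a private provider requires an allow-list entry.
fn is_permitted(
    public: bool,
    allow: &HashSet<String>,
    deny: &HashSet<String>,
    node: &str,
) -> bool {
    !(deny.contains(node) || (!public && !allow.contains(node)))
}

fn main() {
    let allow: HashSet<String> = ["friend.os".to_string()].into();
    let deny: HashSet<String> = ["spammer.os".to_string()].into();
    assert!(is_permitted(false, &allow, &deny, "friend.os"));
    assert!(!is_permitted(false, &allow, &deny, "stranger.os"));
    assert!(!is_permitted(true, &allow, &deny, "spammer.os"));
    assert!(is_permitted(true, &allow, &deny, "stranger.os"));
}
```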
@@ -414,19 +457,15 @@ async fn handle_eth_action(
     // before returning an error.
     match eth_action {
         EthAction::SubscribeLogs { sub_id, .. } => {
-            tokio::spawn(subscription::create_new_subscription(
-                state.our.to_string(),
+            subscription::create_new_subscription(
+                state,
                 km.id,
                 km.source.clone(),
                 km.rsvp,
-                state.send_to_loop.clone(),
                 sub_id,
                 eth_action,
-                state.providers.clone(),
-                state.active_subscriptions.clone(),
-                state.response_channels.clone(),
-                state.print_tx.clone(),
-            ));
+            )
+            .await;
         }
         EthAction::UnsubscribeLogs(sub_id) => {
             let mut sub_map = state
@ -509,29 +548,62 @@ async fn fulfill_request(
|
|||||||
let Some(mut aps) = providers.get_mut(&chain_id) else {
|
let Some(mut aps) = providers.get_mut(&chain_id) else {
|
||||||
return EthResponse::Err(EthError::NoRpcForChain);
|
return EthResponse::Err(EthError::NoRpcForChain);
|
||||||
};
|
};
|
||||||
|
|
||||||
// first, try any url providers we have for this chain,
|
// first, try any url providers we have for this chain,
|
||||||
// then if we have none or they all fail, go to node provider.
|
// then if we have none or they all fail, go to node providers.
|
||||||
// finally, if no provider works, return an error.
|
// finally, if no provider works, return an error.
|
||||||
for url_provider in &mut aps.urls {
|
|
||||||
|
// bump the successful provider to the front of the list for future requests
|
||||||
|
for (index, url_provider) in aps.urls.iter_mut().enumerate() {
|
||||||
let pubsub = match &url_provider.pubsub {
|
let pubsub = match &url_provider.pubsub {
|
||||||
Some(pubsub) => pubsub,
|
Some(pubsub) => pubsub,
|
||||||
None => {
|
None => {
|
||||||
if let Ok(()) = activate_url_provider(url_provider).await {
|
if let Ok(()) = activate_url_provider(url_provider).await {
|
||||||
verbose_print(print_tx, "eth: activated a url provider").await;
|
verbose_print(
|
||||||
|
print_tx,
|
||||||
|
&format!("eth: activated url provider {}", url_provider.url),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
url_provider.pubsub.as_ref().unwrap()
|
url_provider.pubsub.as_ref().unwrap()
|
||||||
} else {
|
} else {
|
||||||
|
verbose_print(
|
||||||
|
print_tx,
|
||||||
|
&format!("eth: could not activate url provider {}", url_provider.url),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let Ok(value) = pubsub.inner().prepare(method, params.clone()).await else {
|
match pubsub.inner().prepare(method, params.clone()).await {
|
||||||
// this provider failed and needs to be reset
|
Ok(value) => {
|
||||||
url_provider.pubsub = None;
|
let successful_provider = aps.urls.remove(index);
|
||||||
continue;
|
aps.urls.insert(0, successful_provider);
|
||||||
};
|
return EthResponse::Response { value };
|
||||||
return EthResponse::Response { value };
|
}
|
||||||
|
Err(rpc_error) => {
|
||||||
|
verbose_print(
|
||||||
|
print_tx,
|
||||||
|
&format!(
|
||||||
|
"eth: got error from url provider {}: {}",
|
||||||
|
url_provider.url, rpc_error
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
// this provider failed and needs to be reset
|
||||||
|
url_provider.pubsub = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
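A note on the reordering logic above: when a url provider answers successfully, it is pulled out of the list and reinserted at index 0 so later requests try it first. A minimal standalone sketch of that move-to-front pattern, assuming a placeholder UrlProvider type and a stand-in try_provider call rather than the real alloy pubsub plumbing:

    struct UrlProvider {
        url: String,
    }

    // stand-in for pubsub.inner().prepare(..) in the real code
    async fn try_provider(p: &UrlProvider) -> Result<String, String> {
        Ok(format!("response via {}", p.url))
    }

    async fn first_working(urls: &mut Vec<UrlProvider>) -> Option<String> {
        for index in 0..urls.len() {
            if let Ok(value) = try_provider(&urls[index]).await {
                // bump the successful provider to the front for future requests
                let winner = urls.remove(index);
                urls.insert(0, winner);
                return Some(value);
            }
        }
        None
    }

Iterating by index keeps the remove/insert free of any outstanding borrow of the vector.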
for node_provider in &mut aps.nodes {
|
for node_provider in &mut aps.nodes {
|
||||||
|
verbose_print(
|
||||||
|
print_tx,
|
||||||
|
&format!(
|
||||||
|
"eth: attempting to fulfill via {}",
|
||||||
|
node_provider.kns_update.name
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
let response = forward_to_node_provider(
|
let response = forward_to_node_provider(
|
||||||
our,
|
our,
|
||||||
km_id,
|
km_id,
|
||||||
@ -563,14 +635,14 @@ async fn forward_to_node_provider(
|
|||||||
send_to_loop: &MessageSender,
|
send_to_loop: &MessageSender,
|
||||||
receiver: &mut ProcessMessageReceiver,
|
receiver: &mut ProcessMessageReceiver,
|
||||||
) -> EthResponse {
|
) -> EthResponse {
|
||||||
if !node_provider.usable || node_provider.name == our {
|
if !node_provider.usable || node_provider.kns_update.name == our {
|
||||||
return EthResponse::Err(EthError::PermissionDenied);
|
return EthResponse::Err(EthError::PermissionDenied);
|
||||||
}
|
}
|
||||||
kernel_message(
|
kernel_message(
|
||||||
our,
|
our,
|
||||||
km_id,
|
km_id,
|
||||||
Address {
|
Address {
|
||||||
node: node_provider.name.clone(),
|
node: node_provider.kns_update.name.clone(),
|
||||||
process: ETH_PROCESS_ID.clone(),
|
process: ETH_PROCESS_ID.clone(),
|
||||||
},
|
},
|
||||||
rsvp,
|
rsvp,
|
||||||
@ -585,15 +657,13 @@ async fn forward_to_node_provider(
|
|||||||
else {
|
else {
|
||||||
return EthResponse::Err(EthError::RpcTimeout);
|
return EthResponse::Err(EthError::RpcTimeout);
|
||||||
};
|
};
|
||||||
let Message::Response((resp, _context)) = response_km.message else {
|
if let Message::Response((resp, _context)) = response_km.message {
|
||||||
// if we hit this, they spoofed a request with same id, ignore and possibly punish
|
if let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) {
|
||||||
return EthResponse::Err(EthError::RpcMalformedResponse);
|
return eth_response;
|
||||||
};
|
}
|
||||||
let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) else {
|
}
|
||||||
// if we hit this, they sent a malformed response, ignore and possibly punish
|
// if we hit this, they sent a malformed response, ignore and possibly punish
|
||||||
return EthResponse::Err(EthError::RpcMalformedResponse);
|
EthResponse::Err(EthError::RpcMalformedResponse)
|
||||||
};
|
|
||||||
eth_response
|
|
||||||
}
|
}
|
||||||
|
|
||||||
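For the response-handling rewrite above: anything that is not a Response, or whose body fails to deserialize into an EthResponse, is mapped to a malformed-response error. Reduced to plain serde, with a stand-in EthResponse enum rather than the real one, the check looks like this:

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    enum EthResponse {
        Ok, // other variants omitted in this sketch
    }

    fn parse_provider_response(body: &[u8]) -> Result<EthResponse, &'static str> {
        // any body that does not deserialize cleanly is treated as malformed
        serde_json::from_slice::<EthResponse>(body).map_err(|_| "RpcMalformedResponse")
    }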
async fn handle_eth_config_action(
|
async fn handle_eth_config_action(
|
||||||
@ -627,6 +697,7 @@ async fn handle_eth_config_action(
|
|||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
let mut save_settings = false;
|
||||||
let mut save_providers = false;
|
let mut save_providers = false;
|
||||||
|
|
||||||
// modify our providers and access settings based on config action
|
// modify our providers and access settings based on config action
|
||||||
@ -650,21 +721,27 @@ async fn handle_eth_config_action(
|
|||||||
}
|
}
|
||||||
EthConfigAction::SetPublic => {
|
EthConfigAction::SetPublic => {
|
||||||
state.access_settings.public = true;
|
state.access_settings.public = true;
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::SetPrivate => {
|
EthConfigAction::SetPrivate => {
|
||||||
state.access_settings.public = false;
|
state.access_settings.public = false;
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::AllowNode(node) => {
|
EthConfigAction::AllowNode(node) => {
|
||||||
state.access_settings.allow.insert(node);
|
state.access_settings.allow.insert(node);
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::UnallowNode(node) => {
|
EthConfigAction::UnallowNode(node) => {
|
||||||
state.access_settings.allow.remove(&node);
|
state.access_settings.allow.remove(&node);
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::DenyNode(node) => {
|
EthConfigAction::DenyNode(node) => {
|
||||||
state.access_settings.deny.insert(node);
|
state.access_settings.deny.insert(node);
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::UndenyNode(node) => {
|
EthConfigAction::UndenyNode(node) => {
|
||||||
state.access_settings.deny.remove(&node);
|
state.access_settings.deny.remove(&node);
|
||||||
|
save_settings = true;
|
||||||
}
|
}
|
||||||
EthConfigAction::SetProviders(new_providers) => {
|
EthConfigAction::SetProviders(new_providers) => {
|
||||||
let new_map = DashMap::new();
|
let new_map = DashMap::new();
|
||||||
@ -713,20 +790,26 @@ async fn handle_eth_config_action(
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// save providers and access settings to disk
|
// save providers and/or access settings, depending on necessity, to disk
|
||||||
let _ = tokio::fs::write(
|
if save_settings {
|
||||||
format!("{}/.eth_access_settings", state.home_directory_path),
|
if let Ok(()) = tokio::fs::write(
|
||||||
serde_json::to_string(&state.access_settings).unwrap(),
|
format!("{}/.eth_access_settings", state.home_directory_path),
|
||||||
)
|
serde_json::to_string(&state.access_settings).unwrap(),
|
||||||
.await;
|
)
|
||||||
verbose_print(&state.print_tx, "eth: saved new access settings").await;
|
.await
|
||||||
|
{
|
||||||
|
verbose_print(&state.print_tx, "eth: saved new access settings").await;
|
||||||
|
};
|
||||||
|
}
|
||||||
if save_providers {
|
if save_providers {
|
||||||
let _ = tokio::fs::write(
|
if let Ok(()) = tokio::fs::write(
|
||||||
format!("{}/.eth_providers", state.home_directory_path),
|
format!("{}/.eth_providers", state.home_directory_path),
|
||||||
serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
|
serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
|
||||||
)
|
)
|
||||||
.await;
|
.await
|
||||||
verbose_print(&state.print_tx, "eth: saved new provider settings").await;
|
{
|
||||||
|
verbose_print(&state.print_tx, "eth: saved new provider settings").await;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
EthConfigResponse::Ok
|
EthConfigResponse::Ok
|
||||||
}
|
}
|
||||||
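The config handler above now only touches disk when an action actually changed something: each arm flips a save_settings or save_providers flag, and the writes happen once at the end. A rough standalone sketch of that flag-then-persist pattern, using a stand-in AccessSettings struct (and an extra equality guard that is not in the original):

    use serde::Serialize;

    #[derive(Serialize, Default)]
    struct AccessSettings {
        public: bool,
        allow: Vec<String>,
        deny: Vec<String>,
    }

    async fn set_public(home: &str, settings: &mut AccessSettings, public: bool) {
        let mut save_settings = false;
        if settings.public != public {
            settings.public = public;
            save_settings = true;
        }
        // write only when something actually changed
        if save_settings {
            let _ = tokio::fs::write(
                format!("{home}/.eth_access_settings"),
                serde_json::to_string(settings).unwrap(),
            )
            .await;
        }
    }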
@ -767,15 +850,7 @@ fn providers_to_saved_configs(providers: &Providers) -> SavedConfigs {
|
|||||||
.chain(entry.nodes.iter().map(|node_provider| ProviderConfig {
|
.chain(entry.nodes.iter().map(|node_provider| ProviderConfig {
|
||||||
chain_id: *entry.key(),
|
chain_id: *entry.key(),
|
||||||
provider: NodeOrRpcUrl::Node {
|
provider: NodeOrRpcUrl::Node {
|
||||||
kns_update: KnsUpdate {
|
kns_update: node_provider.kns_update.clone(),
|
||||||
name: node_provider.name.clone(),
|
|
||||||
owner: "".to_string(),
|
|
||||||
node: "".to_string(),
|
|
||||||
public_key: "".to_string(),
|
|
||||||
ip: "".to_string(),
|
|
||||||
port: 0,
|
|
||||||
routers: vec![],
|
|
||||||
},
|
|
||||||
use_as_provider: node_provider.usable,
|
use_as_provider: node_provider.usable,
|
||||||
},
|
},
|
||||||
trusted: node_provider.trusted,
|
trusted: node_provider.trusted,
|
||||||
|
@ -4,123 +4,82 @@ use alloy_rpc_types::pubsub::SubscriptionResult;
|
|||||||
|
|
||||||
/// cleans itself up when the subscription is closed or fails.
|
/// cleans itself up when the subscription is closed or fails.
|
||||||
pub async fn create_new_subscription(
|
pub async fn create_new_subscription(
|
||||||
our: String,
|
state: &ModuleState,
|
||||||
km_id: u64,
|
km_id: u64,
|
||||||
target: Address,
|
target: Address,
|
||||||
rsvp: Option<Address>,
|
rsvp: Option<Address>,
|
||||||
send_to_loop: MessageSender,
|
|
||||||
sub_id: u64,
|
sub_id: u64,
|
||||||
eth_action: EthAction,
|
eth_action: EthAction,
|
||||||
providers: Providers,
|
|
||||||
active_subscriptions: ActiveSubscriptions,
|
|
||||||
response_channels: ResponseChannels,
|
|
||||||
print_tx: PrintSender,
|
|
||||||
) {
|
) {
|
||||||
verbose_print(&print_tx, "eth: creating new subscription").await;
|
let our = state.our.clone();
|
||||||
match build_subscription(
|
let send_to_loop = state.send_to_loop.clone();
|
||||||
&our,
|
let active_subscriptions = state.active_subscriptions.clone();
|
||||||
km_id,
|
let providers = state.providers.clone();
|
||||||
&target,
|
let response_channels = state.response_channels.clone();
|
||||||
&send_to_loop,
|
let print_tx = state.print_tx.clone();
|
||||||
ð_action,
|
tokio::spawn(async move {
|
||||||
&providers,
|
match tokio::time::timeout(
|
||||||
&response_channels,
|
std::time::Duration::from_secs(30),
|
||||||
&print_tx,
|
build_subscription(
|
||||||
)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(maybe_raw_sub) => {
|
|
||||||
// send a response to the target that the subscription was successful
|
|
||||||
kernel_message(
|
|
||||||
&our,
|
&our,
|
||||||
km_id,
|
km_id,
|
||||||
target.clone(),
|
&target,
|
||||||
rsvp.clone(),
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
EthResponse::Ok,
|
|
||||||
&send_to_loop,
|
&send_to_loop,
|
||||||
)
|
ð_action,
|
||||||
.await;
|
&providers,
|
||||||
let mut subs = active_subscriptions
|
&response_channels,
|
||||||
.entry(target.clone())
|
&print_tx,
|
||||||
.or_insert(HashMap::new());
|
),
|
||||||
let active_subscriptions = active_subscriptions.clone();
|
)
|
||||||
match maybe_raw_sub {
|
.await
|
||||||
Ok(rx) => {
|
{
|
||||||
subs.insert(
|
Ok(Ok(maybe_raw_sub)) => {
|
||||||
sub_id,
|
// send a response to the target that the subscription was successful
|
||||||
// this is a local sub, as in, we connect to the rpc endpt
|
kernel_message(
|
||||||
ActiveSub::Local(tokio::spawn(async move {
|
&our,
|
||||||
// await the subscription error and kill it if so
|
km_id,
|
||||||
if let Err(e) = maintain_local_subscription(
|
target.clone(),
|
||||||
&our,
|
rsvp.clone(),
|
||||||
sub_id,
|
false,
|
||||||
rx,
|
None,
|
||||||
&target,
|
EthResponse::Ok,
|
||||||
&rsvp,
|
&send_to_loop,
|
||||||
&send_to_loop,
|
)
|
||||||
)
|
.await;
|
||||||
.await
|
let mut subs = active_subscriptions
|
||||||
{
|
.entry(target.clone())
|
||||||
verbose_print(
|
.or_insert(HashMap::new());
|
||||||
&print_tx,
|
let active_subscriptions = active_subscriptions.clone();
|
||||||
"eth: closed local subscription due to error",
|
match maybe_raw_sub {
|
||||||
)
|
Ok(rx) => {
|
||||||
.await;
|
let our = our.clone();
|
||||||
kernel_message(
|
let send_to_loop = send_to_loop.clone();
|
||||||
|
let print_tx = print_tx.clone();
|
||||||
|
subs.insert(
|
||||||
|
sub_id,
|
||||||
|
// this is a local sub, as in, we connect to the rpc endpoint
|
||||||
|
ActiveSub::Local(tokio::spawn(async move {
|
||||||
|
// await the subscription error and kill it if so
|
||||||
|
if let Err(e) = maintain_local_subscription(
|
||||||
&our,
|
&our,
|
||||||
rand::random(),
|
|
||||||
target.clone(),
|
|
||||||
rsvp,
|
|
||||||
true,
|
|
||||||
None,
|
|
||||||
EthSubResult::Err(e),
|
|
||||||
&send_to_loop,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
active_subscriptions.entry(target).and_modify(|sub_map| {
|
|
||||||
sub_map.remove(&km_id);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
})),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err((provider_node, remote_sub_id)) => {
|
|
||||||
// this is a remote sub, given by a relay node
|
|
||||||
let (sender, rx) = tokio::sync::mpsc::channel(10);
|
|
||||||
let keepalive_km_id = rand::random();
|
|
||||||
let (keepalive_err_sender, keepalive_err_receiver) =
|
|
||||||
tokio::sync::mpsc::channel(1);
|
|
||||||
response_channels.insert(keepalive_km_id, keepalive_err_sender);
|
|
||||||
subs.insert(
|
|
||||||
remote_sub_id,
|
|
||||||
ActiveSub::Remote {
|
|
||||||
provider_node: provider_node.clone(),
|
|
||||||
handle: tokio::spawn(async move {
|
|
||||||
if let Err(e) = maintain_remote_subscription(
|
|
||||||
&our,
|
|
||||||
&provider_node,
|
|
||||||
remote_sub_id,
|
|
||||||
sub_id,
|
sub_id,
|
||||||
keepalive_km_id,
|
|
||||||
rx,
|
rx,
|
||||||
keepalive_err_receiver,
|
|
||||||
&target,
|
&target,
|
||||||
|
&rsvp,
|
||||||
&send_to_loop,
|
&send_to_loop,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
verbose_print(
|
verbose_print(
|
||||||
&print_tx,
|
&print_tx,
|
||||||
"eth: closed subscription with provider node due to error",
|
"eth: closed local subscription due to error",
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
kernel_message(
|
kernel_message(
|
||||||
&our,
|
&our,
|
||||||
rand::random(),
|
rand::random(),
|
||||||
target.clone(),
|
target.clone(),
|
||||||
None,
|
rsvp,
|
||||||
true,
|
true,
|
||||||
None,
|
None,
|
||||||
EthSubResult::Err(e),
|
EthSubResult::Err(e),
|
||||||
@ -128,21 +87,84 @@ pub async fn create_new_subscription(
|
|||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
active_subscriptions.entry(target).and_modify(|sub_map| {
|
active_subscriptions.entry(target).and_modify(|sub_map| {
|
||||||
sub_map.remove(&sub_id);
|
sub_map.remove(&km_id);
|
||||||
});
|
});
|
||||||
response_channels.remove(&keepalive_km_id);
|
|
||||||
}
|
}
|
||||||
}),
|
})),
|
||||||
sender,
|
);
|
||||||
},
|
}
|
||||||
);
|
Err((provider_node, remote_sub_id)) => {
|
||||||
|
// this is a remote sub, given by a relay node
|
||||||
|
let (sender, rx) = tokio::sync::mpsc::channel(10);
|
||||||
|
let keepalive_km_id = rand::random();
|
||||||
|
let (keepalive_err_sender, keepalive_err_receiver) =
|
||||||
|
tokio::sync::mpsc::channel(1);
|
||||||
|
response_channels.insert(keepalive_km_id, keepalive_err_sender);
|
||||||
|
let our = our.clone();
|
||||||
|
let send_to_loop = send_to_loop.clone();
|
||||||
|
let print_tx = print_tx.clone();
|
||||||
|
let response_channels = response_channels.clone();
|
||||||
|
subs.insert(
|
||||||
|
remote_sub_id,
|
||||||
|
ActiveSub::Remote {
|
||||||
|
provider_node: provider_node.clone(),
|
||||||
|
handle: tokio::spawn(async move {
|
||||||
|
if let Err(e) = maintain_remote_subscription(
|
||||||
|
&our,
|
||||||
|
&provider_node,
|
||||||
|
remote_sub_id,
|
||||||
|
sub_id,
|
||||||
|
keepalive_km_id,
|
||||||
|
rx,
|
||||||
|
keepalive_err_receiver,
|
||||||
|
&target,
|
||||||
|
&send_to_loop,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
verbose_print(
|
||||||
|
&print_tx,
|
||||||
|
"eth: closed subscription with provider node due to error",
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
kernel_message(
|
||||||
|
&our,
|
||||||
|
rand::random(),
|
||||||
|
target.clone(),
|
||||||
|
None,
|
||||||
|
true,
|
||||||
|
None,
|
||||||
|
EthSubResult::Err(e),
|
||||||
|
&send_to_loop,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
active_subscriptions.entry(target).and_modify(|sub_map| {
|
||||||
|
sub_map.remove(&sub_id);
|
||||||
|
});
|
||||||
|
response_channels.remove(&keepalive_km_id);
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
sender,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
|
error_message(&our, km_id, target.clone(), e, &send_to_loop).await;
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
error_message(
|
||||||
|
&our,
|
||||||
|
km_id,
|
||||||
|
target.clone(),
|
||||||
|
EthError::RpcTimeout,
|
||||||
|
&send_to_loop,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(e) => {
|
});
|
||||||
error_message(&our, km_id, target.clone(), e, &send_to_loop).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
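The rewritten create_new_subscription above wraps the whole setup in tokio::time::timeout, which is why its result is matched three ways: the outer Err is the 30-second timeout, Ok(Err(..)) is a setup failure, and Ok(Ok(..)) carries the subscription. A minimal sketch of that shape, with a dummy setup() standing in for build_subscription:

    use std::time::Duration;

    // stand-in for build_subscription
    async fn setup() -> Result<u64, String> {
        Ok(42)
    }

    #[tokio::main]
    async fn main() {
        match tokio::time::timeout(Duration::from_secs(30), setup()).await {
            Ok(Ok(sub_id)) => println!("subscription ready: {sub_id}"),
            Ok(Err(e)) => println!("setup failed: {e}"),
            Err(_elapsed) => println!("setup timed out"),
        }
    }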
|
|
||||||
/// terrible abuse of result in return type, yes, sorry
|
/// terrible abuse of result in return type, yes, sorry
|
||||||
@ -171,38 +193,71 @@ async fn build_subscription(
|
|||||||
// first, try any url providers we have for this chain,
|
// first, try any url providers we have for this chain,
|
||||||
// then if we have none or they all fail, go to node providers.
|
// then if we have none or they all fail, go to node providers.
|
||||||
// finally, if no provider works, return an error.
|
// finally, if no provider works, return an error.
|
||||||
for url_provider in &mut aps.urls {
|
|
||||||
|
// bump the successful provider to the front of the list for future requests
|
||||||
|
for (index, url_provider) in aps.urls.iter_mut().enumerate() {
|
||||||
let pubsub = match &url_provider.pubsub {
|
let pubsub = match &url_provider.pubsub {
|
||||||
Some(pubsub) => pubsub,
|
Some(pubsub) => pubsub,
|
||||||
None => {
|
None => {
|
||||||
if let Ok(()) = activate_url_provider(url_provider).await {
|
if let Ok(()) = activate_url_provider(url_provider).await {
|
||||||
verbose_print(print_tx, "eth: activated a url provider").await;
|
verbose_print(
|
||||||
|
&print_tx,
|
||||||
|
&format!("eth: activated url provider {}", url_provider.url),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
url_provider.pubsub.as_ref().unwrap()
|
url_provider.pubsub.as_ref().unwrap()
|
||||||
} else {
|
} else {
|
||||||
|
verbose_print(
|
||||||
|
&print_tx,
|
||||||
|
&format!("eth: could not activate url provider {}", url_provider.url),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let kind = serde_json::to_value(&kind).unwrap();
|
let kind = serde_json::to_value(&kind).unwrap();
|
||||||
let params = serde_json::to_value(¶ms).unwrap();
|
let params = serde_json::to_value(¶ms).unwrap();
|
||||||
if let Ok(id) = pubsub
|
match pubsub
|
||||||
.inner()
|
.inner()
|
||||||
.prepare("eth_subscribe", [kind, params])
|
.prepare("eth_subscribe", [kind, params])
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
let rx = pubsub.inner().get_raw_subscription(id).await;
|
Ok(id) => {
|
||||||
return Ok(Ok(rx));
|
let rx = pubsub.inner().get_raw_subscription(id).await;
|
||||||
|
let successful_provider = aps.urls.remove(index);
|
||||||
|
aps.urls.insert(0, successful_provider);
|
||||||
|
return Ok(Ok(rx));
|
||||||
|
}
|
||||||
|
Err(rpc_error) => {
|
||||||
|
verbose_print(
|
||||||
|
&print_tx,
|
||||||
|
&format!(
|
||||||
|
"eth: got error from url provider {}: {}",
|
||||||
|
url_provider.url, rpc_error
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
// this provider failed and needs to be reset
|
||||||
|
url_provider.pubsub = None;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// this provider failed and needs to be reset
|
|
||||||
url_provider.pubsub = None;
|
|
||||||
}
|
}
|
||||||
// now we need a response channel
|
|
||||||
let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1);
|
let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1);
|
||||||
response_channels.insert(km_id, sender);
|
response_channels.insert(km_id, sender);
|
||||||
// we need to create our own unique sub id because in the remote provider node,
|
// we need to create our own unique sub id because in the remote provider node,
|
||||||
// all subs will be identified under our process address.
|
// all subs will be identified under our process address.
|
||||||
let remote_sub_id = rand::random();
|
let remote_sub_id = rand::random();
|
||||||
for node_provider in &mut aps.nodes {
|
for node_provider in &mut aps.nodes {
|
||||||
|
verbose_print(
|
||||||
|
&print_tx,
|
||||||
|
&format!(
|
||||||
|
"eth: attempting to fulfill via {}",
|
||||||
|
node_provider.kns_update.name
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
match forward_to_node_provider(
|
match forward_to_node_provider(
|
||||||
&our,
|
&our,
|
||||||
km_id,
|
km_id,
|
||||||
@ -232,7 +287,7 @@ async fn build_subscription(
|
|||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
response_channels.remove(&km_id);
|
response_channels.remove(&km_id);
|
||||||
return Ok(Err((node_provider.name.clone(), remote_sub_id)));
|
return Ok(Err((node_provider.kns_update.name.clone(), remote_sub_id)));
|
||||||
}
|
}
|
||||||
EthResponse::Response { .. } => {
|
EthResponse::Response { .. } => {
|
||||||
// the response to a SubscribeLogs request must be an 'ok'
|
// the response to a SubscribeLogs request must be an 'ok'
|
||||||
|
@ -265,7 +265,7 @@ async fn listen_to_stream(
|
|||||||
match message {
|
match message {
|
||||||
Ok(msg) => {
|
Ok(msg) => {
|
||||||
// Handle different types of incoming WebSocket messages
|
// Handle different types of incoming WebSocket messages
|
||||||
let (body, blob) = match msg {
|
let (body, blob, should_exit) = match msg {
|
||||||
TungsteniteMessage::Text(text) => (
|
TungsteniteMessage::Text(text) => (
|
||||||
HttpClientRequest::WebSocketPush {
|
HttpClientRequest::WebSocketPush {
|
||||||
channel_id,
|
channel_id,
|
||||||
@ -275,6 +275,7 @@ async fn listen_to_stream(
|
|||||||
mime: Some("text/plain".into()),
|
mime: Some("text/plain".into()),
|
||||||
bytes: text.into_bytes(),
|
bytes: text.into_bytes(),
|
||||||
}),
|
}),
|
||||||
|
false,
|
||||||
),
|
),
|
||||||
TungsteniteMessage::Binary(bytes) => (
|
TungsteniteMessage::Binary(bytes) => (
|
||||||
HttpClientRequest::WebSocketPush {
|
HttpClientRequest::WebSocketPush {
|
||||||
@ -285,12 +286,13 @@ async fn listen_to_stream(
|
|||||||
mime: Some("application/octet-stream".into()),
|
mime: Some("application/octet-stream".into()),
|
||||||
bytes,
|
bytes,
|
||||||
}),
|
}),
|
||||||
|
false,
|
||||||
),
|
),
|
||||||
TungsteniteMessage::Close(_) => {
|
TungsteniteMessage::Close(_) => {
|
||||||
// remove the websocket from the map
|
// remove the websocket from the map
|
||||||
ws_streams.remove(&(target.process.clone(), channel_id));
|
ws_streams.remove(&(target.process.clone(), channel_id));
|
||||||
|
|
||||||
(HttpClientRequest::WebSocketClose { channel_id }, None)
|
(HttpClientRequest::WebSocketClose { channel_id }, None, true)
|
||||||
}
|
}
|
||||||
TungsteniteMessage::Ping(_) => (
|
TungsteniteMessage::Ping(_) => (
|
||||||
HttpClientRequest::WebSocketPush {
|
HttpClientRequest::WebSocketPush {
|
||||||
@ -298,6 +300,7 @@ async fn listen_to_stream(
|
|||||||
message_type: WsMessageType::Ping,
|
message_type: WsMessageType::Ping,
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
|
false,
|
||||||
),
|
),
|
||||||
TungsteniteMessage::Pong(_) => (
|
TungsteniteMessage::Pong(_) => (
|
||||||
HttpClientRequest::WebSocketPush {
|
HttpClientRequest::WebSocketPush {
|
||||||
@ -305,6 +308,7 @@ async fn listen_to_stream(
|
|||||||
message_type: WsMessageType::Pong,
|
message_type: WsMessageType::Pong,
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
|
false,
|
||||||
),
|
),
|
||||||
_ => {
|
_ => {
|
||||||
// should never get a TungsteniteMessage::Frame, ignore if we do
|
// should never get a TungsteniteMessage::Frame, ignore if we do
|
||||||
@ -312,15 +316,21 @@ async fn listen_to_stream(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
handle_ws_message(
|
if ws_streams.contains_key(&(target.process.clone(), channel_id)) || should_exit {
|
||||||
our.clone(),
|
handle_ws_message(
|
||||||
id,
|
our.clone(),
|
||||||
target.clone(),
|
id,
|
||||||
body,
|
target.clone(),
|
||||||
blob,
|
body,
|
||||||
send_to_loop.clone(),
|
blob,
|
||||||
)
|
send_to_loop.clone(),
|
||||||
.await;
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
if should_exit {
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
println!("WebSocket Client Error ({}): {:?}", channel_id, e);
|
println!("WebSocket Client Error ({}): {:?}", channel_id, e);
|
||||||
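The listen_to_stream change above threads a should_exit flag through every arm of the incoming-message match so that a Close frame is both forwarded and then breaks the read loop. A reduced sketch of that tuple-returning match over tokio-tungstenite messages (the string body is a placeholder for the real request/blob pair):

    use tokio_tungstenite::tungstenite::Message as TungsteniteMessage;

    // returns (description, should_exit), mirroring the (body, blob, should_exit) tuple above
    fn classify(msg: TungsteniteMessage) -> (String, bool) {
        match msg {
            TungsteniteMessage::Text(text) => (format!("push {} bytes of text", text.len()), false),
            TungsteniteMessage::Binary(bytes) => (format!("push {} raw bytes", bytes.len()), false),
            TungsteniteMessage::Close(_) => ("close".to_string(), true),
            TungsteniteMessage::Ping(_) | TungsteniteMessage::Pong(_) => ("keepalive".to_string(), false),
            // a raw Frame should never be surfaced here; ignore it if it is
            _ => ("ignored frame".to_string(), false),
        }
    }

The caller then forwards the message and breaks out of the loop when should_exit is true, so the stream task ends as soon as the peer closes.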
@ -633,7 +643,7 @@ async fn close_ws_connection(
|
|||||||
ws_streams: WebSocketStreams,
|
ws_streams: WebSocketStreams,
|
||||||
_print_tx: PrintSender,
|
_print_tx: PrintSender,
|
||||||
) -> Result<HttpClientResponse, HttpClientError> {
|
) -> Result<HttpClientResponse, HttpClientError> {
|
||||||
let Some(mut ws_sink) = ws_streams.get_mut(&(target.process.clone(), channel_id)) else {
|
let Some((_, mut ws_sink)) = ws_streams.remove(&(target.process.clone(), channel_id)) else {
|
||||||
return Err(HttpClientError::WsCloseFailed { channel_id });
|
return Err(HttpClientError::WsCloseFailed { channel_id });
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1,10 +1,12 @@
|
|||||||
use crate::http::server_types::*;
|
use crate::http::server_types::*;
|
||||||
use crate::http::utils::*;
|
use crate::http::utils::*;
|
||||||
use crate::{keygen, register};
|
use crate::keygen;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
|
||||||
use dashmap::DashMap;
|
use dashmap::DashMap;
|
||||||
use futures::{SinkExt, StreamExt};
|
use futures::{SinkExt, StreamExt};
|
||||||
use http::uri::Authority;
|
use http::uri::Authority;
|
||||||
|
use lib::types::core::*;
|
||||||
use route_recognizer::Router;
|
use route_recognizer::Router;
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
@ -15,8 +17,6 @@ use warp::http::{header::HeaderValue, StatusCode};
|
|||||||
use warp::ws::{WebSocket, Ws};
|
use warp::ws::{WebSocket, Ws};
|
||||||
use warp::{Filter, Reply};
|
use warp::{Filter, Reply};
|
||||||
|
|
||||||
use lib::types::core::*;
|
|
||||||
|
|
||||||
#[cfg(not(feature = "simulation-mode"))]
|
#[cfg(not(feature = "simulation-mode"))]
|
||||||
const HTTP_SELF_IMPOSED_TIMEOUT: u64 = 15;
|
const HTTP_SELF_IMPOSED_TIMEOUT: u64 = 15;
|
||||||
#[cfg(feature = "simulation-mode")]
|
#[cfg(feature = "simulation-mode")]
|
||||||
@ -331,7 +331,7 @@ async fn login_handler(
|
|||||||
|
|
||||||
match keygen::decode_keyfile(&encoded_keyfile, &info.password_hash) {
|
match keygen::decode_keyfile(&encoded_keyfile, &info.password_hash) {
|
||||||
Ok(keyfile) => {
|
Ok(keyfile) => {
|
||||||
let token = match register::generate_jwt(&keyfile.jwt_secret_bytes, our.as_ref()) {
|
let token = match keygen::generate_jwt(&keyfile.jwt_secret_bytes, our.as_ref()) {
|
||||||
Some(token) => token,
|
Some(token) => token,
|
||||||
None => {
|
None => {
|
||||||
return Ok(warp::reply::with_status(
|
return Ok(warp::reply::with_status(
|
||||||
@ -343,14 +343,14 @@ async fn login_handler(
|
|||||||
};
|
};
|
||||||
|
|
||||||
let mut response = warp::reply::with_status(
|
let mut response = warp::reply::with_status(
|
||||||
warp::reply::json(&base64::encode(encoded_keyfile.to_vec())),
|
warp::reply::json(&base64_standard.encode(encoded_keyfile.to_vec())),
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
)
|
)
|
||||||
.into_response();
|
.into_response();
|
||||||
|
|
||||||
match HeaderValue::from_str(&format!("kinode-auth_{}={};", our.as_ref(), &token)) {
|
match HeaderValue::from_str(&format!("kinode-auth_{}={};", our.as_ref(), &token)) {
|
||||||
Ok(v) => {
|
Ok(v) => {
|
||||||
response.headers_mut().append(http::header::SET_COOKIE, v);
|
response.headers_mut().append("set-cookie", v);
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
Err(_) => Ok(warp::reply::with_status(
|
Err(_) => Ok(warp::reply::with_status(
|
||||||
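Several hunks in this file migrate from base64 0.13's free functions (base64::encode / base64::decode) to the 0.22 Engine API imported above as base64_standard. For reference, the new call shape is roughly:

    use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};

    fn roundtrip(bytes: &[u8]) -> Result<Vec<u8>, base64::DecodeError> {
        // encoding and decoding now go through an explicit Engine instead of free functions
        let encoded: String = base64_standard.encode(bytes);
        base64_standard.decode(encoded)
    }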
@ -372,7 +372,7 @@ async fn ws_handler(
|
|||||||
ws_connection: Ws,
|
ws_connection: Ws,
|
||||||
socket_addr: Option<SocketAddr>,
|
socket_addr: Option<SocketAddr>,
|
||||||
path: warp::path::FullPath,
|
path: warp::path::FullPath,
|
||||||
host: Option<Authority>,
|
host: Option<warp::host::Authority>,
|
||||||
headers: warp::http::HeaderMap,
|
headers: warp::http::HeaderMap,
|
||||||
our: Arc<String>,
|
our: Arc<String>,
|
||||||
jwt_secret_bytes: Arc<Vec<u8>>,
|
jwt_secret_bytes: Arc<Vec<u8>>,
|
||||||
@ -468,7 +468,7 @@ async fn ws_handler(
|
|||||||
async fn http_handler(
|
async fn http_handler(
|
||||||
method: warp::http::Method,
|
method: warp::http::Method,
|
||||||
socket_addr: Option<SocketAddr>,
|
socket_addr: Option<SocketAddr>,
|
||||||
host: Option<Authority>,
|
host: Option<warp::host::Authority>,
|
||||||
path: warp::path::FullPath,
|
path: warp::path::FullPath,
|
||||||
query_params: HashMap<String, String>,
|
query_params: HashMap<String, String>,
|
||||||
headers: warp::http::HeaderMap,
|
headers: warp::http::HeaderMap,
|
||||||
@ -529,7 +529,7 @@ async fn http_handler(
|
|||||||
"Location",
|
"Location",
|
||||||
format!(
|
format!(
|
||||||
"http://{}/login",
|
"http://{}/login",
|
||||||
host.unwrap_or(Authority::from_static("localhost"))
|
host.unwrap_or(warp::host::Authority::from_static("localhost"))
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
.body(vec![])
|
.body(vec![])
|
||||||
@ -619,7 +619,7 @@ async fn http_handler(
|
|||||||
method: method.to_string(),
|
method: method.to_string(),
|
||||||
url: format!(
|
url: format!(
|
||||||
"http://{}{}",
|
"http://{}{}",
|
||||||
host.unwrap_or(Authority::from_static("localhost")),
|
host.unwrap_or(warp::host::Authority::from_static("localhost")),
|
||||||
original_path
|
original_path
|
||||||
),
|
),
|
||||||
bound_path: bound_path.path.clone(),
|
bound_path: bound_path.path.clone(),
|
||||||
@ -735,7 +735,7 @@ async fn handle_rpc_message(
|
|||||||
|
|
||||||
let blob: Option<LazyLoadBlob> = match rpc_message.data {
|
let blob: Option<LazyLoadBlob> = match rpc_message.data {
|
||||||
None => None,
|
None => None,
|
||||||
Some(b64_bytes) => match base64::decode(b64_bytes) {
|
Some(b64_bytes) => match base64_standard.decode(b64_bytes) {
|
||||||
Ok(bytes) => Some(LazyLoadBlob {
|
Ok(bytes) => Some(LazyLoadBlob {
|
||||||
mime: rpc_message.mime,
|
mime: rpc_message.mime,
|
||||||
bytes,
|
bytes,
|
||||||
@ -1069,7 +1069,7 @@ async fn handle_app_message(
|
|||||||
if path == "/rpc:distro:sys/message" {
|
if path == "/rpc:distro:sys/message" {
|
||||||
let blob = km.lazy_load_blob.map(|p| LazyLoadBlob {
|
let blob = km.lazy_load_blob.map(|p| LazyLoadBlob {
|
||||||
mime: p.mime,
|
mime: p.mime,
|
||||||
bytes: base64::encode(p.bytes).into_bytes(),
|
bytes: base64_standard.encode(p.bytes).into_bytes(),
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut default_headers = HashMap::new();
|
let mut default_headers = HashMap::new();
|
||||||
@ -1143,8 +1143,14 @@ async fn handle_app_message(
|
|||||||
.send(Printout {
|
.send(Printout {
|
||||||
verbosity: 2,
|
verbosity: 2,
|
||||||
content: format!(
|
content: format!(
|
||||||
"binding path {path} for {}, authenticated={authenticated}, local={local_only}, cached={cache}",
|
"http: binding {path}, {}, {}, {}",
|
||||||
km.source.process
|
if authenticated {
|
||||||
|
"authenticated"
|
||||||
|
} else {
|
||||||
|
"unauthenticated"
|
||||||
|
},
|
||||||
|
if local_only { "local only" } else { "open" },
|
||||||
|
if cache { "cached" } else { "dynamic" },
|
||||||
),
|
),
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
|
@ -607,7 +607,7 @@ pub async fn make_process_loop(
|
|||||||
};
|
};
|
||||||
|
|
||||||
// the process will run until it returns from init() or crashes
|
// the process will run until it returns from init() or crashes
|
||||||
let is_error = match bindings
|
match bindings
|
||||||
.call_init(&mut store, &metadata.our.to_string())
|
.call_init(&mut store, &metadata.our.to_string())
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
@ -618,7 +618,6 @@ pub async fn make_process_loop(
|
|||||||
content: format!("process {} returned without error", metadata.our.process),
|
content: format!("process {} returned without error", metadata.our.process),
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
false
|
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
let stderr = wasi_stderr.contents().into();
|
let stderr = wasi_stderr.contents().into();
|
||||||
@ -632,7 +631,6 @@ pub async fn make_process_loop(
|
|||||||
),
|
),
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
true
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -715,73 +713,61 @@ pub async fn make_process_loop(
|
|||||||
lazy_load_blob: None,
|
lazy_load_blob: None,
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
if is_error {
|
let _ = send_to_terminal
|
||||||
let _ = send_to_terminal
|
.send(t::Printout {
|
||||||
.send(t::Printout {
|
verbosity: 1,
|
||||||
verbosity: 0,
|
content: format!(
|
||||||
content: format!(
|
"firing OnExit::Restart for process {}",
|
||||||
"skipping OnExit::Restart for process {} due to crash",
|
metadata.our.process
|
||||||
metadata.our.process
|
),
|
||||||
),
|
})
|
||||||
})
|
.await;
|
||||||
.await;
|
send_to_loop
|
||||||
} else {
|
.send(t::KernelMessage {
|
||||||
let _ = send_to_terminal
|
id: rand::random(),
|
||||||
.send(t::Printout {
|
source: our_kernel.clone(),
|
||||||
verbosity: 1,
|
target: our_kernel.clone(),
|
||||||
content: format!(
|
rsvp: None,
|
||||||
"firing OnExit::Restart for process {}",
|
message: t::Message::Request(t::Request {
|
||||||
metadata.our.process
|
inherit: false,
|
||||||
),
|
expects_response: None,
|
||||||
})
|
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
|
||||||
.await;
|
id: metadata.our.process.clone(),
|
||||||
send_to_loop
|
wasm_bytes_handle: metadata.wasm_bytes_handle,
|
||||||
.send(t::KernelMessage {
|
wit_version: Some(metadata.wit_version),
|
||||||
id: rand::random(),
|
on_exit: metadata.on_exit,
|
||||||
source: our_kernel.clone(),
|
initial_capabilities,
|
||||||
target: our_kernel.clone(),
|
public: metadata.public,
|
||||||
rsvp: None,
|
})
|
||||||
message: t::Message::Request(t::Request {
|
.unwrap(),
|
||||||
inherit: false,
|
metadata: None,
|
||||||
expects_response: None,
|
capabilities: vec![],
|
||||||
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
|
}),
|
||||||
id: metadata.our.process.clone(),
|
lazy_load_blob: Some(t::LazyLoadBlob {
|
||||||
wasm_bytes_handle: metadata.wasm_bytes_handle,
|
mime: None,
|
||||||
wit_version: Some(metadata.wit_version),
|
bytes: wasm_bytes,
|
||||||
on_exit: metadata.on_exit,
|
}),
|
||||||
initial_capabilities,
|
})
|
||||||
public: metadata.public,
|
.await?;
|
||||||
})
|
send_to_loop
|
||||||
.unwrap(),
|
.send(t::KernelMessage {
|
||||||
metadata: None,
|
id: rand::random(),
|
||||||
capabilities: vec![],
|
source: our_kernel.clone(),
|
||||||
}),
|
target: our_kernel.clone(),
|
||||||
lazy_load_blob: Some(t::LazyLoadBlob {
|
rsvp: None,
|
||||||
mime: None,
|
message: t::Message::Request(t::Request {
|
||||||
bytes: wasm_bytes,
|
inherit: false,
|
||||||
}),
|
expects_response: None,
|
||||||
})
|
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
|
||||||
.await?;
|
metadata.our.process.clone(),
|
||||||
send_to_loop
|
))
|
||||||
.send(t::KernelMessage {
|
.unwrap(),
|
||||||
id: rand::random(),
|
metadata: None,
|
||||||
source: our_kernel.clone(),
|
capabilities: vec![],
|
||||||
target: our_kernel.clone(),
|
}),
|
||||||
rsvp: None,
|
lazy_load_blob: None,
|
||||||
message: t::Message::Request(t::Request {
|
})
|
||||||
inherit: false,
|
.await?;
|
||||||
expects_response: None,
|
|
||||||
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
|
|
||||||
metadata.our.process.clone(),
|
|
||||||
))
|
|
||||||
.unwrap(),
|
|
||||||
metadata: None,
|
|
||||||
capabilities: vec![],
|
|
||||||
}),
|
|
||||||
lazy_load_blob: None,
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// if requests, fire them
|
// if requests, fire them
|
||||||
// even in death, a process can only message processes it has capabilities for
|
// even in death, a process can only message processes it has capabilities for
|
||||||
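The restart path above is a two-request handshake with the kernel: an InitializeProcess request carrying the wasm bytes as the lazy-load blob, followed by a RunProcess request. Stripped of the KernelMessage plumbing, the ordering is the important part; a self-contained sketch with stand-in types (not the real KernelCommand):

    // stand-in for the two kernel commands used above
    #[derive(Debug)]
    enum Command {
        InitializeProcess { id: String, wasm_bytes: Vec<u8> },
        RunProcess(String),
    }

    fn restart_sequence(id: &str, wasm_bytes: Vec<u8>) -> Vec<Command> {
        // the kernel must re-initialize the process before it can be asked to run again
        vec![
            Command::InitializeProcess { id: id.to_string(), wasm_bytes },
            Command::RunProcess(id.to_string()),
        ]
    }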
|
@ -3,13 +3,17 @@ use aes_gcm::{
|
|||||||
Aes256Gcm, Key,
|
Aes256Gcm, Key,
|
||||||
};
|
};
|
||||||
use alloy_primitives::keccak256;
|
use alloy_primitives::keccak256;
|
||||||
|
use anyhow::Result;
|
||||||
use digest::generic_array::GenericArray;
|
use digest::generic_array::GenericArray;
|
||||||
|
use hmac::Hmac;
|
||||||
|
use jwt::SignWithKey;
|
||||||
use lib::types::core::Keyfile;
|
use lib::types::core::Keyfile;
|
||||||
use ring::pbkdf2;
|
use ring::pbkdf2;
|
||||||
use ring::pkcs8::Document;
|
use ring::pkcs8::Document;
|
||||||
use ring::rand::SystemRandom;
|
use ring::rand::SystemRandom;
|
||||||
use ring::signature::{self, KeyPair};
|
use ring::signature::{self, KeyPair};
|
||||||
use ring::{digest as ring_digest, rand::SecureRandom};
|
use ring::{digest as ring_digest, rand::SecureRandom};
|
||||||
|
use sha2::Sha256;
|
||||||
use std::num::NonZeroU32;
|
use std::num::NonZeroU32;
|
||||||
|
|
||||||
type DiskKey = [u8; CREDENTIAL_LEN];
|
type DiskKey = [u8; CREDENTIAL_LEN];
|
||||||
@ -108,6 +112,23 @@ pub fn decode_keyfile(keyfile: &[u8], password: &str) -> Result<Keyfile, &'stati
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn generate_jwt(jwt_secret_bytes: &[u8], username: &str) -> Option<String> {
|
||||||
|
let jwt_secret: Hmac<Sha256> = match Hmac::new_from_slice(jwt_secret_bytes) {
|
||||||
|
Ok(secret) => secret,
|
||||||
|
Err(_) => return None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let claims = crate::http::server_types::JwtClaims {
|
||||||
|
username: username.to_string(),
|
||||||
|
expiration: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
match claims.sign_with_key(&jwt_secret) {
|
||||||
|
Ok(token) => Some(token),
|
||||||
|
Err(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn get_username_and_routers(keyfile: &[u8]) -> Result<(String, Vec<String>), &'static str> {
|
pub fn get_username_and_routers(keyfile: &[u8]) -> Result<(String, Vec<String>), &'static str> {
|
||||||
let (username, routers, _salt, _key_enc, _jwt_enc) =
|
let (username, routers, _salt, _key_enc, _jwt_enc) =
|
||||||
bincode::deserialize::<(String, Vec<String>, Vec<u8>, Vec<u8>, Vec<u8>)>(keyfile)
|
bincode::deserialize::<(String, Vec<String>, Vec<u8>, Vec<u8>, Vec<u8>)>(keyfile)
|
||||||
|
@ -1,14 +1,13 @@
|
|||||||
|
#![feature(async_closure)]
|
||||||
#![feature(btree_extract_if)]
|
#![feature(btree_extract_if)]
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use clap::{arg, value_parser, Command};
|
use clap::{arg, value_parser, Command};
|
||||||
use lib::types::core::*;
|
use lib::types::core::*;
|
||||||
#[cfg(feature = "simulation-mode")]
|
//#[cfg(feature = "simulation-mode")]
|
||||||
use ring::{rand::SystemRandom, signature, signature::KeyPair};
|
use ring::{rand::SystemRandom, signature, signature::KeyPair};
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::mpsc;
|
||||||
use tokio::{fs, time::timeout};
|
|
||||||
|
|
||||||
mod eth;
|
mod eth;
|
||||||
mod http;
|
mod http;
|
||||||
@ -34,152 +33,53 @@ const VFS_CHANNEL_CAPACITY: usize = 1_000;
|
|||||||
const CAP_CHANNEL_CAPACITY: usize = 1_000;
|
const CAP_CHANNEL_CAPACITY: usize = 1_000;
|
||||||
const KV_CHANNEL_CAPACITY: usize = 1_000;
|
const KV_CHANNEL_CAPACITY: usize = 1_000;
|
||||||
const SQLITE_CHANNEL_CAPACITY: usize = 1_000;
|
const SQLITE_CHANNEL_CAPACITY: usize = 1_000;
|
||||||
|
|
||||||
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||||
|
|
||||||
/// default routers as a eth-provider fallback
|
/// default routers as a eth-provider fallback
|
||||||
const DEFAULT_PROVIDERS_MAINNET: &str = include_str!("eth/default_providers_mainnet.json");
|
const DEFAULT_ETH_PROVIDERS: &str = include_str!("eth/default_providers_mainnet.json");
|
||||||
|
#[cfg(not(feature = "simulation-mode"))]
|
||||||
async fn serve_register_fe(
|
const CHAIN_ID: u64 = 10;
|
||||||
home_directory_path: &str,
|
#[cfg(feature = "simulation-mode")]
|
||||||
our_ip: String,
|
const CHAIN_ID: u64 = 31337;
|
||||||
ws_networking: (tokio::net::TcpListener, bool),
|
|
||||||
http_server_port: u16,
|
|
||||||
maybe_rpc: Option<String>,
|
|
||||||
) -> (Identity, Vec<u8>, Keyfile) {
|
|
||||||
// check if we have keys saved on disk, encrypted
|
|
||||||
// if so, prompt user for "password" to decrypt with
|
|
||||||
|
|
||||||
// once password is received, use to decrypt local keys file,
|
|
||||||
// and pass the keys into boot process as is done in registration.
|
|
||||||
|
|
||||||
// NOTE: when we log in, we MUST check the PKI to make sure our
|
|
||||||
// information matches what we think it should be. this includes
|
|
||||||
// username, networking key, and routing info.
|
|
||||||
// if any do not match, we should prompt user to create a "transaction"
|
|
||||||
// that updates their PKI info on-chain.
|
|
||||||
let (kill_tx, kill_rx) = oneshot::channel::<bool>();
|
|
||||||
|
|
||||||
let disk_keyfile: Option<Vec<u8>> = fs::read(format!("{}/.keys", home_directory_path))
|
|
||||||
.await
|
|
||||||
.ok();
|
|
||||||
|
|
||||||
let (tx, mut rx) = mpsc::channel::<(Identity, Keyfile, Vec<u8>)>(1);
|
|
||||||
let (our, decoded_keyfile, encoded_keyfile) = tokio::select! {
|
|
||||||
_ = register::register(
|
|
||||||
tx,
|
|
||||||
kill_rx,
|
|
||||||
our_ip,
|
|
||||||
ws_networking,
|
|
||||||
http_server_port,
|
|
||||||
disk_keyfile,
|
|
||||||
maybe_rpc) => {
|
|
||||||
panic!("registration failed")
|
|
||||||
}
|
|
||||||
Some((our, decoded_keyfile, encoded_keyfile)) = rx.recv() => {
|
|
||||||
(our, decoded_keyfile, encoded_keyfile)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
fs::write(
|
|
||||||
format!("{}/.keys", home_directory_path),
|
|
||||||
encoded_keyfile.clone(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let _ = kill_tx.send(true);
|
|
||||||
|
|
||||||
(our, encoded_keyfile, decoded_keyfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let app = Command::new("kinode")
|
let app = build_command();
|
||||||
.version(VERSION)
|
|
||||||
.author("Kinode DAO: https://github.com/kinode-dao")
|
|
||||||
.about("A General Purpose Sovereign Cloud Computing Platform")
|
|
||||||
.arg(arg!([home] "Path to home directory").required(true))
|
|
||||||
.arg(
|
|
||||||
arg!(--port <PORT> "Port to bind [default: first unbound at or above 8080]")
|
|
||||||
.value_parser(value_parser!(u16)),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
arg!(--"ws-port" <PORT> "Kinode internal WebSockets protocol port [default: first unbound at or above 9000]")
|
|
||||||
.alias("network-router-port")
|
|
||||||
.value_parser(value_parser!(u16)),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
arg!(--verbosity <VERBOSITY> "Verbosity level: higher is more verbose")
|
|
||||||
.default_value("0")
|
|
||||||
.value_parser(value_parser!(u8)),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
arg!(--"reveal-ip" "If set to false, as an indirect node, always use routers to connect to other nodes.")
|
|
||||||
.default_value("true")
|
|
||||||
.value_parser(value_parser!(bool)),
|
|
||||||
)
|
|
||||||
.arg(arg!(--rpc <RPC> "Add a WebSockets RPC URL at boot"));
|
|
||||||
|
|
||||||
#[cfg(feature = "simulation-mode")]
|
|
||||||
let app = app
|
|
||||||
.arg(arg!(--password <PASSWORD> "Networking password"))
|
|
||||||
.arg(arg!(--"fake-node-name" <NAME> "Name of fake node to boot"))
|
|
||||||
.arg(arg!(--"networking-pk" <NETPK> "Fake networking private key"))
|
|
||||||
.arg(
|
|
||||||
arg!(--detached <IS_DETACHED> "Run in detached mode (don't accept input)")
|
|
||||||
.action(clap::ArgAction::SetTrue),
|
|
||||||
);
|
|
||||||
// add arg for fakechain bootup w/ kit?
|
|
||||||
let fakenode = cfg!(feature = "simulation-mode");
|
|
||||||
|
|
||||||
let matches = app.get_matches();
|
let matches = app.get_matches();
|
||||||
|
let home_directory_path = matches
|
||||||
let home_directory_path = matches.get_one::<String>("home").unwrap();
|
.get_one::<String>("home")
|
||||||
|
.expect("home directory required");
|
||||||
let http_port = matches.get_one::<u16>("port");
|
create_home_directory(&home_directory_path).await;
|
||||||
|
let http_server_port = set_http_server_port(matches.get_one::<u16>("port")).await;
|
||||||
let ws_networking_port = matches.get_one::<u16>("ws-port");
|
let ws_networking_port = matches.get_one::<u16>("ws-port");
|
||||||
|
let verbose_mode = *matches
|
||||||
|
.get_one::<u8>("verbosity")
|
||||||
|
.expect("verbosity required");
|
||||||
|
let rpc = matches.get_one::<String>("rpc");
|
||||||
|
|
||||||
|
// if we are in sim-mode, detached determines whether terminal is interactive
|
||||||
#[cfg(not(feature = "simulation-mode"))]
|
#[cfg(not(feature = "simulation-mode"))]
|
||||||
let is_detached = false;
|
let is_detached = false;
|
||||||
|
|
||||||
#[cfg(feature = "simulation-mode")]
|
#[cfg(feature = "simulation-mode")]
|
||||||
let (password, fake_node_name, is_detached, net_pk) = (
|
let (password, fake_node_name, is_detached) = (
|
||||||
matches.get_one::<String>("password"),
|
matches.get_one::<String>("password"),
|
||||||
matches.get_one::<String>("fake-node-name"),
|
matches.get_one::<String>("fake-node-name"),
|
||||||
*matches.get_one::<bool>("detached").unwrap(),
|
*matches.get_one::<bool>("detached").unwrap(),
|
||||||
matches.get_one::<String>("networking-pk"),
|
|
||||||
);
|
);
|
||||||
|
|
||||||
let verbose_mode = *matches.get_one::<u8>("verbosity").unwrap();
|
|
||||||
|
|
||||||
// check .testnet file for true/false in order to enforce testnet mode on subsequent boots of this node
|
|
||||||
match fs::read(format!("{}/.testnet", home_directory_path)).await {
|
|
||||||
Ok(contents) => {
|
|
||||||
if contents == b"true" {
|
|
||||||
println!("\x1b[38;5;196mfatal: this is a deprecated testnet node, either boot a fakenode or a real one. exiting.\x1b[0m");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Err(e) = fs::create_dir_all(home_directory_path).await {
|
|
||||||
panic!("failed to create home directory: {:?}", e);
|
|
||||||
}
|
|
||||||
println!("home at {}\r", home_directory_path);
|
|
||||||
|
|
||||||
// default eth providers/routers
|
// default eth providers/routers
|
||||||
let mut eth_provider_config: lib::eth::SavedConfigs =
|
let mut eth_provider_config: lib::eth::SavedConfigs =
|
||||||
match fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await {
|
match tokio::fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await {
|
||||||
Ok(contents) => {
|
Ok(contents) => {
|
||||||
println!("loaded saved eth providers\r");
|
println!("loaded saved eth providers\r");
|
||||||
serde_json::from_str(&contents).unwrap()
|
serde_json::from_str(&contents).unwrap()
|
||||||
}
|
}
|
||||||
Err(_) => serde_json::from_str(DEFAULT_PROVIDERS_MAINNET).unwrap(),
|
Err(_) => serde_json::from_str(DEFAULT_ETH_PROVIDERS).unwrap(),
|
||||||
};
|
};
|
||||||
if let Some(rpc) = matches.get_one::<String>("rpc") {
|
if let Some(rpc) = matches.get_one::<String>("rpc") {
|
||||||
eth_provider_config.push(lib::eth::ProviderConfig {
|
eth_provider_config.push(lib::eth::ProviderConfig {
|
||||||
chain_id: if fakenode { 31337 } else { 10 },
|
chain_id: CHAIN_ID,
|
||||||
trusted: true,
|
trusted: true,
|
||||||
provider: lib::eth::NodeOrRpcUrl::RpcUrl(rpc.to_string()),
|
provider: lib::eth::NodeOrRpcUrl::RpcUrl(rpc.to_string()),
|
||||||
});
|
});
|
||||||
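The provider bootstrap above reads .eth_providers from the home directory and falls back to the compiled-in defaults when the file is missing. That read-or-default pattern, generalized into a small helper (the helper itself is illustrative, not part of this diff):

    use serde::de::DeserializeOwned;

    async fn load_or_default<T: DeserializeOwned>(path: &str, default_json: &str) -> T {
        match tokio::fs::read_to_string(path).await {
            Ok(contents) => serde_json::from_str(&contents).expect("saved config should parse"),
            Err(_) => serde_json::from_str(default_json).expect("default config should parse"),
        }
    }

With T inferred as lib::eth::SavedConfigs, load_or_default(&path, DEFAULT_ETH_PROVIDERS).await would reproduce the behavior of the match above.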
@ -228,171 +128,25 @@ async fn main() {
|
|||||||
let (print_sender, print_receiver): (PrintSender, PrintReceiver) =
|
let (print_sender, print_receiver): (PrintSender, PrintReceiver) =
|
||||||
mpsc::channel(TERMINAL_CHANNEL_CAPACITY);
|
mpsc::channel(TERMINAL_CHANNEL_CAPACITY);
|
||||||
|
|
||||||
println!("finding public IP address...");
|
let our_ip = find_public_ip().await;
|
||||||
let our_ip: std::net::Ipv4Addr = {
|
let (wc_tcp_handle, flag_used) = setup_ws_networking(ws_networking_port.cloned()).await;
|
||||||
if let Ok(Some(ip)) = timeout(std::time::Duration::from_secs(5), public_ip::addr_v4()).await
|
|
||||||
{
|
|
||||||
ip
|
|
||||||
} else {
|
|
||||||
println!("failed to find public IPv4 address: booting as a routed node");
|
|
||||||
std::net::Ipv4Addr::LOCALHOST
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let http_server_port = if let Some(port) = http_port {
|
|
||||||
match http::utils::find_open_port(*port, port + 1).await {
|
|
||||||
Some(bound) => bound.local_addr().unwrap().port(),
|
|
||||||
None => {
|
|
||||||
println!(
|
|
||||||
"error: couldn't bind {}; first available port found was {}. \
|
|
||||||
Set an available port with `--port` and try again.",
|
|
||||||
port,
|
|
||||||
http::utils::find_open_port(*port, port + 1000)
|
|
||||||
.await
|
|
||||||
.expect("no ports found in range")
|
|
||||||
.local_addr()
|
|
||||||
.unwrap()
|
|
||||||
.port(),
|
|
||||||
);
|
|
||||||
panic!();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
match http::utils::find_open_port(8080, 8999).await {
|
|
||||||
Some(bound) => bound.local_addr().unwrap().port(),
|
|
||||||
None => {
|
|
||||||
println!(
|
|
||||||
"error: couldn't bind any ports between 8080 and 8999. \
|
|
||||||
Set an available port with `--port` and try again."
|
|
||||||
);
|
|
||||||
panic!();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// if the --ws-port flag is used, bind to that port right away.
|
|
||||||
// if the flag is not used, find the first available port between 9000 and 65535.
|
|
||||||
// NOTE: if the node has a different port specified in its onchain (direct) id,
|
|
||||||
// booting will fail if the flag was used to select a different port.
|
|
||||||
// if the flag was not used, the bound port will be dropped in favor of the onchain port.
|
|
||||||
|
|
||||||
#[cfg(not(feature = "simulation-mode"))]
|
|
||||||
let (ws_tcp_handle, flag_used) = if let Some(port) = ws_networking_port {
|
|
||||||
(
|
|
||||||
http::utils::find_open_port(*port, port + 1)
|
|
||||||
.await
|
|
||||||
.expect("ws-port selected with flag could not be bound"),
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
(
|
|
||||||
http::utils::find_open_port(9000, 65535)
|
|
||||||
.await
|
|
||||||
.expect("no ports found in range 9000-65535 for websocket server"),
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
println!(
|
|
||||||
"login or register at http://localhost:{}\r",
|
|
||||||
http_server_port
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(not(feature = "simulation-mode"))]
|
|
||||||
let (our, encoded_keyfile, decoded_keyfile) = serve_register_fe(
|
|
||||||
home_directory_path,
|
|
||||||
our_ip.to_string(),
|
|
||||||
(ws_tcp_handle, flag_used),
|
|
||||||
http_server_port,
|
|
||||||
matches.get_one::<String>("rpc").cloned(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
#[cfg(feature = "simulation-mode")]
|
#[cfg(feature = "simulation-mode")]
|
||||||
let (our, encoded_keyfile, decoded_keyfile) = match fake_node_name {
|
let (our, encoded_keyfile, decoded_keyfile) = simulate_node(
|
||||||
None => {
|
fake_node_name.cloned(),
|
||||||
match password {
|
password.cloned(),
|
||||||
None => {
|
home_directory_path,
|
||||||
panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
|
)
|
||||||
}
|
.await;
|
||||||
Some(password) => {
|
#[cfg(not(feature = "simulation-mode"))]
|
||||||
match fs::read(format!("{}/.keys", home_directory_path)).await {
|
let (our, encoded_keyfile, decoded_keyfile) = serve_register_fe(
|
||||||
Err(e) => panic!("could not read keyfile: {}", e),
|
&home_directory_path,
|
||||||
Ok(keyfile) => {
|
our_ip.to_string(),
|
||||||
match keygen::decode_keyfile(&keyfile, &password) {
|
(wc_tcp_handle, flag_used),
|
||||||
Err(e) => panic!("could not decode keyfile: {}", e),
|
http_server_port,
|
||||||
Ok(decoded_keyfile) => {
|
rpc.cloned(),
|
||||||
let our = Identity {
|
)
|
||||||
name: decoded_keyfile.username.clone(),
|
.await;
|
||||||
networking_key: format!(
|
|
||||||
"0x{}",
|
|
||||||
hex::encode(
|
|
||||||
decoded_keyfile
|
|
||||||
.networking_keypair
|
|
||||||
.public_key()
|
|
||||||
.as_ref()
|
|
||||||
)
|
|
||||||
),
|
|
||||||
ws_routing: None, // TODO
|
|
||||||
allowed_routers: decoded_keyfile.routers.clone(),
|
|
||||||
};
|
|
||||||
(our, keyfile, decoded_keyfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(name) => {
|
|
||||||
let password_hash = match password {
|
|
||||||
None => "secret".to_string(),
|
|
||||||
Some(password) => password.to_string(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let seed = SystemRandom::new();
|
|
||||||
let mut jwt_secret = [0u8, 32];
|
|
||||||
ring::rand::SecureRandom::fill(&seed, &mut jwt_secret).unwrap();
|
|
||||||
|
|
||||||
let net_pk_bytes = hex::decode(net_pk.unwrap()).unwrap();
|
|
||||||
|
|
||||||
let networking_keypair = signature::Ed25519KeyPair::from_pkcs8(
|
|
||||||
&net_pk_bytes,
|
|
||||||
).expect("failed to parse networking private key");
|
|
||||||
|
|
||||||
let our = Identity {
|
|
||||||
name: name.clone(),
|
|
||||||
networking_key: format!("0x{}", hex::encode(networking_keypair.public_key().as_ref())),
|
|
||||||
ws_routing: None,
|
|
||||||
allowed_routers: vec![],
|
|
||||||
};
|
|
||||||
let decoded_keyfile = Keyfile {
|
|
||||||
username: name.clone(),
|
|
||||||
routers: vec![],
|
|
||||||
networking_keypair,
|
|
||||||
jwt_secret_bytes: jwt_secret.to_vec(),
|
|
||||||
file_key: keygen::generate_file_key(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let encoded_keyfile = keygen::encode_keyfile(
|
|
||||||
password_hash,
|
|
||||||
name.clone(),
|
|
||||||
decoded_keyfile.routers.clone(),
|
|
||||||
&net_pk_bytes,
|
|
||||||
&decoded_keyfile.jwt_secret_bytes,
|
|
||||||
&decoded_keyfile.file_key,
|
|
||||||
);
|
|
||||||
|
|
||||||
fs::write(
|
|
||||||
format!("{}/.keys", home_directory_path),
|
|
||||||
encoded_keyfile.clone(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
(our, encoded_keyfile, decoded_keyfile)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// the boolean flag determines whether the runtime module is *public* or not,
|
// the boolean flag determines whether the runtime module is *public* or not,
|
||||||
// where public means that any process can always message it.
|
// where public means that any process can always message it.
|
||||||
@ -496,7 +250,7 @@ async fn main() {
|
|||||||
})
|
})
|
||||||
.collect(),
|
.collect(),
|
||||||
));
|
));
|
||||||
tasks.spawn(net::networking(
|
tasks.spawn(net::ws::networking(
|
||||||
our.clone(),
|
our.clone(),
|
||||||
our_ip.to_string(),
|
our_ip.to_string(),
|
||||||
networking_keypair_arc.clone(),
|
networking_keypair_arc.clone(),
|
||||||
@ -571,10 +325,11 @@ async fn main() {
|
|||||||
caps_oracle_sender.clone(),
|
caps_oracle_sender.clone(),
|
||||||
home_directory_path.clone(),
|
home_directory_path.clone(),
|
||||||
));
|
));
|
||||||
|
|
||||||
// if a runtime task exits, try to recover it,
|
// if a runtime task exits, try to recover it,
|
||||||
// unless it was terminal signaling a quit
|
// unless it was terminal signaling a quit
|
||||||
// or a SIG* was intercepted
|
// or a SIG* was intercepted
|
||||||
let quit_msg: String = tokio::select! {
|
let mut quit_msg: String = tokio::select! {
|
||||||
Some(Ok(res)) = tasks.join_next() => {
|
Some(Ok(res)) = tasks.join_next() => {
|
||||||
format!(
|
format!(
|
||||||
"uh oh, a kernel process crashed -- this should never happen: {:?}",
|
"uh oh, a kernel process crashed -- this should never happen: {:?}",
|
||||||
@ -600,7 +355,7 @@ async fn main() {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// gracefully abort all running processes in kernel
|
// gracefully abort all running processes in kernel
|
||||||
let _ = kernel_message_sender
|
if let Err(_) = kernel_message_sender
|
||||||
.send(KernelMessage {
|
.send(KernelMessage {
|
||||||
id: rand::random(),
|
id: rand::random(),
|
||||||
source: Address {
|
source: Address {
|
||||||
@@ -621,18 +376,280 @@ async fn main() {
            }),
            lazy_load_blob: None,
        })
-        .await;
+        .await
+    {
+        quit_msg = "failed to gracefully shut down kernel".into();
+    }

    // abort all remaining tasks
    tasks.shutdown().await;
    let stdout = std::io::stdout();
    let mut stdout = stdout.lock();
-    let _ = crossterm::execute!(
+    crossterm::execute!(
        stdout,
        crossterm::event::DisableBracketedPaste,
        crossterm::terminal::SetTitle(""),
        crossterm::style::SetForegroundColor(crossterm::style::Color::Red),
        crossterm::style::Print(format!("\r\n{quit_msg}\r\n")),
        crossterm::style::ResetColor,
-    );
+    )
+    .expect("failed to clean up terminal visual state! your terminal window might be funky now");
+}
+
+async fn set_http_server_port(set_port: Option<&u16>) -> u16 {
+    if let Some(port) = set_port {
+        match http::utils::find_open_port(*port, port + 1).await {
+            Some(bound) => bound.local_addr().unwrap().port(),
+            None => {
+                println!(
+                    "error: couldn't bind {}; first available port found was {}. \
+                     Set an available port with `--port` and try again.",
+                    port,
+                    http::utils::find_open_port(*port, port + 1000)
+                        .await
+                        .expect("no ports found in range")
+                        .local_addr()
+                        .unwrap()
+                        .port(),
+                );
+                panic!();
+            }
+        }
+    } else {
+        match http::utils::find_open_port(8080, 8999).await {
+            Some(bound) => bound.local_addr().unwrap().port(),
+            None => {
+                println!(
+                    "error: couldn't bind any ports between 8080 and 8999. \
+                     Set an available port with `--port` and try again."
+                );
+                panic!();
+            }
+        }
+    }
+}
+
+/// Sets up WebSocket networking by finding an open port and creating a TCP listener.
+/// If a specific port is provided, it attempts to bind to it directly.
+/// If no port is provided, it searches for the first available port between 9000 and 65535.
+/// Returns a tuple containing the TcpListener and a boolean indicating if a specific port was used.
+async fn setup_ws_networking(ws_networking_port: Option<u16>) -> (tokio::net::TcpListener, bool) {
+    match ws_networking_port {
+        Some(port) => {
+            let listener = http::utils::find_open_port(port, port + 1)
+                .await
+                .expect("ws-port selected with flag could not be bound");
+            (listener, true)
+        }
+        None => {
+            let listener = http::utils::find_open_port(9000, 65535)
+                .await
+                .expect("no ports found in range 9000-65535 for websocket server");
+            (listener, false)
+        }
+    }
+}
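Note: `http::utils::find_open_port` is called throughout the new helpers above, but its body is not part of this diff. A minimal sketch of what such a helper plausibly looks like, inferred only from the call sites; the signature, bind address, and half-open range semantics are assumptions, not the actual implementation:

    // assumed shape of http::utils::find_open_port, inferred from its call sites;
    // not code from this commit
    async fn find_open_port(start_port: u16, end_port: u16) -> Option<tokio::net::TcpListener> {
        for port in start_port..end_port {
            let addr = std::net::SocketAddr::from(([0, 0, 0, 0], port));
            // return the first listener that binds successfully
            if let Ok(listener) = tokio::net::TcpListener::bind(addr).await {
                return Some(listener);
            }
        }
        None
    }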
+
+/// TODO: writeup.
+pub async fn simulate_node(
+    fake_node_name: Option<String>,
+    password: Option<String>,
+    home_directory_path: &str,
+) -> (Identity, Vec<u8>, Keyfile) {
+    match fake_node_name {
+        None => {
+            match password {
+                None => {
+                    panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
+                }
+                Some(password) => {
+                    let keyfile = tokio::fs::read(format!("{}/.keys", home_directory_path))
+                        .await
+                        .expect("could not read keyfile");
+                    let decoded = keygen::decode_keyfile(&keyfile, &password)
+                        .expect("could not decode keyfile");
+                    let identity = Identity {
+                        name: decoded.username.clone(),
+                        networking_key: format!(
+                            "0x{}",
+                            hex::encode(decoded.networking_keypair.public_key().as_ref())
+                        ),
+                        ws_routing: None, // TODO: Define WebSocket routing logic
+                        allowed_routers: decoded.routers.clone(),
+                    };
+                    (identity, keyfile, decoded)
+                }
+            }
+        }
+        Some(name) => {
+            let password_hash = password.unwrap_or_else(|| "secret".to_string());
+            let (pubkey, networking_keypair) = keygen::generate_networking_key();
+            let seed = SystemRandom::new();
+            let mut jwt_secret = [0u8; 32];
+            ring::rand::SecureRandom::fill(&seed, &mut jwt_secret).unwrap();
+
+            let identity = Identity {
+                name: name.clone(),
+                networking_key: pubkey,
+                ws_routing: None,
+                allowed_routers: vec![],
+            };
+
+            let decoded_keyfile = Keyfile {
+                username: name.clone(),
+                routers: vec![],
+                networking_keypair: signature::Ed25519KeyPair::from_pkcs8(
+                    networking_keypair.as_ref(),
+                )
+                .unwrap(),
+                jwt_secret_bytes: jwt_secret.to_vec(),
+                file_key: keygen::generate_file_key(),
+            };
+
+            let encoded_keyfile = keygen::encode_keyfile(
+                password_hash,
+                name.clone(),
+                decoded_keyfile.routers.clone(),
+                networking_keypair.as_ref(),
+                &decoded_keyfile.jwt_secret_bytes,
+                &decoded_keyfile.file_key,
+            );
+
+            tokio::fs::write(
+                format!("{}/.keys", home_directory_path),
+                encoded_keyfile.clone(),
+            )
+            .await
+            .expect("Failed to write keyfile");
+
+            (identity, encoded_keyfile, decoded_keyfile)
+        }
+    }
+}
+
+async fn create_home_directory(home_directory_path: &str) {
+    if let Err(e) = tokio::fs::create_dir_all(home_directory_path).await {
+        panic!("failed to create home directory: {:?}", e);
+    }
+    println!("home at {}\r", home_directory_path);
+}
+
+/// build the command line interface for kinode
+///
+fn build_command() -> Command {
+    let app = Command::new("kinode")
+        .version(VERSION)
+        .author("Kinode DAO: https://github.com/kinode-dao")
+        .about("A General Purpose Sovereign Cloud Computing Platform")
+        .arg(arg!([home] "Path to home directory").required(true))
+        .arg(
+            arg!(--port <PORT> "Port to bind [default: first unbound at or above 8080]")
+                .value_parser(value_parser!(u16)),
+        )
+        .arg(
+            arg!(--"ws-port" <PORT> "Kinode internal WebSockets protocol port [default: first unbound at or above 9000]")
+                .alias("network-router-port")
+                .value_parser(value_parser!(u16)),
+        )
+        .arg(
+            arg!(--verbosity <VERBOSITY> "Verbosity level: higher is more verbose")
+                .default_value("0")
+                .value_parser(value_parser!(u8)),
+        )
+        .arg(
+            arg!(--"reveal-ip" "If set to false, as an indirect node, always use routers to connect to other nodes.")
+                .default_value("true")
+                .value_parser(value_parser!(bool)),
+        )
+        .arg(arg!(--rpc <RPC> "Add a WebSockets RPC URL at boot"));
+
+    #[cfg(feature = "simulation-mode")]
+    let app = app
+        .arg(arg!(--password <PASSWORD> "Networking password"))
+        .arg(arg!(--"fake-node-name" <NAME> "Name of fake node to boot"))
+        .arg(arg!(--"net-pk" <NET_PK> "Networking private key"))
+        .arg(
+            arg!(--detached <IS_DETACHED> "Run in detached mode (don't accept input)")
+                .action(clap::ArgAction::SetTrue),
+        );
+    app
+}
+
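`build_command` only constructs the parser; the code that consumes it is not part of this hunk. A hedged sketch of how `main()` presumably reads these flags with clap 4's typed accessors (the argument ids come from the builder above, the surrounding variable names are illustrative):

    // illustrative only: reading the flags defined by build_command()
    let matches = build_command().get_matches();
    let home_directory_path = matches
        .get_one::<String>("home")
        .expect("home directory is a required argument");
    // both port flags are optional; the set_http_server_port / setup_ws_networking
    // helpers above pick defaults when they are absent
    let http_port: Option<&u16> = matches.get_one::<u16>("port");
    let ws_port: Option<u16> = matches.get_one::<u16>("ws-port").copied();
    let verbosity: u8 = *matches.get_one::<u8>("verbosity").unwrap_or(&0);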
+/// Attempts to find the public IPv4 address of the node.
+/// If in simulation mode, it immediately returns localhost.
+/// Otherwise, it tries to find the public IP and defaults to localhost on failure.
+async fn find_public_ip() -> std::net::Ipv4Addr {
+    #[cfg(feature = "simulation-mode")]
+    {
+        std::net::Ipv4Addr::LOCALHOST
+    }
+
+    #[cfg(not(feature = "simulation-mode"))]
+    {
+        println!("Finding public IP address...");
+        match tokio::time::timeout(std::time::Duration::from_secs(5), public_ip::addr_v4()).await {
+            Ok(Some(ip)) => {
+                println!("Public IP found: {}", ip);
+                ip
+            }
+            _ => {
+                println!("Failed to find public IPv4 address: booting as a routed node.");
+                std::net::Ipv4Addr::LOCALHOST
+            }
+        }
+    }
+}
+
+/// check if we have keys saved on disk, encrypted
+/// if so, prompt user for "password" to decrypt with
+///
+/// once password is received, use to decrypt local keys file,
+/// and pass the keys into boot process as is done in registration.
+///
+/// NOTE: when we log in, we MUST check the PKI to make sure our
+/// information matches what we think it should be. this includes
+/// username, networking key, and routing info.
+/// if any do not match, we should prompt user to create a "transaction"
+/// that updates their PKI info on-chain.
+#[cfg(not(feature = "simulation-mode"))]
+async fn serve_register_fe(
+    home_directory_path: &str,
+    our_ip: String,
+    ws_networking: (tokio::net::TcpListener, bool),
+    http_server_port: u16,
+    maybe_rpc: Option<String>,
+) -> (Identity, Vec<u8>, Keyfile) {
+    let (kill_tx, kill_rx) = tokio::sync::oneshot::channel::<bool>();
+
+    let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(format!("{}/.keys", home_directory_path))
+        .await
+        .ok();
+
+    let (tx, mut rx) = mpsc::channel::<(Identity, Keyfile, Vec<u8>)>(1);
+    let (our, decoded_keyfile, encoded_keyfile) = tokio::select! {
+        _ = register::register(
+            tx,
+            kill_rx,
+            our_ip,
+            ws_networking,
+            http_server_port,
+            disk_keyfile,
+            maybe_rpc) => {
+            panic!("registration failed")
+        }
+        Some((our, decoded_keyfile, encoded_keyfile)) = rx.recv() => {
+            (our, decoded_keyfile, encoded_keyfile)
+        }
+    };
+
+    tokio::fs::write(
+        format!("{}/.keys", home_directory_path),
+        encoded_keyfile.clone(),
+    )
+    .await
+    .unwrap();
+
+    let _ = kill_tx.send(true);
+
+    (our, encoded_keyfile, decoded_keyfile)
 }
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,8 @@
-use crate::net::{types::*, MESSAGE_MAX_SIZE, TIMEOUT};
+use crate::net::{types::*, ws::MESSAGE_MAX_SIZE, ws::TIMEOUT};
 use anyhow::{anyhow, Result};
 use futures::stream::{SplitSink, SplitStream};
 use futures::{SinkExt, StreamExt};
+use lib::types::core::*;
 use ring::signature::{self, Ed25519KeyPair};
 use snow::params::NoiseParams;
 use tokio::net::TcpStream;
@@ -9,8 +10,6 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
 use tokio::time::timeout;
 use tokio_tungstenite::{connect_async, tungstenite, MaybeTlsStream, WebSocketStream};

-use lib::types::core::*;
-
 lazy_static::lazy_static! {
     static ref PARAMS: NoiseParams = "Noise_XX_25519_ChaChaPoly_BLAKE2s"
         .parse()
@@ -264,7 +263,7 @@ pub fn validate_signature(from: &str, signature: &[u8], message: &[u8], pki: &On
     if let Some(peer_id) = pki.get(from) {
         let their_networking_key = signature::UnparsedPublicKey::new(
             &signature::ED25519,
-            hex::decode(strip_0x(&peer_id.networking_key)).unwrap_or_default(),
+            net_key_string_to_hex(&peer_id.networking_key),
         );
         their_networking_key.verify(message, signature).is_ok()
     } else {
@@ -283,7 +282,7 @@ pub fn validate_routing_request(
         .ok_or(anyhow!("unknown KNS name"))?;
     let their_networking_key = signature::UnparsedPublicKey::new(
         &signature::ED25519,
-        hex::decode(strip_0x(&their_id.networking_key))?,
+        net_key_string_to_hex(&their_id.networking_key),
     );
     their_networking_key
         .verify(
@@ -308,7 +307,7 @@ pub fn validate_handshake(
     // verify their signature of their static key
     let their_networking_key = signature::UnparsedPublicKey::new(
         &signature::ED25519,
-        hex::decode(strip_0x(&their_id.networking_key))?,
+        net_key_string_to_hex(&their_id.networking_key),
     );
     their_networking_key
         .verify(their_static_key, &handshake.signature)
@@ -342,10 +341,10 @@ pub async fn recv_protocol_message(conn: &mut PeerConnection) -> Result<KernelMe
         &ws_recv(&mut conn.read_stream, &mut conn.write_stream).await?,
         &mut conn.buf,
     )?;

     if outer_len < 4 {
         return Err(anyhow!("protocol message too small!"));
     }

     let length_bytes = [conn.buf[0], conn.buf[1], conn.buf[2], conn.buf[3]];
     let msg_len = u32::from_be_bytes(length_bytes);
     if msg_len > MESSAGE_MAX_SIZE {
@@ -444,7 +443,7 @@ pub fn build_initiator() -> (snow::HandshakeState, Vec<u8>) {
         builder
             .local_private_key(&keypair.private)
             .build_initiator()
-            .expect("net: couldn't build responder?"),
+            .expect("net: couldn't build initiator?"),
         keypair.public,
     )
 }
@@ -473,11 +472,8 @@ pub async fn error_offline(km: KernelMessage, network_error_tx: &NetworkErrorSen
     Ok(())
 }

-fn strip_0x(s: &str) -> String {
-    if let Some(stripped) = s.strip_prefix("0x") {
-        return stripped.to_string();
-    }
-    s.to_string()
+fn net_key_string_to_hex(s: &str) -> Vec<u8> {
+    hex::decode(s.strip_prefix("0x").unwrap_or(s)).unwrap_or_default()
 }

 pub async fn parse_hello_message(
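The new `net_key_string_to_hex` folds the strip-0x-then-decode pattern at each call site into one infallible helper; note that the two call sites that previously propagated decode errors with `?` now simply get an empty key, so a malformed networking key fails signature verification instead of returning an error. A small self-contained check of the behavior, assuming only the `hex` crate (the sample key strings are made up for illustration):

    // standalone reproduction of the helper from the hunk above, plus a hypothetical check
    fn net_key_string_to_hex(s: &str) -> Vec<u8> {
        hex::decode(s.strip_prefix("0x").unwrap_or(s)).unwrap_or_default()
    }

    fn main() {
        // prefixed and unprefixed forms decode to the same bytes
        assert_eq!(net_key_string_to_hex("0xdeadbeef"), vec![0xde, 0xad, 0xbe, 0xef]);
        assert_eq!(net_key_string_to_hex("deadbeef"), vec![0xde, 0xad, 0xbe, 0xef]);
        // invalid hex now yields an empty key rather than an error to propagate
        assert!(net_key_string_to_hex("not-hex").is_empty());
    }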
1112	kinode/src/net/ws.rs	Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,4 @@
 use crate::keygen;
-use aes_gcm::aead::KeyInit;
 use alloy_primitives::{Address as EthAddress, Bytes, FixedBytes, U256};
 use alloy_providers::provider::{Provider, TempProvider};
 use alloy_pubsub::PubSubFrontend;
@@ -9,13 +8,11 @@ use alloy_signer::Signature;
 use alloy_sol_macro::sol;
 use alloy_sol_types::{SolCall, SolValue};
 use alloy_transport_ws::WsConnect;
-use hmac::Hmac;
-use jwt::SignWithKey;
+use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
 use lib::types::core::*;
 use ring::rand::SystemRandom;
 use ring::signature;
 use ring::signature::KeyPair;
-use sha2::Sha256;
 use static_dir::static_dir;
 use std::str::FromStr;
 use std::sync::Arc;
@@ -95,23 +92,6 @@ fn _hex_string_to_u8_array(hex_str: &str) -> Result<[u8; 32], &'static str> {
     Ok(bytes)
 }

-pub fn generate_jwt(jwt_secret_bytes: &[u8], username: &str) -> Option<String> {
-    let jwt_secret: Hmac<Sha256> = match Hmac::new_from_slice(jwt_secret_bytes) {
-        Ok(secret) => secret,
-        Err(_) => return None,
-    };
-
-    let claims = crate::http::server_types::JwtClaims {
-        username: username.to_string(),
-        expiration: 0,
-    };
-
-    match claims.sign_with_key(&jwt_secret) {
-        Ok(token) => Some(token),
-        Err(_) => None,
-    }
-}
-
 /// Serve the registration page and receive POSTs and PUTs from it
 pub async fn register(
     tx: RegistrationSender,
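The later `success_response` hunk calls `keygen::generate_jwt(...)`, so this helper appears to have moved into the `keygen` module rather than being dropped. Its new body is not shown in this diff; a sketch assuming it was relocated largely unchanged, with the imports it needs to stand alone (the `JwtClaims` path is taken from the removed code):

    use hmac::{Hmac, Mac};
    use jwt::SignWithKey;
    use sha2::Sha256;

    pub fn generate_jwt(jwt_secret_bytes: &[u8], username: &str) -> Option<String> {
        // HMAC-SHA256 key derived from the node's JWT secret
        let jwt_secret: Hmac<Sha256> = Hmac::new_from_slice(jwt_secret_bytes).ok()?;
        let claims = crate::http::server_types::JwtClaims {
            username: username.to_string(),
            expiration: 0,
        };
        // sign the claims into a compact JWT, or None on failure
        claims.sign_with_key(&jwt_secret).ok()
    }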
@@ -366,7 +346,9 @@ async fn handle_keyfile_vet(
     // additional checks?
     let encoded_keyfile = match payload.keyfile.is_empty() {
         true => keyfile.ok_or(warp::reject())?,
-        false => base64::decode(payload.keyfile).map_err(|_| warp::reject())?,
+        false => base64_standard
+            .decode(payload.keyfile)
+            .map_err(|_| warp::reject())?,
     };

     let decoded_keyfile = keygen::decode_keyfile(&encoded_keyfile, &payload.password_hash)
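This hunk, the two below it in this file, and the sqlite changes further down are the same mechanical migration from the old `base64::encode`/`base64::decode` free functions to the Engine API of newer base64 releases (0.21+). A minimal sketch of the before/after shape, independent of the registration code:

    // newer base64: an explicit engine replaces the old free functions
    use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};

    fn roundtrip(bytes: &[u8]) -> Result<Vec<u8>, base64::DecodeError> {
        // old: let s = base64::encode(bytes);
        let s = base64_standard.encode(bytes);
        // old: base64::decode(&s)
        base64_standard.decode(s)
    }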
@@ -543,7 +525,7 @@ async fn handle_import_keyfile(
     provider: Arc<Provider<PubSubFrontend>>,
 ) -> Result<impl Reply, Rejection> {
     // if keyfile was not present in node and is present from user upload
-    let encoded_keyfile = match base64::decode(info.keyfile.clone()) {
+    let encoded_keyfile = match base64_standard.decode(info.keyfile.clone()) {
         Ok(k) => k,
         Err(_) => {
             return Ok(warp::reply::with_status(
@@ -762,8 +744,8 @@ async fn success_response(
     decoded_keyfile: Keyfile,
     encoded_keyfile: Vec<u8>,
 ) -> Result<warp::reply::Response, Rejection> {
-    let encoded_keyfile_str = base64::encode(&encoded_keyfile);
-    let token = match generate_jwt(&decoded_keyfile.jwt_secret_bytes, &our.name) {
+    let encoded_keyfile_str = base64_standard.encode(&encoded_keyfile);
+    let token = match keygen::generate_jwt(&decoded_keyfile.jwt_secret_bytes, &our.name) {
         Some(token) => token,
         None => {
             return Ok(warp::reply::with_status(
@@ -1,4 +1,5 @@
 use anyhow::Result;
+use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
 use dashmap::DashMap;
 use rusqlite::Connection;
 use std::collections::{HashMap, HashSet, VecDeque};
@@ -201,7 +202,9 @@ async fn handle_request(
             serde_json::Number::from_f64(real).unwrap(),
         ),
         SqlValue::Text(text) => serde_json::Value::String(text),
-        SqlValue::Blob(blob) => serde_json::Value::String(base64::encode(blob)), // or another representation if you prefer
+        SqlValue::Blob(blob) => {
+            serde_json::Value::String(base64_standard.encode(blob))
+        } // or another representation if you prefer
         _ => serde_json::Value::Null,
     };
     map.insert(column_name.clone(), value_json);
@@ -511,7 +514,7 @@ fn json_to_sqlite(value: &serde_json::Value) -> Result<SqlValue, SqliteError> {
         }
     }
     serde_json::Value::String(s) => {
-        match base64::decode(s) {
+        match base64_standard.decode(s) {
             Ok(decoded_bytes) => {
                 // convert to SQLite Blob if it's a valid base64 string
                 Ok(SqlValue::Blob(decoded_bytes))
@@ -391,11 +391,11 @@ async fn bootstrap(

    for (package_metadata, mut package) in packages.clone() {
        let package_name = package_metadata.properties.package_name.as_str();
-        // // special case tester: only load it in if in simulation mode
-        // if package_name == "tester" {
-        // #[cfg(not(feature = "simulation-mode"))]
-        // continue;
-        // }
+        // special case tester: only load it in if in simulation mode
+        #[cfg(not(feature = "simulation-mode"))]
+        if package_name == "tester" {
+            continue;
+        }

        println!("fs: handling package {package_name}...\r");
        let package_publisher = package_metadata.properties.publisher.as_str();
@@ -610,8 +610,8 @@ async fn bootstrap(
    for (package_metadata, mut package) in packages {
        let package_name = package_metadata.properties.package_name.as_str();
        // special case tester: only load it in if in simulation mode
+        #[cfg(not(feature = "simulation-mode"))]
        if package_name == "tester" {
-            #[cfg(not(feature = "simulation-mode"))]
            continue;
        }

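Both bootstrap hunks converge on the same pattern: the `#[cfg(not(feature = "simulation-mode"))]` attribute now sits on the `if` statement itself, so the entire tester check is compiled out of simulation builds instead of leaving an empty block behind, and both loops read the same way. A minimal sketch of statement-level cfg gating, with a made-up feature name standing in for simulation-mode:

    // illustrative pattern only; "fake-feature" is a stand-in feature name
    fn load_packages(names: &[&str]) -> Vec<String> {
        let mut loaded = Vec::new();
        for name in names {
            // in normal builds the tester package is skipped;
            // with the feature enabled this whole statement disappears and tester loads
            #[cfg(not(feature = "fake-feature"))]
            if *name == "tester" {
                continue;
            }
            loaded.push(name.to_string());
        }
        loaded
    }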
@@ -1,7 +1,7 @@
 [package]
 name = "lib"
 authors = ["KinodeDAO"]
-version = "0.7.0"
+version = "0.7.1"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -13,15 +13,15 @@ license = "Apache-2.0"
 [build-dependencies]
 anyhow = "1.0.71"
 kit = { git = "https://github.com/kinode-dao/kit", rev = "659f59e" }
-reqwest = { version = "0.11.22", features = ["blocking"] }
+reqwest = { version = "0.12.4", features = ["blocking"] }
 tokio = "1.28"

 [dependencies]
 alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
 lazy_static = "1.4.0"
 rand = "0.8.4"
-ring = "0.16.20"
-rusqlite = { version = "0.30.0", features = ["bundled"] }
+ring = "0.17.8"
+rusqlite = { version = "0.31.0", features = ["bundled"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 thiserror = "1.0"
@@ -155,7 +155,7 @@ pub struct ProviderConfig {
 pub enum NodeOrRpcUrl {
     Node {
         kns_update: crate::core::KnsUpdate,
-        use_as_provider: bool, // for routers inside saved config
+        use_as_provider: bool, // false for just-routers inside saved config
     },
     RpcUrl(String),
 }
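The reworded comment clarifies that a saved `Node` entry may be kept purely as a router without acting as an RPC provider. A hedged sketch of how a consumer might honor the flag; this helper is illustrative and not part of the codebase:

    // illustrative helper: keep only entries that should actually serve RPC
    fn usable_providers(config: &[NodeOrRpcUrl]) -> Vec<&NodeOrRpcUrl> {
        let mut usable = Vec::new();
        for entry in config {
            let keep = match entry {
                // node entries saved purely as routers have use_as_provider == false
                NodeOrRpcUrl::Node { use_as_provider, .. } => *use_as_provider,
                // a bare RPC URL is always a provider
                NodeOrRpcUrl::RpcUrl(_) => true,
            };
            if keep {
                usable.push(entry);
            }
        }
        usable
    }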