mirror of https://github.com/uqbar-dao/nectar.git
synced 2024-12-23 08:32:23 +03:00

commit 77a1acb434: peacefully merge in develop

Cargo.lock (generated, 1772): file diff suppressed because it is too large.
@@ -1,7 +1,7 @@
[package]
name = "kinode_lib"
authors = ["KinodeDAO"]
version = "0.7.0"
version = "0.7.1"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"
@@ -1,7 +1,7 @@
[package]
name = "kinode"
authors = ["KinodeDAO"]
version = "0.7.0"
version = "0.7.1"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"
@@ -25,7 +25,7 @@ zip = "0.6"
simulation-mode = []

[dependencies]
aes-gcm = "0.10.2"
aes-gcm = "0.10.3"
alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4", features = ["ws"]}
@@ -37,27 +37,26 @@ alloy-sol-types = "0.6.2"
alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
anyhow = "1.0.71"
async-trait = "0.1.71"
base64 = "0.13"
base64 = "0.22.0"
bincode = "1.3.3"
blake3 = "1.4.1"
bytes = "1.4.0"
cap-std = "2.0.0"
chacha20poly1305 = "0.10.1"
chrono = "0.4.31"
clap = { version = "4.4", features = ["derive"] }
crossterm = { version = "0.26.1", features = ["event-stream", "bracketed-paste"] }
crossterm = { version = "0.27.0", features = ["event-stream", "bracketed-paste"] }
curve25519-dalek = "^4.1.2"
dashmap = "5.5.3"
digest = "0.10"
elliptic-curve = { version = "0.13.8", features = ["ecdh"] }
flate2 = "1.0"
futures = "0.3"
generic-array = "0.14"
generic-array = "1.0.0"
getrandom = "0.2.10"
hex = "0.4.3"
hkdf = "0.12.3"
hmac = "0.12"
http = "0.2.9"
http = "1.1.0"
jwt = "0.16"
lib = { path = "../lib" }
lazy_static = "1.4.0"
@@ -67,26 +66,25 @@ num-traits = "0.2"
open = "5.0.0"
public-ip = "0.2.2"
rand = "0.8.4"
reqwest = "0.11.18"
ring = "0.16.20"
reqwest = "0.12.4"
ring = "0.17.8"
rmp-serde = "1.1.2"
rocksdb = { version = "0.21.0", features = ["multi-threaded-cf"] }
rocksdb = { version = "0.22.0", features = ["multi-threaded-cf"] }
route-recognizer = "0.3.1"
rusqlite = { version = "0.30.0", features = ["bundled"] }
rusqlite = { version = "0.31.0", features = ["bundled"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
sha2 = "0.10"
snow = { version = "0.9.3", features = ["ring-resolver"] }
snow = { version = "0.9.5", features = ["ring-resolver"] }
static_dir = "0.2.0"
thiserror = "1.0"
tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "signal", "sync"] }
tokio-stream = "0.1.14"
tokio-tungstenite = "0.20.1"
tokio-tungstenite = "0.21.0"
url = "2.4.1"
uuid = { version = "1.1.2", features = ["serde", "v4"] }
warp = "0.3.5"
wasi-common = "19.0.1"
wasmtime = "19.0.1"
wasmtime-wasi = "19.0.1"
zip = "0.6"
zip = "1.1.1"
@@ -1,17 +1,17 @@
[package]
name = "app_store"
version = "0.3.0"
version = "0.3.1"
edition = "2021"

[features]
simulation-mode = []

[dependencies]
alloy-primitives = "0.6.2"
alloy-sol-types = "0.6.2"
alloy-primitives = "0.7.0"
alloy-sol-types = "0.7.0"
anyhow = "1.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -20,7 +20,7 @@ sha3 = "0.10.8"
url = "2.4.1"
urlencoding = "2.1.0"
wit-bindgen = "0.24.0"
zip = { version = "0.6.6", default-features = false }
zip = { version = "1.1.1", default-features = false }

[lib]
crate-type = ["cdylib"]
@@ -41,6 +41,7 @@ const ICON: &str = include_str!("icon");

const CHAIN_ID: u64 = 10; // optimism
const CONTRACT_ADDRESS: &str = "0x52185B6a6017E6f079B994452F234f7C2533787B"; // optimism
const CONTRACT_FIRST_BLOCK: u64 = 118_590_088;

const EVENTS: [&str; 3] = [
"AppRegistered(uint256,string,bytes,string,bytes32)",
@@ -111,7 +111,7 @@ impl State {
crate::print_to_terminal(1, "producing new state");
let mut state = State {
contract_address,
last_saved_block: 1,
last_saved_block: crate::CONTRACT_FIRST_BLOCK,
package_hashes: HashMap::new(),
listed_packages: HashMap::new(),
downloaded_packages: HashMap::new(),
@@ -371,14 +371,16 @@ impl State {
.ok_or(anyhow::anyhow!("got log with no block number"))?
.try_into()?;

// let package_hash: alloy_primitives::U256 = log.topics[1].into();
// let package_hash = package_hash.to_string();

match log.topics[0] {
match log.topics()[0] {
AppRegistered::SIGNATURE_HASH => {
let package_hash = log.topics[1];
let (package_name, publisher_dnswire, metadata_url, metadata_hash) =
AppRegistered::abi_decode_data(&log.data, true)?;
let package_hash = log.topics()[1];

let app = AppRegistered::decode_log_data(log.data(), false)?;
let package_name = app.packageName;
let publisher_dnswire = app.publisherName;
let metadata_url = app.metadataUrl;
let metadata_hash = app.metadataHash;

let package_hash = package_hash.to_string();
let metadata_hash = metadata_hash.to_string();
@@ -390,13 +392,14 @@ impl State {
),
);

if generate_package_hash(&package_name, publisher_dnswire.as_slice())
if generate_package_hash(&package_name, publisher_dnswire.to_vec().as_slice())
!= package_hash
{
return Err(anyhow::anyhow!("got log with mismatched package hash"));
}

let Ok(publisher_name) = dnswire_decode(publisher_dnswire.as_slice()) else {
let Ok(publisher_name) = dnswire_decode(publisher_dnswire.to_vec().as_slice())
else {
return Err(anyhow::anyhow!("got log with invalid publisher name"));
};

@@ -430,9 +433,12 @@ impl State {
self.insert_listing(package_hash, listing);
}
AppMetadataUpdated::SIGNATURE_HASH => {
let package_hash = log.topics[1].to_string();
let (metadata_url, metadata_hash) =
AppMetadataUpdated::abi_decode_data(&log.data, false)?;
let package_hash = log.topics()[1].to_string();

let upd = AppMetadataUpdated::decode_log_data(log.data(), false)?;
let metadata_url = upd.metadataUrl;
let metadata_hash = upd.metadataHash;

let metadata_hash = metadata_hash.to_string();

let current_listing = self
@@ -484,9 +490,9 @@ impl State {
}
}
Transfer::SIGNATURE_HASH => {
let from = alloy_primitives::Address::from_word(log.topics[1]);
let to = alloy_primitives::Address::from_word(log.topics[2]);
let package_hash = log.topics[3].to_string();
let from = alloy_primitives::Address::from_word(log.topics()[1]);
let to = alloy_primitives::Address::from_word(log.topics()[2]);
let package_hash = log.topics()[3].to_string();

if from == alloy_primitives::Address::ZERO {
match self.get_listing_with_hash_mut(&package_hash) {
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -9,7 +9,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
"image": "",
"properties": {
"package_name": "app_store",
"current_version": "0.3.0",
"current_version": "0.3.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.3.0": ""
"0.3.1": ""
}
},
"external_url": "https://kinode.org",
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
[package]
name = "chess"
version = "0.2.0"
version = "0.2.1"
edition = "2021"

[features]
@@ -8,9 +8,9 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
base64 = "0.13"
base64 = "0.22.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
pleco = "0.5"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -4,11 +4,11 @@
"image": "",
"properties": {
"package_name": "chess",
"current_version": "0.2.0",
"current_version": "0.2.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.2.0": ""
"0.2.1": ""
}
},
"external_url": "https://kinode.org",
@@ -1,6 +1,6 @@
[package]
name = "homepage"
version = "0.1.0"
version = "0.1.1"
edition = "2021"

[features]
@@ -9,7 +9,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
"image": "",
"properties": {
"package_name": "homepage",
"current_version": "0.1.0",
"current_version": "0.1.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.1.0": ""
"0.1.1": ""
}
},
"external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
[package]
name = "kns_indexer"
version = "0.2.0"
version = "0.2.1"
edition = "2021"

[features]
@@ -8,11 +8,11 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
alloy-primitives = "0.6.2"
alloy-sol-types = "0.6.2"
alloy-primitives = "0.7.0"
alloy-sol-types = "0.7.0"
bincode = "1.3.3"
hex = "0.4.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rmp-serde = "1.1.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -18,6 +18,7 @@ wit_bindgen::generate!({
// perhaps a constant in process_lib?
const KNS_OPTIMISM_ADDRESS: &'static str = "0xca5b5811c0c40aab3295f932b1b5112eb7bb4bd6";
const KNS_LOCAL_ADDRESS: &'static str = "0x5FbDB2315678afecb367f032d93F642f64180aa3";
const KNS_FIRST_BLOCK: u64 = 114_923_786;

#[derive(Clone, Debug, Serialize, Deserialize)]
struct State {
@@ -133,7 +134,7 @@ fn init(our: Address) {
contract_address,
names: HashMap::new(),
nodes: HashMap::new(),
block: 1,
block: KNS_FIRST_BLOCK,
}
} else {
println!("loading in {} persisted PKI entries", s.nodes.len());
@@ -145,7 +146,7 @@ fn init(our: Address) {
contract_address: contract_address.clone(),
names: HashMap::new(),
nodes: HashMap::new(),
block: 1,
block: KNS_FIRST_BLOCK,
},
};
@@ -188,7 +189,12 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> {
match eth_provider.get_logs(&filter) {
Ok(logs) => {
for log in logs {
handle_log(&our, &mut state, &log)?;
match handle_log(&our, &mut state, &log) {
Ok(_) => {}
Err(e) => {
println!("log-handling error! {e:?}");
}
}
}
break;
}
@@ -295,7 +301,12 @@ fn handle_eth_message(
match eth_result {
Ok(eth::EthSub { result, .. }) => {
if let eth::SubscriptionResult::Log(log) = result {
handle_log(our, state, &log)?;
match handle_log(our, state, &log) {
Ok(_) => {}
Err(e) => {
println!("log-handling error! {e:?}");
}
}
}
}
Err(_e) => {
@@ -345,11 +356,11 @@ fn handle_eth_message(
}

fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Result<()> {
let node_id = log.topics[1];
let node_id = log.topics()[1];

let name = match state.names.entry(node_id.to_string()) {
Entry::Occupied(o) => o.into_mut(),
Entry::Vacant(v) => v.insert(get_name(&log)),
Entry::Vacant(v) => v.insert(get_name(&log)?),
};

let node = state
@@ -359,15 +370,15 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul

let mut send = true;

match log.topics[0] {
match log.topics()[0] {
KeyUpdate::SIGNATURE_HASH => {
node.public_key = KeyUpdate::abi_decode_data(&log.data, true)
node.public_key = KeyUpdate::decode_log_data(log.data(), true)
.unwrap()
.0
.key
.to_string();
}
IpUpdate::SIGNATURE_HASH => {
let ip = IpUpdate::abi_decode_data(&log.data, true).unwrap().0;
let ip = IpUpdate::decode_log_data(log.data(), true).unwrap().ip;
node.ip = format!(
"{}.{}.{}.{}",
(ip >> 24) & 0xFF,
@@ -380,15 +391,15 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
node.routers = vec![];
}
WsUpdate::SIGNATURE_HASH => {
node.port = WsUpdate::abi_decode_data(&log.data, true).unwrap().0;
node.port = WsUpdate::decode_log_data(log.data(), true).unwrap().port;
// when we get port data, we should delete any router data,
// since the assignment of port indicates an direct node
node.routers = vec![];
}
RoutingUpdate::SIGNATURE_HASH => {
node.routers = RoutingUpdate::abi_decode_data(&log.data, true)
node.routers = RoutingUpdate::decode_log_data(log.data(), true)
.unwrap()
.0
.routers
.iter()
.map(|r| r.to_string())
.collect::<Vec<String>>();
@@ -413,7 +424,7 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
}

// if new block is > 100 from last block, save state
let block = log.block_number.expect("expect").to::<u64>();
let block = log.block_number.expect("expect");
if block > state.block + 100 {
kinode_process_lib::print_to_terminal(
1,
@@ -429,16 +440,13 @@ fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Resul
Ok(())
}

fn get_name(log: &eth::Log) -> String {
let decoded = NodeRegistered::abi_decode_data(&log.data, true).unwrap();
let name = match dnswire_decode(decoded.0.clone()) {
Ok(n) => n,
Err(_) => {
println!("failed to decode name: {:?}", decoded.0);
panic!("")
}
};
name
fn get_name(log: &eth::Log) -> anyhow::Result<String> {
let decoded = NodeRegistered::decode_log_data(log.data(), false).map_err(|_e| {
anyhow::anyhow!(
"got event other than NodeRegistered without knowing about existing node name"
)
})?;
dnswire_decode(decoded.name.to_vec()).map_err(|e| anyhow::anyhow!(e))
}

fn dnswire_decode(wire_format_bytes: Vec<u8>) -> Result<String, FromUtf8Error> {
@@ -1,14 +1,14 @@
{
"name": "KNS Indexer",
"description": "Kinode OS pki indexer",
"description": "Kinode OS PKI indexer",
"image": "",
"properties": {
"package_name": "kns_indexer",
"current_version": "0.1.0",
"current_version": "0.2.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.1.0": ""
"0.2.1": ""
}
},
"external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -9,7 +9,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
clap = "4.4.18"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
regex = "1.10.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -4,11 +4,11 @@
"image": "",
"properties": {
"package_name": "terminal",
"current_version": "0.1.0",
"current_version": "0.1.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.1.0": ""
"0.1.1": ""
}
},
"external_url": "https://kinode.org",
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rmp-serde = "1.1.2"
serde = { version = "1.0", features = ["derive"] }
wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rmp-serde = "1.1.2"
serde = { version = "1.0", features = ["derive"] }
wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rmp-serde = "1.1.2"
serde = { version = "1.0", features = ["derive"] }
wit-bindgen = "0.24.0"
@@ -7,7 +7,7 @@ edition = "2021"
simulation-mode = []

[dependencies]
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rmp-serde = "1.1.2"
serde = { version = "1.0", features = ["derive"] }
wit-bindgen = "0.24.0"
@@ -1,6 +1,6 @@
[package]
name = "terminal"
version = "0.1.0"
version = "0.1.1"
edition = "2021"

[features]
@@ -9,7 +9,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
rand = "0.8"
regex = "1.10.3"
serde = { version = "1.0", features = ["derive"] }
@@ -295,7 +295,7 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
}
}
print_to_terminal(
2,
3,
&format!(
"{}: Process {{\n wasm_bytes_handle: {},\n wit_version: {},\n on_exit: {:?},\n public: {}\n capabilities: {}\n}}",
parsed_new_process_id.clone(),
@@ -8,7 +8,7 @@ simulation-mode = []

[dependencies]
anyhow = "1.0"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
wit-bindgen = "0.24.0"
@@ -4,11 +4,11 @@
"image": "",
"properties": {
"package_name": "tester",
"current_version": "0.1.0",
"current_version": "0.1.1",
"publisher": "sys",
"mirrors": [],
"code_hashes": {
"0.1.0": ""
"0.1.1": ""
}
},
"external_url": "https://kinode.org",
@@ -9,7 +9,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
bincode = "1.3.3"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"
@@ -1,6 +1,6 @@
[package]
name = "tester"
version = "0.1.0"
version = "0.1.1"
edition = "2021"

[features]
@@ -10,7 +10,7 @@ simulation-mode = []
anyhow = "1.0"
bincode = "1.3.3"
indexmap = "2.1"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.1" }
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.7.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"
@@ -1,11 +1,4 @@
[
{
"chain_id": 1,
"trusted": false,
"provider": {
"RpcUrl": "wss://ethereum.publicnode.com"
}
},
{
"chain_id": 31337,
"trusted": true,
@@ -13,6 +6,13 @@
"RpcUrl": "wss://localhost:8545"
}
},
{
"chain_id": 1,
"trusted": false,
"provider": {
"RpcUrl": "wss://ethereum.publicnode.com"
}
},
{
"chain_id": 10,
"trusted": false,
@@ -21,18 +21,18 @@
}
},
{
"chain_id": 10,
"chain_id": 1,
"trusted": false,
"provider": {
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-1.os",
"name": "providerfren.os",
"owner": "",
"node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d",
"public_key": "0xe1f7a266eafe46c79494d4dcce4222d81e5767511b295f1ed26c37221aecb80b",
"node": "",
"public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
"ip": "147.135.114.167",
"port": 9000,
"port": 9999,
"routers": []
}
}
@@ -45,30 +45,12 @@
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-2.os",
"name": "providerfren.os",
"owner": "",
"node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458",
"public_key": "0xcc2752e30ba865ab6baa1b819b0291b71f1136245234274b274df5950c3abcc4",
"node": "",
"public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
"ip": "147.135.114.167",
"port": 9001,
"routers": []
}
}
}
},
{
"chain_id": 10,
"trusted": false,
"provider": {
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-3.os",
"owner": "",
"node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a",
"public_key": "0xe992841b17212206a3929b4cc065ad2854875d48fc7177e260005b9eb8d4f123",
"ip": "147.135.114.167",
"port": 9005,
"port": 9999,
"routers": []
}
}
@@ -81,48 +63,12 @@
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-3.os",
"name": "providerfren.os",
"owner": "",
"node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a",
"public_key": "0xe992841b17212206a3929b4cc065ad2854875d48fc7177e260005b9eb8d4f123",
"node": "",
"public_key": "0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451",
"ip": "147.135.114.167",
"port": 9005,
"routers": []
}
}
}
},
{
"chain_id": 11155111,
"trusted": false,
"provider": {
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-2.os",
"owner": "",
"node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458",
"public_key": "0xcc2752e30ba865ab6baa1b819b0291b71f1136245234274b274df5950c3abcc4",
"ip": "147.135.114.167",
"port": 9001,
"routers": []
}
}
}
},
{
"chain_id": 11155111,
"trusted": false,
"provider": {
"Node": {
"use_as_provider": true,
"kns_update": {
"name": "default-router-1.os",
"owner": "",
"node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d",
"public_key": "0xe1f7a266eafe46c79494d4dcce4222d81e5767511b295f1ed26c37221aecb80b",
"ip": "147.135.114.167",
"port": 9000,
"port": 9999,
"routers": []
}
}
@ -18,9 +18,13 @@ mod subscription;
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum IncomingReq {
|
||||
/// requests for an RPC action that can come from processes on this node or others
|
||||
EthAction(EthAction),
|
||||
/// requests that must come from this node to modify provider settings / fetch them
|
||||
EthConfigAction(EthConfigAction),
|
||||
/// subscription updates coming in from a remote provider
|
||||
EthSubResult(EthSubResult),
|
||||
/// a remote node who uses our provider keeping their subscription alive
|
||||
SubKeepalive(u64),
|
||||
}
|
||||
|
||||
@ -42,11 +46,14 @@ struct UrlProvider {
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NodeProvider {
|
||||
/// NOT CURRENTLY USED
|
||||
pub trusted: bool,
|
||||
/// semi-temporary flag to mark if this provider is currently usable
|
||||
/// future updates will make this more dynamic
|
||||
pub usable: bool,
|
||||
pub name: String,
|
||||
/// the KNS update that describes this node provider
|
||||
/// kept so we can re-serialize to SavedConfigs
|
||||
pub kns_update: KnsUpdate,
|
||||
}
|
||||
|
||||
impl ActiveProviders {
|
||||
@ -59,7 +66,7 @@ impl ActiveProviders {
|
||||
self.nodes.push(NodeProvider {
|
||||
trusted: new.trusted,
|
||||
usable: use_as_provider,
|
||||
name: kns_update.name,
|
||||
kns_update,
|
||||
});
|
||||
}
|
||||
NodeOrRpcUrl::RpcUrl(url) => {
|
||||
@ -74,7 +81,7 @@ impl ActiveProviders {
|
||||
|
||||
fn remove_provider(&mut self, remove: &str) {
|
||||
self.urls.retain(|x| x.url != remove);
|
||||
self.nodes.retain(|x| x.name != remove);
|
||||
self.nodes.retain(|x| x.kns_update.name != remove);
|
||||
}
|
||||
}
|
||||
|
||||
@ -157,7 +164,9 @@ pub async fn provider(
|
||||
caps_oracle: CapMessageSender,
|
||||
print_tx: PrintSender,
|
||||
) -> Result<()> {
|
||||
// load access settings if they've been saved
|
||||
// load access settings if they've been persisted to disk
|
||||
// this merely describes whether our provider is available to other nodes
|
||||
// and if so, which nodes are allowed to access it (public/whitelist/blacklist)
|
||||
let access_settings: AccessSettings =
|
||||
match tokio::fs::read_to_string(format!("{}/.eth_access_settings", home_directory_path))
|
||||
.await
|
||||
@ -169,11 +178,6 @@ pub async fn provider(
|
||||
allow: HashSet::new(),
|
||||
deny: HashSet::new(),
|
||||
};
|
||||
let _ = tokio::fs::write(
|
||||
format!("{}/.eth_access_settings", home_directory_path),
|
||||
serde_json::to_string(&access_settings).unwrap(),
|
||||
)
|
||||
.await;
|
||||
access_settings
|
||||
}
|
||||
};
|
||||
@ -183,6 +187,9 @@ pub async fn provider(
|
||||
)
|
||||
.await;
|
||||
|
||||
// initialize module state
|
||||
// fill out providers based on saved configs (possibly persisted, given to us)
|
||||
// this can be a mix of node providers and rpc providers
|
||||
let mut state = ModuleState {
|
||||
our: Arc::new(our),
|
||||
home_directory_path,
|
||||
@ -208,14 +215,13 @@ pub async fn provider(
|
||||
|
||||
verbose_print(&state.print_tx, "eth: provider initialized").await;
|
||||
|
||||
// main loop: handle incoming network errors and incoming kernel messages
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(wrapped_error) = net_error_recv.recv() => {
|
||||
handle_network_error(
|
||||
wrapped_error,
|
||||
&state.active_subscriptions,
|
||||
&state.response_channels,
|
||||
&state.print_tx
|
||||
&state,
|
||||
).await;
|
||||
}
|
||||
Some(km) = recv_in_client.recv() => {
|
||||
@ -241,40 +247,54 @@ pub async fn provider(
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_network_error(
|
||||
wrapped_error: WrappedSendError,
|
||||
active_subscriptions: &ActiveSubscriptions,
|
||||
response_channels: &ResponseChannels,
|
||||
print_tx: &PrintSender,
|
||||
) {
|
||||
verbose_print(&print_tx, "eth: got network error").await;
|
||||
// if we hold active subscriptions for the remote node that this error refers to,
|
||||
// close them here -- they will need to resubscribe
|
||||
// TODO is this necessary?
|
||||
if let Some((_who, sub_map)) = active_subscriptions.remove(&wrapped_error.error.target) {
|
||||
for (_sub_id, sub) in sub_map.iter() {
|
||||
if let ActiveSub::Local(handle) = sub {
|
||||
verbose_print(
|
||||
&print_tx,
|
||||
"eth: closing local sub in response to network error",
|
||||
)
|
||||
.await;
|
||||
handle.abort();
|
||||
}
|
||||
/// network errors only come from remote provider nodes we tried to access,
|
||||
/// or from remote nodes that are using us as a provider.
|
||||
///
|
||||
/// if we tried to access them, we will have a response channel to send the error to.
|
||||
/// if they are using us as a provider, close the subscription associated with the target.
|
||||
async fn handle_network_error(wrapped_error: WrappedSendError, state: &ModuleState) {
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
&format!(
|
||||
"eth: got network error from {}",
|
||||
&wrapped_error.error.target
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
// close all subscriptions held by the process that we (possibly) tried to send an update to
|
||||
if let Some((_who, sub_map)) = state
|
||||
.active_subscriptions
|
||||
.remove(&wrapped_error.error.target)
|
||||
{
|
||||
for (sub_id, sub) in sub_map.iter() {
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
&format!(
|
||||
"eth: closed subscription {} in response to network error",
|
||||
sub_id
|
||||
),
|
||||
)
|
||||
.await;
|
||||
sub.close(*sub_id, state).await;
|
||||
}
|
||||
}
|
||||
// we got an error from a remote node provider --
|
||||
// forward it to response channel if it exists
|
||||
if let Some(chan) = response_channels.get(&wrapped_error.id) {
|
||||
// can't close channel here, as response may be an error
|
||||
// and fulfill_request may wish to try other providers.
|
||||
verbose_print(&print_tx, "eth: sent network error to response channel").await;
|
||||
|
||||
// forward error to response channel if it exists
|
||||
if let Some(chan) = state.response_channels.get(&wrapped_error.id) {
|
||||
// don't close channel here, as channel holder will wish to try other providers.
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
"eth: forwarded network error to response channel",
|
||||
)
|
||||
.await;
|
||||
let _ = chan.send(Err(wrapped_error)).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`].
|
||||
/// also handle responses that are passthroughs from remote provider nodes.
|
||||
/// handle incoming requests and responses.
|
||||
/// requests must be one of types in [`IncomingReq`].
|
||||
/// responses are passthroughs from remote provider nodes.
|
||||
async fn handle_message(
|
||||
state: &mut ModuleState,
|
||||
km: KernelMessage,
|
||||
@ -335,6 +355,8 @@ async fn handle_message(
|
||||
{
|
||||
if provider_node == &km.source.node {
|
||||
if let Ok(()) = sender.send(eth_sub_result).await {
|
||||
// successfully sent a subscription update from a
|
||||
// remote provider to one of our processes
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
@ -344,7 +366,7 @@ async fn handle_message(
|
||||
// so they can stop sending us updates
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
"eth: got eth_sub_result but no matching sub found",
|
||||
"eth: got eth_sub_result but no matching sub found, unsubscribing",
|
||||
)
|
||||
.await;
|
||||
kernel_message(
|
||||
@ -367,6 +389,26 @@ async fn handle_message(
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
"eth: got sub_keepalive but no matching sub found",
|
||||
)
|
||||
.await;
|
||||
// send a response with an EthSubError
|
||||
kernel_message(
|
||||
&state.our.clone(),
|
||||
km.id,
|
||||
km.source.clone(),
|
||||
None,
|
||||
false,
|
||||
None,
|
||||
EthSubResult::Err(EthSubError {
|
||||
id: sub_id,
|
||||
error: "Subscription not found".to_string(),
|
||||
}),
|
||||
&state.send_to_loop,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -382,7 +424,10 @@ async fn handle_eth_action(
|
||||
) -> Result<(), EthError> {
|
||||
// check our access settings if the request is from a remote node
|
||||
if km.source.node != *state.our {
|
||||
if state.access_settings.deny.contains(&km.source.node) {
|
||||
if state.access_settings.deny.contains(&km.source.node)
|
||||
|| (!state.access_settings.public
|
||||
&& !state.access_settings.allow.contains(&km.source.node))
|
||||
{
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
"eth: got eth_action from unauthorized remote source",
|
||||
@ -390,21 +435,19 @@ async fn handle_eth_action(
|
||||
.await;
|
||||
return Err(EthError::PermissionDenied);
|
||||
}
|
||||
if !state.access_settings.public {
|
||||
if !state.access_settings.allow.contains(&km.source.node) {
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
"eth: got eth_action from unauthorized remote source",
|
||||
)
|
||||
.await;
|
||||
return Err(EthError::PermissionDenied);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
verbose_print(
|
||||
&state.print_tx,
|
||||
&format!("eth: handling eth_action {eth_action:?}"),
|
||||
&format!(
|
||||
"eth: handling {} from {}",
|
||||
match ð_action {
|
||||
EthAction::SubscribeLogs { .. } => "subscribe",
|
||||
EthAction::UnsubscribeLogs(_) => "unsubscribe",
|
||||
EthAction::Request { .. } => "request",
|
||||
},
|
||||
km.source
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
@ -414,19 +457,15 @@ async fn handle_eth_action(
|
||||
// before returning an error.
|
||||
match eth_action {
|
||||
EthAction::SubscribeLogs { sub_id, .. } => {
|
||||
tokio::spawn(subscription::create_new_subscription(
|
||||
state.our.to_string(),
|
||||
subscription::create_new_subscription(
|
||||
state,
|
||||
km.id,
|
||||
km.source.clone(),
|
||||
km.rsvp,
|
||||
state.send_to_loop.clone(),
|
||||
sub_id,
|
||||
eth_action,
|
||||
state.providers.clone(),
|
||||
state.active_subscriptions.clone(),
|
||||
state.response_channels.clone(),
|
||||
state.print_tx.clone(),
|
||||
));
|
||||
)
|
||||
.await;
|
||||
}
|
||||
EthAction::UnsubscribeLogs(sub_id) => {
|
||||
let mut sub_map = state
|
||||
@ -509,29 +548,62 @@ async fn fulfill_request(
|
||||
let Some(mut aps) = providers.get_mut(&chain_id) else {
|
||||
return EthResponse::Err(EthError::NoRpcForChain);
|
||||
};
|
||||
|
||||
// first, try any url providers we have for this chain,
|
||||
// then if we have none or they all fail, go to node provider.
|
||||
// then if we have none or they all fail, go to node providers.
|
||||
// finally, if no provider works, return an error.
|
||||
for url_provider in &mut aps.urls {
|
||||
|
||||
// bump the successful provider to the front of the list for future requests
|
||||
for (index, url_provider) in aps.urls.iter_mut().enumerate() {
|
||||
let pubsub = match &url_provider.pubsub {
|
||||
Some(pubsub) => pubsub,
|
||||
None => {
|
||||
if let Ok(()) = activate_url_provider(url_provider).await {
|
||||
verbose_print(print_tx, "eth: activated a url provider").await;
|
||||
verbose_print(
|
||||
print_tx,
|
||||
&format!("eth: activated url provider {}", url_provider.url),
|
||||
)
|
||||
.await;
|
||||
url_provider.pubsub.as_ref().unwrap()
|
||||
} else {
|
||||
verbose_print(
|
||||
print_tx,
|
||||
&format!("eth: could not activate url provider {}", url_provider.url),
|
||||
)
|
||||
.await;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
let Ok(value) = pubsub.inner().prepare(method, params.clone()).await else {
|
||||
// this provider failed and needs to be reset
|
||||
url_provider.pubsub = None;
|
||||
continue;
|
||||
};
|
||||
return EthResponse::Response { value };
|
||||
match pubsub.inner().prepare(method, params.clone()).await {
|
||||
Ok(value) => {
|
||||
let successful_provider = aps.urls.remove(index);
|
||||
aps.urls.insert(0, successful_provider);
|
||||
return EthResponse::Response { value };
|
||||
}
|
||||
Err(rpc_error) => {
|
||||
verbose_print(
|
||||
print_tx,
|
||||
&format!(
|
||||
"eth: got error from url provider {}: {}",
|
||||
url_provider.url, rpc_error
|
||||
),
|
||||
)
|
||||
.await;
|
||||
// this provider failed and needs to be reset
|
||||
url_provider.pubsub = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
for node_provider in &mut aps.nodes {
|
||||
verbose_print(
|
||||
print_tx,
|
||||
&format!(
|
||||
"eth: attempting to fulfill via {}",
|
||||
node_provider.kns_update.name
|
||||
),
|
||||
)
|
||||
.await;
|
||||
let response = forward_to_node_provider(
|
||||
our,
|
||||
km_id,
|
||||
@ -563,14 +635,14 @@ async fn forward_to_node_provider(
|
||||
send_to_loop: &MessageSender,
|
||||
receiver: &mut ProcessMessageReceiver,
|
||||
) -> EthResponse {
|
||||
if !node_provider.usable || node_provider.name == our {
|
||||
if !node_provider.usable || node_provider.kns_update.name == our {
|
||||
return EthResponse::Err(EthError::PermissionDenied);
|
||||
}
|
||||
kernel_message(
|
||||
our,
|
||||
km_id,
|
||||
Address {
|
||||
node: node_provider.name.clone(),
|
||||
node: node_provider.kns_update.name.clone(),
|
||||
process: ETH_PROCESS_ID.clone(),
|
||||
},
|
||||
rsvp,
|
||||
@ -585,15 +657,13 @@ async fn forward_to_node_provider(
|
||||
else {
|
||||
return EthResponse::Err(EthError::RpcTimeout);
|
||||
};
|
||||
let Message::Response((resp, _context)) = response_km.message else {
|
||||
// if we hit this, they spoofed a request with same id, ignore and possibly punish
|
||||
return EthResponse::Err(EthError::RpcMalformedResponse);
|
||||
};
|
||||
let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) else {
|
||||
// if we hit this, they sent a malformed response, ignore and possibly punish
|
||||
return EthResponse::Err(EthError::RpcMalformedResponse);
|
||||
};
|
||||
eth_response
|
||||
if let Message::Response((resp, _context)) = response_km.message {
|
||||
if let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) {
|
||||
return eth_response;
|
||||
}
|
||||
}
|
||||
// if we hit this, they sent a malformed response, ignore and possibly punish
|
||||
EthResponse::Err(EthError::RpcMalformedResponse)
|
||||
}
|
||||
|
||||
async fn handle_eth_config_action(
|
||||
@ -627,6 +697,7 @@ async fn handle_eth_config_action(
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut save_settings = false;
|
||||
let mut save_providers = false;
|
||||
|
||||
// modify our providers and access settings based on config action
|
||||
@ -650,21 +721,27 @@ async fn handle_eth_config_action(
|
||||
}
|
||||
EthConfigAction::SetPublic => {
|
||||
state.access_settings.public = true;
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::SetPrivate => {
|
||||
state.access_settings.public = false;
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::AllowNode(node) => {
|
||||
state.access_settings.allow.insert(node);
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::UnallowNode(node) => {
|
||||
state.access_settings.allow.remove(&node);
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::DenyNode(node) => {
|
||||
state.access_settings.deny.insert(node);
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::UndenyNode(node) => {
|
||||
state.access_settings.deny.remove(&node);
|
||||
save_settings = true;
|
||||
}
|
||||
EthConfigAction::SetProviders(new_providers) => {
|
||||
let new_map = DashMap::new();
|
||||
@ -713,20 +790,26 @@ async fn handle_eth_config_action(
|
||||
};
|
||||
}
|
||||
}
|
||||
// save providers and access settings to disk
|
||||
let _ = tokio::fs::write(
|
||||
format!("{}/.eth_access_settings", state.home_directory_path),
|
||||
serde_json::to_string(&state.access_settings).unwrap(),
|
||||
)
|
||||
.await;
|
||||
verbose_print(&state.print_tx, "eth: saved new access settings").await;
|
||||
// save providers and/or access settings, depending on necessity, to disk
|
||||
if save_settings {
|
||||
if let Ok(()) = tokio::fs::write(
|
||||
format!("{}/.eth_access_settings", state.home_directory_path),
|
||||
serde_json::to_string(&state.access_settings).unwrap(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
verbose_print(&state.print_tx, "eth: saved new access settings").await;
|
||||
};
|
||||
}
|
||||
if save_providers {
|
||||
let _ = tokio::fs::write(
|
||||
if let Ok(()) = tokio::fs::write(
|
||||
format!("{}/.eth_providers", state.home_directory_path),
|
||||
serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
|
||||
)
|
||||
.await;
|
||||
verbose_print(&state.print_tx, "eth: saved new provider settings").await;
|
||||
.await
|
||||
{
|
||||
verbose_print(&state.print_tx, "eth: saved new provider settings").await;
|
||||
};
|
||||
}
|
||||
EthConfigResponse::Ok
|
||||
}
|
||||
@ -767,15 +850,7 @@ fn providers_to_saved_configs(providers: &Providers) -> SavedConfigs {
|
||||
.chain(entry.nodes.iter().map(|node_provider| ProviderConfig {
|
||||
chain_id: *entry.key(),
|
||||
provider: NodeOrRpcUrl::Node {
|
||||
kns_update: KnsUpdate {
|
||||
name: node_provider.name.clone(),
|
||||
owner: "".to_string(),
|
||||
node: "".to_string(),
|
||||
public_key: "".to_string(),
|
||||
ip: "".to_string(),
|
||||
port: 0,
|
||||
routers: vec![],
|
||||
},
|
||||
kns_update: node_provider.kns_update.clone(),
|
||||
use_as_provider: node_provider.usable,
|
||||
},
|
||||
trusted: node_provider.trusted,
|
||||
|
@ -4,123 +4,82 @@ use alloy_rpc_types::pubsub::SubscriptionResult;
|
||||
|
||||
/// cleans itself up when the subscription is closed or fails.
|
||||
pub async fn create_new_subscription(
|
||||
our: String,
|
||||
state: &ModuleState,
|
||||
km_id: u64,
|
||||
target: Address,
|
||||
rsvp: Option<Address>,
|
||||
send_to_loop: MessageSender,
|
||||
sub_id: u64,
|
||||
eth_action: EthAction,
|
||||
providers: Providers,
|
||||
active_subscriptions: ActiveSubscriptions,
|
||||
response_channels: ResponseChannels,
|
||||
print_tx: PrintSender,
|
||||
) {
|
||||
verbose_print(&print_tx, "eth: creating new subscription").await;
|
||||
match build_subscription(
|
||||
&our,
|
||||
km_id,
|
||||
&target,
|
||||
&send_to_loop,
|
||||
ð_action,
|
||||
&providers,
|
||||
&response_channels,
|
||||
&print_tx,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(maybe_raw_sub) => {
|
||||
// send a response to the target that the subscription was successful
|
||||
kernel_message(
|
||||
let our = state.our.clone();
|
||||
let send_to_loop = state.send_to_loop.clone();
|
||||
let active_subscriptions = state.active_subscriptions.clone();
|
||||
let providers = state.providers.clone();
|
||||
let response_channels = state.response_channels.clone();
|
||||
let print_tx = state.print_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
match tokio::time::timeout(
|
||||
std::time::Duration::from_secs(30),
|
||||
build_subscription(
|
||||
&our,
|
||||
km_id,
|
||||
target.clone(),
|
||||
rsvp.clone(),
|
||||
false,
|
||||
None,
|
||||
EthResponse::Ok,
|
||||
&target,
|
||||
&send_to_loop,
|
||||
)
|
||||
.await;
|
||||
let mut subs = active_subscriptions
|
||||
.entry(target.clone())
|
||||
.or_insert(HashMap::new());
|
||||
let active_subscriptions = active_subscriptions.clone();
|
||||
match maybe_raw_sub {
|
||||
Ok(rx) => {
|
||||
subs.insert(
|
||||
sub_id,
|
||||
// this is a local sub, as in, we connect to the rpc endpt
|
||||
ActiveSub::Local(tokio::spawn(async move {
|
||||
// await the subscription error and kill it if so
|
||||
if let Err(e) = maintain_local_subscription(
|
||||
&our,
|
||||
sub_id,
|
||||
rx,
|
||||
&target,
|
||||
&rsvp,
|
||||
&send_to_loop,
|
||||
)
|
||||
.await
|
||||
{
|
||||
verbose_print(
|
||||
&print_tx,
|
||||
"eth: closed local subscription due to error",
|
||||
)
|
||||
.await;
|
||||
kernel_message(
|
||||
ð_action,
|
||||
&providers,
|
||||
&response_channels,
|
||||
&print_tx,
|
||||
),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(maybe_raw_sub)) => {
|
||||
// send a response to the target that the subscription was successful
|
||||
kernel_message(
|
||||
&our,
|
||||
km_id,
|
||||
target.clone(),
|
||||
rsvp.clone(),
|
||||
false,
|
||||
None,
|
||||
EthResponse::Ok,
|
||||
&send_to_loop,
|
||||
)
|
||||
.await;
|
||||
let mut subs = active_subscriptions
|
||||
.entry(target.clone())
|
||||
.or_insert(HashMap::new());
|
||||
let active_subscriptions = active_subscriptions.clone();
|
||||
match maybe_raw_sub {
|
||||
Ok(rx) => {
|
||||
let our = our.clone();
|
||||
let send_to_loop = send_to_loop.clone();
|
||||
let print_tx = print_tx.clone();
|
||||
subs.insert(
|
||||
sub_id,
|
||||
// this is a local sub, as in, we connect to the rpc endpoint
|
||||
ActiveSub::Local(tokio::spawn(async move {
|
||||
// await the subscription error and kill it if so
|
||||
if let Err(e) = maintain_local_subscription(
|
||||
&our,
|
||||
rand::random(),
|
||||
target.clone(),
|
||||
rsvp,
|
||||
true,
|
||||
None,
|
||||
EthSubResult::Err(e),
|
||||
&send_to_loop,
|
||||
)
|
||||
.await;
|
||||
active_subscriptions.entry(target).and_modify(|sub_map| {
|
||||
sub_map.remove(&km_id);
|
||||
});
|
||||
}
|
||||
})),
|
||||
);
|
||||
}
|
||||
Err((provider_node, remote_sub_id)) => {
|
||||
// this is a remote sub, given by a relay node
|
||||
let (sender, rx) = tokio::sync::mpsc::channel(10);
|
||||
let keepalive_km_id = rand::random();
|
||||
let (keepalive_err_sender, keepalive_err_receiver) =
|
||||
tokio::sync::mpsc::channel(1);
|
||||
response_channels.insert(keepalive_km_id, keepalive_err_sender);
|
||||
subs.insert(
|
||||
remote_sub_id,
|
||||
ActiveSub::Remote {
|
||||
provider_node: provider_node.clone(),
|
||||
handle: tokio::spawn(async move {
|
||||
if let Err(e) = maintain_remote_subscription(
|
||||
&our,
|
||||
&provider_node,
|
||||
remote_sub_id,
|
||||
sub_id,
|
||||
keepalive_km_id,
|
||||
rx,
|
||||
keepalive_err_receiver,
|
||||
&target,
|
||||
&rsvp,
|
||||
&send_to_loop,
|
||||
)
|
||||
.await
|
||||
{
|
||||
verbose_print(
|
||||
&print_tx,
|
||||
"eth: closed subscription with provider node due to error",
|
||||
"eth: closed local subscription due to error",
|
||||
)
|
||||
.await;
|
||||
kernel_message(
|
||||
&our,
|
||||
rand::random(),
|
||||
target.clone(),
|
||||
None,
|
||||
rsvp,
|
||||
true,
|
||||
None,
|
||||
EthSubResult::Err(e),
|
||||
@ -128,21 +87,84 @@ pub async fn create_new_subscription(
|
||||
)
|
||||
.await;
|
||||
active_subscriptions.entry(target).and_modify(|sub_map| {
|
||||
sub_map.remove(&sub_id);
|
||||
sub_map.remove(&km_id);
|
||||
});
|
||||
response_channels.remove(&keepalive_km_id);
|
||||
}
|
||||
}),
|
||||
sender,
|
||||
},
|
||||
);
|
||||
})),
|
||||
);
|
||||
}
|
||||
Err((provider_node, remote_sub_id)) => {
|
||||
// this is a remote sub, given by a relay node
|
||||
let (sender, rx) = tokio::sync::mpsc::channel(10);
|
||||
let keepalive_km_id = rand::random();
|
||||
let (keepalive_err_sender, keepalive_err_receiver) =
|
||||
tokio::sync::mpsc::channel(1);
|
||||
response_channels.insert(keepalive_km_id, keepalive_err_sender);
|
||||
let our = our.clone();
|
||||
let send_to_loop = send_to_loop.clone();
|
||||
let print_tx = print_tx.clone();
|
||||
let response_channels = response_channels.clone();
|
||||
subs.insert(
|
||||
remote_sub_id,
|
||||
ActiveSub::Remote {
|
||||
provider_node: provider_node.clone(),
|
||||
handle: tokio::spawn(async move {
|
||||
if let Err(e) = maintain_remote_subscription(
|
||||
&our,
|
||||
&provider_node,
|
||||
remote_sub_id,
|
||||
sub_id,
|
||||
keepalive_km_id,
|
||||
rx,
|
||||
keepalive_err_receiver,
&target,
&send_to_loop,
)
.await
{
verbose_print(
&print_tx,
"eth: closed subscription with provider node due to error",
)
.await;
kernel_message(
&our,
rand::random(),
target.clone(),
None,
true,
None,
EthSubResult::Err(e),
&send_to_loop,
)
.await;
active_subscriptions.entry(target).and_modify(|sub_map| {
sub_map.remove(&sub_id);
});
response_channels.remove(&keepalive_km_id);
}
}),
sender,
},
);
}
}
}
Ok(Err(e)) => {
error_message(&our, km_id, target.clone(), e, &send_to_loop).await;
}
Err(_) => {
error_message(
&our,
km_id,
target.clone(),
EthError::RpcTimeout,
&send_to_loop,
)
.await;
}
}
Err(e) => {
error_message(&our, km_id, target.clone(), e, &send_to_loop).await;
}
}
});
}

/// terrible abuse of result in return type, yes, sorry
@ -171,38 +193,71 @@ async fn build_subscription(
// first, try any url providers we have for this chain,
// then if we have none or they all fail, go to node providers.
// finally, if no provider works, return an error.
for url_provider in &mut aps.urls {

// bump the successful provider to the front of the list for future requests
for (index, url_provider) in aps.urls.iter_mut().enumerate() {
let pubsub = match &url_provider.pubsub {
Some(pubsub) => pubsub,
None => {
if let Ok(()) = activate_url_provider(url_provider).await {
verbose_print(print_tx, "eth: activated a url provider").await;
verbose_print(
&print_tx,
&format!("eth: activated url provider {}", url_provider.url),
)
.await;
url_provider.pubsub.as_ref().unwrap()
} else {
verbose_print(
&print_tx,
&format!("eth: could not activate url provider {}", url_provider.url),
)
.await;
continue;
}
}
};
let kind = serde_json::to_value(&kind).unwrap();
let params = serde_json::to_value(&params).unwrap();
if let Ok(id) = pubsub
match pubsub
.inner()
.prepare("eth_subscribe", [kind, params])
.await
{
let rx = pubsub.inner().get_raw_subscription(id).await;
return Ok(Ok(rx));
Ok(id) => {
let rx = pubsub.inner().get_raw_subscription(id).await;
let successful_provider = aps.urls.remove(index);
aps.urls.insert(0, successful_provider);
return Ok(Ok(rx));
}
Err(rpc_error) => {
verbose_print(
&print_tx,
&format!(
"eth: got error from url provider {}: {}",
url_provider.url, rpc_error
),
)
.await;
// this provider failed and needs to be reset
url_provider.pubsub = None;
}
}
// this provider failed and needs to be reset
url_provider.pubsub = None;
}
// now we need a response channel

let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1);
response_channels.insert(km_id, sender);
// we need to create our own unique sub id because in the remote provider node,
// all subs will be identified under our process address.
let remote_sub_id = rand::random();
for node_provider in &mut aps.nodes {
verbose_print(
&print_tx,
&format!(
"eth: attempting to fulfill via {}",
node_provider.kns_update.name
),
)
.await;
match forward_to_node_provider(
&our,
km_id,
@ -232,7 +287,7 @@ async fn build_subscription(
)
.await;
response_channels.remove(&km_id);
return Ok(Err((node_provider.name.clone(), remote_sub_id)));
return Ok(Err((node_provider.kns_update.name.clone(), remote_sub_id)));
}
EthResponse::Response { .. } => {
// the response to a SubscribeLogs request must be an 'ok'
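The hunk above makes the subscription builder reorder aps.urls so the provider that just served a request is tried first next time, and resets the pubsub handle of a provider that errored. A minimal standalone sketch of that rotate-to-front pattern, using a stand-in UrlProvider type rather than the crate's real saved-provider struct:

// Illustrative only: stand-in for a saved url provider entry.
struct UrlProvider {
    url: String,
}

// Move the provider at `index` to the front so it is tried first next time.
fn bump_to_front(urls: &mut Vec<UrlProvider>, index: usize) {
    if index < urls.len() {
        let successful = urls.remove(index);
        urls.insert(0, successful);
    }
}

fn main() {
    let mut urls = vec![
        UrlProvider { url: "wss://a.example".into() },
        UrlProvider { url: "wss://b.example".into() },
    ];
    // pretend the provider at index 1 just served a subscription successfully
    bump_to_front(&mut urls, 1);
    assert_eq!(urls[0].url, "wss://b.example");
}
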
@ -265,7 +265,7 @@ async fn listen_to_stream(
match message {
Ok(msg) => {
// Handle different types of incoming WebSocket messages
let (body, blob) = match msg {
let (body, blob, should_exit) = match msg {
TungsteniteMessage::Text(text) => (
HttpClientRequest::WebSocketPush {
channel_id,
@ -275,6 +275,7 @@ async fn listen_to_stream(
mime: Some("text/plain".into()),
bytes: text.into_bytes(),
}),
false,
),
TungsteniteMessage::Binary(bytes) => (
HttpClientRequest::WebSocketPush {
@ -285,12 +286,13 @@ async fn listen_to_stream(
mime: Some("application/octet-stream".into()),
bytes,
}),
false,
),
TungsteniteMessage::Close(_) => {
// remove the websocket from the map
ws_streams.remove(&(target.process.clone(), channel_id));

(HttpClientRequest::WebSocketClose { channel_id }, None)
(HttpClientRequest::WebSocketClose { channel_id }, None, true)
}
TungsteniteMessage::Ping(_) => (
HttpClientRequest::WebSocketPush {
@ -298,6 +300,7 @@ async fn listen_to_stream(
message_type: WsMessageType::Ping,
},
None,
false,
),
TungsteniteMessage::Pong(_) => (
HttpClientRequest::WebSocketPush {
@ -305,6 +308,7 @@ async fn listen_to_stream(
message_type: WsMessageType::Pong,
},
None,
false,
),
_ => {
// should never get a TungsteniteMessage::Frame, ignore if we do
@ -312,15 +316,21 @@ async fn listen_to_stream(
}
};

handle_ws_message(
our.clone(),
id,
target.clone(),
body,
blob,
send_to_loop.clone(),
)
.await;
if ws_streams.contains_key(&(target.process.clone(), channel_id)) || should_exit {
handle_ws_message(
our.clone(),
id,
target.clone(),
body,
blob,
send_to_loop.clone(),
)
.await;
}

if should_exit {
break;
}
}
Err(e) => {
println!("WebSocket Client Error ({}): {:?}", channel_id, e);
@ -633,7 +643,7 @@ async fn close_ws_connection(
ws_streams: WebSocketStreams,
_print_tx: PrintSender,
) -> Result<HttpClientResponse, HttpClientError> {
let Some(mut ws_sink) = ws_streams.get_mut(&(target.process.clone(), channel_id)) else {
let Some((_, mut ws_sink)) = ws_streams.remove(&(target.process.clone(), channel_id)) else {
return Err(HttpClientError::WsCloseFailed { channel_id });
};

@ -1,10 +1,12 @@
use crate::http::server_types::*;
use crate::http::utils::*;
use crate::{keygen, register};
use crate::keygen;
use anyhow::Result;
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
use dashmap::DashMap;
use futures::{SinkExt, StreamExt};
use http::uri::Authority;
use lib::types::core::*;
use route_recognizer::Router;
use sha2::{Digest, Sha256};
use std::collections::HashMap;
@ -15,8 +17,6 @@ use warp::http::{header::HeaderValue, StatusCode};
use warp::ws::{WebSocket, Ws};
use warp::{Filter, Reply};

use lib::types::core::*;

#[cfg(not(feature = "simulation-mode"))]
const HTTP_SELF_IMPOSED_TIMEOUT: u64 = 15;
#[cfg(feature = "simulation-mode")]
@ -331,7 +331,7 @@ async fn login_handler(

match keygen::decode_keyfile(&encoded_keyfile, &info.password_hash) {
Ok(keyfile) => {
let token = match register::generate_jwt(&keyfile.jwt_secret_bytes, our.as_ref()) {
let token = match keygen::generate_jwt(&keyfile.jwt_secret_bytes, our.as_ref()) {
Some(token) => token,
None => {
return Ok(warp::reply::with_status(
@ -343,14 +343,14 @@ async fn login_handler(
};

let mut response = warp::reply::with_status(
warp::reply::json(&base64::encode(encoded_keyfile.to_vec())),
warp::reply::json(&base64_standard.encode(encoded_keyfile.to_vec())),
StatusCode::OK,
)
.into_response();

match HeaderValue::from_str(&format!("kinode-auth_{}={};", our.as_ref(), &token)) {
Ok(v) => {
response.headers_mut().append(http::header::SET_COOKIE, v);
response.headers_mut().append("set-cookie", v);
Ok(response)
}
Err(_) => Ok(warp::reply::with_status(
@ -372,7 +372,7 @@ async fn ws_handler(
ws_connection: Ws,
socket_addr: Option<SocketAddr>,
path: warp::path::FullPath,
host: Option<Authority>,
host: Option<warp::host::Authority>,
headers: warp::http::HeaderMap,
our: Arc<String>,
jwt_secret_bytes: Arc<Vec<u8>>,
@ -468,7 +468,7 @@ async fn ws_handler(
async fn http_handler(
method: warp::http::Method,
socket_addr: Option<SocketAddr>,
host: Option<Authority>,
host: Option<warp::host::Authority>,
path: warp::path::FullPath,
query_params: HashMap<String, String>,
headers: warp::http::HeaderMap,
@ -529,7 +529,7 @@ async fn http_handler(
"Location",
format!(
"http://{}/login",
host.unwrap_or(Authority::from_static("localhost"))
host.unwrap_or(warp::host::Authority::from_static("localhost"))
),
)
.body(vec![])
@ -619,7 +619,7 @@ async fn http_handler(
method: method.to_string(),
url: format!(
"http://{}{}",
host.unwrap_or(Authority::from_static("localhost")),
host.unwrap_or(warp::host::Authority::from_static("localhost")),
original_path
),
bound_path: bound_path.path.clone(),
@ -735,7 +735,7 @@ async fn handle_rpc_message(

let blob: Option<LazyLoadBlob> = match rpc_message.data {
None => None,
Some(b64_bytes) => match base64::decode(b64_bytes) {
Some(b64_bytes) => match base64_standard.decode(b64_bytes) {
Ok(bytes) => Some(LazyLoadBlob {
mime: rpc_message.mime,
bytes,
@ -1069,7 +1069,7 @@ async fn handle_app_message(
if path == "/rpc:distro:sys/message" {
let blob = km.lazy_load_blob.map(|p| LazyLoadBlob {
mime: p.mime,
bytes: base64::encode(p.bytes).into_bytes(),
bytes: base64_standard.encode(p.bytes).into_bytes(),
});

let mut default_headers = HashMap::new();
@ -1143,8 +1143,14 @@ async fn handle_app_message(
.send(Printout {
verbosity: 2,
content: format!(
"binding path {path} for {}, authenticated={authenticated}, local={local_only}, cached={cache}",
km.source.process
"http: binding {path}, {}, {}, {}",
if authenticated {
"authenticated"
} else {
"unauthenticated"
},
if local_only { "local only" } else { "open" },
if cache { "cached" } else { "dynamic" },
),
})
.await;
@ -607,7 +607,7 @@ pub async fn make_process_loop(
};

// the process will run until it returns from init() or crashes
let is_error = match bindings
match bindings
.call_init(&mut store, &metadata.our.to_string())
.await
{
@ -618,7 +618,6 @@ pub async fn make_process_loop(
content: format!("process {} returned without error", metadata.our.process),
})
.await;
false
}
Err(_) => {
let stderr = wasi_stderr.contents().into();
@ -632,7 +631,6 @@ pub async fn make_process_loop(
),
})
.await;
true
}
};

@ -715,73 +713,61 @@ pub async fn make_process_loop(
lazy_load_blob: None,
})
.await?;
if is_error {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 0,
content: format!(
"skipping OnExit::Restart for process {} due to crash",
metadata.our.process
),
})
.await;
} else {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"firing OnExit::Restart for process {}",
metadata.our.process
),
})
.await;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
id: metadata.our.process.clone(),
wasm_bytes_handle: metadata.wasm_bytes_handle,
wit_version: Some(metadata.wit_version),
on_exit: metadata.on_exit,
initial_capabilities,
public: metadata.public,
})
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: Some(t::LazyLoadBlob {
mime: None,
bytes: wasm_bytes,
}),
})
.await?;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await?;
}
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"firing OnExit::Restart for process {}",
metadata.our.process
),
})
.await;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
id: metadata.our.process.clone(),
wasm_bytes_handle: metadata.wasm_bytes_handle,
wit_version: Some(metadata.wit_version),
on_exit: metadata.on_exit,
initial_capabilities,
public: metadata.public,
})
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: Some(t::LazyLoadBlob {
mime: None,
bytes: wasm_bytes,
}),
})
.await?;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await?;
}
// if requests, fire them
// even in death, a process can only message processes it has capabilities for
@ -3,13 +3,17 @@ use aes_gcm::{
Aes256Gcm, Key,
};
use alloy_primitives::keccak256;
use anyhow::Result;
use digest::generic_array::GenericArray;
use hmac::Hmac;
use jwt::SignWithKey;
use lib::types::core::Keyfile;
use ring::pbkdf2;
use ring::pkcs8::Document;
use ring::rand::SystemRandom;
use ring::signature::{self, KeyPair};
use ring::{digest as ring_digest, rand::SecureRandom};
use sha2::Sha256;
use std::num::NonZeroU32;

type DiskKey = [u8; CREDENTIAL_LEN];
@ -108,6 +112,23 @@ pub fn decode_keyfile(keyfile: &[u8], password: &str) -> Result<Keyfile, &'stati
})
}

pub fn generate_jwt(jwt_secret_bytes: &[u8], username: &str) -> Option<String> {
let jwt_secret: Hmac<Sha256> = match Hmac::new_from_slice(jwt_secret_bytes) {
Ok(secret) => secret,
Err(_) => return None,
};

let claims = crate::http::server_types::JwtClaims {
username: username.to_string(),
expiration: 0,
};

match claims.sign_with_key(&jwt_secret) {
Ok(token) => Some(token),
Err(_) => None,
}
}

pub fn get_username_and_routers(keyfile: &[u8]) -> Result<(String, Vec<String>), &'static str> {
let (username, routers, _salt, _key_enc, _jwt_enc) =
bincode::deserialize::<(String, Vec<String>, Vec<u8>, Vec<u8>, Vec<u8>)>(keyfile)
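With generate_jwt moved into keygen, both the registration flow and the HTTP server's login handler mint tokens from the same keyfile secret. A self-contained sketch of that signing shape, using the hmac/jwt/sha2 crates already pinned in Cargo.toml; the Claims struct below is a stand-in for http::server_types::JwtClaims, and the secret and node name are made up for illustration:

use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use serde::Serialize;
use sha2::Sha256;

// Stand-in for http::server_types::JwtClaims.
#[derive(Serialize)]
struct Claims {
    username: String,
    expiration: u64,
}

// Mirror of keygen::generate_jwt: HMAC-SHA256 over the serialized claims.
fn generate_jwt(secret: &[u8], username: &str) -> Option<String> {
    let key: Hmac<Sha256> = Hmac::new_from_slice(secret).ok()?;
    Claims { username: username.to_string(), expiration: 0 }
        .sign_with_key(&key)
        .ok()
}

fn main() {
    let token = generate_jwt(b"32-byte-secret-from-the-keyfile!", "my-node.os").unwrap();
    // The login handler then sets this token as a kinode-auth_{node} cookie,
    // as the login_handler hunk earlier shows.
    println!("kinode-auth_my-node.os={token};");
}
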
@ -1,14 +1,13 @@
#![feature(async_closure)]
#![feature(btree_extract_if)]

use anyhow::Result;
use clap::{arg, value_parser, Command};
use lib::types::core::*;
#[cfg(feature = "simulation-mode")]
//#[cfg(feature = "simulation-mode")]
use ring::{rand::SystemRandom, signature, signature::KeyPair};
use std::env;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
use tokio::{fs, time::timeout};
use tokio::sync::mpsc;

mod eth;
mod http;
@ -34,152 +33,53 @@ const VFS_CHANNEL_CAPACITY: usize = 1_000;
const CAP_CHANNEL_CAPACITY: usize = 1_000;
const KV_CHANNEL_CAPACITY: usize = 1_000;
const SQLITE_CHANNEL_CAPACITY: usize = 1_000;

const VERSION: &str = env!("CARGO_PKG_VERSION");

/// default routers as a eth-provider fallback
const DEFAULT_PROVIDERS_MAINNET: &str = include_str!("eth/default_providers_mainnet.json");

async fn serve_register_fe(
home_directory_path: &str,
our_ip: String,
ws_networking: (tokio::net::TcpListener, bool),
http_server_port: u16,
maybe_rpc: Option<String>,
) -> (Identity, Vec<u8>, Keyfile) {
// check if we have keys saved on disk, encrypted
// if so, prompt user for "password" to decrypt with

// once password is received, use to decrypt local keys file,
// and pass the keys into boot process as is done in registration.

// NOTE: when we log in, we MUST check the PKI to make sure our
// information matches what we think it should be. this includes
// username, networking key, and routing info.
// if any do not match, we should prompt user to create a "transaction"
// that updates their PKI info on-chain.
let (kill_tx, kill_rx) = oneshot::channel::<bool>();

let disk_keyfile: Option<Vec<u8>> = fs::read(format!("{}/.keys", home_directory_path))
.await
.ok();

let (tx, mut rx) = mpsc::channel::<(Identity, Keyfile, Vec<u8>)>(1);
let (our, decoded_keyfile, encoded_keyfile) = tokio::select! {
_ = register::register(
tx,
kill_rx,
our_ip,
ws_networking,
http_server_port,
disk_keyfile,
maybe_rpc) => {
panic!("registration failed")
}
Some((our, decoded_keyfile, encoded_keyfile)) = rx.recv() => {
(our, decoded_keyfile, encoded_keyfile)
}
};

fs::write(
format!("{}/.keys", home_directory_path),
encoded_keyfile.clone(),
)
.await
.unwrap();

let _ = kill_tx.send(true);

(our, encoded_keyfile, decoded_keyfile)
}
const DEFAULT_ETH_PROVIDERS: &str = include_str!("eth/default_providers_mainnet.json");
#[cfg(not(feature = "simulation-mode"))]
const CHAIN_ID: u64 = 10;
#[cfg(feature = "simulation-mode")]
const CHAIN_ID: u64 = 31337;

#[tokio::main]
async fn main() {
let app = Command::new("kinode")
.version(VERSION)
.author("Kinode DAO: https://github.com/kinode-dao")
.about("A General Purpose Sovereign Cloud Computing Platform")
.arg(arg!([home] "Path to home directory").required(true))
.arg(
arg!(--port <PORT> "Port to bind [default: first unbound at or above 8080]")
.value_parser(value_parser!(u16)),
)
.arg(
arg!(--"ws-port" <PORT> "Kinode internal WebSockets protocol port [default: first unbound at or above 9000]")
.alias("network-router-port")
.value_parser(value_parser!(u16)),
)
.arg(
arg!(--verbosity <VERBOSITY> "Verbosity level: higher is more verbose")
.default_value("0")
.value_parser(value_parser!(u8)),
)
.arg(
arg!(--"reveal-ip" "If set to false, as an indirect node, always use routers to connect to other nodes.")
.default_value("true")
.value_parser(value_parser!(bool)),
)
.arg(arg!(--rpc <RPC> "Add a WebSockets RPC URL at boot"));

#[cfg(feature = "simulation-mode")]
let app = app
.arg(arg!(--password <PASSWORD> "Networking password"))
.arg(arg!(--"fake-node-name" <NAME> "Name of fake node to boot"))
.arg(arg!(--"networking-pk" <NETPK> "Fake networking private key"))
.arg(
arg!(--detached <IS_DETACHED> "Run in detached mode (don't accept input)")
.action(clap::ArgAction::SetTrue),
);
// add arg for fakechain bootup w/ kit?
let fakenode = cfg!(feature = "simulation-mode");
let app = build_command();

let matches = app.get_matches();

let home_directory_path = matches.get_one::<String>("home").unwrap();

let http_port = matches.get_one::<u16>("port");
let home_directory_path = matches
.get_one::<String>("home")
.expect("home directory required");
create_home_directory(&home_directory_path).await;
let http_server_port = set_http_server_port(matches.get_one::<u16>("port")).await;
let ws_networking_port = matches.get_one::<u16>("ws-port");
let verbose_mode = *matches
.get_one::<u8>("verbosity")
.expect("verbosity required");
let rpc = matches.get_one::<String>("rpc");

// if we are in sim-mode, detached determines whether terminal is interactive
#[cfg(not(feature = "simulation-mode"))]
let is_detached = false;

#[cfg(feature = "simulation-mode")]
let (password, fake_node_name, is_detached, net_pk) = (
let (password, fake_node_name, is_detached) = (
matches.get_one::<String>("password"),
matches.get_one::<String>("fake-node-name"),
*matches.get_one::<bool>("detached").unwrap(),
matches.get_one::<String>("networking-pk"),
);

let verbose_mode = *matches.get_one::<u8>("verbosity").unwrap();

// check .testnet file for true/false in order to enforce testnet mode on subsequent boots of this node
match fs::read(format!("{}/.testnet", home_directory_path)).await {
Ok(contents) => {
if contents == b"true" {
println!("\x1b[38;5;196mfatal: this is a deprecated testnet node, either boot a fakenode or a real one. exiting.\x1b[0m");
return;
}
}
_ => {}
}

if let Err(e) = fs::create_dir_all(home_directory_path).await {
panic!("failed to create home directory: {:?}", e);
}
println!("home at {}\r", home_directory_path);

// default eth providers/routers
let mut eth_provider_config: lib::eth::SavedConfigs =
match fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await {
match tokio::fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await {
Ok(contents) => {
println!("loaded saved eth providers\r");
serde_json::from_str(&contents).unwrap()
}
Err(_) => serde_json::from_str(DEFAULT_PROVIDERS_MAINNET).unwrap(),
Err(_) => serde_json::from_str(DEFAULT_ETH_PROVIDERS).unwrap(),
};
if let Some(rpc) = matches.get_one::<String>("rpc") {
eth_provider_config.push(lib::eth::ProviderConfig {
chain_id: if fakenode { 31337 } else { 10 },
chain_id: CHAIN_ID,
trusted: true,
provider: lib::eth::NodeOrRpcUrl::RpcUrl(rpc.to_string()),
});
@ -228,171 +128,25 @@ async fn main() {
let (print_sender, print_receiver): (PrintSender, PrintReceiver) =
mpsc::channel(TERMINAL_CHANNEL_CAPACITY);

println!("finding public IP address...");
let our_ip: std::net::Ipv4Addr = {
if let Ok(Some(ip)) = timeout(std::time::Duration::from_secs(5), public_ip::addr_v4()).await
{
ip
} else {
println!("failed to find public IPv4 address: booting as a routed node");
std::net::Ipv4Addr::LOCALHOST
}
};

let http_server_port = if let Some(port) = http_port {
match http::utils::find_open_port(*port, port + 1).await {
Some(bound) => bound.local_addr().unwrap().port(),
None => {
println!(
"error: couldn't bind {}; first available port found was {}. \
Set an available port with `--port` and try again.",
port,
http::utils::find_open_port(*port, port + 1000)
.await
.expect("no ports found in range")
.local_addr()
.unwrap()
.port(),
);
panic!();
}
}
} else {
match http::utils::find_open_port(8080, 8999).await {
Some(bound) => bound.local_addr().unwrap().port(),
None => {
println!(
"error: couldn't bind any ports between 8080 and 8999. \
Set an available port with `--port` and try again."
);
panic!();
}
}
};

// if the --ws-port flag is used, bind to that port right away.
// if the flag is not used, find the first available port between 9000 and 65535.
// NOTE: if the node has a different port specified in its onchain (direct) id,
// booting will fail if the flag was used to select a different port.
// if the flag was not used, the bound port will be dropped in favor of the onchain port.

#[cfg(not(feature = "simulation-mode"))]
let (ws_tcp_handle, flag_used) = if let Some(port) = ws_networking_port {
(
http::utils::find_open_port(*port, port + 1)
.await
.expect("ws-port selected with flag could not be bound"),
true,
)
} else {
(
http::utils::find_open_port(9000, 65535)
.await
.expect("no ports found in range 9000-65535 for websocket server"),
false,
)
};

println!(
"login or register at http://localhost:{}\r",
http_server_port
);

#[cfg(not(feature = "simulation-mode"))]
let (our, encoded_keyfile, decoded_keyfile) = serve_register_fe(
home_directory_path,
our_ip.to_string(),
(ws_tcp_handle, flag_used),
http_server_port,
matches.get_one::<String>("rpc").cloned(),
)
.await;
let our_ip = find_public_ip().await;
let (wc_tcp_handle, flag_used) = setup_ws_networking(ws_networking_port.cloned()).await;

#[cfg(feature = "simulation-mode")]
let (our, encoded_keyfile, decoded_keyfile) = match fake_node_name {
None => {
match password {
None => {
panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
}
Some(password) => {
match fs::read(format!("{}/.keys", home_directory_path)).await {
Err(e) => panic!("could not read keyfile: {}", e),
Ok(keyfile) => {
match keygen::decode_keyfile(&keyfile, &password) {
Err(e) => panic!("could not decode keyfile: {}", e),
Ok(decoded_keyfile) => {
let our = Identity {
name: decoded_keyfile.username.clone(),
networking_key: format!(
"0x{}",
hex::encode(
decoded_keyfile
.networking_keypair
.public_key()
.as_ref()
)
),
ws_routing: None, // TODO
allowed_routers: decoded_keyfile.routers.clone(),
};
(our, keyfile, decoded_keyfile)
}
}
}
}
}
}
}
Some(name) => {
let password_hash = match password {
None => "secret".to_string(),
Some(password) => password.to_string(),
};

let seed = SystemRandom::new();
let mut jwt_secret = [0u8, 32];
ring::rand::SecureRandom::fill(&seed, &mut jwt_secret).unwrap();

let net_pk_bytes = hex::decode(net_pk.unwrap()).unwrap();

let networking_keypair = signature::Ed25519KeyPair::from_pkcs8(
&net_pk_bytes,
).expect("failed to parse networking private key");

let our = Identity {
name: name.clone(),
networking_key: format!("0x{}", hex::encode(networking_keypair.public_key().as_ref())),
ws_routing: None,
allowed_routers: vec![],
};
let decoded_keyfile = Keyfile {
username: name.clone(),
routers: vec![],
networking_keypair,
jwt_secret_bytes: jwt_secret.to_vec(),
file_key: keygen::generate_file_key(),
};

let encoded_keyfile = keygen::encode_keyfile(
password_hash,
name.clone(),
decoded_keyfile.routers.clone(),
&net_pk_bytes,
&decoded_keyfile.jwt_secret_bytes,
&decoded_keyfile.file_key,
);

fs::write(
format!("{}/.keys", home_directory_path),
encoded_keyfile.clone(),
)
.await
.unwrap();

(our, encoded_keyfile, decoded_keyfile)
}
};
let (our, encoded_keyfile, decoded_keyfile) = simulate_node(
fake_node_name.cloned(),
password.cloned(),
home_directory_path,
)
.await;
#[cfg(not(feature = "simulation-mode"))]
let (our, encoded_keyfile, decoded_keyfile) = serve_register_fe(
&home_directory_path,
our_ip.to_string(),
(wc_tcp_handle, flag_used),
http_server_port,
rpc.cloned(),
)
.await;

// the boolean flag determines whether the runtime module is *public* or not,
// where public means that any process can always message it.
@ -496,7 +250,7 @@ async fn main() {
})
.collect(),
));
tasks.spawn(net::networking(
tasks.spawn(net::ws::networking(
our.clone(),
our_ip.to_string(),
networking_keypair_arc.clone(),
@ -571,10 +325,11 @@ async fn main() {
caps_oracle_sender.clone(),
home_directory_path.clone(),
));

// if a runtime task exits, try to recover it,
// unless it was terminal signaling a quit
// or a SIG* was intercepted
let quit_msg: String = tokio::select! {
let mut quit_msg: String = tokio::select! {
Some(Ok(res)) = tasks.join_next() => {
format!(
"uh oh, a kernel process crashed -- this should never happen: {:?}",
@ -600,7 +355,7 @@ async fn main() {
};

// gracefully abort all running processes in kernel
let _ = kernel_message_sender
if let Err(_) = kernel_message_sender
.send(KernelMessage {
id: rand::random(),
source: Address {
@ -621,18 +376,280 @@ async fn main() {
}),
lazy_load_blob: None,
})
.await;
.await
{
quit_msg = "failed to gracefully shut down kernel".into();
}

// abort all remaining tasks
tasks.shutdown().await;
let stdout = std::io::stdout();
let mut stdout = stdout.lock();
let _ = crossterm::execute!(
crossterm::execute!(
stdout,
crossterm::event::DisableBracketedPaste,
crossterm::terminal::SetTitle(""),
crossterm::style::SetForegroundColor(crossterm::style::Color::Red),
crossterm::style::Print(format!("\r\n{quit_msg}\r\n")),
crossterm::style::ResetColor,
);
)
.expect("failed to clean up terminal visual state! your terminal window might be funky now");
}

async fn set_http_server_port(set_port: Option<&u16>) -> u16 {
if let Some(port) = set_port {
match http::utils::find_open_port(*port, port + 1).await {
Some(bound) => bound.local_addr().unwrap().port(),
None => {
println!(
"error: couldn't bind {}; first available port found was {}. \
Set an available port with `--port` and try again.",
port,
http::utils::find_open_port(*port, port + 1000)
.await
.expect("no ports found in range")
.local_addr()
.unwrap()
.port(),
);
panic!();
}
}
} else {
match http::utils::find_open_port(8080, 8999).await {
Some(bound) => bound.local_addr().unwrap().port(),
None => {
println!(
"error: couldn't bind any ports between 8080 and 8999. \
Set an available port with `--port` and try again."
);
panic!();
}
}
}
}

/// Sets up WebSocket networking by finding an open port and creating a TCP listener.
/// If a specific port is provided, it attempts to bind to it directly.
/// If no port is provided, it searches for the first available port between 9000 and 65535.
/// Returns a tuple containing the TcpListener and a boolean indicating if a specific port was used.
async fn setup_ws_networking(ws_networking_port: Option<u16>) -> (tokio::net::TcpListener, bool) {
match ws_networking_port {
Some(port) => {
let listener = http::utils::find_open_port(port, port + 1)
.await
.expect("ws-port selected with flag could not be bound");
(listener, true)
}
None => {
let listener = http::utils::find_open_port(9000, 65535)
.await
.expect("no ports found in range 9000-65535 for websocket server");
(listener, false)
}
}
}

/// TODO: writeup.
pub async fn simulate_node(
fake_node_name: Option<String>,
password: Option<String>,
home_directory_path: &str,
) -> (Identity, Vec<u8>, Keyfile) {
match fake_node_name {
None => {
match password {
None => {
panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
}
Some(password) => {
let keyfile = tokio::fs::read(format!("{}/.keys", home_directory_path))
.await
.expect("could not read keyfile");
let decoded = keygen::decode_keyfile(&keyfile, &password)
.expect("could not decode keyfile");
let identity = Identity {
name: decoded.username.clone(),
networking_key: format!(
"0x{}",
hex::encode(decoded.networking_keypair.public_key().as_ref())
),
ws_routing: None, // TODO: Define WebSocket routing logic
allowed_routers: decoded.routers.clone(),
};
(identity, keyfile, decoded)
}
}
}
Some(name) => {
let password_hash = password.unwrap_or_else(|| "secret".to_string());
let (pubkey, networking_keypair) = keygen::generate_networking_key();
let seed = SystemRandom::new();
let mut jwt_secret = [0u8; 32];
ring::rand::SecureRandom::fill(&seed, &mut jwt_secret).unwrap();

let identity = Identity {
name: name.clone(),
networking_key: pubkey,
ws_routing: None,
allowed_routers: vec![],
};

let decoded_keyfile = Keyfile {
username: name.clone(),
routers: vec![],
networking_keypair: signature::Ed25519KeyPair::from_pkcs8(
networking_keypair.as_ref(),
)
.unwrap(),
jwt_secret_bytes: jwt_secret.to_vec(),
file_key: keygen::generate_file_key(),
};

let encoded_keyfile = keygen::encode_keyfile(
password_hash,
name.clone(),
decoded_keyfile.routers.clone(),
networking_keypair.as_ref(),
&decoded_keyfile.jwt_secret_bytes,
&decoded_keyfile.file_key,
);

tokio::fs::write(
format!("{}/.keys", home_directory_path),
encoded_keyfile.clone(),
)
.await
.expect("Failed to write keyfile");

(identity, encoded_keyfile, decoded_keyfile)
}
}
}

async fn create_home_directory(home_directory_path: &str) {
if let Err(e) = tokio::fs::create_dir_all(home_directory_path).await {
panic!("failed to create home directory: {:?}", e);
}
println!("home at {}\r", home_directory_path);
}

/// build the command line interface for kinode
///
fn build_command() -> Command {
let app = Command::new("kinode")
.version(VERSION)
.author("Kinode DAO: https://github.com/kinode-dao")
.about("A General Purpose Sovereign Cloud Computing Platform")
.arg(arg!([home] "Path to home directory").required(true))
.arg(
arg!(--port <PORT> "Port to bind [default: first unbound at or above 8080]")
.value_parser(value_parser!(u16)),
)
.arg(
arg!(--"ws-port" <PORT> "Kinode internal WebSockets protocol port [default: first unbound at or above 9000]")
.alias("network-router-port")
.value_parser(value_parser!(u16)),
)
.arg(
arg!(--verbosity <VERBOSITY> "Verbosity level: higher is more verbose")
.default_value("0")
.value_parser(value_parser!(u8)),
)
.arg(
arg!(--"reveal-ip" "If set to false, as an indirect node, always use routers to connect to other nodes.")
.default_value("true")
.value_parser(value_parser!(bool)),
)
.arg(arg!(--rpc <RPC> "Add a WebSockets RPC URL at boot"));

#[cfg(feature = "simulation-mode")]
let app = app
.arg(arg!(--password <PASSWORD> "Networking password"))
.arg(arg!(--"fake-node-name" <NAME> "Name of fake node to boot"))
.arg(arg!(--"net-pk" <NET_PK> "Networking private key"))
.arg(
arg!(--detached <IS_DETACHED> "Run in detached mode (don't accept input)")
.action(clap::ArgAction::SetTrue),
);
app
}

/// Attempts to find the public IPv4 address of the node.
/// If in simulation mode, it immediately returns localhost.
/// Otherwise, it tries to find the public IP and defaults to localhost on failure.
async fn find_public_ip() -> std::net::Ipv4Addr {
#[cfg(feature = "simulation-mode")]
{
std::net::Ipv4Addr::LOCALHOST
}

#[cfg(not(feature = "simulation-mode"))]
{
println!("Finding public IP address...");
match tokio::time::timeout(std::time::Duration::from_secs(5), public_ip::addr_v4()).await {
Ok(Some(ip)) => {
println!("Public IP found: {}", ip);
ip
}
_ => {
println!("Failed to find public IPv4 address: booting as a routed node.");
std::net::Ipv4Addr::LOCALHOST
}
}
}
}

/// check if we have keys saved on disk, encrypted
/// if so, prompt user for "password" to decrypt with
///
/// once password is received, use to decrypt local keys file,
/// and pass the keys into boot process as is done in registration.
///
/// NOTE: when we log in, we MUST check the PKI to make sure our
/// information matches what we think it should be. this includes
/// username, networking key, and routing info.
/// if any do not match, we should prompt user to create a "transaction"
/// that updates their PKI info on-chain.
#[cfg(not(feature = "simulation-mode"))]
async fn serve_register_fe(
home_directory_path: &str,
our_ip: String,
ws_networking: (tokio::net::TcpListener, bool),
http_server_port: u16,
maybe_rpc: Option<String>,
) -> (Identity, Vec<u8>, Keyfile) {
let (kill_tx, kill_rx) = tokio::sync::oneshot::channel::<bool>();

let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(format!("{}/.keys", home_directory_path))
.await
.ok();

let (tx, mut rx) = mpsc::channel::<(Identity, Keyfile, Vec<u8>)>(1);
let (our, decoded_keyfile, encoded_keyfile) = tokio::select! {
_ = register::register(
tx,
kill_rx,
our_ip,
ws_networking,
http_server_port,
disk_keyfile,
maybe_rpc) => {
panic!("registration failed")
}
Some((our, decoded_keyfile, encoded_keyfile)) = rx.recv() => {
(our, decoded_keyfile, encoded_keyfile)
}
};

tokio::fs::write(
format!("{}/.keys", home_directory_path),
encoded_keyfile.clone(),
)
.await
.unwrap();

let _ = kill_tx.send(true);

(our, encoded_keyfile, decoded_keyfile)
}

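The refactor above factors the old inline port-selection logic into set_http_server_port and setup_ws_networking, which both lean on http::utils::find_open_port. A simplified, standalone sketch of the same try-the-flag-port-then-scan fallback, using std's blocking TcpListener rather than the crate's async helper:

// Standalone sketch of the port-selection fallback: try the requested port
// first, otherwise scan a range and keep the first successful bind.
use std::net::TcpListener;

fn find_open_port(start: u16, end: u16) -> Option<(TcpListener, u16)> {
    (start..end).find_map(|port| {
        TcpListener::bind(("127.0.0.1", port))
            .ok()
            .map(|listener| (listener, port))
    })
}

fn main() {
    // With no --port flag, the node scans 8080..8999 and keeps the first bind.
    match find_open_port(8080, 8999) {
        Some((_listener, port)) => println!("http server will bind port {port}"),
        None => eprintln!("couldn't bind any port between 8080 and 8999"),
    }
}
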
File diff suppressed because it is too large
Load Diff
@ -1,7 +1,8 @@
use crate::net::{types::*, MESSAGE_MAX_SIZE, TIMEOUT};
use crate::net::{types::*, ws::MESSAGE_MAX_SIZE, ws::TIMEOUT};
use anyhow::{anyhow, Result};
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use lib::types::core::*;
use ring::signature::{self, Ed25519KeyPair};
use snow::params::NoiseParams;
use tokio::net::TcpStream;
@ -9,8 +10,6 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::time::timeout;
use tokio_tungstenite::{connect_async, tungstenite, MaybeTlsStream, WebSocketStream};

use lib::types::core::*;

lazy_static::lazy_static! {
static ref PARAMS: NoiseParams = "Noise_XX_25519_ChaChaPoly_BLAKE2s"
.parse()
@ -264,7 +263,7 @@ pub fn validate_signature(from: &str, signature: &[u8], message: &[u8], pki: &On
if let Some(peer_id) = pki.get(from) {
let their_networking_key = signature::UnparsedPublicKey::new(
&signature::ED25519,
hex::decode(strip_0x(&peer_id.networking_key)).unwrap_or_default(),
net_key_string_to_hex(&peer_id.networking_key),
);
their_networking_key.verify(message, signature).is_ok()
} else {
@ -283,7 +282,7 @@ pub fn validate_routing_request(
.ok_or(anyhow!("unknown KNS name"))?;
let their_networking_key = signature::UnparsedPublicKey::new(
&signature::ED25519,
hex::decode(strip_0x(&their_id.networking_key))?,
net_key_string_to_hex(&their_id.networking_key),
);
their_networking_key
.verify(
@ -308,7 +307,7 @@ pub fn validate_handshake(
// verify their signature of their static key
let their_networking_key = signature::UnparsedPublicKey::new(
&signature::ED25519,
hex::decode(strip_0x(&their_id.networking_key))?,
net_key_string_to_hex(&their_id.networking_key),
);
their_networking_key
.verify(their_static_key, &handshake.signature)
@ -342,10 +341,10 @@ pub async fn recv_protocol_message(conn: &mut PeerConnection) -> Result<KernelMe
&ws_recv(&mut conn.read_stream, &mut conn.write_stream).await?,
&mut conn.buf,
)?;

if outer_len < 4 {
return Err(anyhow!("protocol message too small!"));
}

let length_bytes = [conn.buf[0], conn.buf[1], conn.buf[2], conn.buf[3]];
let msg_len = u32::from_be_bytes(length_bytes);
if msg_len > MESSAGE_MAX_SIZE {
@ -444,7 +443,7 @@ pub fn build_initiator() -> (snow::HandshakeState, Vec<u8>) {
builder
.local_private_key(&keypair.private)
.build_initiator()
.expect("net: couldn't build responder?"),
.expect("net: couldn't build initiator?"),
keypair.public,
)
}
@ -473,11 +472,8 @@ pub async fn error_offline(km: KernelMessage, network_error_tx: &NetworkErrorSen
Ok(())
}

fn strip_0x(s: &str) -> String {
if let Some(stripped) = s.strip_prefix("0x") {
return stripped.to_string();
}
s.to_string()
fn net_key_string_to_hex(s: &str) -> Vec<u8> {
hex::decode(s.strip_prefix("0x").unwrap_or(s)).unwrap_or_default()
}

pub async fn parse_hello_message(
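The hunk above collapses strip_0x plus hex::decode into net_key_string_to_hex, which accepts a networking key with or without a 0x prefix and degrades to an empty key (which later fails signature verification) instead of erroring. A small runnable check of that behavior, assuming only the hex crate as a dependency:

// Same body as the helper in the diff above.
fn net_key_string_to_hex(s: &str) -> Vec<u8> {
    hex::decode(s.strip_prefix("0x").unwrap_or(s)).unwrap_or_default()
}

fn main() {
    // Prefixed and bare hex decode to the same bytes.
    assert_eq!(net_key_string_to_hex("0xdeadbeef"), vec![0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(net_key_string_to_hex("deadbeef"), vec![0xde, 0xad, 0xbe, 0xef]);
    // Invalid input becomes an empty key rather than a panic or error.
    assert!(net_key_string_to_hex("not hex").is_empty());
}
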
1112
kinode/src/net/ws.rs
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,5 +1,4 @@
use crate::keygen;
use aes_gcm::aead::KeyInit;
use alloy_primitives::{Address as EthAddress, Bytes, FixedBytes, U256};
use alloy_providers::provider::{Provider, TempProvider};
use alloy_pubsub::PubSubFrontend;
@ -9,13 +8,11 @@ use alloy_signer::Signature;
use alloy_sol_macro::sol;
use alloy_sol_types::{SolCall, SolValue};
use alloy_transport_ws::WsConnect;
use hmac::Hmac;
use jwt::SignWithKey;
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
use lib::types::core::*;
use ring::rand::SystemRandom;
use ring::signature;
use ring::signature::KeyPair;
use sha2::Sha256;
use static_dir::static_dir;
use std::str::FromStr;
use std::sync::Arc;
@ -95,23 +92,6 @@ fn _hex_string_to_u8_array(hex_str: &str) -> Result<[u8; 32], &'static str> {
Ok(bytes)
}

pub fn generate_jwt(jwt_secret_bytes: &[u8], username: &str) -> Option<String> {
let jwt_secret: Hmac<Sha256> = match Hmac::new_from_slice(jwt_secret_bytes) {
Ok(secret) => secret,
Err(_) => return None,
};

let claims = crate::http::server_types::JwtClaims {
username: username.to_string(),
expiration: 0,
};

match claims.sign_with_key(&jwt_secret) {
Ok(token) => Some(token),
Err(_) => None,
}
}

/// Serve the registration page and receive POSTs and PUTs from it
pub async fn register(
tx: RegistrationSender,
@ -366,7 +346,9 @@ async fn handle_keyfile_vet(
// additional checks?
let encoded_keyfile = match payload.keyfile.is_empty() {
true => keyfile.ok_or(warp::reject())?,
false => base64::decode(payload.keyfile).map_err(|_| warp::reject())?,
false => base64_standard
.decode(payload.keyfile)
.map_err(|_| warp::reject())?,
};

let decoded_keyfile = keygen::decode_keyfile(&encoded_keyfile, &payload.password_hash)
@ -543,7 +525,7 @@ async fn handle_import_keyfile(
provider: Arc<Provider<PubSubFrontend>>,
) -> Result<impl Reply, Rejection> {
// if keyfile was not present in node and is present from user upload
let encoded_keyfile = match base64::decode(info.keyfile.clone()) {
let encoded_keyfile = match base64_standard.decode(info.keyfile.clone()) {
Ok(k) => k,
Err(_) => {
return Ok(warp::reply::with_status(
@ -762,8 +744,8 @@ async fn success_response(
decoded_keyfile: Keyfile,
encoded_keyfile: Vec<u8>,
) -> Result<warp::reply::Response, Rejection> {
let encoded_keyfile_str = base64::encode(&encoded_keyfile);
let token = match generate_jwt(&decoded_keyfile.jwt_secret_bytes, &our.name) {
let encoded_keyfile_str = base64_standard.encode(&encoded_keyfile);
let token = match keygen::generate_jwt(&decoded_keyfile.jwt_secret_bytes, &our.name) {
Some(token) => token,
None => {
return Ok(warp::reply::with_status(

@ -1,4 +1,5 @@
use anyhow::Result;
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
use dashmap::DashMap;
use rusqlite::Connection;
use std::collections::{HashMap, HashSet, VecDeque};
@ -201,7 +202,9 @@ async fn handle_request(
serde_json::Number::from_f64(real).unwrap(),
),
SqlValue::Text(text) => serde_json::Value::String(text),
SqlValue::Blob(blob) => serde_json::Value::String(base64::encode(blob)), // or another representation if you prefer
SqlValue::Blob(blob) => {
serde_json::Value::String(base64_standard.encode(blob))
} // or another representation if you prefer
_ => serde_json::Value::Null,
};
map.insert(column_name.clone(), value_json);
@ -511,7 +514,7 @@ fn json_to_sqlite(value: &serde_json::Value) -> Result<SqlValue, SqliteError> {
}
}
serde_json::Value::String(s) => {
match base64::decode(s) {
match base64_standard.decode(s) {
Ok(decoded_bytes) => {
// convert to SQLite Blob if it's a valid base64 string
Ok(SqlValue::Blob(decoded_bytes))

@ -391,11 +391,11 @@ async fn bootstrap(

for (package_metadata, mut package) in packages.clone() {
let package_name = package_metadata.properties.package_name.as_str();
// // special case tester: only load it in if in simulation mode
// if package_name == "tester" {
// #[cfg(not(feature = "simulation-mode"))]
// continue;
// }
// special case tester: only load it in if in simulation mode
#[cfg(not(feature = "simulation-mode"))]
if package_name == "tester" {
continue;
}

println!("fs: handling package {package_name}...\r");
let package_publisher = package_metadata.properties.publisher.as_str();
@ -610,8 +610,8 @@ async fn bootstrap(
for (package_metadata, mut package) in packages {
let package_name = package_metadata.properties.package_name.as_str();
// special case tester: only load it in if in simulation mode
#[cfg(not(feature = "simulation-mode"))]
if package_name == "tester" {
#[cfg(not(feature = "simulation-mode"))]
continue;
}

@ -1,7 +1,7 @@
[package]
name = "lib"
authors = ["KinodeDAO"]
version = "0.7.0"
version = "0.7.1"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"
@ -13,15 +13,15 @@ license = "Apache-2.0"
[build-dependencies]
anyhow = "1.0.71"
kit = { git = "https://github.com/kinode-dao/kit", rev = "659f59e" }
reqwest = { version = "0.11.22", features = ["blocking"] }
reqwest = { version = "0.12.4", features = ["blocking"] }
tokio = "1.28"

[dependencies]
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" }
lazy_static = "1.4.0"
rand = "0.8.4"
ring = "0.16.20"
rusqlite = { version = "0.30.0", features = ["bundled"] }
ring = "0.17.8"
rusqlite = { version = "0.31.0", features = ["bundled"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"

@ -155,7 +155,7 @@ pub struct ProviderConfig {
pub enum NodeOrRpcUrl {
Node {
kns_update: crate::core::KnsUpdate,
use_as_provider: bool, // for routers inside saved config
use_as_provider: bool, // false for just-routers inside saved config
},
RpcUrl(String),
}