Merge pull request #475 from kinode-dao/release-candidate

Release candidate 0.8.7
doria 2024-08-09 17:02:20 +03:00 committed by GitHub
commit 18f0eec50e
8 changed files with 111 additions and 41 deletions

Cargo.lock (generated)

@@ -3203,7 +3203,7 @@ dependencies = [
 [[package]]
 name = "kinode"
-version = "0.8.6"
+version = "0.8.7"
 dependencies = [
  "aes-gcm",
  "alloy",
@@ -3259,7 +3259,7 @@ dependencies = [
 [[package]]
 name = "kinode_lib"
-version = "0.8.6"
+version = "0.8.7"
 dependencies = [
  "lib",
 ]
@@ -3376,7 +3376,7 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
 [[package]]
 name = "lib"
-version = "0.8.6"
+version = "0.8.7"
 dependencies = [
  "alloy",
  "kit",

Cargo.toml (package "kinode_lib")

@@ -1,7 +1,7 @@
 [package]
 name = "kinode_lib"
 authors = ["KinodeDAO"]
-version = "0.8.6"
+version = "0.8.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"

Dockerfile

@@ -1,12 +1,12 @@
 FROM debian:12-slim AS downloader
+ARG VERSION
 WORKDIR /tmp/download
 RUN apt-get update
-RUN apt-get install wget curl openssl jq unzip -y
-ADD https://api.github.com/repos/kinode-dao/kinode/releases releases.json
-RUN wget "https://github.com/kinode-dao/kinode/releases/download/$(cat releases.json | jq -r '.[0].tag_name')/kinode-x86_64-unknown-linux-gnu.zip"
+RUN apt-get install unzip -y
+ADD "https://github.com/kinode-dao/kinode/releases/download/${VERSION}/kinode-x86_64-unknown-linux-gnu.zip" kinode-x86_64-unknown-linux-gnu.zip
 RUN unzip kinode-x86_64-unknown-linux-gnu.zip
 FROM debian:12-slim

Docker usage documentation (Markdown)

@@ -163,12 +163,14 @@ The image includes EXPOSE directives for TCP port `8080` and TCP port `9000`. Po
 If you are running a direct node, you must map port `9000` to the same port on the host and on your router. Otherwise, your Kinode will not be able to connect to the rest of the network as connection info is written to the chain, and this information is based on the view from inside the Docker container.
 To build a local Docker image, run the following command in this project root.
-```
-docker build -t 0xlynett/kinode .
+```bash
+# The `VERSION` may be replaced with the tag of a GitHub release
+docker build -t 0xlynett/kinode . --build-arg VERSION=v0.8.6
 ```
 For example:
-```
+```bash
 docker volume create kinode-volume
 docker run -d -p 8080:8080 -it --name my-kinode \
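The run example above publishes only the HTTP port. Following the port-mapping note in this documentation, a direct node also needs TCP `9000` published. A minimal sketch (an assumption, not part of this commit; it reuses the `0xlynett/kinode` image from the build command and omits the volume mount and remaining flags of the full example):

```bash
# Sketch for a direct node: publish the networking port 9000 alongside the
# HTTP port 8080 so peers can reach the node at the address written on-chain.
# Volume mount and other flags from the full run example are omitted here.
docker run -d -p 8080:8080 -p 9000:9000 -it --name my-kinode 0xlynett/kinode
```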

Cargo.toml (package "kinode")

@@ -1,7 +1,7 @@
 [package]
 name = "kinode"
 authors = ["KinodeDAO"]
-version = "0.8.6"
+version = "0.8.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"

eth module source (Rust): message handling

@@ -322,6 +322,7 @@ async fn handle_message(
 )
 .await;
 }
+Ok(())
 }
 Message::Request(req) => {
 let timeout = req.expects_response.unwrap_or(60);
@@ -330,7 +331,7 @@ async fn handle_message(
 };
 match req {
 IncomingReq::EthAction(eth_action) => {
-return handle_eth_action(state, km, timeout, eth_action).await;
+handle_eth_action(state, km, timeout, eth_action).await
 }
 IncomingReq::EthConfigAction(eth_config_action) => {
 kernel_message(
@@ -344,22 +345,29 @@ async fn handle_message(
 &state.send_to_loop,
 )
 .await;
+Ok(())
 }
 IncomingReq::EthSubResult(eth_sub_result) => {
 // forward this to rsvp, if we have the sub id in our active subs
 let Some(rsvp) = km.rsvp else {
+verbose_print(
+&state.print_tx,
+"eth: got eth_sub_result with no rsvp, ignoring",
+)
+.await;
 return Ok(()); // no rsvp, no need to forward
 };
 let sub_id = match eth_sub_result {
 Ok(EthSub { id, .. }) => id,
 Err(EthSubError { id, .. }) => id,
 };
-if let Some(sub_map) = state.active_subscriptions.get(&rsvp) {
-if let Some(ActiveSub::Remote {
+if let Some(mut sub_map) = state.active_subscriptions.get_mut(&rsvp) {
+if let Some(sub) = sub_map.get(&sub_id) {
+if let ActiveSub::Remote {
 provider_node,
 sender,
 ..
-}) = sub_map.get(&sub_id)
+} = sub
 {
 if provider_node == &km.source.node {
 if let Ok(()) = sender.send(eth_sub_result).await {
@@ -368,19 +376,33 @@ async fn handle_message(
 return Ok(());
 }
 }
+// failed to send subscription update to process,
+// unsubscribe from provider and close
+verbose_print(
+&state.print_tx,
+"eth: got eth_sub_result but provider node did not match or local sub was already closed",
+)
+.await;
+sub.close(sub_id, state).await;
+sub_map.remove(&sub_id);
+return Ok(());
+}
 }
 }
 // tell the remote provider that we don't have this sub
 // so they can stop sending us updates
 verbose_print(
 &state.print_tx,
-"eth: got eth_sub_result but no matching sub found, unsubscribing",
+&format!(
+"eth: got eth_sub_result but no matching sub {} found, unsubscribing",
+sub_id
+),
 )
 .await;
 kernel_message(
-&state.our.clone(),
+&state.our,
 km.id,
-km.source.clone(),
+km.source,
 None,
 true,
 None,
@@ -388,6 +410,7 @@ async fn handle_message(
 &state.send_to_loop,
 )
 .await;
+Ok(())
 }
 IncomingReq::SubKeepalive(sub_id) => {
 // source expects that we have a local sub for them with this id
@@ -420,12 +443,12 @@ async fn handle_message(
 &state.send_to_loop,
 )
 .await;
-}
-}
-}
-}
 Ok(())
 }
+}
+}
+}
+}

 async fn handle_eth_action(
 state: &mut ModuleState,
@@ -479,12 +502,32 @@ async fn handle_eth_action(
 .await;
 }
 EthAction::UnsubscribeLogs(sub_id) => {
-let mut sub_map = state
-.active_subscriptions
-.entry(km.source.clone())
-.or_insert(HashMap::new());
+let Some(mut sub_map) = state.active_subscriptions.get_mut(&km.source) else {
+verbose_print(
+&state.print_tx,
+&format!(
+"eth: got unsubscribe from {} but no subscription found",
+km.source
+),
+)
+.await;
+error_message(
+&state.our,
+km.id,
+km.source,
+EthError::MalformedRequest,
+&state.send_to_loop,
+)
+.await;
+return Ok(());
+};
 if let Some(sub) = sub_map.remove(&sub_id) {
 sub.close(sub_id, state).await;
+verbose_print(
+&state.print_tx,
+&format!("eth: closed subscription {} for {}", sub_id, km.source.node),
+)
+.await;
 kernel_message(
 &state.our,
 km.id,
@@ -499,7 +542,10 @@ async fn handle_eth_action(
 } else {
 verbose_print(
 &state.print_tx,
-"eth: got unsubscribe but no matching subscription found",
+&format!(
+"eth: got unsubscribe from {} but no subscription {} found",
+km.source, sub_id
+),
 )
 .await;
 error_message(
@@ -626,8 +672,11 @@ async fn fulfill_request(
 is_replacement_successful = false;
 return ();
 };
-aps.urls.remove(index);
-aps.urls.insert(0, url_provider.clone());
+let old_provider = aps.urls.remove(index);
+match old_provider.pubsub {
+None => aps.urls.insert(0, url_provider.clone()),
+Some(_) => aps.urls.insert(0, old_provider),
+}
 });
 if !is_replacement_successful {
 verbose_print(

eth module source (Rust): subscriptions

@@ -113,7 +113,6 @@ pub async fn create_new_subscription(
 let (keepalive_err_sender, keepalive_err_receiver) =
 tokio::sync::mpsc::channel(1);
 response_channels.insert(keepalive_km_id, keepalive_err_sender);
-let response_channels = response_channels.clone();
 subs.insert(
 remote_sub_id,
 ActiveSub::Remote {
@@ -232,8 +231,11 @@ async fn build_subscription(
 is_replacement_successful = false;
 return ();
 };
-aps.urls.remove(index);
-aps.urls.insert(0, url_provider.clone());
+let old_provider = aps.urls.remove(index);
+match old_provider.pubsub {
+None => aps.urls.insert(0, url_provider.clone()),
+Some(_) => aps.urls.insert(0, old_provider),
+}
 });
 if !is_replacement_successful {
 verbose_print(
@@ -468,7 +470,7 @@ async fn maintain_remote_subscription(
 true,
 Some(30),
 IncomingReq::SubKeepalive(remote_sub_id),
-&send_to_loop,
+send_to_loop,
 ).await;
 }
 _incoming = net_error_rx.recv() => {
@@ -485,6 +487,23 @@ async fn maintain_remote_subscription(
 }
 }
 };
+// tell provider node we don't need their services anymore
+// (in case they did not close the subscription on their side,
+// such as in the 2-hour timeout case)
+kernel_message(
+our,
+rand::random(),
+Address {
+node: provider_node.to_string(),
+process: ETH_PROCESS_ID.clone(),
+},
+None,
+true,
+None,
+EthAction::UnsubscribeLogs(remote_sub_id),
+send_to_loop,
+)
+.await;
 active_subscriptions
 .entry(target.clone())
 .and_modify(|sub_map| {

Cargo.toml (package "lib")

@@ -1,7 +1,7 @@
 [package]
 name = "lib"
 authors = ["KinodeDAO"]
-version = "0.8.6"
+version = "0.8.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"