fix: more sub refactor

dr-frmr 2024-04-25 02:42:11 +09:00
parent d3a36b378e
commit cfedc19102
2 changed files with 147 additions and 113 deletions

File 1 of 2

@@ -46,6 +46,7 @@ struct UrlProvider {
 #[derive(Debug)]
 struct NodeProvider {
+    /// NOT CURRENTLY USED
     pub trusted: bool,
     /// semi-temporary flag to mark if this provider is currently usable
     /// future updates will make this more dynamic
@@ -365,7 +366,7 @@ async fn handle_message(
             // so they can stop sending us updates
             verbose_print(
                 &state.print_tx,
-                "eth: got eth_sub_result but no matching sub found",
+                "eth: got eth_sub_result but no matching sub found, unsubscribing",
             )
             .await;
             kernel_message(
@@ -595,6 +596,14 @@ async fn fulfill_request(
        }
    }
    for node_provider in &mut aps.nodes {
+        verbose_print(
+            print_tx,
+            &format!(
+                "eth: attempting to fulfill via {}",
+                node_provider.kns_update.name
+            ),
+        )
+        .await;
        let response = forward_to_node_provider(
            our,
            km_id,
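
The hunk above adds a verbose_print before each node provider is tried in fulfill_request, so the failover order across providers becomes visible in the logs. A minimal sketch of that try-each-provider-and-log pattern; `Provider` and `try_provider` are illustrative stand-ins, not the real types in this codebase:

```rust
// Sketch only: `Provider` and `try_provider` are hypothetical stand-ins.
struct Provider {
    name: String,
    healthy: bool,
}

async fn try_provider(p: &Provider) -> Result<String, String> {
    if p.healthy {
        Ok(format!("response from {}", p.name))
    } else {
        Err(format!("{} unavailable", p.name))
    }
}

#[tokio::main]
async fn main() {
    let providers = vec![
        Provider { name: "node-a".into(), healthy: false },
        Provider { name: "node-b".into(), healthy: true },
    ];
    for provider in &providers {
        // log the attempt up front, as the added verbose_print does
        println!("eth: attempting to fulfill via {}", provider.name);
        match try_provider(provider).await {
            Ok(resp) => {
                println!("{resp}");
                break; // first successful provider wins
            }
            Err(e) => println!("eth: provider failed: {e}"),
        }
    }
}
```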

File 2 of 2

@@ -11,120 +11,75 @@ pub async fn create_new_subscription(
     sub_id: u64,
     eth_action: EthAction,
 ) {
-    match build_subscription(
-        &state.our,
-        km_id,
-        &target,
-        &state.send_to_loop,
-        &eth_action,
-        &state.providers,
-        &state.response_channels,
-        &state.print_tx,
-    )
-    .await
-    {
-        Ok(maybe_raw_sub) => {
-            // send a response to the target that the subscription was successful
-            kernel_message(
-                &state.our,
-                km_id,
-                target.clone(),
-                rsvp.clone(),
-                false,
-                None,
-                EthResponse::Ok,
-                &state.send_to_loop,
-            )
-            .await;
-            let mut subs = state
-                .active_subscriptions
-                .entry(target.clone())
-                .or_insert(HashMap::new());
-            let active_subscriptions = state.active_subscriptions.clone();
-            match maybe_raw_sub {
-                Ok(rx) => {
-                    let our = state.our.clone();
-                    let send_to_loop = state.send_to_loop.clone();
-                    let print_tx = state.print_tx.clone();
-                    subs.insert(
-                        sub_id,
-                        // this is a local sub, as in, we connect to the rpc endpoint
-                        ActiveSub::Local(tokio::spawn(async move {
-                            // await the subscription error and kill it if so
-                            if let Err(e) = maintain_local_subscription(
-                                &our,
-                                sub_id,
-                                rx,
-                                &target,
-                                &rsvp,
-                                &send_to_loop,
-                            )
-                            .await
-                            {
-                                verbose_print(
-                                    &print_tx,
-                                    "eth: closed local subscription due to error",
-                                )
-                                .await;
-                                kernel_message(
-                                    &our,
-                                    rand::random(),
-                                    target.clone(),
-                                    rsvp,
-                                    true,
-                                    None,
-                                    EthSubResult::Err(e),
-                                    &send_to_loop,
-                                )
-                                .await;
-                                active_subscriptions.entry(target).and_modify(|sub_map| {
-                                    sub_map.remove(&km_id);
-                                });
-                            }
-                        })),
-                    );
-                }
-                Err((provider_node, remote_sub_id)) => {
-                    // this is a remote sub, given by a relay node
-                    let (sender, rx) = tokio::sync::mpsc::channel(10);
-                    let keepalive_km_id = rand::random();
-                    let (keepalive_err_sender, keepalive_err_receiver) =
-                        tokio::sync::mpsc::channel(1);
-                    state
-                        .response_channels
-                        .insert(keepalive_km_id, keepalive_err_sender);
-                    let our = state.our.clone();
-                    let send_to_loop = state.send_to_loop.clone();
-                    let print_tx = state.print_tx.clone();
-                    let response_channels = state.response_channels.clone();
-                    subs.insert(
-                        remote_sub_id,
-                        ActiveSub::Remote {
-                            provider_node: provider_node.clone(),
-                            handle: tokio::spawn(async move {
-                                if let Err(e) = maintain_remote_subscription(
-                                    &our,
-                                    &provider_node,
-                                    remote_sub_id,
-                                    sub_id,
-                                    keepalive_km_id,
-                                    rx,
-                                    keepalive_err_receiver,
-                                    &target,
-                                    &rsvp,
-                                    &send_to_loop,
-                                )
-                                .await
-                                {
-                                    verbose_print(
-                                        &print_tx,
-                                        "eth: closed subscription with provider node due to error",
-                                    )
-                                    .await;
-                                    kernel_message(
-                                        &our,
-                                        rand::random(),
-                                        target.clone(),
-                                        None,
-                                        true,
-                                        None,
-                                        EthSubResult::Err(e),
+    let our = state.our.clone();
+    let send_to_loop = state.send_to_loop.clone();
+    let active_subscriptions = state.active_subscriptions.clone();
+    let providers = state.providers.clone();
+    let response_channels = state.response_channels.clone();
+    let print_tx = state.print_tx.clone();
+    tokio::spawn(async move {
+        match tokio::time::timeout(
+            std::time::Duration::from_secs(30),
+            build_subscription(
+                &our,
+                km_id,
+                &target,
+                &send_to_loop,
+                &eth_action,
+                &providers,
+                &response_channels,
+                &print_tx,
+            ),
+        )
+        .await
+        {
+            Ok(Ok(maybe_raw_sub)) => {
+                // send a response to the target that the subscription was successful
+                kernel_message(
+                    &our,
+                    km_id,
+                    target.clone(),
+                    rsvp.clone(),
+                    false,
+                    None,
+                    EthResponse::Ok,
+                    &send_to_loop,
+                )
+                .await;
+                let mut subs = active_subscriptions
+                    .entry(target.clone())
+                    .or_insert(HashMap::new());
+                let active_subscriptions = active_subscriptions.clone();
+                match maybe_raw_sub {
+                    Ok(rx) => {
+                        let our = our.clone();
+                        let send_to_loop = send_to_loop.clone();
+                        let print_tx = print_tx.clone();
+                        subs.insert(
+                            sub_id,
+                            // this is a local sub, as in, we connect to the rpc endpoint
+                            ActiveSub::Local(tokio::spawn(async move {
+                                // await the subscription error and kill it if so
+                                if let Err(e) = maintain_local_subscription(
+                                    &our,
+                                    sub_id,
+                                    rx,
+                                    &target,
+                                    &send_to_loop,
+                                )
+                                .await
+                                {
+                                    verbose_print(
+                                        &print_tx,
+                                        "eth: closed local subscription due to error",
+                                    )
+                                    .await;
+                                    kernel_message(
+                                        &our,
+                                        rand::random(),
+                                        target.clone(),
+                                        rsvp,
+                                        true,
+                                        None,
+                                        EthSubResult::Err(e),
@@ -132,21 +87,84 @@ pub async fn create_new_subscription(
                                     )
                                     .await;
                                     active_subscriptions.entry(target).and_modify(|sub_map| {
-                                        sub_map.remove(&sub_id);
+                                        sub_map.remove(&km_id);
                                     });
-                                    response_channels.remove(&keepalive_km_id);
                                 }
-                            }),
-                            sender,
-                        },
-                    );
+                            })),
+                        );
+                    }
+                    Err((provider_node, remote_sub_id)) => {
+                        // this is a remote sub, given by a relay node
+                        let (sender, rx) = tokio::sync::mpsc::channel(10);
+                        let keepalive_km_id = rand::random();
+                        let (keepalive_err_sender, keepalive_err_receiver) =
+                            tokio::sync::mpsc::channel(1);
+                        response_channels.insert(keepalive_km_id, keepalive_err_sender);
+                        let our = our.clone();
+                        let send_to_loop = send_to_loop.clone();
+                        let print_tx = print_tx.clone();
+                        let response_channels = response_channels.clone();
+                        subs.insert(
+                            remote_sub_id,
+                            ActiveSub::Remote {
+                                provider_node: provider_node.clone(),
+                                handle: tokio::spawn(async move {
+                                    if let Err(e) = maintain_remote_subscription(
+                                        &our,
+                                        &provider_node,
+                                        remote_sub_id,
+                                        sub_id,
+                                        keepalive_km_id,
+                                        rx,
+                                        keepalive_err_receiver,
+                                        &target,
+                                        &send_to_loop,
+                                    )
+                                    .await
+                                    {
+                                        verbose_print(
+                                            &print_tx,
+                                            "eth: closed subscription with provider node due to error",
+                                        )
+                                        .await;
+                                        kernel_message(
+                                            &our,
+                                            rand::random(),
+                                            target.clone(),
+                                            None,
+                                            true,
+                                            None,
+                                            EthSubResult::Err(e),
+                                            &send_to_loop,
+                                        )
+                                        .await;
+                                        active_subscriptions.entry(target).and_modify(|sub_map| {
+                                            sub_map.remove(&sub_id);
+                                        });
+                                        response_channels.remove(&keepalive_km_id);
+                                    }
+                                }),
+                                sender,
+                            },
+                        );
+                    }
                 }
             }
+            Ok(Err(e)) => {
+                error_message(&our, km_id, target.clone(), e, &send_to_loop).await;
+            }
+            Err(_) => {
+                error_message(
+                    &our,
+                    km_id,
+                    target.clone(),
+                    EthError::RpcTimeout,
+                    &send_to_loop,
+                )
+                .await;
+            }
         }
-        Err(e) => {
-            error_message(&state.our, km_id, target.clone(), e, &state.send_to_loop).await;
-        }
-    }
-}
+    });
+}
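
The core of this refactor is that create_new_subscription no longer awaits build_subscription inline: it clones the state handles it needs, moves them into a spawned task, and bounds the build with a 30-second tokio::time::timeout, so a slow or unreachable provider cannot stall the caller. The nested match then separates three outcomes: Ok(Ok(..)) for a built subscription, Ok(Err(..)) for a build error, and Err(_) for a timeout. A self-contained sketch of that spawn-plus-timeout shape, with a hypothetical `build` function standing in for build_subscription:

```rust
use std::time::Duration;

// Hypothetical stand-in for build_subscription: any slow, fallible async op.
async fn build(succeed: bool) -> Result<u64, String> {
    tokio::time::sleep(Duration::from_millis(50)).await;
    if succeed { Ok(42) } else { Err("no provider available".into()) }
}

#[tokio::main]
async fn main() {
    // clone/move what the task needs, then spawn so the caller returns
    // immediately instead of awaiting the build inline
    let handle = tokio::spawn(async move {
        match tokio::time::timeout(Duration::from_secs(30), build(true)).await {
            // outer Result reports the timeout; inner Result is the build itself
            Ok(Ok(sub_id)) => println!("subscription {sub_id} established"),
            Ok(Err(e)) => println!("build failed: {e}"),
            Err(_elapsed) => println!("build timed out"),
        }
    });
    handle.await.unwrap();
}
```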
/// terrible abuse of result in return type, yes, sorry
@@ -183,14 +201,14 @@ async fn build_subscription(
             None => {
                 if let Ok(()) = activate_url_provider(url_provider).await {
                     verbose_print(
-                        print_tx,
+                        &print_tx,
                         &format!("eth: activated url provider {}", url_provider.url),
                     )
                     .await;
                     url_provider.pubsub.as_ref().unwrap()
                 } else {
                     verbose_print(
-                        print_tx,
+                        &print_tx,
                         &format!("eth: could not activate url provider {}", url_provider.url),
                     )
                     .await;
@@ -213,7 +231,7 @@ async fn build_subscription(
             }
             Err(rpc_error) => {
                 verbose_print(
-                    print_tx,
+                    &print_tx,
                     &format!(
                         "eth: got error from url provider {}: {}",
                         url_provider.url, rpc_error
@@ -225,14 +243,21 @@ async fn build_subscription(
             }
         }
     }
+    // now we need a response channel which will be open for the duration
+    // of the subscription
     let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1);
     response_channels.insert(km_id, sender);
     // we need to create our own unique sub id because in the remote provider node,
     // all subs will be identified under our process address.
     let remote_sub_id = rand::random();
     for node_provider in &mut aps.nodes {
+        verbose_print(
+            &print_tx,
+            &format!(
+                "eth: attempting to fulfill via {}",
+                node_provider.kns_update.name
+            ),
+        )
+        .await;
         match forward_to_node_provider(
             &our,
             km_id,
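
Both this hunk and the remote-subscription branch earlier keep a response channel registered in response_channels for the lifetime of the operation (under km_id here, under a random keepalive_km_id for the keepalive), and the error paths remove the entry again so dead senders don't accumulate. A minimal sketch of that register-then-clean-up discipline; the real response_channels type isn't shown in this diff, so a shared Arc<Mutex<HashMap>> is assumed:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{mpsc, Mutex};

// Assumed shape for the shared channel map; the real type isn't in the diff.
type ResponseChannels = Arc<Mutex<HashMap<u64, mpsc::Sender<String>>>>;

#[tokio::main]
async fn main() {
    let channels: ResponseChannels = Arc::new(Mutex::new(HashMap::new()));

    // register a channel under an id before spawning, mirroring
    // response_channels.insert(keepalive_km_id, keepalive_err_sender)
    let keepalive_km_id: u64 = 7; // rand::random() in the real code
    let (err_tx, mut err_rx) = mpsc::channel::<String>(1);
    channels.lock().await.insert(keepalive_km_id, err_tx);

    let channels_clone = channels.clone();
    let task = tokio::spawn(async move {
        // stand-in for maintain_remote_subscription: wait for an error report
        if let Some(e) = err_rx.recv().await {
            println!("eth: subscription closed: {e}");
        }
        // always drop the registration when the task ends, mirroring
        // response_channels.remove(&keepalive_km_id)
        channels_clone.lock().await.remove(&keepalive_km_id);
    });

    // simulate a provider pushing an error through the registered channel
    if let Some(tx) = channels.lock().await.get(&keepalive_km_id) {
        let _ = tx.send("provider closed connection".into()).await;
    }
    task.await.unwrap();
}
```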