bring in changes from experimental #572

This commit is contained in:
hosted-fornet 2024-10-14 20:22:30 -07:00
parent e1e3c69ff5
commit d3ee13eb92
17 changed files with 476 additions and 222 deletions

View File

@ -26,7 +26,8 @@ members = [
"kinode/packages/terminal/help", "kinode/packages/terminal/hi", "kinode/packages/terminal/kfetch",
"kinode/packages/terminal/kill", "kinode/packages/terminal/m", "kinode/packages/terminal/top",
"kinode/packages/terminal/net_diagnostics", "kinode/packages/terminal/peer", "kinode/packages/terminal/peers",
"kinode/packages/tester/tester", "scripts/build_packages",
"kinode/packages/tester/tester",
"scripts/build_packages",
]
default-members = ["lib"]
resolver = "2"

View File

@ -62,6 +62,7 @@ nohash-hasher = "0.2.0"
open = "5.1.4"
public-ip = "0.2.2"
rand = "0.8.4"
regex = "1.11.0"
reqwest = "0.12.4"
ring = "0.17.8"
rmp-serde = "1.1.2"

View File

@ -54,6 +54,7 @@ fn main() -> anyhow::Result<()> {
std::fs::copy(&path_to_packages_zip_path, &canonical_packages_zip_path)?;
}
if !std::env::var("SKIP_BUILD_FRONTEND").is_ok() {
// build core frontends
let pwd = std::env::current_dir()?;
let core_frontends = vec!["src/register-ui"];
@ -68,6 +69,7 @@ fn main() -> anyhow::Result<()> {
return Err(anyhow::anyhow!("Failed to build frontend: {}", frontend));
}
}
}
let version = if let Ok(version) = std::env::var("DOCKER_BUILD_IMAGE_VERSION") {
// embed the DOCKER_BUILD_IMAGE_VERSION

View File

@ -9,6 +9,7 @@ use lib::types::core::*;
use lib::types::eth::*;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Mutex;
@ -148,7 +149,7 @@ struct ModuleState {
/// the name of this node
our: Arc<String>,
/// the home directory path
home_directory_path: String,
home_directory_path: PathBuf,
/// the access settings for this provider
access_settings: AccessSettings,
/// the set of providers we have available for all chains
@ -207,7 +208,7 @@ fn valid_method(method: &str) -> Option<&'static str> {
/// for the entire module.
pub async fn provider(
our: String,
home_directory_path: String,
home_directory_path: PathBuf,
configs: SavedConfigs,
send_to_loop: MessageSender,
mut recv_in_client: MessageReceiver,
@ -219,7 +220,7 @@ pub async fn provider(
// this merely describes whether our provider is available to other nodes
// and if so, which nodes are allowed to access it (public/whitelist/blacklist)
let access_settings: AccessSettings =
match tokio::fs::read_to_string(format!("{}/.eth_access_settings", home_directory_path))
match tokio::fs::read_to_string(home_directory_path.join(".eth_access_settings"))
.await
{
Ok(contents) => serde_json::from_str(&contents).unwrap(),
@ -1052,7 +1053,7 @@ async fn handle_eth_config_action(
// save providers and/or access settings, depending on necessity, to disk
if save_settings {
if let Ok(()) = tokio::fs::write(
format!("{}/.eth_access_settings", state.home_directory_path),
state.home_directory_path.join(".eth_access_settings"),
serde_json::to_string(&state.access_settings).unwrap(),
)
.await
@ -1062,7 +1063,7 @@ async fn handle_eth_config_action(
}
if save_providers {
if let Ok(()) = tokio::fs::write(
format!("{}/.eth_providers", state.home_directory_path),
state.home_directory_path.join(".eth_providers"),
serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
)
.await

View File

@ -2,6 +2,7 @@ use lib::types::core::{self as t, KERNEL_PROCESS_ID, STATE_PROCESS_ID, VFS_PROCE
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
path::PathBuf,
sync::Arc,
};
use tokio::{sync::mpsc, task::JoinHandle};
@ -76,7 +77,7 @@ async fn handle_kernel_request(
process_map: &mut t::ProcessMap,
caps_oracle: &t::CapMessageSender,
engine: &Engine,
home_directory_path: &str,
home_directory_path: &PathBuf,
) -> Option<()> {
let t::Message::Request(request) = km.message else {
return None;
@ -467,7 +468,7 @@ async fn start_process(
engine: &Engine,
caps_oracle: &t::CapMessageSender,
process_metadata: &StartProcessMetadata,
home_directory_path: &str,
home_directory_path: &PathBuf,
) -> anyhow::Result<()> {
let (send_to_process, recv_in_process) =
mpsc::channel::<Result<t::KernelMessage, t::WrappedSendError>>(PROCESS_CHANNEL_CAPACITY);
@ -501,7 +502,7 @@ async fn start_process(
km_blob_bytes,
caps_oracle.clone(),
engine.clone(),
home_directory_path.to_string(),
home_directory_path.clone(),
)),
);
Ok(())
@ -522,7 +523,7 @@ pub async fn kernel(
mut network_error_recv: t::NetworkErrorReceiver,
mut recv_debug_in_loop: t::DebugReceiver,
send_to_net: t::MessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
runtime_extensions: Vec<(
t::ProcessId,
t::MessageSender,
@ -538,7 +539,7 @@ pub async fn kernel(
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let vfs_path = format!("{home_directory_path}/vfs");
let vfs_path = home_directory_path.join("vfs");
tokio::fs::create_dir_all(&vfs_path)
.await
.expect("kernel startup fatal: couldn't create vfs dir");
@ -579,14 +580,22 @@ pub async fn kernel(
if persisted.wasm_bytes_handle.is_empty() {
continue;
}
let wasm_bytes_handle = persisted
.wasm_bytes_handle
.strip_prefix("/")
.unwrap_or_else(|| &persisted.wasm_bytes_handle);
#[cfg(unix)]
let path = vfs_path.join(wasm_bytes_handle);
#[cfg(target_os = "windows")]
let path = vfs_path.join(wasm_bytes_handle.replace(":", "_"));
// read wasm bytes directly from vfs
let wasm_bytes =
match tokio::fs::read(format!("{vfs_path}/{}", persisted.wasm_bytes_handle)).await {
let wasm_bytes = match tokio::fs::read(&path).await {
Ok(bytes) => bytes,
Err(e) => {
t::Printout::new(
0,
format!("kernel: couldn't read wasm bytes for process: {process_id}: {e}"),
format!("kernel: couldn't read wasm bytes for process: {process_id} at {path:?}: {e}"),
)
.send(&send_to_terminal)
.await;
@ -641,7 +650,7 @@ pub async fn kernel(
&engine,
&caps_oracle_sender,
&start_process_metadata,
home_directory_path.as_str(),
&home_directory_path,
)
.await
{

View File

@ -2,6 +2,7 @@ use crate::KERNEL_PROCESS_ID;
use lib::{types::core as t, v0::ProcessV0, Process};
use std::{
collections::{HashMap, VecDeque},
path::PathBuf,
sync::Arc,
};
use tokio::{fs, task::JoinHandle};
@ -84,18 +85,32 @@ impl WasiView for ProcessWasiV0 {
}
async fn make_table_and_wasi(
home_directory_path: String,
home_directory_path: PathBuf,
process_state: &ProcessState,
) -> (Table, WasiCtx, MemoryOutputPipe) {
let table = Table::new();
let wasi_stderr = MemoryOutputPipe::new(STACK_TRACE_SIZE);
let tmp_path = format!(
"{}/vfs/{}:{}/tmp",
home_directory_path,
#[cfg(unix)]
let tmp_path = home_directory_path
.join("vfs")
.join(format!(
"{}:{}",
process_state.metadata.our.process.package(),
process_state.metadata.our.process.publisher()
);
))
.join("tmp");
#[cfg(target_os = "windows")]
let tmp_path = home_directory_path
.join("vfs")
.join(format!(
"{}_{}",
process_state.metadata.our.process.package(),
process_state.metadata.our.process.publisher()
))
.join("tmp");
let tmp_path = tmp_path.to_str().unwrap();
let mut wasi = WasiCtxBuilder::new();
@ -107,14 +122,9 @@ async fn make_table_and_wasi(
.await
{
if let Ok(wasi_tempdir) =
Dir::open_ambient_dir(tmp_path.clone(), wasi_common::sync::ambient_authority())
Dir::open_ambient_dir(tmp_path, wasi_common::sync::ambient_authority())
{
wasi.preopened_dir(
wasi_tempdir,
DirPerms::all(),
FilePerms::all(),
tmp_path.clone(),
)
wasi.preopened_dir(wasi_tempdir, DirPerms::all(), FilePerms::all(), tmp_path)
.env("TEMP_DIR", tmp_path);
}
}
@ -125,7 +135,7 @@ async fn make_table_and_wasi(
async fn make_component(
engine: Engine,
wasm_bytes: &[u8],
home_directory_path: String,
home_directory_path: PathBuf,
process_state: ProcessState,
) -> anyhow::Result<(Process, Store<ProcessWasi>, MemoryOutputPipe)> {
let component =
@ -219,7 +229,7 @@ pub async fn make_process_loop(
wasm_bytes: Vec<u8>,
caps_oracle: t::CapMessageSender,
engine: Engine,
home_directory_path: String,
home_directory_path: PathBuf,
) -> anyhow::Result<()> {
// before process can be instantiated, need to await 'run' message from kernel
let mut pre_boot_queue = Vec::<Result<t::KernelMessage, t::WrappedSendError>>::new();

View File

@ -9,6 +9,7 @@ use lib::types::core::{
use rocksdb::OptimisticTransactionDB;
use std::{
collections::{HashMap, VecDeque},
path::PathBuf,
sync::Arc,
};
use tokio::{fs, sync::Mutex};
@ -16,7 +17,7 @@ use tokio::{fs, sync::Mutex};
#[derive(Clone)]
struct KvState {
our: Arc<Address>,
kv_path: Arc<String>,
kv_path: Arc<PathBuf>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
@ -31,11 +32,11 @@ impl KvState {
our: Address,
send_to_terminal: PrintSender,
send_to_loop: MessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
) -> Self {
Self {
our: Arc::new(our),
kv_path: Arc::new(format!("{home_directory_path}/kv")),
kv_path: Arc::new(home_directory_path.join("kv")),
send_to_loop,
send_to_terminal,
open_kvs: Arc::new(DashMap::new()),
@ -60,7 +61,14 @@ impl KvState {
self.remove_db(key.0, key.1).await;
}
let db_path = format!("{}/{}/{}", self.kv_path.as_str(), package_id, db);
#[cfg(unix)]
let db_path = self.kv_path.join(format!("{package_id}")).join(&db);
#[cfg(target_os = "windows")]
let db_path = self.kv_path
.join(format!("{}_{}", package_id._package(), package_id._publisher()))
.join(&db);
fs::create_dir_all(&db_path).await?;
self.open_kvs.insert(
@ -94,7 +102,7 @@ pub async fn kv(
send_to_terminal: PrintSender,
mut recv_from_loop: MessageReceiver,
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
) -> anyhow::Result<()> {
let our = Address::new(our_node.as_str(), KV_PROCESS_ID.clone());
@ -102,7 +110,7 @@ pub async fn kv(
let mut state = KvState::new(our, send_to_terminal, send_to_loop, home_directory_path);
if let Err(e) = fs::create_dir_all(state.kv_path.as_str()).await {
if let Err(e) = fs::create_dir_all(&state.kv_path).await {
panic!("failed creating kv dir! {e:?}");
}
@ -500,11 +508,16 @@ async fn check_caps(
.remove_db(request.package_id.clone(), request.db.clone())
.await;
fs::remove_dir_all(format!(
"{}/{}/{}",
state.kv_path, request.package_id, request.db
))
.await?;
#[cfg(unix)]
let db_path = state.kv_path
.join(format!("{}", request.package_id))
.join(&request.db);
#[cfg(target_os = "windows")]
let db_path = state.kv_path
.join(format!("{}_{}", request.package_id._package(), request.package_id._publisher()))
.join(&request.db);
fs::remove_dir_all(&db_path).await?;
Ok(())
}

View File

@ -9,6 +9,7 @@ use lib::types::core::{
#[cfg(feature = "simulation-mode")]
use ring::{rand::SystemRandom, signature, signature::KeyPair};
use std::env;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::mpsc;
@ -81,7 +82,12 @@ async fn main() {
let home_directory_path = matches
.get_one::<String>("home")
.expect("home directory required");
create_home_directory(&home_directory_path).await;
if let Err(e) = tokio::fs::create_dir_all(home_directory_path).await {
panic!("failed to create home directory: {e:?}");
}
let home_directory_path = std::fs::canonicalize(&home_directory_path)
    .unwrap_or_else(|_| panic!("specified home directory {home_directory_path} not found"));
println!("home at {home_directory_path:?}\r");
let http_server_port = set_http_server_port(matches.get_one::<u16>("port")).await;
let ws_networking_port = matches.get_one::<u16>("ws-port");
#[cfg(not(feature = "simulation-mode"))]
@ -108,7 +114,7 @@ async fn main() {
// default eth providers/routers
let mut eth_provider_config: lib::eth::SavedConfigs = if let Ok(contents) =
tokio::fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await
tokio::fs::read_to_string(home_directory_path.join(".eth_providers")).await
{
if let Ok(contents) = serde_json::from_str(&contents) {
println!("loaded saved eth providers\r");
@ -128,7 +134,7 @@ async fn main() {
});
// save the new provider config
tokio::fs::write(
format!("{}/.eth_providers", home_directory_path),
home_directory_path.join(".eth_providers"),
serde_json::to_string(&eth_provider_config).unwrap(),
)
.await
@ -206,7 +212,7 @@ async fn main() {
let (our, encoded_keyfile, decoded_keyfile) = simulate_node(
fake_node_name.cloned(),
password.cloned(),
home_directory_path,
&home_directory_path,
(
ws_tcp_handle.expect("need ws networking for simulation mode"),
ws_flag_used,
@ -317,10 +323,13 @@ async fn main() {
let networking_keypair_arc = Arc::new(decoded_keyfile.networking_keypair);
let our_name_arc = Arc::new(our.name.clone());
let home_directory_string = matches
.get_one::<String>("home")
.expect("home directory required");
let (kernel_process_map, db, reverse_cap_index) = state::load_state(
our.name.clone(),
networking_keypair_arc.clone(),
home_directory_path.clone(),
home_directory_string.clone(),
runtime_extensions.clone(),
)
.await
@ -458,7 +467,7 @@ async fn main() {
quit = terminal::terminal(
our.clone(),
VERSION,
home_directory_path.into(),
home_directory_path.clone(),
kernel_message_sender.clone(),
kernel_debug_message_sender,
print_sender.clone(),
@ -567,7 +576,7 @@ async fn setup_networking(
pub async fn simulate_node(
fake_node_name: Option<String>,
password: Option<String>,
home_directory_path: &str,
home_directory_path: &Path,
(ws_networking, _ws_used): (tokio::net::TcpListener, bool),
fakechain_port: Option<u16>,
) -> (Identity, Vec<u8>, Keyfile) {
@ -578,7 +587,7 @@ pub async fn simulate_node(
panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
}
Some(password) => {
let keyfile = tokio::fs::read(format!("{home_directory_path}/.keys"))
let keyfile = tokio::fs::read(home_directory_path.join(".keys"))
.await
.expect("could not read keyfile");
let decoded = keygen::decode_keyfile(&keyfile, &password)
@ -647,10 +656,7 @@ pub async fn simulate_node(
&decoded_keyfile.file_key,
);
tokio::fs::write(
format!("{home_directory_path}/.keys"),
encoded_keyfile.clone(),
)
tokio::fs::write(home_directory_path.join(".keys"), encoded_keyfile.clone())
.await
.expect("Failed to write keyfile");
@ -777,7 +783,7 @@ async fn find_public_ip() -> std::net::Ipv4Addr {
/// that updates their PKI info on-chain.
#[cfg(not(feature = "simulation-mode"))]
async fn serve_register_fe(
home_directory_path: &str,
home_directory_path: &Path,
our_ip: String,
ws_networking: (Option<tokio::net::TcpListener>, bool),
tcp_networking: (Option<tokio::net::TcpListener>, bool),
@ -787,7 +793,7 @@ async fn serve_register_fe(
) -> (Identity, Vec<u8>, Keyfile) {
let (kill_tx, kill_rx) = tokio::sync::oneshot::channel::<bool>();
let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(format!("{}/.keys", home_directory_path))
let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(home_directory_path.join(".keys"))
.await
.ok();
@ -810,7 +816,7 @@ async fn serve_register_fe(
}
};
tokio::fs::write(format!("{home_directory_path}/.keys"), &encoded_keyfile)
tokio::fs::write(home_directory_path.join(".keys"), &encoded_keyfile)
.await
.unwrap();
@ -824,7 +830,7 @@ async fn serve_register_fe(
#[cfg(not(feature = "simulation-mode"))]
async fn login_with_password(
home_directory_path: &str,
home_directory_path: &Path,
our_ip: String,
ws_networking: (Option<tokio::net::TcpListener>, bool),
tcp_networking: (Option<tokio::net::TcpListener>, bool),
@ -836,7 +842,7 @@ async fn login_with_password(
sha2::{Digest, Sha256},
};
let disk_keyfile: Vec<u8> = tokio::fs::read(format!("{}/.keys", home_directory_path))
let disk_keyfile: Vec<u8> = tokio::fs::read(home_directory_path.join(".keys"))
.await
.expect("could not read keyfile");
@ -878,7 +884,7 @@ async fn login_with_password(
.await
.expect("information used to boot does not match information onchain");
tokio::fs::write(format!("{home_directory_path}/.keys"), &disk_keyfile)
tokio::fs::write(home_directory_path.join(".keys"), &disk_keyfile)
.await
.unwrap();

View File

@ -111,7 +111,7 @@ async fn connect_via_router(
routers
};
for router_name in &routers_shuffled {
if router_name.as_ref() == ext.our.name {
if router_name == &ext.our.name {
// we can't route through ourselves
continue;
}

View File

@ -94,9 +94,14 @@ pub async fn register(
let ws_port = warp::any().map(move || (ws_port, ws_flag_used));
let tcp_port = warp::any().map(move || (tcp_port, tcp_flag_used));
#[cfg(unix)]
let static_files =
warp::path("assets").and(static_dir::static_dir!("src/register-ui/build/assets/"));
#[cfg(target_os = "windows")]
let static_files =
warp::path("assets").and(static_dir::static_dir!("src\\register-ui\\build\\assets\\"));
#[cfg(unix)]
let react_app = warp::path::end()
.or(warp::path("login"))
.or(warp::path("commit-os-name"))
@ -108,6 +113,18 @@ pub async fn register(
.or(warp::path("custom-register"))
.and(warp::get())
.map(move |_| warp::reply::html(include_str!("register-ui/build/index.html")));
#[cfg(target_os = "windows")]
let react_app = warp::path::end()
.or(warp::path("login"))
.or(warp::path("commit-os-name"))
.or(warp::path("mint-os-name"))
.or(warp::path("claim-invite"))
.or(warp::path("reset"))
.or(warp::path("import-keyfile"))
.or(warp::path("set-password"))
.or(warp::path("custom-register"))
.and(warp::get())
.map(move |_| warp::reply::html(include_str!("register-ui\\build\\index.html")));
let boot_provider = provider.clone();
let login_provider = provider.clone();

View File

@ -10,6 +10,7 @@ use lib::types::core::{
use rusqlite::Connection;
use std::{
collections::{HashMap, HashSet, VecDeque},
path::PathBuf,
sync::Arc,
};
use tokio::{fs, sync::Mutex};
@ -25,7 +26,7 @@ lazy_static::lazy_static! {
#[derive(Clone)]
struct SqliteState {
our: Arc<Address>,
sqlite_path: Arc<String>,
sqlite_path: Arc<PathBuf>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
@ -39,11 +40,11 @@ impl SqliteState {
our: Address,
send_to_terminal: PrintSender,
send_to_loop: MessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
) -> Self {
Self {
our: Arc::new(our),
sqlite_path: Arc::new(format!("{home_directory_path}/sqlite")),
sqlite_path: Arc::new(home_directory_path.join("sqlite")),
send_to_loop,
send_to_terminal,
open_dbs: Arc::new(DashMap::new()),
@ -68,7 +69,13 @@ impl SqliteState {
self.remove_db(key.0, key.1).await;
}
let db_path = format!("{}/{}/{}", self.sqlite_path.as_str(), package_id, db);
#[cfg(unix)]
let db_path = self.sqlite_path.join(format!("{package_id}")).join(&db);
#[cfg(target_os = "windows")]
let db_path = self.sqlite_path
.join(format!("{}_{}", package_id._package(), package_id._publisher()))
.join(&db);
fs::create_dir_all(&db_path).await?;
let db_file_path = format!("{}/{}.db", db_path, db);
@ -105,7 +112,7 @@ pub async fn sqlite(
send_to_terminal: PrintSender,
mut recv_from_loop: MessageReceiver,
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
) -> anyhow::Result<()> {
let our = Address::new(our_node.as_str(), SQLITE_PROCESS_ID.clone());
@ -113,7 +120,7 @@ pub async fn sqlite(
let mut state = SqliteState::new(our, send_to_terminal, send_to_loop, home_directory_path);
if let Err(e) = fs::create_dir_all(state.sqlite_path.as_str()).await {
if let Err(e) = fs::create_dir_all(&state.sqlite_path).await {
panic!("failed creating sqlite dir! {e:?}");
}
@ -515,11 +522,14 @@ async fn check_caps(
.remove_db(request.package_id.clone(), request.db.clone())
.await;
fs::remove_dir_all(format!(
"{}/{}/{}",
state.sqlite_path, request.package_id, request.db
))
.await?;
#[cfg(unix)]
let db_path = state.sqlite_path
.join(format!("{}", request.package_id))
.join(&request.db);
#[cfg(target_os = "windows")]
let db_path = state.sqlite_path
.join(format!("{}_{}", request.package_id._package(), request.package_id._publisher()))
.join(&request.db);
fs::remove_dir_all(&db_path).await?;
Ok(())
}

View File

@ -9,7 +9,7 @@ use rocksdb::{checkpoint::Checkpoint, Options, DB};
use std::{
collections::{HashMap, VecDeque},
io::Read,
path::Path,
path::PathBuf,
sync::Arc,
};
use tokio::{fs, io::AsyncWriteExt, sync::Mutex};
@ -20,13 +20,19 @@ const FILE_TO_METADATA: &str = "file_to_metadata.json";
pub async fn load_state(
our_name: String,
keypair: Arc<signature::Ed25519KeyPair>,
home_directory_path: String,
home_directory_string: String,
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
) -> Result<(ProcessMap, DB, ReverseCapIndex), StateError> {
let state_path = format!("{home_directory_path}/kernel");
let home_directory_path = std::fs::canonicalize(&home_directory_string)?;
let state_path = home_directory_path.join("kernel");
if let Err(e) = fs::create_dir_all(&state_path).await {
panic!("failed creating kernel state dir! {e:?}");
}
// use String to not upset rocksdb:
// * on Unix, works as expected
// * on Windows, would normally use std::path to be cross-platform,
// but here rocksdb appends a `/LOG` which breaks the path
let state_path = format!("{home_directory_string}/kernel");
let mut opts = Options::default();
opts.create_if_missing(true);
@ -84,7 +90,7 @@ pub async fn state_sender(
send_to_terminal: PrintSender,
mut recv_state: MessageReceiver,
db: DB,
home_directory_path: String,
home_directory_path: PathBuf,
) -> Result<(), anyhow::Error> {
let db = Arc::new(db);
let home_directory_path = Arc::new(home_directory_path);
@ -159,7 +165,7 @@ async fn handle_request(
kernel_message: KernelMessage,
db: Arc<DB>,
send_to_loop: &MessageSender,
home_directory_path: &str,
home_directory_path: &PathBuf,
) -> Result<(), StateError> {
let KernelMessage {
id,
@ -244,9 +250,8 @@ async fn handle_request(
}
}
StateAction::Backup => {
let checkpoint_dir = format!("{home_directory_path}/kernel/backup");
if Path::new(&checkpoint_dir).exists() {
let checkpoint_dir = home_directory_path.join("kernel").join("backup");
if checkpoint_dir.exists() {
fs::remove_dir_all(&checkpoint_dir).await?;
}
let checkpoint = Checkpoint::new(&db).map_err(|e| StateError::RocksDBError {
@ -303,7 +308,7 @@ async fn handle_request(
async fn bootstrap(
our_name: &str,
keypair: Arc<signature::Ed25519KeyPair>,
home_directory_path: String,
home_directory_path: PathBuf,
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
process_map: &mut ProcessMap,
reverse_cap_index: &mut ReverseCapIndex,
@ -396,22 +401,28 @@ async fn bootstrap(
let package_publisher = package_metadata.properties.publisher.as_str();
// create a new package in VFS
#[cfg(unix)]
let our_drive_name = [package_name, package_publisher].join(":");
let pkg_path = format!("{}/vfs/{}/pkg", &home_directory_path, &our_drive_name);
#[cfg(target_os = "windows")]
let our_drive_name = [package_name, package_publisher].join("_");
let pkg_path = home_directory_path
.join("vfs")
.join(&our_drive_name)
.join("pkg");
// delete anything currently residing in the pkg folder
let pkg_path_buf = std::path::PathBuf::from(&pkg_path);
if pkg_path_buf.exists() {
if pkg_path.exists() {
fs::remove_dir_all(&pkg_path).await?;
}
fs::create_dir_all(&pkg_path)
.await
.expect("bootstrap vfs dir pkg creation failed!");
let drive_path = format!("/{}/pkg", &our_drive_name);
let drive_path = format!("/{}/pkg", [package_name, package_publisher].join(":"));
// save the zip itself inside pkg folder, for sharing with others
let mut zip_file =
fs::File::create(format!("{}/{}.zip", &pkg_path, &our_drive_name)).await?;
fs::File::create(pkg_path.join(format!("{}.zip", &our_drive_name))).await?;
let package_zip_bytes = package.clone().into_inner().into_inner();
zip_file.write_all(&package_zip_bytes).await?;
@ -434,7 +445,7 @@ async fn bootstrap(
};
let file_path_str = file_path.to_string_lossy().to_string();
let full_path = Path::new(&pkg_path).join(&file_path_str);
let full_path = pkg_path.join(&file_path_str);
if file.is_dir() {
// It's a directory, create it

View File

@ -1,7 +1,7 @@
use chrono::{Datelike, Local, Timelike};
use crossterm::{
cursor,
event::{Event, EventStream, KeyCode, KeyEvent, KeyModifiers},
event::{Event, EventStream, KeyCode, KeyEvent, KeyEventKind, KeyModifiers},
execute, style,
style::Print,
terminal::{self, ClearType},
@ -14,7 +14,9 @@ use lib::types::core::{
use std::{
fs::{read_to_string, OpenOptions},
io::BufWriter,
path::PathBuf,
};
#[cfg(unix)]
use tokio::signal::unix::{signal, SignalKind};
use unicode_segmentation::UnicodeSegmentation;
@ -174,7 +176,7 @@ impl CurrentLine {
pub async fn terminal(
our: Identity,
version: &str,
home_directory_path: String,
home_directory_path: PathBuf,
mut event_loop: MessageSender,
mut debug_event_loop: DebugSender,
mut print_tx: PrintSender,
@ -202,9 +204,7 @@ pub async fn terminal(
// the terminal stores the most recent 1000 lines entered by user
// in history. TODO should make history size adjustable.
let history_path = std::fs::canonicalize(&home_directory_path)
.expect("terminal: could not get path for .terminal_history file")
.join(".terminal_history");
let history_path = home_directory_path.join(".terminal_history");
let history = read_to_string(&history_path).unwrap_or_default();
let history_handle = OpenOptions::new()
.append(true)
@ -217,9 +217,7 @@ pub async fn terminal(
// if CTRL+L is used to turn on logging, all prints to terminal
// will also be written with their full timestamp to the .terminal_log file.
// logging mode is always on by default
let log_dir_path = std::fs::canonicalize(&home_directory_path)
.expect("terminal: could not get path for .terminal_logs dir")
.join(".terminal_logs");
let log_dir_path = home_directory_path.join(".terminal_logs");
let logger = utils::Logger::new(log_dir_path, max_log_size, number_log_files);
let mut state = State {
@ -243,22 +241,26 @@ pub async fn terminal(
};
// use to trigger cleanup if receive signal to kill process
let mut sigalrm =
signal(SignalKind::alarm()).expect("terminal: failed to set up SIGALRM handler");
let mut sighup =
signal(SignalKind::hangup()).expect("terminal: failed to set up SIGHUP handler");
let mut sigint =
signal(SignalKind::interrupt()).expect("terminal: failed to set up SIGINT handler");
let mut sigpipe =
signal(SignalKind::pipe()).expect("terminal: failed to set up SIGPIPE handler");
let mut sigquit =
signal(SignalKind::quit()).expect("terminal: failed to set up SIGQUIT handler");
let mut sigterm =
signal(SignalKind::terminate()).expect("terminal: failed to set up SIGTERM handler");
let mut sigusr1 =
signal(SignalKind::user_defined1()).expect("terminal: failed to set up SIGUSR1 handler");
let mut sigusr2 =
signal(SignalKind::user_defined2()).expect("terminal: failed to set up SIGUSR2 handler");
#[cfg(unix)]
let (
mut sigalrm,
mut sighup,
mut sigint,
mut sigpipe,
mut sigquit,
mut sigterm,
mut sigusr1,
mut sigusr2,
) = (
signal(SignalKind::alarm()).expect("terminal: failed to set up SIGALRM handler"),
signal(SignalKind::hangup()).expect("terminal: failed to set up SIGHUP handler"),
signal(SignalKind::interrupt()).expect("terminal: failed to set up SIGINT handler"),
signal(SignalKind::pipe()).expect("terminal: failed to set up SIGPIPE handler"),
signal(SignalKind::quit()).expect("terminal: failed to set up SIGQUIT handler"),
signal(SignalKind::terminate()).expect("terminal: failed to set up SIGTERM handler"),
signal(SignalKind::user_defined1()).expect("terminal: failed to set up SIGUSR1 handler"),
signal(SignalKind::user_defined2()).expect("terminal: failed to set up SIGUSR2 handler"),
);
// if the verbosity boot flag was **not** set to "full event loop", tell kernel
// the kernel will try and print all events by default so that booting with
@ -274,6 +276,7 @@ pub async fn terminal(
if !is_detached {
let mut reader = EventStream::new();
loop {
#[cfg(unix)]
tokio::select! {
Some(printout) = print_rx.recv() => {
handle_printout(printout, &mut state)?;
@ -292,9 +295,21 @@ pub async fn terminal(
_ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
_ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
}
#[cfg(target_os = "windows")]
tokio::select! {
Some(printout) = print_rx.recv() => {
handle_printout(printout, &mut state)?;
}
Some(Ok(event)) = reader.next().fuse() => {
if handle_event(&our, event, &mut state, &mut event_loop, &mut debug_event_loop, &mut print_tx).await? {
break;
}
}
}
}
} else {
loop {
#[cfg(unix)]
tokio::select! {
Some(printout) = print_rx.recv() => {
handle_printout(printout, &mut state)?;
@ -308,6 +323,10 @@ pub async fn terminal(
_ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
_ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
}
#[cfg(target_os = "windows")]
if let Some(printout) = print_rx.recv().await {
handle_printout(printout, &mut state)?;
};
}
};
Ok(())
@ -353,7 +372,7 @@ fn handle_printout(printout: Printout, state: &mut State) -> anyhow::Result<()>
Ok(())
}
/// returns True if runtime should exit due to CTRL+C or CTRL+D
/// returns true if runtime should exit due to CTRL+C or CTRL+D
async fn handle_event(
our: &Identity,
event: Event,
@ -364,19 +383,14 @@ async fn handle_event(
) -> anyhow::Result<bool> {
let State {
stdout,
command_history,
win_cols,
win_rows,
current_line,
in_step_through,
search_depth,
logging_mode,
verbose_mode,
..
} = state;
// lock here so that runtime can still use println! without freezing..
// can lock before loop later if we want to reduce overhead
let mut stdout = stdout.lock();
let stdout = stdout.lock();
match event {
//
// RESIZE: resize is super annoying because this event trigger often
@ -392,9 +406,7 @@ async fn handle_event(
cursor::MoveTo(0, height),
terminal::Clear(ClearType::CurrentLine)
)?;
// since we subtract prompt_len from win_cols, win_cols must always
// be >= prompt_len
*win_cols = std::cmp::max(width - 1, current_line.prompt_len as u16);
*win_cols = width - 1;
*win_rows = height;
if current_line.cursor_col + current_line.prompt_len as u16 > *win_cols {
current_line.cursor_col = *win_cols - current_line.prompt_len as u16;
@ -418,19 +430,73 @@ async fn handle_event(
*win_cols - current_line.prompt_len as u16,
);
}
Event::Key(key_event) => {
if let Some(should_exit) = handle_key_event(
our,
key_event,
state,
event_loop,
debug_event_loop,
print_tx,
stdout,
)
.await?
{
return Ok(should_exit);
}
}
_ => {
// some terminal event we don't care about, yet
}
}
if state.search_mode {
state.search(&our.name)?;
} else {
state.display_current_input_line(false)?;
}
Ok(false)
}
/// returns Some(true) if runtime should exit due to CTRL+C or CTRL+D,
/// Some(false) if caller should simply return `false`
/// None if caller should fall through
async fn handle_key_event(
our: &Identity,
key_event: KeyEvent,
state: &mut State,
event_loop: &mut MessageSender,
debug_event_loop: &mut DebugSender,
print_tx: &mut PrintSender,
mut stdout: std::io::StdoutLock<'static>,
) -> anyhow::Result<Option<bool>> {
if key_event.kind == KeyEventKind::Release {
return Ok(Some(false));
}
let State {
command_history,
win_cols,
win_rows,
current_line,
in_step_through,
search_depth,
logging_mode,
verbose_mode,
..
} = state;
match key_event {
//
// CTRL+C, CTRL+D: turn off the node
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
})
| Event::Key(KeyEvent {
}
| KeyEvent {
code: KeyCode::Char('d'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
execute!(
stdout,
// print goes immediately above the dedicated input line at bottom
@ -438,16 +504,16 @@ async fn handle_event(
terminal::Clear(ClearType::CurrentLine),
Print("exit code received"),
)?;
return Ok(true);
return Ok(Some(true));
}
//
// CTRL+V: toggle through verbosity modes
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('v'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
// go from low to high, then reset to 0
match verbose_mode {
0 => *verbose_mode = 1,
@ -483,16 +549,16 @@ async fn handle_event(
)
.send(&print_tx)
.await;
return Ok(false);
return Ok(Some(false));
}
//
// CTRL+J: toggle debug mode -- makes system-level event loop step-through
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('j'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
let _ = debug_event_loop.send(DebugCommand::ToggleStepthrough).await;
*in_step_through = !*in_step_through;
Printout::new(
@ -507,27 +573,27 @@ async fn handle_event(
)
.send(&print_tx)
.await;
return Ok(false);
return Ok(Some(false));
}
//
// CTRL+S: step through system-level event loop (when in step-through mode)
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('s'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
let _ = debug_event_loop.send(DebugCommand::Step).await;
return Ok(false);
return Ok(Some(false));
}
//
// CTRL+L: toggle logging mode
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('l'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
*logging_mode = !*logging_mode;
Printout::new(
0,
@ -535,21 +601,21 @@ async fn handle_event(
)
.send(&print_tx)
.await;
return Ok(false);
return Ok(Some(false));
}
//
// UP / CTRL+P: go up one command in history
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Up, ..
})
| Event::Key(KeyEvent {
}
| KeyEvent {
code: KeyCode::Char('p'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
if state.search_mode {
return Ok(false);
return Ok(Some(false));
}
// go up one command in history
match command_history.get_prev(&current_line.line) {
@ -566,22 +632,22 @@ async fn handle_event(
}
}
state.display_current_input_line(true)?;
return Ok(false);
return Ok(Some(false));
}
//
// DOWN / CTRL+N: go down one command in history
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Down,
..
})
| Event::Key(KeyEvent {
}
| KeyEvent {
code: KeyCode::Char('n'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
if state.search_mode {
return Ok(false);
return Ok(Some(false));
}
// go down one command in history
match command_history.get_next() {
@ -598,18 +664,18 @@ async fn handle_event(
}
}
state.display_current_input_line(true)?;
return Ok(false);
return Ok(Some(false));
}
//
// CTRL+A: jump to beginning of line
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('a'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
if state.search_mode {
return Ok(false);
return Ok(Some(false));
}
current_line.line_col = 0;
current_line.cursor_col = 0;
@ -617,13 +683,13 @@ async fn handle_event(
//
// CTRL+E: jump to end of line
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('e'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
if state.search_mode {
return Ok(false);
return Ok(Some(false));
}
current_line.line_col = current_line.line.graphemes(true).count();
current_line.cursor_col = std::cmp::min(
@ -635,11 +701,11 @@ async fn handle_event(
// CTRL+R: enter search mode
// if already in search mode, increase search depth
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('r'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
if state.search_mode {
*search_depth += 1;
}
@ -648,11 +714,11 @@ async fn handle_event(
//
// CTRL+G: exit search mode
//
Event::Key(KeyEvent {
KeyEvent {
code: KeyCode::Char('g'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
} => {
// just show true current line as usual
state.search_mode = false;
*search_depth = 0;
@ -660,7 +726,7 @@ async fn handle_event(
//
// KEY: handle keypress events
//
Event::Key(k) => {
k => {
match k.code {
//
// CHAR: write a single character
@ -677,7 +743,7 @@ async fn handle_event(
//
KeyCode::Backspace => {
if current_line.line_col == 0 {
return Ok(false);
return Ok(Some(false));
} else {
current_line.line_col -= 1;
let c = current_line.delete_char();
@ -689,7 +755,7 @@ async fn handle_event(
//
KeyCode::Delete => {
if current_line.line_col == current_line.line.graphemes(true).count() {
return Ok(false);
return Ok(Some(false));
}
current_line.delete_char();
}
@ -700,7 +766,7 @@ async fn handle_event(
if current_line.cursor_col as usize == 0 {
if current_line.line_col == 0 {
// at the very beginning of the current typed line
return Ok(false);
return Ok(Some(false));
} else {
// virtual scroll leftward through line
current_line.line_col -= 1;
@ -716,7 +782,7 @@ async fn handle_event(
if current_line.line_col != 0 {
current_line.line_col -= 1;
}
return Ok(false);
return Ok(Some(false));
}
}
//
@ -725,7 +791,7 @@ async fn handle_event(
KeyCode::Right => {
if current_line.line_col == current_line.line.graphemes(true).count() {
// at the very end of the current typed line
return Ok(false);
return Ok(Some(false));
};
if (current_line.cursor_col + current_line.prompt_len as u16) < (*win_cols - 1)
{
@ -737,7 +803,7 @@ async fn handle_event(
execute!(stdout, cursor::MoveRight(width))?;
current_line.cursor_col += width;
current_line.line_col += 1;
return Ok(false);
return Ok(Some(false));
} else {
// virtual scroll rightward through line
current_line.line_col += 1;
@ -791,14 +857,6 @@ async fn handle_event(
}
}
}
_ => {
// some terminal event we don't care about, yet
}
}
if state.search_mode {
state.search(&our.name)?;
} else {
state.display_current_input_line(false)?;
}
Ok(false)
Ok(None)
}

View File

@ -412,7 +412,11 @@ fn make_log_writer(log_dir_path: &Path) -> anyhow::Result<BufWriter<std::fs::Fil
std::fs::create_dir(log_dir_path)?;
}
let now = chrono::Local::now();
#[cfg(unix)]
let log_name = format!("{}.log", now.format("%Y-%m-%d-%H:%M:%S"));
#[cfg(target_os = "windows")]
let log_name = format!("{}.log", now.format("%Y-%m-%d-%H_%M_%S"));
let log_path = log_dir_path.join(log_name);
let log_handle = OpenOptions::new()
.append(true)

View File

@ -40,9 +40,9 @@ pub async fn vfs(
send_to_terminal: PrintSender,
mut recv_from_loop: MessageReceiver,
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
home_directory_path: PathBuf,
) -> anyhow::Result<()> {
let vfs_path = format!("{home_directory_path}/vfs");
let vfs_path = home_directory_path.join("vfs");
fs::create_dir_all(&vfs_path)
.await
@ -342,8 +342,8 @@ async fn handle_request(
}
// current prepend to filepaths needs to be: /package_id/drive/path
let (package_id, drive, rest) = parse_package_and_drive(&request.path, &vfs_path).await?;
let drive = format!("/{package_id}/{drive}");
let (package_id, drive, rest) = parse_package_and_drive(&request.path, &vfs_path)?;
let drive = format!("{package_id}/{drive}");
let action = request.action;
let path = PathBuf::from(&request.path);
@ -364,10 +364,15 @@ async fn handle_request(
let base_drive = join_paths_safely(&vfs_path, &drive);
let path = join_paths_safely(&base_drive, &rest);
#[cfg(target_os = "windows")]
let (path, internal_path) = (internal_path_to_external(&path), path);
let (response_body, bytes) = match action {
VfsAction::CreateDrive => {
let drive_path = join_paths_safely(vfs_path, &drive);
fs::create_dir_all(drive_path).await?;
#[cfg(target_os = "windows")]
let base_drive = internal_path_to_external(&base_drive);
fs::create_dir_all(&base_drive).await?;
(VfsResponse::Ok, None)
}
VfsAction::CreateDir => {
@ -461,8 +466,19 @@ async fn handle_request(
let metadata = entry.metadata().await?;
let file_type = get_file_type(&metadata);
#[cfg(unix)]
let relative_path = relative_path.display().to_string();
#[cfg(target_os = "windows")]
let relative_path = {
let internal_path = internal_path
.strip_prefix(vfs_path)
.unwrap_or(&internal_path);
replace_path_prefix(&internal_path, &relative_path)
};
let dir_entry = DirEntry {
path: relative_path.display().to_string(),
path: relative_path,
file_type,
};
entries.push(dir_entry);
@ -588,7 +604,7 @@ async fn handle_request(
}
};
fs::create_dir_all(path.clone()).await?;
fs::create_dir_all(&path).await?;
// loop through items in archive; recursively add to root
for i in 0..zip.len() {
@ -612,7 +628,7 @@ async fn handle_request(
if is_file {
fs::write(&local_path, &file_contents).await?;
} else if is_dir {
fs::create_dir_all(local_path).await?;
fs::create_dir_all(&local_path).await?;
} else {
return Err(VfsError::CreateDirError {
path: path.display().to_string(),
@ -654,7 +670,7 @@ async fn handle_request(
async fn parse_package_and_drive(
path: &str,
vfs_path: &PathBuf,
) -> Result<(PackageId, String, String), VfsError> {
) -> Result<(PackageId, String, PathBuf), VfsError> {
let joined_path = join_paths_safely(&vfs_path, path);
// sanitize path..
@ -674,7 +690,10 @@ async fn parse_package_and_drive(
.display()
.to_string();
#[cfg(unix)]
let mut parts: Vec<&str> = path.split('/').collect();
#[cfg(target_os = "windows")]
let mut parts: Vec<&str> = path.split('\\').collect();
if parts[0].is_empty() {
parts.remove(0);
@ -697,11 +716,51 @@ async fn parse_package_and_drive(
};
let drive = parts[1].to_string();
let remaining_path = parts[2..].join("/");
let mut remaining_path = PathBuf::new();
for part in &parts[2..] {
remaining_path = remaining_path.join(part);
}
Ok((package_id, drive, remaining_path))
}
/// Map an internal (logical) VFS path onto a Windows-safe on-disk path.
///
/// Root / `.` / `..` markers are dropped, a drive prefix (e.g. `C:`) restarts
/// the accumulated path verbatim, and every normal component has `:` rewritten
/// to `_` (internal package ids contain `:`, which Windows forbids in file
/// names).
#[cfg(target_os = "windows")]
fn internal_path_to_external(internal: &Path) -> PathBuf {
    internal
        .components()
        .fold(PathBuf::new(), |acc, comp| match comp {
            // a prefix component (drive letter / UNC) replaces anything built so far
            Component::Prefix(_) => AsRef::<Path>::as_ref(&comp).to_path_buf(),
            // `:` is illegal inside Windows file names; mirror it as `_`
            Component::Normal(part) => acc.join(part.to_string_lossy().replace(':', "_")),
            // these carry no on-disk name of their own
            Component::RootDir | Component::CurDir | Component::ParentDir => acc,
        })
}
/// Replace the leading components of `to_replace_path` with `base_path`,
/// normalizing every `\` separator to `/` in the returned string.
///
/// The first `N` components of `to_replace_path` (where `N` is the number of
/// components in `base_path`) are discarded; the remainder is appended to the
/// slash-normalized base.
#[cfg(target_os = "windows")]
fn replace_path_prefix(base_path: &Path, to_replace_path: &Path) -> String {
    let base = base_path.display().to_string();
    // only the component COUNT of the base matters; no need to collect a Vec
    let num_base_parts = base.split('\\').count();
    let tail = to_replace_path.display().to_string();
    // `replace` already yields a fresh String — the original's extra
    // `.to_string()` clone was a redundant allocation
    let mut new_path = base.replace('\\', "/");
    for part in tail.split('\\').skip(num_base_parts) {
        new_path.push('/');
        new_path.push_str(part);
    }
    new_path
}
async fn check_caps(
our_node: &str,
source: &Address,
@ -777,8 +836,7 @@ async fn check_caps(
}
VfsAction::CopyFile { new_path } | VfsAction::Rename { new_path } => {
// these have 2 paths to validate
let (new_package_id, new_drive, _rest) =
parse_package_and_drive(new_path, &vfs_path).await?;
let (new_package_id, new_drive, _rest) = parse_package_and_drive(new_path, &vfs_path)?;
let new_drive = format!("/{new_package_id}/{new_drive}");
// if both new and old path are within the package_id path, ok
@ -1002,11 +1060,13 @@ fn normalize_path(path: &Path) -> PathBuf {
ret
}
fn join_paths_safely(base: &PathBuf, extension: &str) -> PathBuf {
let extension_str = Path::new(extension)
fn join_paths_safely<P: AsRef<Path>>(base: &PathBuf, extension: P) -> PathBuf {
let extension_str = extension
.as_ref()
.to_str()
.unwrap_or("")
.trim_start_matches('/');
.trim_start_matches('/')
.trim_start_matches('\\');
let extension_path = Path::new(extension_str);
base.join(extension_path)

View File

@ -11,8 +11,10 @@ license = "Apache-2.0"
[lib]
[build-dependencies]
kit = { git = "https://github.com/kinode-dao/kit", tag = "v0.7.6" }
tokio = "1.28"
anyhow = "1.0.71"
reqwest = "0.12.4"
sha2 = "0.10.8"
tokio = { version = "1.28", features = ["rt-multi-thread"] }
[dependencies]
alloy = { git = "https://github.com/kinode-dao/alloy.git", rev = "e672f3e", features = [

View File

@ -1,9 +1,58 @@
use std::fs;
use std::path::Path;
use sha2::{Digest, Sha256};
const KIT_CACHE: &str = "/tmp/kinode-kit-cache";
const KINODE_WIT_0_7_0_URL: &str =
"https://raw.githubusercontent.com/kinode-dao/kinode-wit/aa2c8b11c9171b949d1991c32f58591c0e881f85/kinode.wit";
const KINODE_WIT_0_8_0_URL: &str =
"https://raw.githubusercontent.com/kinode-dao/kinode-wit/v0.8/kinode.wit";
/// copied from `kit`
///
/// Fetch `url` into `path`, memoizing the download under `KIT_CACHE` keyed by
/// the hex sha256 of the URL. If `path` already holds identical bytes the
/// write is skipped; an existing directory at `path` is removed first.
async fn download_file(url: &str, path: &Path) -> anyhow::Result<()> {
    fs::create_dir_all(KIT_CACHE)?;

    // cache key: hex-encoded sha256 of the URL itself
    let digest = Sha256::digest(url.as_bytes());
    let cache_entry = Path::new(KIT_CACHE).join(format!("{digest:x}"));

    let content = if cache_entry.exists() {
        fs::read(&cache_entry)?
    } else {
        let response = reqwest::get(url).await?;
        // Check if response status is 200 (OK)
        if response.status() != reqwest::StatusCode::OK {
            return Err(anyhow::anyhow!(
                "Failed to download file: HTTP Status {}",
                response.status()
            ));
        }
        let bytes = response.bytes().await?.to_vec();
        fs::write(&cache_entry, &bytes)?;
        bytes
    };

    if path.is_dir() {
        // a directory in the way is always replaced
        fs::remove_dir_all(path)?;
    } else if path.exists() && fs::read(path)? == content {
        // target already up to date — nothing to write
        return Ok(());
    }

    let parent = path
        .parent()
        .ok_or_else(|| anyhow::anyhow!("path doesn't have parent"))?;
    fs::create_dir_all(parent)?;
    fs::write(path, &content)?;
    Ok(())
}
fn main() {
if std::env::var("SKIP_BUILD_SCRIPT").is_ok() {
println!("Skipping build script");
@ -16,7 +65,7 @@ fn main() {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
kit::build::download_file(KINODE_WIT_0_7_0_URL, &wit_file)
download_file(KINODE_WIT_0_7_0_URL, &wit_file)
.await
.expect("Failed to download WIT 0.7");
});
@ -25,7 +74,7 @@ fn main() {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
kit::build::download_file(KINODE_WIT_0_8_0_URL, &wit_file)
download_file(KINODE_WIT_0_8_0_URL, &wit_file)
.await
.expect("Failed to download WIT 0.8");
})