Mirror of https://github.com/uqbar-dao/nectar.git, synced 2024-11-22 11:22:59 +03:00
Commit 427abcca4a

Cargo.lock (generated): 89 changed lines
@@ -1402,6 +1402,20 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "build_packages"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "clap",
+ "fs-err",
+ "kit",
+ "serde_json",
+ "tokio",
+ "walkdir",
+ "zip 0.6.6",
+]
+
 [[package]]
 name = "bumpalo"
 version = "3.16.0"
@@ -2628,18 +2642,6 @@ version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
 
-[[package]]
-name = "filetime"
-version = "0.2.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550"
-dependencies = [
- "cfg-if",
- "libc",
- "libredox",
- "windows-sys 0.59.0",
-]
-
 [[package]]
 name = "fixed-hash"
 version = "0.8.0"
@@ -3639,7 +3641,7 @@ dependencies = [
 
 [[package]]
 name = "kinode"
-version = "0.9.6"
+version = "0.9.7"
 dependencies = [
  "aes-gcm",
  "alloy 0.2.1",
@@ -3654,7 +3656,6 @@ dependencies = [
  "clap",
  "crossterm",
  "dashmap 5.5.3",
- "flate2",
  "futures",
  "generic-array",
  "hex",
@@ -3662,7 +3663,6 @@ dependencies = [
  "http 1.1.0",
  "indexmap",
  "jwt",
- "kit",
  "lazy_static",
  "lib",
  "libc",
@@ -3670,6 +3670,7 @@ dependencies = [
  "open",
  "public-ip",
  "rand 0.8.5",
+ "regex",
  "reqwest",
  "ring",
  "rmp-serde",
@@ -3683,25 +3684,22 @@ dependencies = [
  "snow",
  "socket2 0.5.7",
  "static_dir",
- "tar",
 "thiserror",
 "tokio",
 "tokio-tungstenite 0.21.0",
 "unicode-segmentation",
 "unicode-width",
 "url",
- "walkdir",
 "warp",
 "wasi-common",
 "wasmtime",
 "wasmtime-wasi",
- "zip 0.6.6",
 "zip 1.1.4",
 ]
 
 [[package]]
 name = "kinode_lib"
-version = "0.9.6"
+version = "0.9.7"
 dependencies = [
  "lib",
 ]
@@ -3753,8 +3751,8 @@ dependencies = [
 
 [[package]]
 name = "kit"
-version = "0.7.6"
-source = "git+https://github.com/kinode-dao/kit?tag=v0.7.6#da6676d79dfdaf47c00ce1d24874fd52de44f717"
+version = "0.7.7"
+source = "git+https://github.com/kinode-dao/kit?rev=9c94b4b#9c94b4bd3f2a9dc2eabb2da9bc2ef5e6eb07af9d"
 dependencies = [
  "alloy 0.1.4",
  "alloy-sol-macro",
@@ -3824,16 +3822,18 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
 
 [[package]]
 name = "lib"
-version = "0.9.6"
+version = "0.9.7"
 dependencies = [
  "alloy 0.2.1",
+ "anyhow",
+ "kit",
 "lazy_static",
 "rand 0.8.5",
 "reqwest",
 "ring",
 "rusqlite",
 "serde",
 "serde_json",
 "sha2",
 "thiserror",
 "tokio",
 "wasmtime",
@@ -3883,7 +3883,6 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
 dependencies = [
  "bitflags 2.6.0",
  "libc",
- "redox_syscall",
 ]
 
 [[package]]
@@ -4745,7 +4744,7 @@ dependencies = [
  "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_xorshift 0.3.0",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
 "rusty-fork",
 "tempfile",
 "unarray",
@@ -5041,14 +5040,14 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.10.6"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.7",
- "regex-syntax 0.8.4",
+ "regex-automata 0.4.8",
+ "regex-syntax 0.8.5",
 ]
 
 [[package]]
@@ -5062,13 +5061,13 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.7"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
 ]
 
 [[package]]
@@ -5079,9 +5078,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "reqwest"
@@ -5941,17 +5940,6 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
-[[package]]
-name = "tar"
-version = "0.4.41"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909"
-dependencies = [
- "filetime",
- "libc",
- "xattr",
-]
-
 [[package]]
 name = "target-lexicon"
 version = "0.12.16"
@@ -7715,17 +7703,6 @@ dependencies = [
  "tap",
 ]
 
-[[package]]
-name = "xattr"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
-dependencies = [
- "libc",
- "linux-raw-sys",
- "rustix",
-]
-
 [[package]]
 name = "zerocopy"
 version = "0.7.35"
@@ -1,7 +1,7 @@
 [package]
 name = "kinode_lib"
 authors = ["KinodeDAO"]
-version = "0.9.6"
+version = "0.9.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -27,6 +27,7 @@ members = [
     "kinode/packages/terminal/kill", "kinode/packages/terminal/m", "kinode/packages/terminal/top",
     "kinode/packages/terminal/net_diagnostics", "kinode/packages/terminal/peer", "kinode/packages/terminal/peers",
     "kinode/packages/tester/tester",
+    "scripts/build_packages",
 ]
 default-members = ["lib"]
 resolver = "2"

@@ -10,5 +10,4 @@ ENV NVM_DIR=/root/.nvm \
 WORKDIR /input
 
 # Set the default command to run the build script
-# TODO: once build is idempotent, remove the `rm -rf` line
-CMD ["/bin/bash", "-c", ". ~/.bashrc && . ~/.cargo/env && . $NVM_DIR/nvm.sh && rm -rf target/ kinode/packages/*/pkg/*wasm kinode/packages/*/*/target/ kinode/packages/*/pkg/api.zip kinode/packages/*/*/wit kinode/packages/app_store/pkg/ui kinode/packages/homepage/pkg/ui kinode/src/register-ui/build && ./scripts/build-release.py && cp -r /tmp/kinode-release/* /output && chmod 664 /output/* && rm -rf target/ kinode/packages/*/pkg/*wasm kinode/packages/*/*/target/ kinode/packages/*/pkg/api.zip kinode/packages/*/*/wit kinode/packages/app_store/pkg/ui kinode/packages/homepage/pkg/ui kinode/src/register-ui/build"]
+CMD ["/bin/bash", "-c", ". ~/.bashrc && . ~/.cargo/env && . $NVM_DIR/nvm.sh && ./scripts/build-release.py && cp -r /tmp/kinode-release/* /output && chmod 664 /output/* && find . -user root -print0 2>/dev/null | xargs -0 rm -rf"]
README.md: 24 changed lines
@@ -1,7 +1,7 @@
 <p align="center">
     <img width="551" alt="Screenshot 2024-05-08 at 2 38 11 PM" src="https://github.com/kinode-dao/kinode/assets/93405247/24c7982b-9d76-419a-96dc-ec4a25dda562">
     <br />
-    <img src="https://img.shields.io/twitter/follow/kinodeOS">
+    <img src="https://img.shields.io/twitter/follow/Kinode">
 
 </p>
 
@@ -28,28 +28,30 @@ On certain operating systems, you may need to install these dependencies if they
 
 git clone git@github.com:kinode-dao/kinode.git
 
-# Get some stuff so we can build Wasm.
+# Install Rust and some `cargo` tools so we can build the runtime and Wasm.
 
-cd kinode
 curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
 cargo install wasm-tools
 rustup install nightly
-rustup target add wasm32-wasi
-rustup target add wasm32-wasi --toolchain nightly
+rustup target add wasm32-wasip1
+rustup target add wasm32-wasip1 --toolchain nightly
 cargo install cargo-wasi
 
 # Install NPM so we can build frontends for "distro" packages.
 # https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
-# If you want to skip this step, run cargo build with the environment variable SKIP_BUILD_FRONTEND=true
+# If you want to skip this step, build the packages with `cargo run -p build_packages -- --skip-build-frontend` to neglect building the frontends
 
-# Build the runtime, along with a number of "distro" Wasm modules.
-# The compiled binary will be at `kinode/target/debug/kinode`
-# OPTIONAL: --release flag (slower build; faster runtime; binary at `kinode/target/release/kinode`)
+# Build the "distro" Wasm modules, then, build the runtime.
+# The compiled packages will be at `kinode/target/packages.zip`.
+# The compiled binary will be at `kinode/target/debug/kinode`.
+# OPTIONAL: --release flag (slower build; faster runtime; binary at `kinode/target/release/kinode`).
 
-cargo +nightly build -p kinode
+cd kinode
+cargo run -p build_packages
+cargo build -p kinode
 ```
 
 [To build on Windows](https://gist.github.com/nick1udwig/f2d39a3fc6ccc7f7ad2912e8d3aeaae0)
 
 ## Security Status
 
 No security audits of this crate have ever been performed. This software is under active development and should be **used at your own risk**.
@@ -1,7 +1,7 @@
 [package]
 name = "kinode"
 authors = ["KinodeDAO"]
-version = "0.9.6"
+version = "0.9.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -14,12 +14,7 @@ path = "src/main.rs"
 
 [build-dependencies]
 anyhow = "1.0.71"
-flate2 = "1.0"
-kit = { git = "https://github.com/kinode-dao/kit", tag = "v0.7.6" }
-tar = "0.4"
 tokio = "1.28"
-walkdir = "2.4"
-zip = "0.6"
 sha2 = "0.10.8"
 
 [features]
 simulation-mode = []
@@ -67,6 +62,7 @@ nohash-hasher = "0.2.0"
 open = "5.1.4"
 public-ip = "0.2.2"
 rand = "0.8.4"
+regex = "1.11.0"
 reqwest = "0.12.4"
 ring = "0.17.8"
 rmp-serde = "1.1.2"
kinode/build.rs: 238 changed lines
@@ -1,12 +1,9 @@
-use std::{
-    fs::{self, File},
-    io::{BufReader, Cursor, Read, Write},
-    path::{Path, PathBuf},
-};
+use std::io::Read;
+use std::path::{Path, PathBuf};
 
-use flate2::read::GzDecoder;
-use tar::Archive;
-use zip::write::FileOptions;
+use sha2::Digest;
+
+const CANONICAL_PACKAGES_ZIP_PATH: &str = "../target/packages.zip";
 
 macro_rules! p {
     ($($tokens: tt)*) => {
@@ -14,144 +11,53 @@ macro_rules! p {
     }
 }
 
-/// get cargo features to compile packages with
-fn get_features() -> String {
-    let mut features = "".to_string();
-    for (key, _) in std::env::vars() {
-        if key.starts_with("CARGO_FEATURE_") {
-            let feature = key
-                .trim_start_matches("CARGO_FEATURE_")
-                .to_lowercase()
-                .replace("_", "-");
-            features.push_str(&feature);
-        }
-    }
-    features
-}
-
-/// print `cargo:rerun-if-changed=PATH` for each path of interest
-fn output_reruns(dir: &Path) {
-    // Check files individually
-    if let Ok(entries) = fs::read_dir(dir) {
-        for entry in entries.filter_map(|e| e.ok()) {
-            let path = entry.path();
-            if path.is_dir() {
-                if let Some(dirname) = path.file_name().and_then(|n| n.to_str()) {
-                    if dirname == "ui" || dirname == "target" {
-                        // do not prompt a rerun if only UI/build files have changed
-                        continue;
-                    }
-                    // If the entry is a directory not in rerun_files, recursively walk it
-                    output_reruns(&path);
-                }
-            } else {
-                if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                    if filename.ends_with(".zip") || filename.ends_with(".wasm") {
-                        // do not prompt a rerun for compiled outputs
-                        continue;
-                    }
-                    // any other changed file within a package subdir prompts a rerun
-                    println!("cargo::rerun-if-changed={}", path.display());
-                }
-            }
-        }
-    }
-}
-
-fn _untar_gz_file(path: &Path, dest: &Path) -> std::io::Result<()> {
-    // Open the .tar.gz file
-    let tar_gz = File::open(path)?;
-    let tar_gz_reader = BufReader::new(tar_gz);
-
-    // Decode the gzip layer
-    let tar = GzDecoder::new(tar_gz_reader);
-
-    // Create a new archive from the tar file
-    let mut archive = Archive::new(tar);
-
-    // Unpack the archive into the specified destination directory
-    archive.unpack(dest)?;
-
-    Ok(())
-}
-
-fn build_and_zip_package(
-    entry_path: PathBuf,
-    parent_pkg_path: &str,
-    features: &str,
-) -> anyhow::Result<(String, String, Vec<u8>)> {
-    let rt = tokio::runtime::Runtime::new().unwrap();
-    rt.block_on(async {
-        kit::build::execute(
-            &entry_path,
-            true,
-            false,
-            true,
-            features,
-            None,
-            None,
-            None,
-            vec![],
-            vec![],
-            false,
-            false,
-            false,
-        )
-        .await
-        .map_err(|e| anyhow::anyhow!("{:?}", e))?;
-
-        let mut writer = Cursor::new(Vec::new());
-        let options = FileOptions::default()
-            .compression_method(zip::CompressionMethod::Deflated)
-            .unix_permissions(0o755)
-            .last_modified_time(zip::DateTime::from_date_and_time(2023, 6, 19, 0, 0, 0).unwrap());
-        {
-            let mut zip = zip::ZipWriter::new(&mut writer);
-
-            for sub_entry in walkdir::WalkDir::new(parent_pkg_path) {
-                let sub_entry = sub_entry?;
-                let path = sub_entry.path();
-                let name = path.strip_prefix(Path::new(parent_pkg_path))?;
-
-                if path.is_file() {
-                    zip.start_file(name.to_string_lossy(), options)?;
-                    let mut file = File::open(path)?;
-                    let mut buffer = Vec::new();
-                    file.read_to_end(&mut buffer)?;
-                    zip.write_all(&buffer)?;
-                } else if !name.as_os_str().is_empty() {
-                    zip.add_directory(name.to_string_lossy(), options)?;
-                }
-            }
-            zip.finish()?;
-        }
-
-        let zip_contents = writer.into_inner();
-        let zip_filename = format!("{}.zip", entry_path.file_name().unwrap().to_str().unwrap());
-        Ok((entry_path.display().to_string(), zip_filename, zip_contents))
-    })
-}
+fn compute_hash(file_path: &Path) -> anyhow::Result<String> {
+    let input_file = std::fs::File::open(file_path)?;
+    let mut reader = std::io::BufReader::new(input_file);
+    let mut hasher = sha2::Sha256::new();
+    let mut buffer = [0; 1024]; // buffer for chunks of the file
+
+    loop {
+        let count = reader.read(&mut buffer)?;
+        if count == 0 {
+            break;
+        }
+        hasher.update(&buffer[..count]);
+    }
+
+    Ok(format!("{:x}", hasher.finalize()))
+}
 
 fn main() -> anyhow::Result<()> {
     if std::env::var("SKIP_BUILD_SCRIPT").is_ok() {
         p!("skipping build script");
         return Ok(());
     }
+
+    let path_to_packages_zip = match std::env::var("PATH_TO_PACKAGES_ZIP") {
+        Ok(env_var) => env_var,
+        Err(_) => {
+            let canonical_path = PathBuf::from(CANONICAL_PACKAGES_ZIP_PATH);
+            if canonical_path.exists() {
+                p!("No path given via PATH_TO_PACKAGES_ZIP envvar. Defaulting to path of `kinode/target/packages.zip`.");
+                CANONICAL_PACKAGES_ZIP_PATH.to_string()
+            } else {
+                return Err(anyhow::anyhow!("You must build packages.zip with scripts/build_packages or set PATH_TO_PACKAGES_ZIP to point to your desired pacakges.zip (default path at kinode/target/packages.zip was not populated)."));
+            }
+        }
+    };
+    let path = PathBuf::from(&path_to_packages_zip);
+    if !path.exists() {
+        return Err(anyhow::anyhow!(
+            "Path to packages {path_to_packages_zip} does not exist."
+        ));
+    }
 
-    let pwd = std::env::current_dir()?;
-    let parent_dir = pwd.parent().unwrap();
-    let packages_dir = pwd.join("packages");
+    let path_to_packages_zip_path = PathBuf::from(&path_to_packages_zip).canonicalize()?;
+    let canonical_packages_zip_path = PathBuf::from(CANONICAL_PACKAGES_ZIP_PATH).canonicalize()?;
+    if path_to_packages_zip_path != canonical_packages_zip_path {
+        std::fs::copy(&path_to_packages_zip_path, &canonical_packages_zip_path)?;
+    }
 
-    if std::env::var("SKIP_BUILD_FRONTEND").is_ok() {
-        p!("skipping frontend builds");
-    } else {
+    if !std::env::var("SKIP_BUILD_FRONTEND").is_ok() {
         // build core frontends
-        let core_frontends = vec![
-            "src/register-ui",
-            "packages/app_store/ui",
-            "packages/homepage/ui",
-            // chess when brought in
-        ];
+        let pwd = std::env::current_dir()?;
+        let core_frontends = vec!["src/register-ui"];
 
         // for each frontend, execute build.sh
         for frontend in core_frontends {
@@ -165,63 +71,6 @@ fn main() -> anyhow::Result<()> {
         }
     }
 
-    output_reruns(&packages_dir);
-
-    let features = get_features();
-
-    let results: Vec<anyhow::Result<(String, String, Vec<u8>)>> = fs::read_dir(&packages_dir)?
-        .filter_map(|entry| {
-            let entry_path = match entry {
-                Ok(e) => e.path(),
-                Err(_) => return None,
-            };
-            let child_pkg_path = entry_path.join("pkg");
-            if !child_pkg_path.exists() {
-                // don't run on, e.g., `.DS_Store`
-                return None;
-            }
-            Some(build_and_zip_package(
-                entry_path.clone(),
-                child_pkg_path.to_str().unwrap(),
-                &features,
-            ))
-        })
-        .collect();
-
-    // Process results, e.g., write to `bootstrapped_processes.rs`
-    // This part remains sequential
-    let mut bootstrapped_processes = vec![];
-    writeln!(
-        bootstrapped_processes,
-        "pub static BOOTSTRAPPED_PROCESSES: &[(&str, &[u8], &[u8])] = &["
-    )?;
-
-    for result in results {
-        match result {
-            Ok((entry_path, zip_filename, zip_contents)) => {
-                // Further processing, like saving ZIP files and updating bootstrapped_processes
-                let metadata_path = format!("{}/metadata.json", entry_path);
-                let zip_path = format!("{}/target/{}", parent_dir.display(), zip_filename);
-                fs::write(&zip_path, &zip_contents)?;
-
-                writeln!(
-                    bootstrapped_processes,
-                    "    (\"{}\", include_bytes!(\"{}\"), include_bytes!(\"{}\")),",
-                    zip_filename, metadata_path, zip_path,
-                )?;
-            }
-            Err(e) => return Err(e),
-        }
-    }
-
-    writeln!(bootstrapped_processes, "];")?;
-    let target_dir = pwd.join("../target");
-    if !target_dir.exists() {
-        fs::create_dir_all(&target_dir)?;
-    }
-    let bootstrapped_processes_path = target_dir.join("bootstrapped_processes.rs");
-    fs::write(&bootstrapped_processes_path, bootstrapped_processes)?;
-
     let version = if let Ok(version) = std::env::var("DOCKER_BUILD_IMAGE_VERSION") {
         // embed the DOCKER_BUILD_IMAGE_VERSION
         version
@@ -230,5 +79,8 @@ fn main() -> anyhow::Result<()> {
     };
     println!("cargo:rustc-env=DOCKER_BUILD_IMAGE_VERSION={version}");
 
+    let packages_zip_hash = compute_hash(&canonical_packages_zip_path)?;
+    println!("cargo:rustc-env=PACKAGES_ZIP_HASH={packages_zip_hash}");
+
     Ok(())
 }
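For orientation (an editor's sketch, not part of the commit): the rewritten build script reduces to two jobs, locating a prebuilt `packages.zip` and embedding its SHA-256 into the binary. A condensed, self-contained version of that streaming-hash-plus-`rustc-env` pattern, assuming the `sha2` crate as above:

```rust
// Sketch only: the same streaming SHA-256 + `cargo:rustc-env` pattern as the
// new build.rs, condensed. Assumes the `sha2` crate; paths illustrative.
use sha2::{Digest, Sha256};
use std::io::Read;
use std::path::Path;

fn hash_file(path: &Path) -> std::io::Result<String> {
    let mut reader = std::io::BufReader::new(std::fs::File::open(path)?);
    let mut hasher = Sha256::new();
    let mut buf = [0u8; 1024];
    loop {
        // read in fixed-size chunks so arbitrarily large zips use bounded memory
        let n = reader.read(&mut buf)?;
        if n == 0 {
            break;
        }
        hasher.update(&buf[..n]);
    }
    Ok(format!("{:x}", hasher.finalize()))
}

fn main() -> std::io::Result<()> {
    let hash = hash_file(Path::new("../target/packages.zip"))?;
    // consumed later in the runtime via env!("PACKAGES_ZIP_HASH")
    println!("cargo:rustc-env=PACKAGES_ZIP_HASH={hash}");
    Ok(())
}
```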
@@ -9,6 +9,7 @@ use lib::types::core::*;
 use lib::types::eth::*;
 use serde::{Deserialize, Serialize};
 use std::collections::{HashMap, HashSet};
+use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use tokio::sync::Mutex;
@@ -148,7 +149,7 @@ struct ModuleState {
     /// the name of this node
     our: Arc<String>,
     /// the home directory path
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     /// the access settings for this provider
     access_settings: AccessSettings,
     /// the set of providers we have available for all chains
@@ -207,7 +208,7 @@ fn valid_method(method: &str) -> Option<&'static str> {
 /// for the entire module.
 pub async fn provider(
     our: String,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     configs: SavedConfigs,
     send_to_loop: MessageSender,
     mut recv_in_client: MessageReceiver,
@@ -219,9 +220,7 @@ pub async fn provider(
     // this merely describes whether our provider is available to other nodes
     // and if so, which nodes are allowed to access it (public/whitelist/blacklist)
     let access_settings: AccessSettings =
-        match tokio::fs::read_to_string(format!("{}/.eth_access_settings", home_directory_path))
-            .await
-        {
+        match tokio::fs::read_to_string(home_directory_path.join(".eth_access_settings")).await {
             Ok(contents) => serde_json::from_str(&contents).unwrap(),
             Err(_) => {
                 let access_settings = AccessSettings {
@@ -1052,7 +1051,7 @@ async fn handle_eth_config_action(
     // save providers and/or access settings, depending on necessity, to disk
     if save_settings {
         if let Ok(()) = tokio::fs::write(
-            format!("{}/.eth_access_settings", state.home_directory_path),
+            state.home_directory_path.join(".eth_access_settings"),
             serde_json::to_string(&state.access_settings).unwrap(),
         )
         .await
@@ -1062,7 +1061,7 @@ async fn handle_eth_config_action(
     }
     if save_providers {
         if let Ok(()) = tokio::fs::write(
-            format!("{}/.eth_providers", state.home_directory_path),
+            state.home_directory_path.join(".eth_access_settings"),
             serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
         )
         .await
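The change above is the first instance of a refactor that recurs through the rest of the commit: hand-built `format!("{dir}/file")` path strings become `PathBuf::join` calls, which handle platform separators correctly. A minimal before/after sketch (values illustrative, not from the commit):

```rust
use std::path::PathBuf;

fn main() {
    let home = PathBuf::from("/home/node");
    // before: format!("{}/.eth_providers", home_directory_path)
    // after: a separator-aware join that also composes correctly on Windows
    let providers = home.join(".eth_providers");
    assert_eq!(providers, PathBuf::from("/home/node/.eth_providers"));
}
```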
@@ -6,9 +6,15 @@ use lib::types::core::{
 use serde::{Deserialize, Serialize};
 use std::{collections::HashMap, sync::Arc};
 
+#[cfg(unix)]
 const DEFAULT_MAX_OPEN_FDS: u64 = 180;
-const DEFAULT_FDS_AS_FRACTION_OF_ULIMIT_PERCENTAGE: u64 = 90;
+#[cfg(target_os = "windows")]
+const DEFAULT_MAX_OPEN_FDS: u64 = 7_000;
 
+#[cfg(unix)]
 const SYS_RESERVED_FDS: u64 = 30;
 
+const DEFAULT_FDS_AS_FRACTION_OF_ULIMIT_PERCENTAGE: u64 = 90;
 const DEFAULT_UPDATE_ULIMIT_SECS: u64 = 3600;
 const _DEFAULT_CULL_FRACTION_DENOMINATOR: u64 = 2;
 
@@ -46,6 +52,7 @@ impl State {
         }
     }
 
+    #[cfg(unix)]
     fn update_max_fds_from_ulimit(&mut self, ulimit_max_fds: u64) {
         let Mode::DynamicMax {
             ref max_fds_as_fraction_of_ulimit_percentage,
@@ -104,7 +111,18 @@ pub async fn fd_manager(
     mut recv_from_loop: MessageReceiver,
     static_max_fds: Option<u64>,
 ) -> anyhow::Result<()> {
+    // Windows does not allow querying of max fds allowed.
+    // However, it allows some 16m, will expectation of actual
+    // max number open nearer to 10k; set to 7k which should be plenty.
+    // https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-handles/ba-p/723848
+    #[cfg(target_os = "windows")]
+    let static_max_fds = match static_max_fds {
+        Some(smf) => Some(smf),
+        None => Some(DEFAULT_MAX_OPEN_FDS),
+    };
+
     let mut state = State::new(static_max_fds);
+    #[cfg(unix)]
     let mut interval = {
         // in code block to release the reference into state
         let Mode::DynamicMax {
@@ -117,12 +135,12 @@ pub async fn fd_manager(
         tokio::time::interval(tokio::time::Duration::from_secs(*update_ulimit_secs))
     };
     loop {
+        #[cfg(unix)]
         tokio::select! {
             Some(message) = recv_from_loop.recv() => {
                 match handle_message(
                     &our_node,
                     message,
                     &mut interval,
                     &mut state,
                     &send_to_loop,
                 ).await {
@@ -151,13 +169,26 @@ pub async fn fd_manager(
             }
         }
+
+        #[cfg(target_os = "windows")]
+        if let Some(message) = recv_from_loop.recv().await {
+            match handle_message(&our_node, message, &mut state, &send_to_loop).await {
+                Ok(Some(to_print)) => {
+                    Printout::new(2, to_print).send(&send_to_terminal).await;
+                }
+                Err(e) => {
+                    Printout::new(1, &format!("handle_message error: {e:?}"))
+                        .send(&send_to_terminal)
+                        .await;
+                }
+                _ => {}
+            }
+        }
     }
 }
 
 async fn handle_message(
     our_node: &str,
     km: KernelMessage,
     _interval: &mut tokio::time::Interval,
     state: &mut State,
     send_to_loop: &MessageSender,
 ) -> anyhow::Result<Option<String>> {
@@ -282,6 +313,7 @@ async fn handle_message(
     Ok(return_value)
 }
 
+#[cfg(unix)]
 async fn update_max_fds(state: &mut State) -> anyhow::Result<u64> {
     let ulimit_max_fds = get_max_fd_limit()
         .map_err(|_| anyhow::anyhow!("Couldn't update max fd limit: ulimit failed"))?;
@@ -289,6 +321,21 @@ async fn update_max_fds(state: &mut State) -> anyhow::Result<u64> {
     Ok(ulimit_max_fds)
 }
 
+#[cfg(unix)]
+fn get_max_fd_limit() -> anyhow::Result<u64> {
+    let mut rlim = libc::rlimit {
+        rlim_cur: 0, // Current limit
+        rlim_max: 0, // Maximum limit value
+    };
+
+    // RLIMIT_NOFILE is the resource indicating the maximum file descriptor number.
+    if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) } == 0 {
+        Ok(rlim.rlim_cur as u64)
+    } else {
+        Err(anyhow::anyhow!("Failed to get the resource limit."))
+    }
+}
+
 async fn send_all_fds_limits(our_node: &str, send_to_loop: &MessageSender, state: &State) {
     for (process_id, limit) in &state.fds_limits {
         KernelMessage::builder()
@@ -309,20 +356,6 @@ async fn send_all_fds_limits(our_node: &str, send_to_loop: &MessageSender, state
     }
 }
 
-fn get_max_fd_limit() -> anyhow::Result<u64> {
-    let mut rlim = libc::rlimit {
-        rlim_cur: 0, // Current limit
-        rlim_max: 0, // Maximum limit value
-    };
-
-    // RLIMIT_NOFILE is the resource indicating the maximum file descriptor number.
-    if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) } == 0 {
-        Ok(rlim.rlim_cur as u64)
-    } else {
-        Err(anyhow::anyhow!("Failed to get the resource limit."))
-    }
-}
-
 pub async fn send_fd_manager_request_fds_limit(our: &Address, send_to_loop: &MessageSender) {
     let message = Message::Request(Request {
         inherit: false,
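In short, the gating above gives each platform its own fd budget: Unix derives it at runtime from `getrlimit(RLIMIT_NOFILE)`, while Windows, which exposes no comparable query, starts from a fixed default. A minimal sketch of that split, mirroring the diff rather than quoting the module's exact API:

```rust
#[cfg(unix)]
fn max_open_fds() -> anyhow::Result<u64> {
    let mut rlim = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    // SAFETY: `rlim` is a valid, writable rlimit struct for getrlimit to fill.
    if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) } == 0 {
        Ok(rlim.rlim_cur as u64)
    } else {
        Err(anyhow::anyhow!("getrlimit failed"))
    }
}

#[cfg(target_os = "windows")]
fn max_open_fds() -> anyhow::Result<u64> {
    // No ulimit equivalent; the commit settles on a generous static default.
    Ok(7_000)
}
```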
@@ -2,6 +2,7 @@ use lib::types::core::{self as t, KERNEL_PROCESS_ID, STATE_PROCESS_ID, VFS_PROCE
 use serde::{Deserialize, Serialize};
 use std::{
     collections::{HashMap, HashSet},
+    path::PathBuf,
     sync::Arc,
 };
 use tokio::{sync::mpsc, task::JoinHandle};
@@ -76,7 +77,7 @@ async fn handle_kernel_request(
     process_map: &mut t::ProcessMap,
     caps_oracle: &t::CapMessageSender,
     engine: &Engine,
-    home_directory_path: &str,
+    home_directory_path: &PathBuf,
 ) -> Option<()> {
     let t::Message::Request(request) = km.message else {
         return None;
@@ -467,7 +468,7 @@ async fn start_process(
     engine: &Engine,
     caps_oracle: &t::CapMessageSender,
     process_metadata: &StartProcessMetadata,
-    home_directory_path: &str,
+    home_directory_path: &PathBuf,
 ) -> anyhow::Result<()> {
     let (send_to_process, recv_in_process) =
         mpsc::channel::<Result<t::KernelMessage, t::WrappedSendError>>(PROCESS_CHANNEL_CAPACITY);
@@ -501,7 +502,7 @@ async fn start_process(
             km_blob_bytes,
             caps_oracle.clone(),
             engine.clone(),
-            home_directory_path.to_string(),
+            home_directory_path.clone(),
         )),
     );
     Ok(())
@@ -522,7 +523,7 @@ pub async fn kernel(
     mut network_error_recv: t::NetworkErrorReceiver,
     mut recv_debug_in_loop: t::DebugReceiver,
     send_to_net: t::MessageSender,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     runtime_extensions: Vec<(
         t::ProcessId,
         t::MessageSender,
@@ -538,7 +539,7 @@ pub async fn kernel(
     config.async_support(true);
     let engine = Engine::new(&config).unwrap();
 
-    let vfs_path = format!("{home_directory_path}/vfs");
+    let vfs_path = home_directory_path.join("vfs");
     tokio::fs::create_dir_all(&vfs_path)
         .await
         .expect("kernel startup fatal: couldn't create vfs dir");
@@ -579,21 +580,29 @@ pub async fn kernel(
         if persisted.wasm_bytes_handle.is_empty() {
             continue;
         }
+        let wasm_bytes_handle = persisted
+            .wasm_bytes_handle
+            .strip_prefix("/")
+            .unwrap_or_else(|| &persisted.wasm_bytes_handle);
+        #[cfg(unix)]
+        let path = vfs_path.join(wasm_bytes_handle);
+        #[cfg(target_os = "windows")]
+        let path = vfs_path.join(wasm_bytes_handle.replace(":", "_"));
+
         // read wasm bytes directly from vfs
-        let wasm_bytes =
-            match tokio::fs::read(format!("{vfs_path}/{}", persisted.wasm_bytes_handle)).await {
-                Ok(bytes) => bytes,
-                Err(e) => {
-                    t::Printout::new(
-                        0,
-                        format!("kernel: couldn't read wasm bytes for process: {process_id}: {e}"),
-                    )
-                    .send(&send_to_terminal)
-                    .await;
-                    non_rebooted_processes.insert(process_id.clone());
-                    continue;
-                }
-            };
+        let wasm_bytes = match tokio::fs::read(&path).await {
+            Ok(bytes) => bytes,
+            Err(e) => {
+                t::Printout::new(
+                    0,
+                    format!("kernel: couldn't read wasm bytes for process: {process_id} at {path:?}: {e}"),
+                )
+                .send(&send_to_terminal)
+                .await;
+                non_rebooted_processes.insert(process_id.clone());
+                continue;
+            }
+        };
         if let t::OnExit::Requests(requests) = &persisted.on_exit {
             // if a persisted process had on-death-requests, we should perform them now
             // even in death, a process can only message processes it has capabilities for
@@ -641,7 +650,7 @@ pub async fn kernel(
             &engine,
             &caps_oracle_sender,
             &start_process_metadata,
-            home_directory_path.as_str(),
+            &home_directory_path,
         )
         .await
         {
@@ -2,6 +2,7 @@ use crate::KERNEL_PROCESS_ID;
 use lib::{types::core as t, v0::ProcessV0, Process};
 use std::{
     collections::{HashMap, VecDeque},
+    path::PathBuf,
     sync::Arc,
 };
 use tokio::{fs, task::JoinHandle};
@@ -84,18 +85,32 @@ impl WasiView for ProcessWasiV0 {
 }
 
 async fn make_table_and_wasi(
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     process_state: &ProcessState,
 ) -> (Table, WasiCtx, MemoryOutputPipe) {
     let table = Table::new();
     let wasi_stderr = MemoryOutputPipe::new(STACK_TRACE_SIZE);
 
-    let tmp_path = format!(
-        "{}/vfs/{}:{}/tmp",
-        home_directory_path,
-        process_state.metadata.our.process.package(),
-        process_state.metadata.our.process.publisher()
-    );
+    #[cfg(unix)]
+    let tmp_path = home_directory_path
+        .join("vfs")
+        .join(format!(
+            "{}:{}",
+            process_state.metadata.our.process.package(),
+            process_state.metadata.our.process.publisher()
+        ))
+        .join("tmp");
+    #[cfg(target_os = "windows")]
+    let tmp_path = home_directory_path
+        .join("vfs")
+        .join(format!(
+            "{}_{}",
+            process_state.metadata.our.process.package(),
+            process_state.metadata.our.process.publisher()
+        ))
+        .join("tmp");
+
+    let tmp_path = tmp_path.to_str().unwrap();
 
     let mut wasi = WasiCtxBuilder::new();
 
@@ -107,15 +122,10 @@ async fn make_table_and_wasi(
         .await
     {
         if let Ok(wasi_tempdir) =
-            Dir::open_ambient_dir(tmp_path.clone(), wasi_common::sync::ambient_authority())
+            Dir::open_ambient_dir(tmp_path, wasi_common::sync::ambient_authority())
         {
-            wasi.preopened_dir(
-                wasi_tempdir,
-                DirPerms::all(),
-                FilePerms::all(),
-                tmp_path.clone(),
-            )
-            .env("TEMP_DIR", tmp_path);
+            wasi.preopened_dir(wasi_tempdir, DirPerms::all(), FilePerms::all(), tmp_path)
+                .env("TEMP_DIR", tmp_path);
         }
     }
 
@@ -125,7 +135,7 @@ async fn make_table_and_wasi(
 async fn make_component(
     engine: Engine,
     wasm_bytes: &[u8],
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     process_state: ProcessState,
 ) -> anyhow::Result<(Process, Store<ProcessWasi>, MemoryOutputPipe)> {
     let component =
@@ -168,7 +178,7 @@ async fn make_component(
 async fn make_component_v0(
     engine: Engine,
     wasm_bytes: &[u8],
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     process_state: ProcessState,
 ) -> anyhow::Result<(ProcessV0, Store<ProcessWasiV0>, MemoryOutputPipe)> {
     let component =
@@ -219,7 +229,7 @@ pub async fn make_process_loop(
     wasm_bytes: Vec<u8>,
     caps_oracle: t::CapMessageSender,
     engine: Engine,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
 ) -> anyhow::Result<()> {
     // before process can be instantiated, need to await 'run' message from kernel
     let mut pre_boot_queue = Vec::<Result<t::KernelMessage, t::WrappedSendError>>::new();
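The `:` versus `_` split above is a Windows constraint: `:` is reserved in Windows file names, so a package drive named `package:publisher` on Unix needs a different spelling there. A minimal sketch of the convention (`drive_dir` is a hypothetical helper, not from the commit):

```rust
#[cfg(unix)]
fn drive_dir(package: &str, publisher: &str) -> String {
    format!("{package}:{publisher}")
}

#[cfg(target_os = "windows")]
fn drive_dir(package: &str, publisher: &str) -> String {
    // `:` is reserved in Windows paths, so substitute `_`
    format!("{package}_{publisher}")
}
```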
@@ -9,6 +9,7 @@ use lib::types::core::{
 use rocksdb::OptimisticTransactionDB;
 use std::{
     collections::{HashMap, VecDeque},
+    path::PathBuf,
     sync::Arc,
 };
 use tokio::{fs, sync::Mutex};
@@ -16,7 +17,7 @@ use tokio::{fs, sync::Mutex};
 #[derive(Clone)]
 struct KvState {
     our: Arc<Address>,
-    kv_path: Arc<String>,
+    kv_path: Arc<PathBuf>,
     send_to_loop: MessageSender,
     send_to_terminal: PrintSender,
     open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
@@ -31,11 +32,11 @@ impl KvState {
         our: Address,
         send_to_terminal: PrintSender,
         send_to_loop: MessageSender,
-        home_directory_path: String,
+        home_directory_path: PathBuf,
     ) -> Self {
         Self {
             our: Arc::new(our),
-            kv_path: Arc::new(format!("{home_directory_path}/kv")),
+            kv_path: Arc::new(home_directory_path.join("kv")),
             send_to_loop,
             send_to_terminal,
             open_kvs: Arc::new(DashMap::new()),
@@ -60,7 +61,18 @@ impl KvState {
             self.remove_db(key.0, key.1).await;
         }
 
-        let db_path = format!("{}/{}/{}", self.kv_path.as_str(), package_id, db);
+        #[cfg(unix)]
+        let db_path = self.kv_path.join(format!("{package_id}")).join(&db);
+        #[cfg(target_os = "windows")]
+        let db_path = self
+            .kv_path
+            .join(format!(
+                "{}_{}",
+                package_id._package(),
+                package_id._publisher()
+            ))
+            .join(&db);
+
         fs::create_dir_all(&db_path).await?;
 
         self.open_kvs.insert(
@@ -94,7 +106,7 @@ pub async fn kv(
     send_to_terminal: PrintSender,
     mut recv_from_loop: MessageReceiver,
     send_to_caps_oracle: CapMessageSender,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
 ) -> anyhow::Result<()> {
     let our = Address::new(our_node.as_str(), KV_PROCESS_ID.clone());
 
@@ -102,7 +114,7 @@ pub async fn kv(
 
     let mut state = KvState::new(our, send_to_terminal, send_to_loop, home_directory_path);
 
-    if let Err(e) = fs::create_dir_all(state.kv_path.as_str()).await {
+    if let Err(e) = fs::create_dir_all(&*state.kv_path).await {
         panic!("failed creating kv dir! {e:?}");
     }
 
@@ -500,11 +512,22 @@ async fn check_caps(
                 .remove_db(request.package_id.clone(), request.db.clone())
                 .await;
 
-            fs::remove_dir_all(format!(
-                "{}/{}/{}",
-                state.kv_path, request.package_id, request.db
-            ))
-            .await?;
+            #[cfg(unix)]
+            let db_path = state
+                .kv_path
+                .join(format!("{}", request.package_id))
+                .join(&request.db);
+            #[cfg(target_os = "windows")]
+            let db_path = state
+                .kv_path
+                .join(format!(
+                    "{}_{}",
+                    request.package_id._package(),
+                    request.package_id._publisher()
+                ))
+                .join(&request.db);
+
+            fs::remove_dir_all(&db_path).await?;
 
             Ok(())
         }
@@ -1,5 +1,3 @@
-#![feature(async_closure)]
-#![feature(btree_extract_if)]
 use anyhow::Result;
 use clap::{arg, value_parser, Command};
 use lib::types::core::{
@@ -11,6 +9,7 @@ use lib::types::core::{
 #[cfg(feature = "simulation-mode")]
 use ring::{rand::SystemRandom, signature, signature::KeyPair};
 use std::env;
+use std::path::Path;
 use std::sync::Arc;
 use tokio::sync::mpsc;
 
@@ -66,17 +65,29 @@ pub const MULTICALL_ADDRESS: &str = "0xcA11bde05977b3631167028862bE2a173976CA11"
 
 #[tokio::main]
 async fn main() {
+    // embed values in binary for inspection without running & print on boot
+    // e.g., to inspect without running, use
+    // ```bash
+    // strings kinode | grep DOCKER_BUILD_IMAGE_VERSION
+    // ```
     println!(
-        "\nDOCKER_BUILD_IMAGE_VERSION: {}\n",
-        env!("DOCKER_BUILD_IMAGE_VERSION")
+        "\nDOCKER_BUILD_IMAGE_VERSION: {}\nPACKAGES_ZIP_HASH: {}\n",
+        env!("DOCKER_BUILD_IMAGE_VERSION"),
+        env!("PACKAGES_ZIP_HASH"),
     );
 
     let app = build_command();
 
     let matches = app.get_matches();
     let home_directory_path = matches
         .get_one::<String>("home")
         .expect("home directory required");
-    create_home_directory(&home_directory_path).await;
+    if let Err(e) = tokio::fs::create_dir_all(home_directory_path).await {
+        panic!("failed to create home directory: {e:?}");
+    }
+    let home_directory_path = std::fs::canonicalize(&home_directory_path)
+        .expect("specified home directory {home_directory_path} not found");
+    println!("home at {home_directory_path:?}\r");
     let http_server_port = set_http_server_port(matches.get_one::<u16>("port")).await;
     let ws_networking_port = matches.get_one::<u16>("ws-port");
     #[cfg(not(feature = "simulation-mode"))]
@@ -103,7 +114,7 @@ async fn main() {
 
     // default eth providers/routers
     let mut eth_provider_config: lib::eth::SavedConfigs = if let Ok(contents) =
-        tokio::fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await
+        tokio::fs::read_to_string(home_directory_path.join(".eth_providers")).await
     {
         if let Ok(contents) = serde_json::from_str(&contents) {
             println!("loaded saved eth providers\r");
@@ -123,7 +134,7 @@ async fn main() {
         });
         // save the new provider config
         tokio::fs::write(
-            format!("{}/.eth_providers", home_directory_path),
+            home_directory_path.join(".eth_providers"),
            serde_json::to_string(&eth_provider_config).unwrap(),
         )
         .await
@@ -201,7 +212,7 @@ async fn main() {
     let (our, encoded_keyfile, decoded_keyfile) = simulate_node(
         fake_node_name.cloned(),
         password.cloned(),
-        home_directory_path,
+        &home_directory_path,
         (
             ws_tcp_handle.expect("need ws networking for simulation mode"),
             ws_flag_used,
@@ -312,10 +323,13 @@ async fn main() {
     let networking_keypair_arc = Arc::new(decoded_keyfile.networking_keypair);
     let our_name_arc = Arc::new(our.name.clone());
 
+    let home_directory_string = matches
+        .get_one::<String>("home")
+        .expect("home directory required");
     let (kernel_process_map, db, reverse_cap_index) = state::load_state(
         our.name.clone(),
         networking_keypair_arc.clone(),
-        home_directory_path.clone(),
+        home_directory_string.clone(),
         runtime_extensions.clone(),
     )
     .await
@@ -453,7 +467,7 @@ async fn main() {
         quit = terminal::terminal(
             our.clone(),
             VERSION,
-            home_directory_path.into(),
+            home_directory_path.clone(),
             kernel_message_sender.clone(),
             kernel_debug_message_sender,
             print_sender.clone(),
@@ -562,7 +576,7 @@ async fn setup_networking(
 pub async fn simulate_node(
     fake_node_name: Option<String>,
     password: Option<String>,
-    home_directory_path: &str,
+    home_directory_path: &Path,
     (ws_networking, _ws_used): (tokio::net::TcpListener, bool),
     fakechain_port: Option<u16>,
 ) -> (Identity, Vec<u8>, Keyfile) {
@@ -573,7 +587,7 @@ pub async fn simulate_node(
             panic!("Fake node must be booted with either a --fake-node-name, --password, or both.");
         }
         Some(password) => {
-            let keyfile = tokio::fs::read(format!("{home_directory_path}/.keys"))
+            let keyfile = tokio::fs::read(home_directory_path.join(".keys"))
                 .await
                 .expect("could not read keyfile");
             let decoded = keygen::decode_keyfile(&keyfile, &password)
@@ -642,25 +656,15 @@ pub async fn simulate_node(
                 &decoded_keyfile.file_key,
             );
 
-            tokio::fs::write(
-                format!("{home_directory_path}/.keys"),
-                encoded_keyfile.clone(),
-            )
-            .await
-            .expect("Failed to write keyfile");
+            tokio::fs::write(home_directory_path.join(".keys"), encoded_keyfile.clone())
+                .await
+                .expect("Failed to write keyfile");
 
             (identity, encoded_keyfile, decoded_keyfile)
         }
     }
 }
 
-async fn create_home_directory(home_directory_path: &str) {
-    if let Err(e) = tokio::fs::create_dir_all(home_directory_path).await {
-        panic!("failed to create home directory: {e:?}");
-    }
-    println!("home at {home_directory_path}\r");
-}
-
 /// build the command line interface for kinode
 ///
 fn build_command() -> Command {
@@ -713,11 +717,11 @@ fn build_command() -> Command {
         )
         .arg(
             arg!(--"max-peers" <MAX_PEERS> "Maximum number of peers to hold active connections with (default 32)")
-                .value_parser(value_parser!(u32)),
+                .value_parser(value_parser!(u64)),
         )
         .arg(
             arg!(--"max-passthroughs" <MAX_PASSTHROUGHS> "Maximum number of passthroughs serve as a router (default 0)")
-                .value_parser(value_parser!(u32)),
+                .value_parser(value_parser!(u64)),
         )
         .arg(
             arg!(--"soft-ulimit" <SOFT_ULIMIT> "Enforce a static maximum number of file descriptors (default fetched from system)")
@@ -772,7 +776,7 @@ async fn find_public_ip() -> std::net::Ipv4Addr {
 /// that updates their PKI info on-chain.
 #[cfg(not(feature = "simulation-mode"))]
 async fn serve_register_fe(
-    home_directory_path: &str,
+    home_directory_path: &Path,
     our_ip: String,
     ws_networking: (Option<tokio::net::TcpListener>, bool),
     tcp_networking: (Option<tokio::net::TcpListener>, bool),
@@ -782,7 +786,7 @@ async fn serve_register_fe(
 ) -> (Identity, Vec<u8>, Keyfile) {
     let (kill_tx, kill_rx) = tokio::sync::oneshot::channel::<bool>();
 
-    let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(format!("{}/.keys", home_directory_path))
+    let disk_keyfile: Option<Vec<u8>> = tokio::fs::read(home_directory_path.join(".keys"))
         .await
         .ok();
 
@@ -805,7 +809,7 @@ async fn serve_register_fe(
         }
     };
 
-    tokio::fs::write(format!("{home_directory_path}/.keys"), &encoded_keyfile)
+    tokio::fs::write(home_directory_path.join(".keys"), &encoded_keyfile)
         .await
         .unwrap();
 
@@ -819,7 +823,7 @@ async fn serve_register_fe(
 
 #[cfg(not(feature = "simulation-mode"))]
 async fn login_with_password(
-    home_directory_path: &str,
+    home_directory_path: &Path,
     our_ip: String,
     ws_networking: (Option<tokio::net::TcpListener>, bool),
     tcp_networking: (Option<tokio::net::TcpListener>, bool),
@@ -831,7 +835,7 @@ async fn login_with_password(
         sha2::{Digest, Sha256},
     };
 
-    let disk_keyfile: Vec<u8> = tokio::fs::read(format!("{}/.keys", home_directory_path))
+    let disk_keyfile: Vec<u8> = tokio::fs::read(home_directory_path.join(".keys"))
         .await
         .expect("could not read keyfile");
 
@@ -873,7 +877,7 @@ async fn login_with_password(
     .await
     .expect("information used to boot does not match information onchain");
 
-    tokio::fs::write(format!("{home_directory_path}/.keys"), &disk_keyfile)
+    tokio::fs::write(home_directory_path.join(".keys"), &disk_keyfile)
         .await
         .unwrap();
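A note on the boot banner above: `env!` reads variables that build.rs exported with `cargo:rustc-env` at compile time and bakes them into the binary's static data, which is why `strings kinode | grep PACKAGES_ZIP_HASH` works on a binary at rest. Minimal sketch:

```rust
fn main() {
    // These compile only if the build script emitted the matching
    // `cargo:rustc-env=...` lines; the values become string literals
    // embedded in the executable.
    println!(
        "DOCKER_BUILD_IMAGE_VERSION: {}\nPACKAGES_ZIP_HASH: {}",
        env!("DOCKER_BUILD_IMAGE_VERSION"),
        env!("PACKAGES_ZIP_HASH"),
    );
}
```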
@@ -15,7 +15,8 @@ pub async fn send_to_peer(ext: &IdentityExt, data: &NetData, mut km: KernelMessa
         }
         Err(e_km) => {
             // peer connection was closed, remove it and try to reconnect
-            data.peers.remove(&peer.identity.name).await;
+            drop(peer);
+            data.peers.remove(&e_km.0.target.node).await;
             km = e_km.0;
         }
     }
@@ -111,7 +112,7 @@ async fn connect_via_router(
         routers
     };
     for router_name in &routers_shuffled {
-        if router_name.as_ref() == ext.our.name {
+        if router_name == &ext.our.name {
             // we can't route through ourselves
             continue;
         }
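The added `drop(peer)` is the subtle part of the first hunk: if, as the surrounding code suggests, the peer handle is a guard borrowed from a sharded concurrent map, removing a key while still holding its guard can deadlock on the shard lock. A hypothetical minimal illustration, assuming the `dashmap` crate rather than the node's actual `Peers` type:

```rust
use dashmap::DashMap;

fn main() {
    let peers: DashMap<String, u32> = DashMap::new();
    peers.insert("alice".into(), 1);

    let guard = peers.get("alice").unwrap();
    drop(guard); // release the shard lock first...
    peers.remove("alice"); // ...so removal cannot block on our own guard
}
```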
@@ -1 +1,3 @@
-npm install && npm run build:copy
+#!/bin/bash
+
+npm install && npm run build:copy
@@ -94,9 +94,14 @@ pub async fn register(
     let ws_port = warp::any().map(move || (ws_port, ws_flag_used));
     let tcp_port = warp::any().map(move || (tcp_port, tcp_flag_used));
 
+    #[cfg(unix)]
     let static_files =
         warp::path("assets").and(static_dir::static_dir!("src/register-ui/build/assets/"));
+    #[cfg(target_os = "windows")]
+    let static_files =
+        warp::path("assets").and(static_dir::static_dir!("src\\register-ui\\build\\assets\\"));
 
+    #[cfg(unix)]
     let react_app = warp::path::end()
         .or(warp::path("login"))
         .or(warp::path("commit-os-name"))
@@ -108,6 +113,18 @@ pub async fn register(
         .or(warp::path("custom-register"))
         .and(warp::get())
         .map(move |_| warp::reply::html(include_str!("register-ui/build/index.html")));
+    #[cfg(target_os = "windows")]
+    let react_app = warp::path::end()
+        .or(warp::path("login"))
+        .or(warp::path("commit-os-name"))
+        .or(warp::path("mint-os-name"))
+        .or(warp::path("claim-invite"))
+        .or(warp::path("reset"))
+        .or(warp::path("import-keyfile"))
+        .or(warp::path("set-password"))
+        .or(warp::path("custom-register"))
+        .and(warp::get())
+        .map(move |_| warp::reply::html(include_str!("register-ui\\build\\index.html")));
 
     let boot_provider = provider.clone();
     let login_provider = provider.clone();
@@ -10,6 +10,7 @@ use lib::types::core::{
 use rusqlite::Connection;
 use std::{
     collections::{HashMap, HashSet, VecDeque},
+    path::PathBuf,
     sync::Arc,
 };
 use tokio::{fs, sync::Mutex};
@@ -25,7 +26,7 @@ lazy_static::lazy_static! {
 #[derive(Clone)]
 struct SqliteState {
     our: Arc<Address>,
-    sqlite_path: Arc<String>,
+    sqlite_path: Arc<PathBuf>,
     send_to_loop: MessageSender,
     send_to_terminal: PrintSender,
     open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
@@ -39,11 +40,11 @@ impl SqliteState {
         our: Address,
         send_to_terminal: PrintSender,
         send_to_loop: MessageSender,
-        home_directory_path: String,
+        home_directory_path: PathBuf,
     ) -> Self {
         Self {
             our: Arc::new(our),
-            sqlite_path: Arc::new(format!("{home_directory_path}/sqlite")),
+            sqlite_path: Arc::new(home_directory_path.join("sqlite")),
             send_to_loop,
             send_to_terminal,
             open_dbs: Arc::new(DashMap::new()),
@@ -68,10 +69,21 @@ impl SqliteState {
             self.remove_db(key.0, key.1).await;
         }
 
-        let db_path = format!("{}/{}/{}", self.sqlite_path.as_str(), package_id, db);
+        #[cfg(unix)]
+        let db_path = self.sqlite_path.join(format!("{package_id}")).join(&db);
+        #[cfg(target_os = "windows")]
+        let db_path = self
+            .sqlite_path
+            .join(format!(
+                "{}_{}",
+                package_id._package(),
+                package_id._publisher()
+            ))
+            .join(&db);
+
         fs::create_dir_all(&db_path).await?;
 
-        let db_file_path = format!("{}/{}.db", db_path, db);
+        let db_file_path = format!("{}.db", db);
 
         let db_conn = Connection::open(db_file_path)?;
         let _ = db_conn.execute("PRAGMA journal_mode=WAL", []);
@@ -105,7 +117,7 @@ pub async fn sqlite(
     send_to_terminal: PrintSender,
     mut recv_from_loop: MessageReceiver,
     send_to_caps_oracle: CapMessageSender,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
 ) -> anyhow::Result<()> {
     let our = Address::new(our_node.as_str(), SQLITE_PROCESS_ID.clone());
 
@@ -113,7 +125,7 @@ pub async fn sqlite(
 
     let mut state = SqliteState::new(our, send_to_terminal, send_to_loop, home_directory_path);
 
-    if let Err(e) = fs::create_dir_all(state.sqlite_path.as_str()).await {
+    if let Err(e) = fs::create_dir_all(&*state.sqlite_path).await {
         panic!("failed creating sqlite dir! {e:?}");
     }
 
@@ -515,11 +527,19 @@ async fn check_caps(
                 .remove_db(request.package_id.clone(), request.db.clone())
                 .await;
 
-            fs::remove_dir_all(format!(
-                "{}/{}/{}",
-                state.sqlite_path, request.package_id, request.db
-            ))
-            .await?;
+            #[cfg(unix)]
+            let db_path = state.sqlite_path.join(format!("{}", request.package_id)).join(&request.db);
+            #[cfg(target_os = "windows")]
+            let db_path = state
+                .sqlite_path
+                .join(format!(
+                    "{}_{}",
+                    request.package_id._package(),
+                    request.package_id._publisher()
+                ))
+                .join(&request.db);
+
+            fs::remove_dir_all(&db_path).await?;
 
             Ok(())
         }
@ -9,23 +9,30 @@ use rocksdb::{checkpoint::Checkpoint, Options, DB};
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
io::Read,
|
||||
path::Path,
|
||||
path::PathBuf,
|
||||
sync::Arc,
|
||||
};
|
||||
use tokio::{fs, io::AsyncWriteExt, sync::Mutex};
|
||||
|
||||
include!("../../target/bootstrapped_processes.rs");
|
||||
static PACKAGES_ZIP: &[u8] = include_bytes!("../../target/packages.zip");
|
||||
const FILE_TO_METADATA: &str = "file_to_metadata.json";
|
||||
|
||||
pub async fn load_state(
|
||||
our_name: String,
|
||||
keypair: Arc<signature::Ed25519KeyPair>,
|
||||
home_directory_path: String,
|
||||
home_directory_string: String,
|
||||
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
|
||||
) -> Result<(ProcessMap, DB, ReverseCapIndex), StateError> {
|
||||
let state_path = format!("{home_directory_path}/kernel");
|
||||
let home_directory_path = std::fs::canonicalize(&home_directory_string)?;
|
||||
let state_path = home_directory_path.join("kernel");
|
||||
if let Err(e) = fs::create_dir_all(&state_path).await {
|
||||
panic!("failed creating kernel state dir! {e:?}");
|
||||
}
|
||||
// use String to not upset rocksdb:
|
||||
// * on Unix, works as expected
|
||||
// * on Windows, would normally use std::path to be cross-platform,
|
||||
// but here rocksdb appends a `/LOG` which breaks the path
|
||||
let state_path = format!("{home_directory_string}/kernel");
|
||||
|
||||
let mut opts = Options::default();
|
||||
opts.create_if_missing(true);
|
||||
@ -83,7 +90,7 @@ pub async fn state_sender(
|
||||
send_to_terminal: PrintSender,
|
||||
mut recv_state: MessageReceiver,
|
||||
db: DB,
|
||||
home_directory_path: String,
|
||||
home_directory_path: PathBuf,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let db = Arc::new(db);
|
||||
let home_directory_path = Arc::new(home_directory_path);
|
||||
@ -158,7 +165,7 @@ async fn handle_request(
|
||||
kernel_message: KernelMessage,
|
||||
db: Arc<DB>,
|
||||
send_to_loop: &MessageSender,
|
||||
home_directory_path: &str,
|
||||
home_directory_path: &PathBuf,
|
||||
) -> Result<(), StateError> {
|
||||
let KernelMessage {
|
||||
id,
|
||||
@ -243,9 +250,8 @@ async fn handle_request(
|
||||
}
|
||||
}
|
||||
StateAction::Backup => {
|
||||
let checkpoint_dir = format!("{home_directory_path}/kernel/backup");
|
||||
|
||||
if Path::new(&checkpoint_dir).exists() {
|
||||
let checkpoint_dir = home_directory_path.join("kernel").join("backup");
|
||||
if checkpoint_dir.exists() {
|
||||
fs::remove_dir_all(&checkpoint_dir).await?;
|
||||
}
|
||||
let checkpoint = Checkpoint::new(&db).map_err(|e| StateError::RocksDBError {
|
||||
@ -302,7 +308,7 @@ async fn handle_request(
|
||||
async fn bootstrap(
|
||||
our_name: &str,
|
||||
keypair: Arc<signature::Ed25519KeyPair>,
|
||||
home_directory_path: String,
|
||||
home_directory_path: PathBuf,
|
||||
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
|
||||
process_map: &mut ProcessMap,
|
||||
reverse_cap_index: &mut ReverseCapIndex,
|
||||
@@ -381,7 +387,7 @@ async fn bootstrap(
         current.capabilities.extend(runtime_caps.clone());
     }

-    let packages = get_zipped_packages().await;
+    let packages = get_zipped_packages();

     for (package_metadata, mut package) in packages.clone() {
         let package_name = package_metadata.properties.package_name.as_str();
@@ -395,24 +401,30 @@ async fn bootstrap(
         let package_publisher = package_metadata.properties.publisher.as_str();

         // create a new package in VFS
+        #[cfg(unix)]
         let our_drive_name = [package_name, package_publisher].join(":");
-        let pkg_path = format!("{}/vfs/{}/pkg", &home_directory_path, &our_drive_name);
+        #[cfg(target_os = "windows")]
+        let our_drive_name = [package_name, package_publisher].join("_");
+        let pkg_path = home_directory_path
+            .join("vfs")
+            .join(&our_drive_name)
+            .join("pkg");

         // delete anything currently residing in the pkg folder
-        let pkg_path_buf = std::path::PathBuf::from(&pkg_path);
-        if pkg_path_buf.exists() {
+        if pkg_path.exists() {
             fs::remove_dir_all(&pkg_path).await?;
         }
         fs::create_dir_all(&pkg_path)
             .await
             .expect("bootstrap vfs dir pkg creation failed!");

-        let drive_path = format!("/{}/pkg", &our_drive_name);
+        let drive_path = format!("/{}/pkg", [package_name, package_publisher].join(":"));

         // save the zip itself inside pkg folder, for sharing with others
         let mut zip_file =
-            fs::File::create(format!("{}/{}.zip", &pkg_path, &our_drive_name)).await?;
+            fs::File::create(pkg_path.join(format!("{}.zip", &our_drive_name))).await?;
         let package_zip_bytes = package.clone().into_inner().into_inner();
-        zip_file.write_all(package_zip_bytes).await?;
+        zip_file.write_all(&package_zip_bytes).await?;
@@ -433,7 +445,7 @@ async fn bootstrap(
             };

             let file_path_str = file_path.to_string_lossy().to_string();
-            let full_path = Path::new(&pkg_path).join(&file_path_str);
+            let full_path = pkg_path.join(&file_path_str);

             if file.is_dir() {
                 // It's a directory, create it
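Note: the drive name now differs per OS because ':' is a reserved character in Windows file names, while the logical drive identity (used in drive_path above) keeps the canonical name:publisher form. An illustration with made-up values:

    // illustration only: the logical drive id keeps the colon, the Windows
    // on-disk directory name swaps it for '_'
    let logical = ["chess", "sys"].join(":"); // "chess:sys" (made-up package)
    let on_disk = logical.replace(':', "_");  // "chess_sys"
    assert_eq!(on_disk, "chess_sys");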
@@ -713,20 +725,28 @@ fn sign_cap(cap: Capability, keypair: Arc<signature::Ed25519KeyPair>) -> Vec<u8>
 }

 /// read in `include!()`ed .zip package files
-async fn get_zipped_packages() -> Vec<(
-    Erc721Metadata,
-    zip::ZipArchive<std::io::Cursor<&'static [u8]>>,
-)> {
+fn get_zipped_packages() -> Vec<(Erc721Metadata, zip::ZipArchive<std::io::Cursor<Vec<u8>>>)> {
     let mut packages = Vec::new();

-    for (package_name, metadata_bytes, bytes) in BOOTSTRAPPED_PROCESSES.iter() {
-        if let Ok(zip) = zip::ZipArchive::new(std::io::Cursor::new(*bytes)) {
-            if let Ok(metadata) = serde_json::from_slice::<Erc721Metadata>(metadata_bytes) {
-                packages.push((metadata, zip));
-            } else {
-                println!("fs: metadata for package {package_name} is not valid Erc721Metadata!\r",);
-            }
-        }
+    let mut packages_zip = zip::ZipArchive::new(std::io::Cursor::new(PACKAGES_ZIP)).unwrap();
+    let mut file_to_metadata = vec![];
+    packages_zip
+        .by_name(FILE_TO_METADATA)
+        .unwrap()
+        .read_to_end(&mut file_to_metadata)
+        .unwrap();
+    let file_to_metadata: HashMap<String, Erc721Metadata> =
+        serde_json::from_slice(&file_to_metadata).unwrap();
+
+    for (file_name, metadata) in file_to_metadata {
+        let mut zip_bytes = vec![];
+        packages_zip
+            .by_name(&file_name)
+            .unwrap()
+            .read_to_end(&mut zip_bytes)
+            .unwrap();
+        let zip_archive = zip::ZipArchive::new(std::io::Cursor::new(zip_bytes)).unwrap();
+        packages.push((metadata, zip_archive));
+    }

     packages
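Note: bootstrap packages now come from one embedded archive-of-archives instead of per-process `include!()`s: PACKAGES_ZIP holds a file_to_metadata.json index plus one zip per core package. The constant definitions are not shown in this diff; a hypothetical sketch of how they could look, given that the release script below sets a PATH_TO_PACKAGES_ZIP env var at build time:

    // hypothetical, not shown in this commit's hunks: embed the archive the
    // build_packages tool produced, at the path the release script exported
    const PACKAGES_ZIP: &[u8] = include_bytes!(env!("PATH_TO_PACKAGES_ZIP"));
    const FILE_TO_METADATA: &str = "file_to_metadata.json";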
@@ -1,7 +1,7 @@
 use chrono::{Datelike, Local, Timelike};
 use crossterm::{
     cursor,
-    event::{Event, EventStream, KeyCode, KeyEvent, KeyModifiers},
+    event::{Event, EventStream, KeyCode, KeyEvent, KeyEventKind, KeyModifiers},
     execute, style,
     style::Print,
     terminal::{self, ClearType},
@@ -14,7 +14,9 @@ use lib::types::core::{
 use std::{
     fs::{read_to_string, OpenOptions},
     io::BufWriter,
+    path::PathBuf,
 };
+#[cfg(unix)]
 use tokio::signal::unix::{signal, SignalKind};
 use unicode_segmentation::UnicodeSegmentation;

@@ -174,7 +176,7 @@ impl CurrentLine {
 pub async fn terminal(
     our: Identity,
     version: &str,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
     mut event_loop: MessageSender,
     mut debug_event_loop: DebugSender,
     mut print_tx: PrintSender,
@@ -202,9 +204,7 @@ pub async fn terminal(

     // the terminal stores the most recent 1000 lines entered by user
     // in history. TODO should make history size adjustable.
-    let history_path = std::fs::canonicalize(&home_directory_path)
-        .expect("terminal: could not get path for .terminal_history file")
-        .join(".terminal_history");
+    let history_path = home_directory_path.join(".terminal_history");
     let history = read_to_string(&history_path).unwrap_or_default();
     let history_handle = OpenOptions::new()
         .append(true)
@@ -217,9 +217,7 @@ pub async fn terminal(
     // if CTRL+L is used to turn on logging, all prints to terminal
     // will also be written with their full timestamp to the .terminal_log file.
     // logging mode is always on by default
-    let log_dir_path = std::fs::canonicalize(&home_directory_path)
-        .expect("terminal: could not get path for .terminal_logs dir")
-        .join(".terminal_logs");
+    let log_dir_path = home_directory_path.join(".terminal_logs");
     let logger = utils::Logger::new(log_dir_path, max_log_size, number_log_files);

     let mut state = State {
@@ -243,22 +241,26 @@ pub async fn terminal(
     };

     // use to trigger cleanup if receive signal to kill process
-    let mut sigalrm =
-        signal(SignalKind::alarm()).expect("terminal: failed to set up SIGALRM handler");
-    let mut sighup =
-        signal(SignalKind::hangup()).expect("terminal: failed to set up SIGHUP handler");
-    let mut sigint =
-        signal(SignalKind::interrupt()).expect("terminal: failed to set up SIGINT handler");
-    let mut sigpipe =
-        signal(SignalKind::pipe()).expect("terminal: failed to set up SIGPIPE handler");
-    let mut sigquit =
-        signal(SignalKind::quit()).expect("terminal: failed to set up SIGQUIT handler");
-    let mut sigterm =
-        signal(SignalKind::terminate()).expect("terminal: failed to set up SIGTERM handler");
-    let mut sigusr1 =
-        signal(SignalKind::user_defined1()).expect("terminal: failed to set up SIGUSR1 handler");
-    let mut sigusr2 =
-        signal(SignalKind::user_defined2()).expect("terminal: failed to set up SIGUSR2 handler");
+    #[cfg(unix)]
+    let (
+        mut sigalrm,
+        mut sighup,
+        mut sigint,
+        mut sigpipe,
+        mut sigquit,
+        mut sigterm,
+        mut sigusr1,
+        mut sigusr2,
+    ) = (
+        signal(SignalKind::alarm()).expect("terminal: failed to set up SIGALRM handler"),
+        signal(SignalKind::hangup()).expect("terminal: failed to set up SIGHUP handler"),
+        signal(SignalKind::interrupt()).expect("terminal: failed to set up SIGINT handler"),
+        signal(SignalKind::pipe()).expect("terminal: failed to set up SIGPIPE handler"),
+        signal(SignalKind::quit()).expect("terminal: failed to set up SIGQUIT handler"),
+        signal(SignalKind::terminate()).expect("terminal: failed to set up SIGTERM handler"),
+        signal(SignalKind::user_defined1()).expect("terminal: failed to set up SIGUSR1 handler"),
+        signal(SignalKind::user_defined2()).expect("terminal: failed to set up SIGUSR2 handler"),
+    );

     // if the verbosity boot flag was **not** set to "full event loop", tell kernel
     // the kernel will try and print all events by default so that booting with
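Note: a `#[cfg]` attribute applies to a single statement, so folding the eight `let` bindings into one tuple destructuring lets a single `#[cfg(unix)]` strip all of the Unix-only signal streams from Windows builds. A minimal illustration of the same trick (not the commit's code):

    // one cfg attribute covers one statement; binding several values in a
    // single tuple lets the whole group vanish on non-unix targets
    #[cfg(unix)]
    let (mut sighup, mut sigterm) = (
        tokio::signal::unix::signal(tokio::signal::unix::SignalKind::hangup()).unwrap(),
        tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()).unwrap(),
    );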
@@ -274,6 +276,7 @@ pub async fn terminal(
     if !is_detached {
         let mut reader = EventStream::new();
         loop {
+            #[cfg(unix)]
             tokio::select! {
                 Some(printout) = print_rx.recv() => {
                     handle_printout(printout, &mut state)?;
@@ -292,9 +295,21 @@ pub async fn terminal(
                 _ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
                 _ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
             }
+            #[cfg(target_os = "windows")]
+            tokio::select! {
+                Some(printout) = print_rx.recv() => {
+                    handle_printout(printout, &mut state)?;
+                }
+                Some(Ok(event)) = reader.next().fuse() => {
+                    if handle_event(&our, event, &mut state, &mut event_loop, &mut debug_event_loop, &mut print_tx).await? {
+                        break;
+                    }
+                }
+            }
         }
     } else {
         loop {
+            #[cfg(unix)]
             tokio::select! {
                 Some(printout) = print_rx.recv() => {
                     handle_printout(printout, &mut state)?;
@@ -308,6 +323,10 @@ pub async fn terminal(
                 _ = sigusr1.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR1")),
                 _ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
             }
+            #[cfg(target_os = "windows")]
+            if let Some(printout) = print_rx.recv().await {
+                handle_printout(printout, &mut state)?;
+            };
         }
     };
     Ok(())
@@ -353,7 +372,7 @@ fn handle_printout(printout: Printout, state: &mut State) -> anyhow::Result<()>
     Ok(())
 }

-/// returns True if runtime should exit due to CTRL+C or CTRL+D
+/// returns true if runtime should exit due to CTRL+C or CTRL+D
 async fn handle_event(
     our: &Identity,
     event: Event,
@@ -364,19 +383,14 @@ async fn handle_event(
 ) -> anyhow::Result<bool> {
     let State {
         stdout,
-        command_history,
         win_cols,
         win_rows,
         current_line,
-        in_step_through,
-        search_depth,
-        logging_mode,
-        verbose_mode,
         ..
     } = state;
     // lock here so that runtime can still use println! without freezing..
     // can lock before loop later if we want to reduce overhead
-    let mut stdout = stdout.lock();
+    let stdout = stdout.lock();
     match event {
         //
         // RESIZE: resize is super annoying because this event trigger often
@@ -392,9 +406,7 @@ async fn handle_event(
                 cursor::MoveTo(0, height),
                 terminal::Clear(ClearType::CurrentLine)
             )?;
-            // since we subtract prompt_len from win_cols, win_cols must always
-            // be >= prompt_len
-            *win_cols = std::cmp::max(width - 1, current_line.prompt_len as u16);
+            *win_cols = width - 1;
             *win_rows = height;
             if current_line.cursor_col + current_line.prompt_len as u16 > *win_cols {
                 current_line.cursor_col = *win_cols - current_line.prompt_len as u16;
@@ -418,19 +430,73 @@ async fn handle_event(
                     *win_cols - current_line.prompt_len as u16,
                 );
             }
+        Event::Key(key_event) => {
+            if let Some(should_exit) = handle_key_event(
+                our,
+                key_event,
+                state,
+                event_loop,
+                debug_event_loop,
+                print_tx,
+                stdout,
+            )
+            .await?
+            {
+                return Ok(should_exit);
+            }
+        }
+        _ => {
+            // some terminal event we don't care about, yet
+        }
+    }
+    if state.search_mode {
+        state.search(&our.name)?;
+    } else {
+        state.display_current_input_line(false)?;
+    }
+    Ok(false)
+}
+
+/// returns Some(true) if runtime should exit due to CTRL+C or CTRL+D,
+/// Some(false) if caller should simply return `false`,
+/// None if caller should fall through
+async fn handle_key_event(
+    our: &Identity,
+    key_event: KeyEvent,
+    state: &mut State,
+    event_loop: &mut MessageSender,
+    debug_event_loop: &mut DebugSender,
+    print_tx: &mut PrintSender,
+    mut stdout: std::io::StdoutLock<'static>,
+) -> anyhow::Result<Option<bool>> {
+    if key_event.kind == KeyEventKind::Release {
+        return Ok(Some(false));
+    }
+    let State {
+        command_history,
+        win_cols,
+        win_rows,
+        current_line,
+        in_step_through,
+        search_depth,
+        logging_mode,
+        verbose_mode,
+        ..
+    } = state;
+    match key_event {
         //
         // CTRL+C, CTRL+D: turn off the node
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('c'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        })
-        | Event::Key(KeyEvent {
+        }
+        | KeyEvent {
             code: KeyCode::Char('d'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             execute!(
                 stdout,
                 // print goes immediately above the dedicated input line at bottom
@@ -438,16 +504,16 @@ async fn handle_event(
                 terminal::Clear(ClearType::CurrentLine),
                 Print("exit code received"),
             )?;
-            return Ok(true);
+            return Ok(Some(true));
         }
         //
         // CTRL+V: toggle through verbosity modes
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('v'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             // go from low to high, then reset to 0
             match verbose_mode {
                 0 => *verbose_mode = 1,
@@ -483,16 +549,16 @@ async fn handle_event(
             )
             .send(&print_tx)
             .await;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // CTRL+J: toggle debug mode -- makes system-level event loop step-through
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('j'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             let _ = debug_event_loop.send(DebugCommand::ToggleStepthrough).await;
             *in_step_through = !*in_step_through;
             Printout::new(
@@ -507,27 +573,27 @@ async fn handle_event(
             )
             .send(&print_tx)
             .await;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // CTRL+S: step through system-level event loop (when in step-through mode)
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('s'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             let _ = debug_event_loop.send(DebugCommand::Step).await;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // CTRL+L: toggle logging mode
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('l'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             *logging_mode = !*logging_mode;
             Printout::new(
                 0,
@@ -535,21 +601,21 @@ async fn handle_event(
             )
             .send(&print_tx)
             .await;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // UP / CTRL+P: go up one command in history
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Up, ..
-        })
-        | Event::Key(KeyEvent {
+        }
+        | KeyEvent {
             code: KeyCode::Char('p'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             if state.search_mode {
-                return Ok(false);
+                return Ok(Some(false));
             }
             // go up one command in history
             match command_history.get_prev(&current_line.line) {
@@ -566,22 +632,22 @@ async fn handle_event(
                 }
             }
             state.display_current_input_line(true)?;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // DOWN / CTRL+N: go down one command in history
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Down,
             ..
-        })
-        | Event::Key(KeyEvent {
+        }
+        | KeyEvent {
             code: KeyCode::Char('n'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             if state.search_mode {
-                return Ok(false);
+                return Ok(Some(false));
             }
             // go down one command in history
             match command_history.get_next() {
@@ -598,18 +664,18 @@ async fn handle_event(
                 }
             }
             state.display_current_input_line(true)?;
-            return Ok(false);
+            return Ok(Some(false));
         }
         //
         // CTRL+A: jump to beginning of line
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('a'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             if state.search_mode {
-                return Ok(false);
+                return Ok(Some(false));
             }
             current_line.line_col = 0;
             current_line.cursor_col = 0;
@@ -617,13 +683,13 @@ async fn handle_event(
         //
         // CTRL+E: jump to end of line
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('e'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             if state.search_mode {
-                return Ok(false);
+                return Ok(Some(false));
             }
             current_line.line_col = current_line.line.graphemes(true).count();
             current_line.cursor_col = std::cmp::min(
@@ -635,11 +701,11 @@ async fn handle_event(
         // CTRL+R: enter search mode
         // if already in search mode, increase search depth
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('r'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             if state.search_mode {
                 *search_depth += 1;
             }
@@ -648,11 +714,11 @@ async fn handle_event(
         //
         // CTRL+G: exit search mode
         //
-        Event::Key(KeyEvent {
+        KeyEvent {
             code: KeyCode::Char('g'),
             modifiers: KeyModifiers::CONTROL,
             ..
-        }) => {
+        } => {
             // just show true current line as usual
             state.search_mode = false;
             *search_depth = 0;
@@ -660,7 +726,7 @@ async fn handle_event(
         //
         // KEY: handle keypress events
         //
-        Event::Key(k) => {
+        k => {
             match k.code {
                 //
                 // CHAR: write a single character
@@ -677,7 +743,7 @@ async fn handle_event(
                 //
                 KeyCode::Backspace => {
                     if current_line.line_col == 0 {
-                        return Ok(false);
+                        return Ok(Some(false));
                     } else {
                         current_line.line_col -= 1;
                         let c = current_line.delete_char();
@@ -689,7 +755,7 @@ async fn handle_event(
                 //
                 KeyCode::Delete => {
                     if current_line.line_col == current_line.line.graphemes(true).count() {
-                        return Ok(false);
+                        return Ok(Some(false));
                     }
                     current_line.delete_char();
                 }
@@ -700,7 +766,7 @@ async fn handle_event(
                     if current_line.cursor_col as usize == 0 {
                         if current_line.line_col == 0 {
                             // at the very beginning of the current typed line
-                            return Ok(false);
+                            return Ok(Some(false));
                         } else {
                             // virtual scroll leftward through line
                             current_line.line_col -= 1;
@@ -716,7 +782,7 @@ async fn handle_event(
                         if current_line.line_col != 0 {
                             current_line.line_col -= 1;
                         }
-                        return Ok(false);
+                        return Ok(Some(false));
                     }
                 }
                 //
@@ -725,7 +791,7 @@ async fn handle_event(
                 KeyCode::Right => {
                     if current_line.line_col == current_line.line.graphemes(true).count() {
                         // at the very end of the current typed line
-                        return Ok(false);
+                        return Ok(Some(false));
                     };
                     if (current_line.cursor_col + current_line.prompt_len as u16) < (*win_cols - 1)
                     {
@@ -737,7 +803,7 @@ async fn handle_event(
                         execute!(stdout, cursor::MoveRight(width))?;
                         current_line.cursor_col += width;
                         current_line.line_col += 1;
-                        return Ok(false);
+                        return Ok(Some(false));
                     } else {
                         // virtual scroll rightward through line
                         current_line.line_col += 1;
@@ -791,14 +857,6 @@ async fn handle_event(
                     }
                 }
             }
-        _ => {
-            // some terminal event we don't care about, yet
-        }
-    }
-    if state.search_mode {
-        state.search(&our.name)?;
-    } else {
-        state.display_current_input_line(false)?;
-    }
-    Ok(false)
+    }
+    Ok(None)
 }

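Note: the new KeyEventKind import and the early `return Ok(Some(false))` on Release in handle_key_event above matter specifically on Windows, where the console reports both key presses and key releases (Unix terminals generally report only presses); without the filter every keystroke would be handled twice. A minimal sketch of the same guard:

    use crossterm::event::{Event, KeyEventKind};

    // only act on Press (and Repeat); Windows delivers a second event when
    // the key comes back up
    fn should_handle(event: &Event) -> bool {
        match event {
            Event::Key(k) => k.kind != KeyEventKind::Release,
            _ => true,
        }
    }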
@@ -412,7 +412,11 @@ fn make_log_writer(log_dir_path: &Path) -> anyhow::Result<BufWriter<std::fs::Fil
         std::fs::create_dir(log_dir_path)?;
     }
     let now = chrono::Local::now();
+    #[cfg(unix)]
     let log_name = format!("{}.log", now.format("%Y-%m-%d-%H:%M:%S"));
+    #[cfg(target_os = "windows")]
+    let log_name = format!("{}.log", now.format("%Y-%m-%d-%H_%M_%S"));

     let log_path = log_dir_path.join(log_name);
     let log_handle = OpenOptions::new()
         .append(true)

@@ -40,9 +40,9 @@ pub async fn vfs(
     send_to_terminal: PrintSender,
     mut recv_from_loop: MessageReceiver,
    send_to_caps_oracle: CapMessageSender,
-    home_directory_path: String,
+    home_directory_path: PathBuf,
 ) -> anyhow::Result<()> {
-    let vfs_path = format!("{home_directory_path}/vfs");
+    let vfs_path = home_directory_path.join("vfs");

     fs::create_dir_all(&vfs_path)
         .await
@@ -342,8 +342,8 @@ async fn handle_request(
     }

     // current prepend to filepaths needs to be: /package_id/drive/path
-    let (package_id, drive, rest) = parse_package_and_drive(&request.path, &vfs_path).await?;
-    let drive = format!("/{package_id}/{drive}");
+    let (package_id, drive, rest) = parse_package_and_drive(&request.path, &vfs_path)?;
+    let drive = format!("{package_id}/{drive}");
     let action = request.action;
     let path = PathBuf::from(&request.path);

@@ -364,10 +364,15 @@ async fn handle_request(
     let base_drive = join_paths_safely(&vfs_path, &drive);
     let path = join_paths_safely(&base_drive, &rest);

+    #[cfg(target_os = "windows")]
+    let (path, internal_path) = (internal_path_to_external(&path), path);
+
     let (response_body, bytes) = match action {
         VfsAction::CreateDrive => {
-            let drive_path = join_paths_safely(vfs_path, &drive);
-            fs::create_dir_all(drive_path).await?;
+            #[cfg(target_os = "windows")]
+            let base_drive = internal_path_to_external(&base_drive);
+
+            fs::create_dir_all(&base_drive).await?;
             (VfsResponse::Ok, None)
         }
         VfsAction::CreateDir => {
@@ -461,8 +466,19 @@ async fn handle_request(
             let metadata = entry.metadata().await?;
             let file_type = get_file_type(&metadata);

+            #[cfg(unix)]
+            let relative_path = relative_path.display().to_string();
+            #[cfg(target_os = "windows")]
+            let relative_path = {
+                let internal_path = internal_path
+                    .strip_prefix(vfs_path)
+                    .unwrap_or(&internal_path);
+                replace_path_prefix(&internal_path, &relative_path)
+            };
+
             let dir_entry = DirEntry {
-                path: relative_path.display().to_string(),
+                path: relative_path,
                 file_type,
             };
             entries.push(dir_entry);
@@ -588,7 +604,7 @@ async fn handle_request(
             }
         };

-        fs::create_dir_all(path.clone()).await?;
+        fs::create_dir_all(&path).await?;

         // loop through items in archive; recursively add to root
         for i in 0..zip.len() {
@@ -612,7 +628,7 @@ async fn handle_request(
             if is_file {
                 fs::write(&local_path, &file_contents).await?;
             } else if is_dir {
-                fs::create_dir_all(local_path).await?;
+                fs::create_dir_all(&local_path).await?;
             } else {
                 return Err(VfsError::CreateDirError {
                     path: path.display().to_string(),
@@ -651,10 +667,10 @@ async fn handle_request(
     Ok(())
 }

-async fn parse_package_and_drive(
+fn parse_package_and_drive(
     path: &str,
     vfs_path: &PathBuf,
-) -> Result<(PackageId, String, String), VfsError> {
+) -> Result<(PackageId, String, PathBuf), VfsError> {
     let joined_path = join_paths_safely(&vfs_path, path);

     // sanitize path..
@@ -674,7 +690,10 @@ async fn parse_package_and_drive(
         .display()
         .to_string();

+    #[cfg(unix)]
     let mut parts: Vec<&str> = path.split('/').collect();
+    #[cfg(target_os = "windows")]
+    let mut parts: Vec<&str> = path.split('\\').collect();

     if parts[0].is_empty() {
         parts.remove(0);
@@ -697,11 +716,51 @@ async fn parse_package_and_drive(
     };

     let drive = parts[1].to_string();
-    let remaining_path = parts[2..].join("/");
+    let mut remaining_path = PathBuf::new();
+    for part in &parts[2..] {
+        remaining_path = remaining_path.join(part);
+    }

     Ok((package_id, drive, remaining_path))
 }

+#[cfg(target_os = "windows")]
+fn internal_path_to_external(internal: &Path) -> PathBuf {
+    let mut external = PathBuf::new();
+    for component in internal.components() {
+        match component {
+            Component::RootDir | Component::CurDir | Component::ParentDir => {}
+            Component::Prefix(_) => {
+                let component: &Path = component.as_ref();
+                external = component.to_path_buf();
+            }
+            Component::Normal(item) => {
+                external = external.join(item.to_string_lossy().into_owned().replace(":", "_"));
+            }
+        }
+    }
+
+    external
+}
+
+#[cfg(target_os = "windows")]
+fn replace_path_prefix(base_path: &Path, to_replace_path: &Path) -> String {
+    let base_path = base_path.display().to_string();
+    let base_path_parts: Vec<&str> = base_path.split('\\').collect();
+
+    let num_base_path_parts = base_path_parts.len();
+
+    let to_replace_path = to_replace_path.display().to_string();
+    let parts: Vec<&str> = to_replace_path.split('\\').collect();
+
+    let mut new_path = base_path.to_string().replace("\\", "/");
+    for part in parts.iter().skip(num_base_path_parts) {
+        new_path.push('/');
+        new_path.push_str(part);
+    }
+    new_path
+}
+
 async fn check_caps(
     our_node: &str,
     source: &Address,
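Note: parse_package_and_drive now hands back the trailing path as a PathBuf built from per-OS separators instead of a '/'-joined String. A hypothetical call, with a made-up package id and drive, sketching the contract (not a test from the commit):

    // illustration only; values are invented
    let vfs_path = std::path::PathBuf::from("/home/node/vfs");
    let (package_id, drive, rest) =
        parse_package_and_drive("/my-app:publisher.os/files/notes/todo.txt", &vfs_path)?;
    assert_eq!(drive, "files".to_string());
    assert_eq!(rest, std::path::PathBuf::from("notes").join("todo.txt"));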
@@ -777,8 +836,7 @@ async fn check_caps(
         }
         VfsAction::CopyFile { new_path } | VfsAction::Rename { new_path } => {
             // these have 2 paths to validate
-            let (new_package_id, new_drive, _rest) =
-                parse_package_and_drive(new_path, &vfs_path).await?;
+            let (new_package_id, new_drive, _rest) = parse_package_and_drive(new_path, &vfs_path)?;

             let new_drive = format!("/{new_package_id}/{new_drive}");
             // if both new and old path are within the package_id path, ok
@@ -1002,11 +1060,13 @@ fn normalize_path(path: &Path) -> PathBuf {
     ret
 }

-fn join_paths_safely(base: &PathBuf, extension: &str) -> PathBuf {
-    let extension_str = Path::new(extension)
+fn join_paths_safely<P: AsRef<Path>>(base: &PathBuf, extension: P) -> PathBuf {
+    let extension_str = extension
+        .as_ref()
         .to_str()
         .unwrap_or("")
-        .trim_start_matches('/');
+        .trim_start_matches('/')
+        .trim_start_matches('\\');

     let extension_path = Path::new(extension_str);
     base.join(extension_path)
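Note: the reason join_paths_safely trims leading separators is that Path::join replaces the base outright when handed an absolute right-hand side, which would let a request path escape the vfs root; the commit extends the trim to '\\' for Windows. A standalone rendition of the same guard (not the commit's exact code):

    use std::path::{Path, PathBuf};

    fn join_safely(base: &Path, extension: &str) -> PathBuf {
        // without the trim: PathBuf::from("/vfs").join("/etc/passwd") == "/etc/passwd"
        let trimmed = extension.trim_start_matches(|c| c == '/' || c == '\\');
        base.join(trimmed)
    }

    fn main() {
        assert_eq!(
            join_safely(Path::new("/vfs"), "/etc/passwd"),
            PathBuf::from("/vfs/etc/passwd")
        );
    }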
@@ -1,7 +1,7 @@
 [package]
 name = "lib"
 authors = ["KinodeDAO"]
-version = "0.9.6"
+version = "0.9.7"
 edition = "2021"
 description = "A general-purpose sovereign cloud computing platform"
 homepage = "https://kinode.org"
@@ -11,8 +11,10 @@ license = "Apache-2.0"
 [lib]

 [build-dependencies]
-kit = { git = "https://github.com/kinode-dao/kit", tag = "v0.7.6" }
-tokio = "1.28"
+anyhow = "1.0.71"
+reqwest = "0.12.4"
+sha2 = "0.10.8"
+tokio = { version = "1.28", features = ["rt-multi-thread"] }

 [dependencies]
 alloy = { git = "https://github.com/kinode-dao/alloy.git", rev = "e672f3e", features = [
lib/build.rs (55 changes)
@@ -1,9 +1,58 @@
+use std::fs;
 use std::path::Path;

+use sha2::{Digest, Sha256};
+
+const KIT_CACHE: &str = "/tmp/kinode-kit-cache";
 const KINODE_WIT_0_7_0_URL: &str =
     "https://raw.githubusercontent.com/kinode-dao/kinode-wit/aa2c8b11c9171b949d1991c32f58591c0e881f85/kinode.wit";

 const KINODE_WIT_0_8_0_URL: &str =
     "https://raw.githubusercontent.com/kinode-dao/kinode-wit/v0.8/kinode.wit";

+/// copied from `kit`
+async fn download_file(url: &str, path: &Path) -> anyhow::Result<()> {
+    fs::create_dir_all(&KIT_CACHE)?;
+    let mut hasher = Sha256::new();
+    hasher.update(url.as_bytes());
+    let hashed_url = hasher.finalize();
+    let hashed_url_path = Path::new(KIT_CACHE).join(format!("{hashed_url:x}"));
+
+    let content = if hashed_url_path.exists() {
+        fs::read(hashed_url_path)?
+    } else {
+        let response = reqwest::get(url).await?;
+
+        // Check if response status is 200 (OK)
+        if response.status() != reqwest::StatusCode::OK {
+            return Err(anyhow::anyhow!(
+                "Failed to download file: HTTP Status {}",
+                response.status()
+            ));
+        }
+
+        let content = response.bytes().await?.to_vec();
+        fs::write(hashed_url_path, &content)?;
+        content
+    };
+
+    if path.exists() {
+        if path.is_dir() {
+            fs::remove_dir_all(path)?;
+        } else {
+            let existing_content = fs::read(path)?;
+            if content == existing_content {
+                return Ok(());
+            }
+        }
+    }
+    fs::create_dir_all(
+        path.parent()
+            .ok_or_else(|| anyhow::anyhow!("path doesn't have parent"))?,
+    )?;
+    fs::write(path, &content)?;
+    Ok(())
+}
+
 fn main() {
     if std::env::var("SKIP_BUILD_SCRIPT").is_ok() {
         println!("Skipping build script");
@@ -16,7 +65,7 @@ fn main() {

     let rt = tokio::runtime::Runtime::new().unwrap();
     rt.block_on(async {
-        kit::build::download_file(KINODE_WIT_0_7_0_URL, &wit_file)
+        download_file(KINODE_WIT_0_7_0_URL, &wit_file)
             .await
             .expect("Failed to download WIT 0.7");
     });
@@ -25,7 +74,7 @@ fn main() {

     let rt = tokio::runtime::Runtime::new().unwrap();
     rt.block_on(async {
-        kit::build::download_file(KINODE_WIT_0_8_0_URL, &wit_file)
+        download_file(KINODE_WIT_0_8_0_URL, &wit_file)
             .await
             .expect("Failed to download WIT 0.8");
     })

@@ -1104,10 +1104,12 @@ impl Identity {
         match &self.routing {
             NodeRouting::Routers(_) => None,
             NodeRouting::Direct { ip, ports } | NodeRouting::Both { ip, ports, .. } => {
-                if let Some(port) = ports.get("ws")
-                    && *port != 0
-                {
-                    Some((ip, port))
+                if let Some(port) = ports.get("ws") {
+                    if *port != 0 {
+                        Some((ip, port))
+                    } else {
+                        None
+                    }
                 } else {
                     None
                 }
@@ -1118,10 +1120,12 @@ impl Identity {
         match &self.routing {
             NodeRouting::Routers(_) => None,
             NodeRouting::Direct { ip, ports } | NodeRouting::Both { ip, ports, .. } => {
-                if let Some(port) = ports.get("tcp")
-                    && *port != 0
-                {
-                    Some((ip, port))
+                if let Some(port) = ports.get("tcp") {
+                    if *port != 0 {
+                        Some((ip, port))
+                    } else {
+                        None
+                    }
                 } else {
                     None
                 }
@@ -1,4 +1,4 @@
-#![feature(let_chains)]
+//#![feature(let_chains)]

 pub mod core;
 pub mod eth;

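Note: `let_chains` was a nightly-only feature; with the release script below dropping `+nightly`, the crate must build on stable, so the attribute is commented out and each `if let ... && cond` is expanded into nested ifs, exactly as the two Identity hunks show. A minimal standalone illustration of the rewrite (the `ws_port` helper is made up):

    use std::collections::HashMap;

    fn ws_port(ports: &HashMap<String, u16>) -> Option<u16> {
        // nightly let-chains form was roughly:
        //     if let Some(port) = ports.get("ws") && *port != 0 { ... }
        if let Some(port) = ports.get("ws") {
            if *port != 0 {
                return Some(*port);
            }
        }
        None
    }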
@@ -27,14 +27,32 @@ def build_and_move(feature, tmp_dir, architecture, os_name):

     zip_prefix = f"kinode-{architecture}-{os_name}"
     release_env = os.environ.copy()
-    release_env["CARGO_PROFILE_RELEASE_LTO"] = f"fat"
-    release_env["CARGO_PROFILE_RELEASE_CODEGEN_UNITS"] = f"1"
-    release_env["CARGO_PROFILE_RELEASE_STRIP"] = f"symbols"
+    release_env["CARGO_PROFILE_RELEASE_LTO"] = "fat"
+    release_env["CARGO_PROFILE_RELEASE_CODEGEN_UNITS"] = "1"
+    release_env["CARGO_PROFILE_RELEASE_STRIP"] = "symbols"
     if feature:
-        subprocess.run(["cargo", "+nightly", "build", "--release", "-p", "kinode", "--features", feature], check=True, env=release_env)
+        release_env["PATH_TO_PACKAGES_ZIP"] = f"../target/packages-{feature}.zip"
+        subprocess.run(
+            ["cargo", "run", "-p", "build_packages", "--", "--features", feature],
+            check=True,
+            #stdout=subprocess.PIPE,
+            #stderr=subprocess.PIPE,
+        )
+        subprocess.run(
+            ["cargo", "build", "--release", "-p", "kinode", "--features", feature],
+            check=True,
+            env=release_env,
+            #stdout=subprocess.PIPE,
+            #stderr=subprocess.PIPE,
+        )
         zip_name = f"{zip_prefix}-{feature}.zip"
     else:
-        subprocess.run(["cargo", "+nightly", "build", "--release", "-p", "kinode"], check=True, env=release_env)
+        subprocess.run(["cargo", "run", "-p", "build_packages"], check=True)
+        subprocess.run(
+            ["cargo", "build", "--release", "-p", "kinode"],
+            check=True,
+            env=release_env,
+        )
         zip_name = f"{zip_prefix}.zip"

     # Move and rename the binary
@@ -74,4 +92,3 @@ def main():

 if __name__ == "__main__":
     main()
-
scripts/build-windows-artifact.py (new executable file, 97 lines)
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import subprocess
+import sys
+import zipfile
+
+def is_excluded(path, excludes, include_files):
+    path = os.path.abspath(path)
+    # If the path is in include_files, do not exclude it
+    if path in include_files:
+        return False
+    for exclude in excludes:
+        if os.path.commonpath([path, exclude]) == exclude:
+            return True
+    return False
+
+def parse_args(repo_root):
+    parser = argparse.ArgumentParser(description='Build Windows artifact.')
+    parser.add_argument(
+        '--exclude',
+        action='append',
+        default=[],
+        help='Exclude directories (relative to repo root). Can be used multiple times.'
+    )
+    parser.add_argument(
+        '--output',
+        default=os.path.join(repo_root, 'target', 'windows-artifact.zip'),
+        help='Output zip file path.'
+    )
+    args = parser.parse_args()
+    return args
+
+def main():
+    # Get the directory where the script is located
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    # Assume the repo root is one level up from the script directory
+    repo_root = os.path.abspath(os.path.join(script_dir, '..'))
+
+    args = parse_args(repo_root)
+
+    default_excludes = [
+        '.git',
+        'kinode/packages',
+        'target',
+        'kinode/src/register-ui/node_modules',
+    ]
+    excludes = default_excludes + args.exclude
+
+    # Convert exclude paths to absolute paths
+    excludes = [os.path.abspath(os.path.join(repo_root, p)) for p in excludes]
+
+    # Include 'target/packages.zip' even though 'target' is excluded
+    include_files = [
+        os.path.abspath(os.path.join(repo_root, 'target', 'packages.zip'))
+    ]
+
+    # Run the build scripts
+    build_script_dir = os.path.join(repo_root, 'kinode', 'src', 'register-ui')
+    build_script_name = 'build.sh'
+    build_script = os.path.join(build_script_dir, build_script_name)
+    if not os.path.exists(build_script):
+        print(f'Build script not found at {build_script}')
+        sys.exit(1)
+
+    # Execute the build script
+    subprocess.check_call([f'./{build_script_name}'], cwd=build_script_dir)
+
+    # Run cargo build
+    subprocess.check_call(['cargo', 'build', '-p', 'build_packages'], cwd=repo_root)
+
+    # Create the zip file
+    output_zip = args.output
+    output_zip_abs = os.path.abspath(output_zip)
+    output_dir = os.path.dirname(output_zip_abs)
+    if output_dir and not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # Exclude the output zip file itself
+    excludes.append(output_zip_abs)
+
+    with zipfile.ZipFile(output_zip_abs, 'w', zipfile.ZIP_DEFLATED) as zipf:
+        for root, dirs, files in os.walk(repo_root):
+            for file in files:
+                file_path = os.path.join(root, file)
+                if is_excluded(file_path, excludes, include_files):
+                    continue
+                rel_path = os.path.relpath(file_path, repo_root)
+                if ':' in str(rel_path):
+                    # Replace ':' in filenames to make them valid on Windows
+                    rel_path = rel_path.replace(':', '_')
+                    print(f'Unexpected `:` in filename: {rel_path}; replacing with `_` in zip file')
+                zipf.write(file_path, rel_path)
+
+if __name__ == '__main__':
+    main()
scripts/build_packages/Cargo.toml (new file, 14 lines)
@@ -0,0 +1,14 @@
+[package]
+name = "build_packages"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+anyhow = "1.0.71"
+clap = "4"
+fs-err = "2.11"
+kit = { git = "https://github.com/kinode-dao/kit", rev = "9c94b4b" }
+serde_json = "1"
+tokio = "1.28"
+walkdir = "2.4"
+zip = "0.6"
scripts/build_packages/src/main.rs (new file, 185 lines)
@@ -0,0 +1,185 @@
+use std::{
+    io::{Cursor, Read, Write},
+    path::{Path, PathBuf},
+};
+
+use clap::{Arg, Command};
+use fs_err as fs;
+use zip::write::FileOptions;
+
+fn zip_directory(dir_path: &Path) -> anyhow::Result<Vec<u8>> {
+    let mut writer = Cursor::new(Vec::new());
+    let options = FileOptions::default()
+        .compression_method(zip::CompressionMethod::Deflated)
+        .unix_permissions(0o755)
+        .last_modified_time(zip::DateTime::from_date_and_time(2023, 6, 19, 0, 0, 0).unwrap());
+    {
+        let mut zip = zip::ZipWriter::new(&mut writer);
+
+        for sub_entry in walkdir::WalkDir::new(dir_path) {
+            let sub_entry = sub_entry?;
+            let path = sub_entry.path();
+            let name = path.strip_prefix(dir_path)?;
+
+            if path.is_file() {
+                zip.start_file(name.to_string_lossy(), options)?;
+                let mut file = fs::File::open(path)?;
+                let mut buffer = Vec::new();
+                file.read_to_end(&mut buffer)?;
+                zip.write_all(&buffer)?;
+            } else if !name.as_os_str().is_empty() {
+                zip.add_directory(name.to_string_lossy(), options)?;
+            }
+        }
+        zip.finish()?;
+    }
+
+    let zip_contents = writer.into_inner();
+    Ok(zip_contents)
+}
+
+fn build_and_zip_package(
+    entry_path: PathBuf,
+    parent_pkg_path: &str,
+    skip_frontend: bool,
+    features: &str,
+) -> anyhow::Result<(PathBuf, String, Vec<u8>)> {
+    let rt = tokio::runtime::Runtime::new().unwrap();
+    rt.block_on(async {
+        kit::build::execute(
+            &entry_path,
+            skip_frontend,
+            false,
+            true,
+            features,
+            None,
+            None,
+            None,
+            vec![],
+            vec![],
+            false,
+            false,
+            false,
+            false,
+        )
+        .await
+        .map_err(|e| anyhow::anyhow!("{:?}", e))?;
+
+        let zip_contents = zip_directory(&Path::new(parent_pkg_path))?;
+        let zip_filename = format!("{}.zip", entry_path.file_name().unwrap().to_str().unwrap());
+        Ok((entry_path, zip_filename, zip_contents))
+    })
+}
+
+fn main() -> anyhow::Result<()> {
+    let matches = Command::new("build_packages")
+        .about("Build the core Kinode packages.")
+        .arg(
+            Arg::new("FEATURES")
+                .long("features")
+                .help("List of features to compile packages with")
+                .action(clap::ArgAction::Append),
+        )
+        .arg(
+            Arg::new("SKIP_FRONTEND")
+                .long("skip-build-frontend")
+                .help("Skip building the frontend")
+                .action(clap::ArgAction::SetTrue),
+        )
+        .arg(
+            Arg::new("OUTPUT_FILENAME")
+                .long("output-filename")
+                .help("Set output filename (default: packages-{features}.zip)")
+                .action(clap::ArgAction::Set),
+        )
+        .get_matches();
+
+    // kinode/target/debug/build_package
+    let current_exe_dir = std::env::current_exe() // build_package
+        .unwrap();
+    let top_level_dir = current_exe_dir
+        .parent() // debug/
+        .unwrap()
+        .parent() // target/
+        .unwrap()
+        .parent() // kinode/
+        .unwrap();
+    let kinode_dir = top_level_dir.join("kinode");
+    let packages_dir = kinode_dir.join("packages");
+
+    let mut features = matches
+        .get_many::<String>("FEATURES")
+        .unwrap_or_default()
+        .map(|s| s.to_owned())
+        .collect::<Vec<String>>();
+    features.sort();
+    let features = features.join(",");
+
+    let skip_frontend = matches.get_flag("SKIP_FRONTEND");
+
+    let results: Vec<anyhow::Result<(PathBuf, String, Vec<u8>)>> = fs::read_dir(&packages_dir)?
+        .filter_map(|entry| {
+            let entry_path = match entry {
+                Ok(e) => e.path(),
+                Err(_) => return None,
+            };
+            let child_pkg_path = entry_path.join("pkg");
+            if !child_pkg_path.exists() {
+                // don't run on, e.g., `.DS_Store`
+                return None;
+            }
+            Some(build_and_zip_package(
+                entry_path.clone(),
+                child_pkg_path.to_str().unwrap(),
+                skip_frontend,
+                &features,
+            ))
+        })
+        .collect();
+
+    let mut file_to_metadata = std::collections::HashMap::new();
+
+    let target_dir = top_level_dir.join("target");
+    let target_packages_dir = target_dir.join("packages");
+    // fresh
+    if target_packages_dir.exists() {
+        fs::remove_dir_all(&target_packages_dir)?;
+    }
+    fs::create_dir_all(&target_packages_dir)?;
+
+    for result in results {
+        match result {
+            Ok((entry_path, zip_filename, zip_contents)) => {
+                let metadata_path = entry_path.join("metadata.json");
+                let metadata_contents = fs::read_to_string(&metadata_path)?;
+                let metadata_contents: serde_json::Value =
+                    serde_json::from_str(&metadata_contents)?;
+                file_to_metadata.insert(zip_filename.clone(), metadata_contents);
+                let zip_path = target_packages_dir.join(&zip_filename);
+                fs::write(&zip_path, &zip_contents)?;
+            }
+            Err(e) => return Err(anyhow::anyhow!("{e:?}")),
+        }
+    }
+
+    let file_to_metadata = serde_json::to_value(&file_to_metadata)?;
+    let file_to_metadata = serde_json::to_string_pretty(&file_to_metadata)?;
+    let file_to_metadata_path = target_packages_dir.join("file_to_metadata.json");
+    fs::write(&file_to_metadata_path, file_to_metadata)?;
+
+    let package_zip_file_name = match matches.get_one::<String>("OUTPUT_FILENAME") {
+        Some(filename) => filename.to_string(),
+        None => {
+            if features.is_empty() {
+                "packages.zip".to_string()
+            } else {
+                format!("packages-{features}.zip")
+            }
+        }
+    };
+    let package_zip_path = target_dir.join(package_zip_file_name);
+    let package_zip_contents = zip_directory(&target_packages_dir)?;
+    fs::write(package_zip_path, package_zip_contents)?;
+
+    Ok(())
+}
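Note: build_packages ends up writing target/packages/<package>.zip plus file_to_metadata.json, then zips that directory into target/packages.zip (or packages-{features}.zip), which is what the runtime's get_zipped_packages() consumes. A hedged sketch, separate from the commit, for inspecting the result with the same zip crate:

    use std::io::Cursor;

    // sanity-check the archive layout build_packages produced; the path and
    // expectations here are illustrative, not part of the commit
    fn main() -> anyhow::Result<()> {
        let bytes = std::fs::read("target/packages.zip")?;
        let mut archive = zip::ZipArchive::new(Cursor::new(bytes))?;
        let names: Vec<String> = (0..archive.len())
            .map(|i| archive.by_index(i).map(|f| f.name().to_string()))
            .collect::<Result<_, _>>()?;
        assert!(names.iter().any(|n| n == "file_to_metadata.json"));
        for name in names {
            println!("{name}"); // e.g. one <package>.zip per core package
        }
        Ok(())
    }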