Merge branch 'develop' into bp/kinomap

This commit is contained in:
bitful-pannul 2024-07-05 22:30:30 +03:00
commit 93ffc18123
63 changed files with 3749 additions and 3802 deletions

23
.github/workflows/build_release.yml vendored Normal file
View File

@ -0,0 +1,23 @@
name: rust tagged release in main CI
on:
  push:
    # match semver tags like v0.8.2 — `+` after each class allows
    # multi-digit components (e.g. v10.12.3)
    tags: ['v[0-9]+.[0-9]+.[0-9]+']

jobs:
  deploy:
    runs-on: ubuntu-latest
    # BUG FIX: on a tag push, github.ref is `refs/tags/<tag>`, never
    # `refs/heads/main`, so the previous guard
    # (`if: github.ref == 'refs/heads/main'`) could never be true and
    # this deploy job silently never ran. Guard on the tag ref instead.
    if: startsWith(github.ref, 'refs/tags/v')
    timeout-minutes: 60
    steps:
      - name: build and deploy kinode
        uses: appleboy/ssh-action@master
        with:
          host: ${{ secrets.SSH_PROD_API_HOST }}
          username: ${{ secrets.SSH_PROD_USER }}
          key: ${{ secrets.SSH_PROD_API_ED25519KEY }}
          port: ${{ secrets.SSH_PROD_PORT }}
          command_timeout: 60m
          # trigger the remote build monitor; the prod host does the build
          script: |
            curl -X PUT http://localhost:8000/monitor/build-kinode

View File

@ -13,11 +13,10 @@ jobs:
- name: build and deploy kinode
uses: appleboy/ssh-action@master
with:
host: ${{ secrets.SSH_HOST }}
host: ${{ secrets.SSH_API_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_ED25519KEY }}
key: ${{ secrets.SSH_API_ED25519KEY }}
port: ${{ secrets.SSH_PORT }}
command_timeout: 60m
script: |
cd ~
./build-kinode.sh
curl -X PUT http://localhost:8000/monitor/build-kinode

1372
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
[package]
name = "kinode_lib"
authors = ["KinodeDAO"]
version = "0.8.0"
version = "0.8.2"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"

View File

@ -120,22 +120,39 @@ The `sys` publisher is not a real node ID, but it's also not a special case valu
- UpArrow/DownArrow or CTRL+P/CTRL+N to move up and down through command history
- CTRL+R to search history, CTRL+R again to toggle through search results, CTRL+G to cancel search
### Built-in terminal scripts
The terminal package contains a number of built-in scripts.
Users may also call scripts from other packages in the terminal by entering the (full) ID of the script process followed by any arguments.
In order to call a script with shorthand, a user may apply an *alias* using the terminal `alias` script, like so:
```
alias <shorthand> <full_name>
```
Subsequent use of the shorthand will then be interpolated as the process ID.
A list of the other terminal scripts included in this distro:
- `cat <vfs-file-path>`: print the contents of a file in the terminal.
- Example: `cat /terminal:sys/pkg/scripts.json`
- `echo <text>`: print text to the terminal.
- Example: `echo foo`
- `hi <name> <string>`: send a text message to another node's command line.
- Example: `hi ben.os hello world`
- `kfetch`: print system information a la neofetch. No arguments.
- `kill <process-id>`: terminate a running process. This will bypass any restart behavior — use judiciously.
- Example: `kill chess:chess:sys`
- `m <address> '<json>'`: send an inter-process message. <address> is formatted as <node>@<process_id>. <process_id> is formatted as <process_name>:<package_name>:<publisher_node>. JSON containing spaces must be wrapped in single-quotes (`''`).
- Example: `m our@eth:distro:sys "SetPublic" -a 5`
- the '-a' flag is used to expect a response with a given timeout
- `our` will always be interpolated by the system as your node's name
- `hi <name> <string>`: send a text message to another node's command line.
- Example: `hi ben.os hello world`
- `namehash_to_name <namehash>`: print the name of a node given its namehash, if we have it indexed. Namehashes are used in the onchain PKI data structure.
- Example: `namehash_to_name 0x46dc6209a66b3a0ef4b72f5d26c0e81c77c7ac146a62e96babf1224484b46fa9`
- `net_diagnostics`: print some useful networking diagnostic data.
- `peer <name>`: print the peer's PKI info, if it exists.
- `peers`: print the peers the node currently holds connections with.
- `top <process_id>`: display kernel debugging info about a process. Leave the process ID blank to display info about all processes and get the total number of running processes.
- Example: `top net:distro:sys`
- Example: `top`
- `cat <vfs-file-path>`: print the contents of a file in the terminal
- Example: `cat /terminal:sys/pkg/scripts.json`
- `echo <text>`: print `text` to the terminal
- Example: `echo foo`
- `net_diagnostics`: print some useful networking diagnostic data
- `peers`: print the peers the node currently holds connections with
- `peer <name>`: print the peer's PKI info, if it exists
## Running as a Docker container

View File

@ -1,7 +1,7 @@
[package]
name = "kinode"
authors = ["KinodeDAO"]
version = "0.8.0"
version = "0.8.2"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"
@ -14,9 +14,8 @@ path = "src/main.rs"
[build-dependencies]
anyhow = "1.0.71"
kit = { git = "https://github.com/kinode-dao/kit", rev = "d319c5b" }
kit = { git = "https://github.com/kinode-dao/kit", tag = "v0.6.2" }
rayon = "1.8.1"
sha2 = "0.10"
tokio = "1.28"
walkdir = "2.4"
zip = "0.6"
@ -26,7 +25,7 @@ simulation-mode = []
[dependencies]
aes-gcm = "0.10.3"
alloy = { git = "https://github.com/alloy-rs/alloy", rev = "05f8162", features = [
alloy = { version = "0.1.3", features = [
"consensus",
"contract",
"json-rpc",
@ -34,46 +33,35 @@ alloy = { git = "https://github.com/alloy-rs/alloy", rev = "05f8162", features =
"provider-ws",
"providers",
"pubsub",
"rpc-client-ws",
"rpc",
"rpc-client",
"rpc-types-eth",
"rpc-client-ws",
"rpc-types",
"signer-wallet",
"rpc-types-eth",
"signers",
"signer-local",
] }
alloy-primitives = "0.7.5"
alloy-sol-macro = "0.7.5"
alloy-sol-types = "0.7.5"
alloy-primitives = "0.7.6"
alloy-sol-macro = "0.7.6"
alloy-sol-types = "0.7.6"
anyhow = "1.0.71"
async-trait = "0.1.71"
base64 = "0.22.0"
bincode = "1.3.3"
blake3 = "1.4.1"
bytes = "1.4.0"
chacha20poly1305 = "0.10.1"
chrono = "0.4.31"
clap = { version = "4.4", features = ["derive"] }
crossterm = { version = "0.27.0", features = ["event-stream", "bracketed-paste"] }
curve25519-dalek = "^4.1.2"
dashmap = "5.5.3"
digest = "0.10"
elliptic-curve = { version = "0.13.8", features = ["ecdh"] }
flate2 = "1.0"
futures = "0.3"
generic-array = "1.0.0"
getrandom = "0.2.10"
generic-array = "0.14.7"
hex = "0.4.3"
hkdf = "0.12.3"
hmac = "0.12"
http = "1.1.0"
jwt = "0.16"
lib = { path = "../lib" }
lazy_static = "1.4.0"
log = "0.4.20"
nohash-hasher = "0.2.0"
num-traits = "0.2"
open = "5.0.0"
open = "5.1.4"
public-ip = "0.2.2"
rand = "0.8.4"
reqwest = "0.12.4"
@ -84,8 +72,7 @@ route-recognizer = "0.3.1"
rusqlite = { version = "0.31.0", features = ["bundled"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
sha2 = "0.10"
sha2 = "0.10.8"
sha3 = "0.10.8"
# snow = { version = "0.9.5", features = ["ring-resolver"] }
# unfortunately need to use forked version for async use and in-place encryption
@ -96,7 +83,6 @@ thiserror = "1.0"
tokio = { version = "1.28", features = ["fs", "macros", "rt-multi-thread", "signal", "sync"] }
tokio-tungstenite = { version = "0.21.0", features = ["native-tls"] }
url = "2.4.1"
uuid = { version = "1.1.2", features = ["serde", "v4"] }
warp = "0.3.5"
wasi-common = "19.0.1"
wasmtime = "19.0.1"

View File

@ -1,4 +1,4 @@
use rayon::prelude::*;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use std::{
collections::HashSet,
fs::{self, File},
@ -59,7 +59,7 @@ fn build_and_zip_package(
) -> anyhow::Result<(String, String, Vec<u8>)> {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
kit::build::execute(&entry_path, true, false, true, features, None, None) // TODO
kit::build::execute(&entry_path, true, false, true, features, None, None, true)
.await
.map_err(|e| anyhow::anyhow!("{:?}", e))?;

View File

@ -60,20 +60,79 @@ pub fn init_frontend(our: &Address) {
fn make_widget() -> String {
return r#"<html>
<head>
<script src="https://cdn.tailwindcss.com"></script>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
a {
text-decoration: none;
color: inherit;
}
body {
color: white;
overflow: hidden;
}
#latest-apps {
display: flex;
flex-wrap: wrap;
padding: 0.5rem;
gap: 0.5rem;
align-items: center;
backdrop-filter: saturate(1.25);
border-radius: 1rem;
box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
height: 100vh;
width: 100vw;
overflow-y: auto;
scrollbar-color: transparent transparent;
scrollbar-width: none;
}
.app {
padding: 0.5rem;
display: flex;
flex-grow: 1;
align-items: stretch;
border-radius: 0.5rem;
box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
background-color: rgba(255, 255, 255, 0.1);
cursor: pointer;
font-family: sans-serif;
width: 100%;
}
.app:hover {
background-color: rgba(255, 255, 255, 0.2);
}
.app-image {
background-size: cover;
border-radius: 0.75rem;
margin-right: 0.5rem;
flex-grow: 1;
background-size: contain;
background-repeat: no-repeat;
background-position: center;
height: 92px;
width: 92px;
max-width: 33%;
}
.app-info {
max-width: 67%
display: flex;
flex-direction: column;
flex-grow: 1;
max-width: 67%;
}
.app-info h2 {
font-weight: bold;
font-size: medium;
}
@media screen and (min-width: 500px) {
@ -84,15 +143,7 @@ fn make_widget() -> String {
</style>
</head>
<body class="text-white overflow-hidden">
<div
id="latest-apps"
class="flex flex-wrap p-2 gap-2 items-center backdrop-brightness-125 rounded-xl shadow-lg h-screen w-screen overflow-y-auto"
style="
scrollbar-color: transparent transparent;
scrollbar-width: none;
"
>
</div>
<div id="latest-apps"></div>
<script>
document.addEventListener('DOMContentLoaded', function() {
fetch('/main:app_store:sys/apps/listed', { credentials: 'include' })
@ -102,22 +153,19 @@ fn make_widget() -> String {
data.forEach(app => {
if (app.metadata) {
const a = document.createElement('a');
a.className = 'app p-2 grow flex items-stretch rounded-lg shadow bg-white/10 hover:bg-white/20 font-sans cursor-pointer';
a.className = 'app';
a.href = `/main:app_store:sys/app-details/${app.package}:${app.publisher}`
a.target = '_blank';
a.rel = 'noopener noreferrer';
const iconLetter = app.metadata_hash.replace('0x', '')[0].toUpperCase();
a.innerHTML = `<div
class="app-image rounded mr-2 grow"
class="app-image"
style="
background-image: url('${app.metadata.image || `/icons/${iconLetter}`}');
height: 92px;
width: 92px;
max-width: 33%;
"
></div>
<div class="app-info flex flex-col grow">
<h2 class="font-bold">${app.metadata.name}</h2>
<div class="app-info">
<h2>${app.metadata.name}</h2>
<p>${app.metadata.description}</p>
</div>`;
container.appendChild(a);

File diff suppressed because one or more lines are too long

View File

@ -37,12 +37,13 @@ export default function AppEntry({ app, size = "medium", overrideImageSize, show
}}
>
<AppHeader app={app} size={size} overrideImageSize={overrideImageSize} />
<div className={classNames("flex items-center", {
<div className={classNames("flex", {
'items-center': size !== 'large',
'items-start': size === 'large',
'absolute': size !== 'large',
'top-2 right-2': size !== 'large' && showMoreActions,
'top-0 right-0': size !== 'large' && !showMoreActions,
'ml-auto': size === 'large' && isMobile,
'min-w-1/5': size === 'large'
})}>
<ActionButton
app={app}
@ -54,7 +55,13 @@ export default function AppEntry({ app, size = "medium", overrideImageSize, show
'w-full': size === 'large'
})}
/>
{showMoreActions && <MoreActions app={app} className="self-stretch" />}
{showMoreActions && <MoreActions
app={app}
className={classNames("self-stretch", {
'self-start': size === 'large',
})}
/>}
</div>
</div>
);

View File

@ -13,8 +13,8 @@ export default function Dropdown({ ...props }: DropdownProps) {
unmountOnClose={true}
className={classNames("relative", props.className)}
direction='left'
menuButton={<MenuButton className="small">
<FaEllipsisH className='-mb-1' />
menuButton={<MenuButton>
<FaEllipsisH className='mb-[3px]' />
</MenuButton>}
>
{props.children}

View File

@ -16,7 +16,7 @@ export default function InstallButton({ app, isIcon = false, ...props }: Install
const { installApp, getCaps, getMyApp, getMyApps } =
useAppsStore();
const [showModal, setShowModal] = useState(false);
const [caps, setCaps] = useState<string[]>([]);
const [caps, setCaps] = useState<any[]>([]);
const [installing, setInstalling] = useState("");
const onClick = useCallback(async (e: React.MouseEvent<HTMLButtonElement>) => {
@ -83,7 +83,7 @@ export default function InstallButton({ app, isIcon = false, ...props }: Install
</h5>
<ul className="flex flex-col items-start">
{caps.map((cap) => (
<li key={cap}>{cap}</li>
<li>{JSON.stringify(cap)}</li>
))}
</ul>
<button type="button" onClick={install}>

View File

@ -1,6 +1,7 @@
import classNames from 'classnames'
import React, { MouseEvent } from 'react'
import { FaX } from 'react-icons/fa6'
import { isMobileCheck } from '../utils/dimensions'
export interface ModalProps extends React.HTMLAttributes<HTMLDivElement> {
show: boolean
@ -25,10 +26,17 @@ const Modal: React.FC<ModalProps> = ({
return null
}
const isMobile = isMobileCheck()
return (
<div
className={classNames(`bg-black/25 backdrop-blur-lg fixed top-0 bottom-0 left-0 right-0 flex flex-col c z-30 min-h-[10em] min-w-[30em]`,
{ show }
className={classNames(`bg-black/25 backdrop-blur-lg fixed top-0 bottom-0 left-0 right-0 flex flex-col c z-30 min-h-[10em] isMobile-${isMobile}`,
{
isMobile,
show,
'min-w-[30em]': !isMobile,
'min-w-[75vw]': isMobile,
}
)}
onClick={hide}
>

View File

@ -17,13 +17,15 @@ export default function MoreActions({ app, className }: MoreActionsProps) {
const navigate = useNavigate();
const downloaded = Boolean(app.state);
const menuClass = "flex flex-col bg-black p-2 rounded-lg relative z-10 border border-orange -mr-[1px]"
if (!downloaded) {
if (!app.metadata) return <></>;
return (
<Dropdown className={className}>
<div className="flex flex-col backdrop-blur-lg bg-black/10 p-2 rounded-lg relative z-10">
<div className={menuClass}>
{app.metadata?.description && (
<button
className="my-1 whitespace-nowrap clear"
@ -48,7 +50,7 @@ export default function MoreActions({ app, className }: MoreActionsProps) {
return (
<Dropdown className={className}>
<div className="flex flex-col p-2 rounded-lg backdrop-blur-lg relative z-10">
<div className={menuClass}>
<button
className="my-1 whitespace-nowrap clear"
onClick={() => navigate(`/${APP_DETAILS_PATH}/${appId(app)}`)}

View File

@ -99,8 +99,8 @@ export default function SearchHeader({
})}
onClick={() => (isMyAppsPage ? navigate(-1) : navigate(MY_APPS_PATH))}
>
{!isMobile && <span>My Apps</span>}
<FaDownload />
<span>My Apps</span>
{!isMobile && <FaDownload />}
</button>
</div>
);

View File

@ -5,7 +5,7 @@ import Modal from "./Modal";
import { getAppName } from "../utils/app";
import Loader from "./Loader";
import classNames from "classnames";
import { FaU } from "react-icons/fa6";
import { FaExclamation } from "react-icons/fa6";
interface UpdateButtonProps extends React.HTMLAttributes<HTMLButtonElement> {
app: AppInfo;
@ -61,7 +61,7 @@ export default function UpdateButton({ app, isIcon = false, ...props }: UpdateBu
})}
onClick={onClick}
>
{isIcon ? <FaU /> : 'Update'}
{isIcon ? <FaExclamation /> : 'Update'}
</button>
<Modal show={showModal} hide={() => setShowModal(false)}>
{loading ? (

View File

@ -62,11 +62,11 @@ export default function MyAppsPage() { // eslint-disable-line
'gap-4 max-w-screen': isMobile,
'gap-8 max-w-[900px]': !isMobile,
})}>
<HomeButton />
{!isMobile && <HomeButton />}
<SearchHeader value={searchQuery} onChange={searchMyApps} />
<div className="flex justify-between items-center mt-2">
<h3>My Packages</h3>
<button onClick={() => navigate(PUBLISH_PATH)}>
<button className="alt" onClick={() => navigate(PUBLISH_PATH)}>
<FaUpload className="mr-2" />
Publish Package
</button>

View File

@ -10,6 +10,8 @@ import classNames from 'classnames';
import { FaArrowRotateRight } from "react-icons/fa6";
import { isMobileCheck } from "../utils/dimensions";
import HomeButton from "../components/HomeButton";
import Modal from "../components/Modal";
import Loader from "../components/Loader";
export default function StorePage() {
@ -22,6 +24,7 @@ export default function StorePage() {
const [page, setPage] = useState(1);
const [tags, setTags] = useState<string[]>([])
const [launchPaths, setLaunchPaths] = useState<{ [package_name: string]: string }>({})
const [isRebuildingIndex, setIsRebuildingIndex] = useState(false);
const pages = useMemo(
() =>
@ -95,12 +98,18 @@ export default function StorePage() {
);
const tryRebuildIndex = useCallback(async () => {
if (!window.confirm('Are you sure you want to rebuild the app index? This may take a few seconds.')) {
return;
}
setIsRebuildingIndex(true);
try {
await rebuildIndex();
alert("Index rebuilt successfully.");
await getListedApps();
} catch (error) {
console.error(error);
} finally {
setIsRebuildingIndex(false);
}
}, [rebuildIndex]);
@ -125,6 +134,7 @@ export default function StorePage() {
return (
<div className={classNames("flex flex-col w-full max-h-screen p-2", {
isMobile,
'gap-4 max-w-screen': isMobile,
'gap-6 max-w-[900px]': !isMobile
})}>
@ -172,41 +182,44 @@ export default function StorePage() {
</div>
{!searchQuery && <div className={classNames("flex flex-col", {
'gap-4': !isMobile,
'grow overflow-y-auto gap-2 items-center px-2': isMobile
'gap-2 items-center': isMobile
})}>
<h2>Featured Apps</h2>
<div className={classNames("flex gap-2", {
'flex-col': isMobile
'flex-wrap': isMobile
})}>
{listedApps.filter(app => {
return featuredPackageNames.indexOf(app.package) !== -1
}).map((app) => (
<AppEntry
key={appId(app) + (app.state?.our_version || "")}
size={'medium'}
size={isMobile ? 'small' : 'medium'}
app={app}
launchPath={launchPaths[app.package]}
className={classNames("grow", {
'w-1/4': !isMobile,
'w-full': isMobile
'w-1/3': isMobile
})}
/>
))}
</div>
</div>}
<h2>{searchQuery ? 'Search Results' : 'All Apps'}</h2>
<div className={classNames("flex flex-col grow overflow-y-auto", {
<h2 className={classNames({
'text-center': isMobile
})}>{searchQuery ? 'Search Results' : 'All Apps'}</h2>
<div className={classNames("flex flex-col grow", {
'gap-2': isMobile,
'gap-4': !isMobile,
'gap-4 overflow-y-auto': !isMobile,
})}>
{displayedApps
.filter(app => searchQuery ? true : featuredPackageNames.indexOf(app.package) === -1)
.map(app => <AppEntry
key={appId(app) + (app.state?.our_version || "")}
size='large'
size={isMobile ? 'medium' : 'large'}
app={app}
className="self-stretch"
overrideImageSize="medium"
showMoreActions={!isMobile}
/>)}
</div>
{pages.length > 1 && <div className="flex flex-wrap self-center gap-2">
@ -232,6 +245,9 @@ export default function StorePage() {
<FaChevronRight />
</button>
</div>}
<Modal title="Rebuilding index..." show={isRebuildingIndex} hide={() => { }}>
<Loader msg="This may take a few seconds." />
</Modal>
</div>
);
}

View File

@ -56,8 +56,8 @@ export interface PackageManifest {
process_wasm_path: string
on_exit: string
request_networking: boolean
request_capabilities: string[]
grant_capabilities: string[]
request_capabilities: any[]
grant_capabilities: any[]
public: boolean
}

View File

@ -88,6 +88,11 @@ export default defineConfig({
target: PROXY_URL,
changeOrigin: true,
},
'/api/*': {
target: PROXY_URL,
changeOrigin: true,
rewrite: (path) => path.replace('/api', ''),
},
// '/example': {
// target: PROXY_URL,
// changeOrigin: true,

View File

@ -222,13 +222,8 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "kinode_process_lib"
<<<<<<< HEAD:modules/chess/chess/Cargo.lock
version = "0.5.7"
source = "git+https://github.com/kinode-dao/process_lib?tag=v0.5.9-alpha#c1ac7227951fbd8cabf6568704f0ce11e8558c8a"
=======
version = "0.5.6"
source = "git+https://github.com/kinode-dao/process_lib?rev=fccb6a0#fccb6a0c07ebda3e385bff7f76e4984b741f01c7"
>>>>>>> develop:kinode/packages/chess/chess/Cargo.lock
dependencies = [
"anyhow",
"bincode",

View File

@ -49,10 +49,14 @@ fn load_chess_state() -> ChessState {
games,
clients: HashSet::new(),
},
None => ChessState {
games: HashMap::new(),
clients: HashSet::new(),
},
None => {
let state = ChessState {
games: HashMap::new(),
clients: HashSet::new(),
};
save_chess_state(&state);
state
}
}
}

View File

@ -9,7 +9,7 @@
<meta httpEquiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport"
content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1.00001, viewport-fit=cover" />
<script type="module" crossorigin src="/assets/index-emIdaXB4.js"></script>
<script type="module" crossorigin src="/assets/index-BYvZ8GK9.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-BS5LP50I.css">
</head>

View File

@ -9,7 +9,7 @@
<meta httpEquiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport"
content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1.00001, viewport-fit=cover" />
<script type="module" crossorigin src="/assets/index-emIdaXB4.js"></script>
<script type="module" crossorigin src="/assets/index-BYvZ8GK9.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-BS5LP50I.css">
</head>

View File

@ -38,7 +38,7 @@ const AppDisplay: React.FC<AppDisplayProps> = ({ app }) => {
size={'small'}
className="h-16 w-16"
/>}
<h6>{app?.label}</h6>
<h6>{app?.label || app?.package_name}</h6>
{app?.path && isHovered && <button
className="absolute p-2 -top-2 -right-2 clear text-sm"
onClick={(e) => {

View File

@ -1,6 +1,5 @@
import classNames from "classnames"
import { FaEye, FaEyeSlash } from "react-icons/fa6"
import { useState } from "react"
import { useEffect, useState } from "react"
import usePersistentStore from "../store/persistentStore"
import useHomepageStore from "../store/homepageStore"
import { isMobileCheck } from "../utils/dimensions"
@ -13,19 +12,23 @@ interface WidgetProps {
const Widget: React.FC<WidgetProps> = ({ package_name, widget, forceLarge }) => {
const { apps } = useHomepageStore()
const { widgetSettings, toggleWidgetVisibility } = usePersistentStore()
const [isHovered, setIsHovered] = useState(false)
const { widgetSettings } = usePersistentStore()
const isMobile = isMobileCheck()
const isLarge = forceLarge || widgetSettings[package_name]?.size === "large"
const isSmall = !widgetSettings[package_name]?.size || widgetSettings[package_name]?.size === "small"
const [tallScreen, setTallScreen] = useState(window.innerHeight > window.innerWidth)
useEffect(() => {
setTallScreen(window.innerHeight > window.innerWidth)
}, [window.innerHeight, window.innerWidth])
return <div
className={classNames("self-stretch flex-col-center shadow-lg rounded-lg relative", {
"max-w-1/2 min-w-1/2": isLarge && !isMobile,
"max-w-1/4 min-w-1/4": isSmall && !isMobile,
"min-w-1/4": isSmall && !isMobile,
"max-w-1/4": isSmall && !tallScreen,
'w-full': isMobile
})}
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
>
<h6 className="flex-center my-2">
{apps.find(app => app.package_name === package_name)?.label || package_name}
@ -35,12 +38,6 @@ const Widget: React.FC<WidgetProps> = ({ package_name, widget, forceLarge }) =>
className="grow self-stretch"
data-widget-code={widget}
/>
{isHovered && <button
className="absolute top-0 left-0 icon"
onClick={() => toggleWidgetVisibility(package_name)}
>
{widgetSettings[package_name]?.hide ? <FaEye /> : <FaEyeSlash />}
</button>}
</div>
}

View File

@ -33,7 +33,7 @@ fn init(_our: Address) {
serde_json::json!({
"Add": {
"label": "KinoUpdates",
"widget": create_widget(fetch_most_recent_blog_posts(6)),
"widget": create_widget(fetch_most_recent_blog_posts(12)),
}
})
.to_string(),
@ -49,41 +49,87 @@ fn create_widget(posts: Vec<KinodeBlogPost>) -> String {
return format!(
r#"<html>
<head>
<script src="https://cdn.tailwindcss.com"></script>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
* {{
box-sizing: border-box;
margin: 0;
padding: 0;
}}
a {{
text-decoration: none;
color: inherit;
}}
h2 {{
font-size: medium;
}}
body {{
color: white;
overflow: hidden;
height: 100vh;
width: 100vw;
display: flex;
flex-direction: column;
gap: 0.5rem;
font-family: sans-serif;
}}
#latest-blog-posts {{
display: flex;
flex-direction: column;
padding: 0.5rem;
gap: 0.5rem;
backdrop-filter: brightness(1.25);
border-radius: 0.75rem;
box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
height: 100vh;
width: 100vw;
overflow-y: auto;
scrollbar-color: transparent transparent;
scrollbar-width: none;
align-self: stretch;
}}
.post {{
width: 100%;
display: flex;
gap: 8px;
background-color: rgba(255, 255, 255, 0.1);
border-radius: 0.5em;
padding: 0.5em;
}}
.post-image {{
background-size: cover;
background-repeat: no-repeat;
background-position: center;
width: 100px;
height: 100px;
border-radius: 4px;
}}
.post-info {{
max-width: 67%;
overflow: hidden;
}}
@media screen and (min-width: 500px) {{
.post {{
width: 100%;
}}
.post-image {{
background-size: cover;
background-repeat: no-repeat;
background-position: center;
width: 100px;
height: 100px;
border-radius: 16px;
}}
.post-info {{
max-width: 67%
}}
@media screen and (min-width: 500px) {{
.post {{
width: 49%;
}}
width: 49%;
}}
}}
</style>
</head>
<body class="text-white overflow-hidden h-screen w-screen flex flex-col gap-2">
<body class="text-white overflow-hidden">
<div
id="latest-blog-posts"
class="flex flex-col p-2 gap-2 backdrop-brightness-125 rounded-xl shadow-lg h-screen w-screen overflow-y-auto self-stretch"
style="
scrollbar-color: transparent transparent;
scrollbar-width: none;
"
>
">
{}
</div>
</body>
@ -105,7 +151,10 @@ fn fetch_most_recent_blog_posts(n: usize) -> Vec<KinodeBlogPost> {
) {
Ok(response) => serde_json::from_slice::<Vec<KinodeBlogPost>>(response.body())
.expect("Invalid UTF-8 from kinode.org"),
Err(e) => panic!("Failed to fetch blog posts: {:?}", e),
Err(e) => {
println!("Failed to fetch blog posts: {e:?}");
vec![]
}
};
blog_posts.into_iter().rev().take(n as usize).collect()
@ -123,17 +172,17 @@ fn trim_content(content: &str) -> String {
fn post_to_html_string(post: KinodeBlogPost) -> String {
format!(
r#"<a
class="post p-2 grow self-stretch flex items-stretch rounded-lg shadow bg-white/10 hover:bg-white/20 font-sans w-full"
r#"<a
class="post"
href="https://kinode.org/blog/post/{}"
target="_blank"
target="_blank"
rel="noopener noreferrer"
>
<div
class="post-image rounded mr-2 grow self-stretch h-full"
style="background-image: url('https://kinode.org{}');"
class="post-image"
style="background-image: url('https://kinode.org{}-thumbnail');"
></div>
<div class="post-info flex flex-col grow">
<div class="post-info">
<h2 class="font-bold">{}</h2>
<p>{}</p>
</div>

View File

@ -222,13 +222,8 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "kinode_process_lib"
<<<<<<< HEAD:modules/chess/chess/Cargo.lock
version = "0.5.7"
source = "git+https://github.com/kinode-dao/process_lib?tag=v0.5.9-alpha#c1ac7227951fbd8cabf6568704f0ce11e8558c8a"
=======
version = "0.5.6"
source = "git+https://github.com/kinode-dao/process_lib?rev=fccb6a0#fccb6a0c07ebda3e385bff7f76e4984b741f01c7"
>>>>>>> develop:kinode/packages/chess/chess/Cargo.lock
dependencies = [
"anyhow",
"bincode",

View File

@ -8,7 +8,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
clap = "4.4.18"
clap = "4.4"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.8.0" }
regex = "1.10.3"
serde = { version = "1.0", features = ["derive"] }

View File

@ -2,8 +2,8 @@ use anyhow::anyhow;
use kinode_process_lib::kernel_types as kt;
use kinode_process_lib::kinode::process::standard as wit;
use kinode_process_lib::{
call_init, get_blob, get_typed_state, our_capabilities, print_to_terminal, println, set_state,
vfs, Address, Capability, ProcessId, Request,
call_init, get_blob, get_typed_state, our_capabilities, println, set_state, vfs, Address,
Capability, ProcessId, Request,
};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
@ -42,69 +42,70 @@ fn parse_command(state: &mut TerminalState, line: &str) -> anyhow::Result<()> {
},
};
match handle_run(&state.our, &process, args.to_string()) {
Ok(_) => Ok(()), // TODO clean up process
Err(e) => Err(anyhow!("failed to instantiate script: {}", e)),
}
handle_run(&state.our, &process, args.to_string())
}
call_init!(init);
fn init(our: Address) {
let mut state: TerminalState = match get_typed_state(|bytes| Ok(bincode::deserialize(bytes)?)) {
Some(s) => s,
None => TerminalState {
our,
aliases: HashMap::from([
(
"alias".to_string(),
ProcessId::new(Some("alias"), "terminal", "sys"),
),
(
"cat".to_string(),
ProcessId::new(Some("cat"), "terminal", "sys"),
),
(
"echo".to_string(),
ProcessId::new(Some("echo"), "terminal", "sys"),
),
(
"hi".to_string(),
ProcessId::new(Some("hi"), "terminal", "sys"),
),
(
"kill".to_string(),
ProcessId::new(Some("kill"), "terminal", "sys"),
),
(
"kfetch".to_string(),
ProcessId::new(Some("kfetch"), "terminal", "sys"),
),
(
"m".to_string(),
ProcessId::new(Some("m"), "terminal", "sys"),
),
(
"namehash_to_name".to_string(),
ProcessId::new(Some("namehash_to_name"), "terminal", "sys"),
),
(
"net_diagnostics".to_string(),
ProcessId::new(Some("net_diagnostics"), "terminal", "sys"),
),
(
"peer".to_string(),
ProcessId::new(Some("peer"), "terminal", "sys"),
),
(
"peers".to_string(),
ProcessId::new(Some("peers"), "terminal", "sys"),
),
(
"top".to_string(),
ProcessId::new(Some("top"), "terminal", "sys"),
),
]),
},
None => {
let state = TerminalState {
our,
aliases: HashMap::from([
(
"alias".to_string(),
ProcessId::new(Some("alias"), "terminal", "sys"),
),
(
"cat".to_string(),
ProcessId::new(Some("cat"), "terminal", "sys"),
),
(
"echo".to_string(),
ProcessId::new(Some("echo"), "terminal", "sys"),
),
(
"hi".to_string(),
ProcessId::new(Some("hi"), "terminal", "sys"),
),
(
"kill".to_string(),
ProcessId::new(Some("kill"), "terminal", "sys"),
),
(
"kfetch".to_string(),
ProcessId::new(Some("kfetch"), "terminal", "sys"),
),
(
"m".to_string(),
ProcessId::new(Some("m"), "terminal", "sys"),
),
(
"namehash_to_name".to_string(),
ProcessId::new(Some("namehash_to_name"), "terminal", "sys"),
),
(
"net_diagnostics".to_string(),
ProcessId::new(Some("net_diagnostics"), "terminal", "sys"),
),
(
"peer".to_string(),
ProcessId::new(Some("peer"), "terminal", "sys"),
),
(
"peers".to_string(),
ProcessId::new(Some("peers"), "terminal", "sys"),
),
(
"top".to_string(),
ProcessId::new(Some("top"), "terminal", "sys"),
),
]),
};
set_state(&bincode::serialize(&state).unwrap());
state
}
};
loop {
@ -126,7 +127,7 @@ fn init(our: Address) {
// checks for a request from a terminal script (different process, same package)
} else if state.our.node == source.node && state.our.package() == source.package() {
let Ok(action) = serde_json::from_slice::<TerminalAction>(&body) else {
println!("failed to parse action from: {}", source);
println!("failed to parse action from {source}");
continue;
};
match action {
@ -138,7 +139,7 @@ fn init(our: Address) {
}
}
} else {
println!("ignoring message from: {}", source);
println!("ignoring message from {source}");
continue;
}
}
@ -154,26 +155,16 @@ fn init(our: Address) {
}
fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Result<()> {
let wasm_path = format!("{}.wasm", process.process());
let package = format!("{}:{}", process.package(), process.publisher());
let drive_path = format!("/{}/pkg", package);
let drive_path = format!("/{}:{}/pkg", process.package(), process.publisher());
let Ok(entry) = get_entry(process) else {
return Err(anyhow::anyhow!("script not in scripts.json file"));
};
let wasm_path = if wasm_path.starts_with("/") {
wasm_path
} else {
format!("/{}", wasm_path)
};
let wasm_path = format!("{}{}", drive_path, wasm_path);
// build initial caps
let process_id = format!("{}:{}", rand::random::<u64>(), package); // all scripts are given random process IDs
let Ok(parsed_new_process_id) = process_id.parse::<ProcessId>() else {
return Err(anyhow::anyhow!("invalid process id!"));
};
let wasm_path = format!("{drive_path}/{}.wasm", process.process());
let _bytes_response = Request::new()
.target(("our", "vfs", "distro", "sys"))
// all scripts are given random process IDs
let process_id = ProcessId::new(None, process.package(), process.publisher());
Request::to(("our", "vfs", "distro", "sys"))
.body(serde_json::to_vec(&vfs::VfsRequest {
path: wasm_path.clone(),
action: vfs::VfsAction::Read,
@ -191,7 +182,7 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
Capability {
issuer: Address {
node: our.node.clone(),
process: parsed_new_process_id.clone(),
process: process_id.clone(),
},
params: "\"messaging\"".into(),
},
@ -211,7 +202,7 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
Capability {
issuer: Address {
node: our.node.clone(),
process: parsed_new_process_id.clone(),
process: process_id.clone(),
},
params: params.to_string(),
},
@ -227,8 +218,7 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
}
}
for (process, cap) in granted_caps.into_iter() {
Request::new()
.target(("our", "kernel", "distro", "sys"))
Request::to(("our", "kernel", "distro", "sys"))
.body(serde_json::to_vec(&kt::KernelCommand::GrantCapabilities {
target: process,
capabilities: vec![kt::de_wit_capability(cap)],
@ -237,10 +227,9 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
}
// inherits the blob from the previous request, `_bytes_response`,
// containing the wasm byte code of the process
Request::new()
.target(("our", "kernel", "distro", "sys"))
Request::to(("our", "kernel", "distro", "sys"))
.body(serde_json::to_vec(&kt::KernelCommand::InitializeProcess {
id: parsed_new_process_id.clone(),
id: process_id.clone(),
wasm_bytes_handle: wasm_path.clone(),
wit_version: entry.wit_version,
on_exit: kt::OnExit::None,
@ -305,42 +294,20 @@ fn handle_run(our: &Address, process: &ProcessId, args: String) -> anyhow::Resul
requested_caps.push(kt::de_wit_capability(cap.clone()));
}
}
print_to_terminal(
3,
&format!(
"{}: Process {{\n wasm_bytes_handle: {},\n on_exit: {:?},\n public: {}\n capabilities: {}\n}}",
parsed_new_process_id.clone(),
wasm_path.clone(),
kt::OnExit::None,
entry.public,
{
let mut caps_string = "[".to_string();
for cap in requested_caps.iter() {
caps_string += &format!("\n {}({})", cap.issuer.to_string(), cap.params);
}
caps_string + "\n ]"
},
),
);
Request::new()
.target(("our", "kernel", "distro", "sys"))
Request::to(("our", "kernel", "distro", "sys"))
.body(serde_json::to_vec(&kt::KernelCommand::GrantCapabilities {
target: parsed_new_process_id.clone(),
target: process_id.clone(),
capabilities: requested_caps,
})?)
.send()?;
let _ = Request::new()
.target(("our", "kernel", "distro", "sys"))
Request::to(("our", "kernel", "distro", "sys"))
.body(serde_json::to_vec(&kt::KernelCommand::RunProcess(
parsed_new_process_id.clone(),
process_id.clone(),
))?)
.send_and_await_response(5)??;
let req = Request::new()
.target(("our", parsed_new_process_id))
.body(args.into_bytes());
req.send().unwrap();
Request::to(("our", process_id))
.body(args.into_bytes())
.send()?;
Ok(())
}
@ -351,20 +318,15 @@ fn handle_alias_change(
) -> anyhow::Result<()> {
match process {
Some(process) => {
// first check to make sure the script is actually a script
let Ok(_) = get_entry(&process) else {
return Err(anyhow!("process {} not found", process));
};
state.aliases.insert(alias.clone(), process.clone());
println!("alias {} set to {}", alias, process);
println!("alias {alias} set for {process}");
state.aliases.insert(alias, process);
}
None => {
if state.aliases.contains_key(&alias) {
state.aliases.remove(&alias);
println!("alias {} removed", alias);
println!("alias {alias} removed");
} else {
println!("alias {} not found", alias);
println!("alias {alias} not found");
}
}
}
@ -374,10 +336,9 @@ fn handle_alias_change(
fn get_entry(process: &ProcessId) -> anyhow::Result<kt::DotScriptsEntry> {
let drive_path = format!("/{}:{}/pkg", process.package(), process.publisher());
Request::new()
.target(("our", "vfs", "distro", "sys"))
Request::to(("our", "vfs", "distro", "sys"))
.body(serde_json::to_vec(&vfs::VfsRequest {
path: format!("{}/scripts.json", drive_path),
path: format!("{drive_path}/scripts.json"),
action: vfs::VfsAction::Read,
})?)
.send_and_await_response(5)??;

View File

@ -8,6 +8,7 @@ simulation-mode = []
[dependencies]
anyhow = "1.0"
clap = "4.4"
kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.8.0" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

View File

@ -1,3 +1,4 @@
use clap::{Arg, Command};
use kinode_process_lib::kernel_types::{
KernelCommand, KernelPrint, KernelPrintResponse, KernelResponse, PersistedProcess,
};
@ -12,31 +13,47 @@ wit_bindgen::generate!({
call_init!(init);
fn init(_our: Address) {
let Ok(args) = await_next_message_body() else {
let Ok(body) = await_next_message_body() else {
println!("failed to get args");
return;
};
let body_string = format!("top {}", String::from_utf8(body).unwrap());
let Ok(proc_id) = String::from_utf8(args) else {
println!("failed to stringify arguments");
let Ok(parsed) = Command::new("top")
.disable_help_flag(true)
.arg(Arg::new("target").index(1))
.arg(
Arg::new("show-caps")
.short('c')
.long("show-caps")
.action(clap::ArgAction::SetTrue),
)
.try_get_matches_from(body_string.split_whitespace())
else {
println!("failed to parse args");
return;
};
let target = parsed
.get_one::<String>("target")
.map(|s| s.parse::<ProcessId>());
let show_caps = parsed.get_flag("show-caps");
let Ok(Message::Response { body, .. }) = Request::new()
.target(("our", "kernel", "distro", "sys"))
.body(if proc_id.is_empty() {
serde_json::to_vec(&KernelCommand::Debug(KernelPrint::ProcessMap)).unwrap()
} else {
match proc_id.parse::<ProcessId>() {
.body(if let Some(target) = &target {
match target {
Ok(proc_id) => {
serde_json::to_vec(&KernelCommand::Debug(KernelPrint::Process(proc_id)))
serde_json::to_vec(&KernelCommand::Debug(KernelPrint::Process(proc_id.clone())))
.unwrap()
}
Err(_) => {
println!("invalid process id");
Err(e) => {
println!("invalid process id: {e}");
return;
}
}
} else {
serde_json::to_vec(&KernelCommand::Debug(KernelPrint::ProcessMap)).unwrap()
})
.send_and_await_response(60)
.unwrap()
@ -56,18 +73,26 @@ fn init(_our: Address) {
let len = process_map.len();
let printout = process_map
.iter()
.map(|(proc_id, process)| print_process(proc_id, process))
.map(|(proc_id, process)| print_process(proc_id, process, show_caps))
.collect::<Vec<_>>()
.join("\r\n");
println!("\r\n{printout}\r\n\r\ntop: {len} running processes");
}
KernelPrintResponse::Process(process) => match process {
None => {
println!("process {} not running", proc_id);
println!(
"process {} not running",
target.map_or("(all)".to_string(), |t| t
.map(|t| t.to_string())
.unwrap_or_default())
);
return;
}
Some(process) => {
println!("{}", print_process(&proc_id.parse().unwrap(), &process));
println!(
"{}",
print_process(&target.unwrap().unwrap(), &process, show_caps)
);
}
},
KernelPrintResponse::HasCap(_) => {
@ -76,9 +101,9 @@ fn init(_our: Address) {
}
}
fn print_process(id: &ProcessId, process: &PersistedProcess) -> String {
fn print_process(id: &ProcessId, process: &PersistedProcess, show_caps: bool) -> String {
format!(
"{}:\r\n {}\r\n wit: {}\r\n on-exit: {:?}\r\n public: {}\r\n capabilities: {:?}",
"{}:\r\n {}\r\n wit: {}\r\n on-exit: {:?}\r\n public: {}\r\n capabilities:\r\n {}",
id,
if process.wasm_bytes_handle.is_empty() {
"(runtime)"
@ -88,10 +113,14 @@ fn print_process(id: &ProcessId, process: &PersistedProcess) -> String {
process.wit_version.unwrap_or_default(),
process.on_exit,
process.public,
process
.capabilities
.iter()
.map(|c| c.to_string())
.collect::<Vec<_>>()
if show_caps {
process
.capabilities
.iter()
.map(|c| format!("{}\r\n ", c.to_string()))
.collect::<String>()
} else {
format!("{}, use -c to display", process.capabilities.len())
}
)
}

View File

@ -5,10 +5,15 @@
"on_exit": "Restart",
"request_networking": true,
"request_capabilities": [
"eth:distro:sys",
"http_client:distro:sys",
"http_server:distro:sys",
"kernel:distro:sys",
"kv:distro:sys",
"http_server:distro:sys",
"sqlite:distro:sys",
"state:distro:sys",
"terminal:distro:sys",
"timer:distro:sys",
"vfs:distro:sys"
],
"grant_capabilities": [

View File

@ -1,13 +1,12 @@
use std::collections::HashMap;
use std::str::FromStr;
use crate::kinode::process::tester::{
FailResponse, Request as TesterRequest, Response as TesterResponse, RunRequest,
};
use kinode_process_lib::kernel_types as kt;
use kinode_process_lib::{
await_message, call_init, our_capabilities, println, spawn, vfs, Address, Message, OnExit,
ProcessId, Request, Response,
await_message, call_init, our_capabilities, println, spawn, vfs, Address, Capability, Message,
OnExit, ProcessId, Request, Response,
};
mod tester_lib;
@ -19,6 +18,9 @@ wit_bindgen::generate!({
additional_derives: [PartialEq, serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto],
});
const SETUP_PATH: &str = "/tester:sys/setup";
const TESTS_PATH: &str = "/tester:sys/tests";
fn make_vfs_address(our: &Address) -> anyhow::Result<Address> {
Ok(Address {
node: our.node.clone(),
@ -42,14 +44,16 @@ fn handle_response(message: &Message) -> anyhow::Result<()> {
fn read_caps_by_child(
dir_prefix: &str,
children: &mut Vec<vfs::DirEntry>,
) -> anyhow::Result<HashMap<String, Vec<String>>> {
let caps_file_path = format!("{}/grant_capabilities.json", dir_prefix);
let caps_index = children.iter().position(|i| *i.path == *caps_file_path);
let caps_by_child: HashMap<String, Vec<String>> = match caps_index {
files: &mut Vec<vfs::DirEntry>,
) -> anyhow::Result<HashMap<String, HashMap<String, Vec<String>>>> {
// find DirEntry with path caps_file_path
let caps_file_path = format!("{}/capabilities.json", dir_prefix);
let caps_index = files.iter().position(|i| *i.path == *caps_file_path);
let caps_by_child: HashMap<String, HashMap<String, Vec<String>>> = match caps_index {
None => HashMap::new(),
Some(caps_index) => {
children.remove(caps_index);
files.remove(caps_index);
let file = vfs::file::open_file(&caps_file_path, false, None)?;
let file_contents = file.read()?;
serde_json::from_slice(&file_contents)?
@ -124,21 +128,34 @@ fn handle_request(
for test_name in test_names {
let test_path = format!("{}/{}.wasm", dir_prefix, test_name);
let grant_caps = caps_by_child
let (mut request_caps, grant_caps) = caps_by_child
.get(test_name)
.and_then(|caps| {
Some(
caps.iter()
.map(|cap| ProcessId::from_str(cap).unwrap())
.and_then(|caps_map| {
Some((
caps_map["request_capabilities"]
.iter()
.map(|cap| {
serde_json::from_str(cap).unwrap_or_else(|_| {
Capability::new(
Address::new(our.node(), cap.parse::<ProcessId>().unwrap()),
"\"messaging\"",
)
})
})
.collect(),
)
caps_map["grant_capabilities"]
.iter()
.map(|cap| cap.parse().unwrap())
.collect(),
))
})
.unwrap_or(vec![]);
.unwrap_or((vec![], vec![]));
request_caps.extend(our_capabilities());
let child_process_id = match spawn(
None,
&test_path,
OnExit::None, // TODO: notify us
our_capabilities(),
request_caps,
grant_caps,
false, // not public
) {
@ -193,51 +210,53 @@ fn handle_message(our: &Address, node_names: &mut Vec<String>) -> anyhow::Result
call_init!(init);
fn init(our: Address) {
let mut node_names: Vec<String> = Vec::new();
match Request::new()
.target(make_vfs_address(&our).unwrap())
.body(
serde_json::to_vec(&vfs::VfsRequest {
path: "/tester:sys/tests".into(),
action: vfs::VfsAction::CreateDrive,
})
.unwrap(),
)
.send_and_await_response(5)
{
Err(_) => {
fail!("tester");
}
Ok(r) => {
if r.is_err() {
for path in [SETUP_PATH, TESTS_PATH] {
match Request::new()
.target(make_vfs_address(&our).unwrap())
.body(
serde_json::to_vec(&vfs::VfsRequest {
path: path.into(),
action: vfs::VfsAction::CreateDrive,
})
.unwrap(),
)
.send_and_await_response(5)
{
Err(_) => {
fail!("tester");
}
Ok(r) => {
if r.is_err() {
fail!("tester");
}
}
}
}
// orchestrate tests using external scripts
// -> must give drive cap to rpc
let sent = Request::new()
.target(("our", "kernel", "distro", "sys"))
.body(
serde_json::to_vec(&kt::KernelCommand::GrantCapabilities {
target: ProcessId::new(Some("http_server"), "distro", "sys"),
capabilities: vec![kt::Capability {
issuer: Address::new(
our.node.clone(),
ProcessId::new(Some("vfs"), "distro", "sys"),
),
params: serde_json::json!({
"kind": "write",
"drive": "/tester:sys/tests",
})
.to_string(),
}],
})
.unwrap(),
)
.send();
if sent.is_err() {
fail!("tester");
// orchestrate tests using external scripts
// -> must give drive cap to rpc
let sent = Request::new()
.target(("our", "kernel", "distro", "sys"))
.body(
serde_json::to_vec(&kt::KernelCommand::GrantCapabilities {
target: ProcessId::new(Some("http_server"), "distro", "sys"),
capabilities: vec![kt::Capability {
issuer: Address::new(
our.node.clone(),
ProcessId::new(Some("vfs"), "distro", "sys"),
),
params: serde_json::json!({
"kind": "write",
"drive": path,
})
.to_string(),
}],
})
.unwrap(),
)
.send();
if sent.is_err() {
fail!("tester");
}
}
loop {

View File

@ -1,3 +1,4 @@
#[allow(unused_imports)]
use crate::kinode::process::tester::{FailResponse, Response as TesterResponse};
#[macro_export]

View File

@ -1,6 +1,5 @@
use crate::kinode::process::tester::{
Response as TesterResponse, FailResponse,
};
#[allow(unused_imports)]
use crate::kinode::process::tester::{FailResponse, Response as TesterResponse};
#[macro_export]
macro_rules! fail {

View File

@ -908,16 +908,7 @@ async fn check_for_root_cap(
caps_oracle
.send(CapMessage::Has {
on: process.clone(),
cap: Capability {
issuer: Address {
node: our.to_string(),
process: ETH_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
"root": true,
}))
.unwrap(),
},
cap: Capability::new((our, ETH_PROCESS_ID.clone()), "{\"root\":true}"),
responder: send_cap_bool,
})
.await

View File

@ -193,7 +193,7 @@ pub async fn http_server(
let rpc_bound_path = BoundPath {
app: Some(ProcessId::new(Some("rpc"), "distro", "sys")),
path: path.clone(),
secure_subdomain: None, // TODO maybe RPC *should* have subdomain?
secure_subdomain: None,
authenticated: false,
local_only: true,
static_content: None,
@ -218,7 +218,6 @@ pub async fn http_server(
));
while let Some(km) = recv_in_server.recv().await {
// we *can* move this into a dedicated task, but it's not necessary
handle_app_message(
km,
http_response_senders.clone(),
@ -585,10 +584,28 @@ async fn http_handler(
&jwt_secret_bytes,
) {
// redirect to login page so they can get an auth token
return Ok(warp::http::Response::builder()
.status(StatusCode::OK)
.body(login_html.to_string())
.into_response());
if original_path == "" {
return Ok(warp::http::Response::builder()
.status(StatusCode::OK)
.body(login_html.to_string())
.into_response());
} else {
return Ok(warp::http::Response::builder()
.status(StatusCode::TEMPORARY_REDIRECT)
.header(
"Location",
format!(
"{}://{}",
match headers.get("X-Forwarded-Proto") {
Some(proto) => proto.to_str().unwrap_or("http"),
None => "http",
},
host,
),
)
.body(vec![])
.into_response());
}
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +1,15 @@
use crate::KERNEL_PROCESS_ID;
use anyhow::Result;
use lib::types::core as t;
pub use lib::v0::ProcessV0;
pub use lib::Process;
use ring::signature;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use tokio::fs;
use tokio::task::JoinHandle;
use lib::{types::core as t, v0::ProcessV0, Process};
use std::{
collections::{HashMap, VecDeque},
sync::Arc,
};
use tokio::{fs, task::JoinHandle};
use wasi_common::sync::Dir;
use wasmtime::component::ResourceTable as Table;
use wasmtime::component::*;
use wasmtime::{Engine, Store};
use wasmtime::{
component::{Component, Linker, ResourceTable as Table},
Engine, Store,
};
use wasmtime_wasi::{
pipe::MemoryOutputPipe, DirPerms, FilePerms, WasiCtx, WasiCtxBuilder, WasiView,
};
@ -27,7 +25,7 @@ pub struct ProcessContext {
pub struct ProcessState {
/// our node's networking keypair
pub keypair: Arc<signature::Ed25519KeyPair>,
pub keypair: Arc<ring::signature::Ed25519KeyPair>,
/// information about ourself
pub metadata: t::ProcessMetadata,
/// pipe from which we get messages from the main event loop
@ -85,29 +83,18 @@ impl WasiView for ProcessWasiV0 {
}
}
async fn make_component(
engine: Engine,
wasm_bytes: &[u8],
async fn make_table_and_wasi(
home_directory_path: String,
process_state: ProcessState,
) -> Result<(Process, Store<ProcessWasi>, MemoryOutputPipe)> {
let component = Component::new(&engine, wasm_bytes.to_vec())
.expect("make_process_loop: couldn't read file");
let mut linker = Linker::new(&engine);
Process::add_to_linker(&mut linker, |state: &mut ProcessWasi| state).unwrap();
process_state: &ProcessState,
) -> (Table, WasiCtx, MemoryOutputPipe) {
let table = Table::new();
let wasi_stderr = MemoryOutputPipe::new(STACK_TRACE_SIZE);
let our_process_id = process_state.metadata.our.process.clone();
let send_to_terminal = process_state.send_to_terminal.clone();
let tmp_path = format!(
"{}/vfs/{}:{}/tmp",
home_directory_path,
our_process_id.package(),
our_process_id.publisher()
process_state.metadata.our.process.package(),
process_state.metadata.our.process.publisher()
);
let mut wasi = WasiCtxBuilder::new();
@ -132,10 +119,26 @@ async fn make_component(
}
}
let wasi = wasi.stderr(wasi_stderr.clone()).build();
(table, wasi.stderr(wasi_stderr.clone()).build(), wasi_stderr)
}
async fn make_component(
engine: Engine,
wasm_bytes: &[u8],
home_directory_path: String,
process_state: ProcessState,
) -> anyhow::Result<(Process, Store<ProcessWasi>, MemoryOutputPipe)> {
let component =
Component::new(&engine, wasm_bytes.to_vec()).expect("make_component: couldn't read file");
let mut linker = Linker::new(&engine);
Process::add_to_linker(&mut linker, |state: &mut ProcessWasi| state).unwrap();
let (table, wasi, wasi_stderr) = make_table_and_wasi(home_directory_path, &process_state).await;
wasmtime_wasi::command::add_to_linker(&mut linker).unwrap();
let our_process_id = process_state.metadata.our.process.clone();
let send_to_terminal = process_state.send_to_terminal.clone();
let mut store = Store::new(
&engine,
ProcessWasi {
@ -149,15 +152,12 @@ async fn make_component(
match Process::instantiate_async(&mut store, &component, &linker).await {
Ok(b) => b,
Err(e) => {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 0,
content: format!(
"mk: process {:?} failed to instantiate: {:?}",
our_process_id, e,
),
})
.await;
t::Printout::new(
0,
format!("kernel: process {our_process_id} failed to instantiate: {e:?}"),
)
.send(&send_to_terminal)
.await;
return Err(e);
}
};
@ -170,52 +170,18 @@ async fn make_component_v0(
wasm_bytes: &[u8],
home_directory_path: String,
process_state: ProcessState,
) -> Result<(ProcessV0, Store<ProcessWasiV0>, MemoryOutputPipe)> {
let component = Component::new(&engine, wasm_bytes.to_vec())
.expect("make_process_loop: couldn't read file");
) -> anyhow::Result<(ProcessV0, Store<ProcessWasiV0>, MemoryOutputPipe)> {
let component =
Component::new(&engine, wasm_bytes.to_vec()).expect("make_component: couldn't read file");
let mut linker = Linker::new(&engine);
ProcessV0::add_to_linker(&mut linker, |state: &mut ProcessWasiV0| state).unwrap();
let table = Table::new();
let wasi_stderr = MemoryOutputPipe::new(STACK_TRACE_SIZE);
let (table, wasi, wasi_stderr) = make_table_and_wasi(home_directory_path, &process_state).await;
wasmtime_wasi::command::add_to_linker(&mut linker).unwrap();
let our_process_id = process_state.metadata.our.process.clone();
let send_to_terminal = process_state.send_to_terminal.clone();
let tmp_path = format!(
"{}/vfs/{}:{}/tmp",
home_directory_path,
our_process_id.package(),
our_process_id.publisher()
);
let mut wasi = WasiCtxBuilder::new();
// TODO make guarantees about this
if let Ok(Ok(())) = tokio::time::timeout(
std::time::Duration::from_secs(5),
fs::create_dir_all(&tmp_path),
)
.await
{
if let Ok(wasi_tempdir) =
Dir::open_ambient_dir(tmp_path.clone(), wasi_common::sync::ambient_authority())
{
wasi.preopened_dir(
wasi_tempdir,
DirPerms::all(),
FilePerms::all(),
tmp_path.clone(),
)
.env("TEMP_DIR", tmp_path);
}
}
let wasi = wasi.stderr(wasi_stderr.clone()).build();
wasmtime_wasi::command::add_to_linker(&mut linker).unwrap();
let mut store = Store::new(
&engine,
ProcessWasiV0 {
@ -229,15 +195,12 @@ async fn make_component_v0(
match ProcessV0::instantiate_async(&mut store, &component, &linker).await {
Ok(b) => b,
Err(e) => {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 0,
content: format!(
"mk: process {:?} failed to instantiate: {:?}",
our_process_id, e,
),
})
.await;
t::Printout::new(
0,
format!("kernel: process {our_process_id} failed to instantiate: {e:?}"),
)
.send(&send_to_terminal)
.await;
return Err(e);
}
};
@ -247,7 +210,7 @@ async fn make_component_v0(
/// create a specific process, and generate a task that will run it.
pub async fn make_process_loop(
keypair: Arc<signature::Ed25519KeyPair>,
keypair: Arc<ring::signature::Ed25519KeyPair>,
metadata: t::ProcessMetadata,
send_to_loop: t::MessageSender,
send_to_terminal: t::PrintSender,
@ -257,7 +220,7 @@ pub async fn make_process_loop(
caps_oracle: t::CapMessageSender,
engine: Engine,
home_directory_path: String,
) -> Result<()> {
) -> anyhow::Result<()> {
// before process can be instantiated, need to await 'run' message from kernel
let mut pre_boot_queue = Vec::<Result<t::KernelMessage, t::WrappedSendError>>::new();
while let Some(message) = recv_in_process.recv().await {
@ -292,9 +255,12 @@ pub async fn make_process_loop(
send_to_process.send(message).await?;
}
let our = metadata.our.clone();
let wit_version = metadata.wit_version.clone();
let process_state = ProcessState {
keypair: keypair.clone(),
metadata: metadata.clone(),
keypair,
metadata,
recv_in_process,
self_sender: send_to_process,
send_to_loop: send_to_loop.clone(),
@ -306,40 +272,28 @@ pub async fn make_process_loop(
caps_oracle: caps_oracle.clone(),
};
let metadata = match metadata.wit_version {
let metadata = match wit_version {
// assume missing version is oldest wit version
None => {
let (bindings, mut store, wasi_stderr) =
make_component(engine, &wasm_bytes, home_directory_path, process_state).await?;
// the process will run until it returns from init() or crashes
match bindings
.call_init(&mut store, &metadata.our.to_string())
.await
{
match bindings.call_init(&mut store, &our.to_string()).await {
Ok(()) => {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"process {} returned without error",
metadata.our.process
),
})
t::Printout::new(1, format!("process {our} returned without error"))
.send(&send_to_terminal)
.await;
}
Err(_) => {
let stderr = wasi_stderr.contents().into();
let stderr = String::from_utf8(stderr)?;
let _ = send_to_terminal
.send(t::Printout {
verbosity: 0,
content: format!(
"\x1b[38;5;196mprocess {} ended with error:\x1b[0m\n{}",
metadata.our.process, stderr,
),
})
.await;
t::Printout::new(
0,
format!("\x1b[38;5;196mprocess {our} ended with error:\x1b[0m\n{stderr}",),
)
.send(&send_to_terminal)
.await;
}
};
@ -353,33 +307,21 @@ pub async fn make_process_loop(
make_component_v0(engine, &wasm_bytes, home_directory_path, process_state).await?;
// the process will run until it returns from init() or crashes
match bindings
.call_init(&mut store, &metadata.our.to_string())
.await
{
match bindings.call_init(&mut store, &our.to_string()).await {
Ok(()) => {
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"process {} returned without error",
metadata.our.process
),
})
t::Printout::new(1, format!("process {our} returned without error"))
.send(&send_to_terminal)
.await;
}
Err(_) => {
let stderr = wasi_stderr.contents().into();
let stderr = String::from_utf8(stderr)?;
let _ = send_to_terminal
.send(t::Printout {
verbosity: 0,
content: format!(
"\x1b[38;5;196mprocess {} ended with error:\x1b[0m\n{}",
metadata.our.process, stderr,
),
})
.await;
t::Printout::new(
0,
format!("\x1b[38;5;196mprocess {our} ended with error:\x1b[0m\n{stderr}",),
)
.send(&send_to_terminal)
.await;
}
};
@ -392,186 +334,157 @@ pub async fn make_process_loop(
// the process has completed, time to perform cleanup
//
let our_kernel = t::Address {
node: metadata.our.node.clone(),
process: KERNEL_PROCESS_ID.clone(),
};
// get caps before killing
let (tx, rx) = tokio::sync::oneshot::channel();
let _ = caps_oracle
.send(t::CapMessage::GetAll {
on: metadata.our.process.clone(),
responder: tx,
})
.await;
let initial_capabilities = rx
.await?
.iter()
.map(|c| t::Capability {
issuer: c.0.issuer.clone(),
params: c.0.params.clone(),
})
.collect();
t::Printout::new(
1,
format!(
"process {} has OnExit behavior {}",
metadata.our.process, metadata.on_exit
),
)
.send(&send_to_terminal)
.await;
// fulfill the designated OnExit behavior
match metadata.on_exit {
t::OnExit::None => {
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await?;
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!("process {} had no OnExit behavior", metadata.our.process),
})
t::KernelMessage::builder()
.id(rand::random())
.source((&our.node, KERNEL_PROCESS_ID.clone()))
.target((&our.node, KERNEL_PROCESS_ID.clone()))
.message(t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}))
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
// if restart, tell ourselves to init the app again, with same capabilities
t::OnExit::Restart => {
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
// get caps before killing
let (tx, rx) = tokio::sync::oneshot::channel();
caps_oracle
.send(t::CapMessage::GetAll {
on: metadata.our.process.clone(),
responder: tx,
})
.await?;
let _ = send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"firing OnExit::Restart for process {}",
metadata.our.process
),
let initial_capabilities = rx
.await?
.iter()
.map(|c| t::Capability {
issuer: c.0.issuer.clone(),
params: c.0.params.clone(),
})
.collect();
// kill, **without** revoking capabilities from others!
t::KernelMessage::builder()
.id(rand::random())
.source((&our.node, KERNEL_PROCESS_ID.clone()))
.target((&our.node, KERNEL_PROCESS_ID.clone()))
.message(t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: Some("no-revoke".to_string()),
capabilities: vec![],
}))
.build()
.unwrap()
.send(&send_to_loop)
.await;
// then re-initialize with same capabilities
t::KernelMessage::builder()
.id(rand::random())
.source((&our.node, KERNEL_PROCESS_ID.clone()))
.target((&our.node, KERNEL_PROCESS_ID.clone()))
.message(t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
id: metadata.our.process.clone(),
wasm_bytes_handle: metadata.wasm_bytes_handle,
wit_version: metadata.wit_version,
on_exit: metadata.on_exit,
initial_capabilities,
public: metadata.public,
})
.unwrap(),
metadata: None,
capabilities: vec![],
}))
.lazy_load_blob(Some(t::LazyLoadBlob {
mime: None,
bytes: wasm_bytes,
}))
.build()
.unwrap()
.send(&send_to_loop)
.await;
// then run
t::KernelMessage::builder()
.id(rand::random())
.source((&our.node, KERNEL_PROCESS_ID.clone()))
.target((&our.node, KERNEL_PROCESS_ID.clone()))
.message(t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}))
.build()
.unwrap()
.send(&send_to_loop)
.await;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::InitializeProcess {
id: metadata.our.process.clone(),
wasm_bytes_handle: metadata.wasm_bytes_handle,
wit_version: metadata.wit_version,
on_exit: metadata.on_exit,
initial_capabilities,
public: metadata.public,
})
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: Some(t::LazyLoadBlob {
mime: None,
bytes: wasm_bytes,
}),
})
.await?;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::RunProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await?;
}
// if requests, fire them
// even in death, a process can only message processes it has capabilities for
t::OnExit::Requests(requests) => {
send_to_terminal
.send(t::Printout {
verbosity: 1,
content: format!(
"firing OnExit::Requests for process {}",
metadata.our.process
),
})
.await?;
for (address, mut request, blob) in requests {
request.expects_response = None;
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: metadata.our.clone(),
target: address,
rsvp: None,
message: t::Message::Request(request),
lazy_load_blob: blob,
})
.await?;
t::KernelMessage::builder()
.id(rand::random())
.source(metadata.our.clone())
.target(address)
.message(t::Message::Request(request))
.lazy_load_blob(blob)
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
send_to_loop
.send(t::KernelMessage {
id: rand::random(),
source: our_kernel.clone(),
target: our_kernel.clone(),
rsvp: None,
message: t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await?;
t::KernelMessage::builder()
.id(rand::random())
.source((&our.node, KERNEL_PROCESS_ID.clone()))
.target((&our.node, KERNEL_PROCESS_ID.clone()))
.message(t::Message::Request(t::Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&t::KernelCommand::KillProcess(
metadata.our.process.clone(),
))
.unwrap(),
metadata: None,
capabilities: vec![],
}))
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
}
Ok(())
}
pub async fn print(sender: &t::PrintSender, verbosity: u8, content: String) {
let _ = sender
.send(t::Printout { verbosity, content })
.await
.expect("fatal: kernel terminal print pipe died!");
}

View File

@ -1,19 +1,13 @@
use crate::kernel::process;
use crate::KERNEL_PROCESS_ID;
use crate::VFS_PROCESS_ID;
use anyhow::Result;
use lib::types::core::{self as t, STATE_PROCESS_ID};
pub use lib::wit;
pub use lib::wit::Host as StandardHost;
use lib::types::core::{self as t, KERNEL_PROCESS_ID, STATE_PROCESS_ID, VFS_PROCESS_ID};
use lib::wit;
use lib::wit::Host as StandardHost;
use ring::signature::{self, KeyPair};
async fn print_debug(proc: &process::ProcessState, content: &str) {
let _ = proc
.send_to_terminal
.send(t::Printout {
verbosity: 2,
content: format!("{}: {}", proc.metadata.our.process, content),
})
t::Printout::new(2, format!("{}: {}", proc.metadata.our.process, content))
.send(&proc.send_to_terminal)
.await;
}
@ -101,10 +95,16 @@ impl process::ProcessState {
) -> Result<(wit::Address, wit::Message), (wit::SendError, Option<wit::Context>)> {
let (mut km, context) = match incoming {
Ok(mut km) => match km.message {
t::Message::Request(_) => {
t::Message::Request(t::Request {
ref expects_response,
..
}) => {
self.last_blob = km.lazy_load_blob;
km.lazy_load_blob = None;
self.prompting_message = Some(km.clone());
if expects_response.is_some() || km.rsvp.is_some() {
// update prompting_message iff there is someone to reply to
self.prompting_message = Some(km.clone());
}
(km, None)
}
t::Message::Response(_) => match self.contexts.remove(&km.id) {
@ -290,34 +290,34 @@ impl process::ProcessState {
// 1. whether this request expects a response -- if so, rsvp = our address, always
// 2. whether this request inherits -- if so, rsvp = prompting message's rsvp
// 3. if neither, rsvp = None
let kernel_message = t::KernelMessage {
id: request_id,
source,
target: t::Address::de_wit(target),
rsvp: match (
request.expects_response,
request.inherit,
&self.prompting_message,
) {
(Some(_), _, _) => {
// this request expects response, so receives any response
// make sure to use the real source, not a fake injected-by-kernel source
Some(self.metadata.our.clone())
}
(None, true, Some(ref prompt)) => {
// this request inherits, so response will be routed to prompting message
prompt.rsvp.clone()
}
_ => None,
},
message: t::Message::Request(request),
lazy_load_blob: blob,
};
self.send_to_loop
.send(kernel_message)
.await
.expect("fatal: kernel couldn't send request");
t::KernelMessage::builder()
.id(request_id)
.source(source)
.target(t::Address::de_wit(target))
.rsvp(
match (
request.expects_response,
request.inherit,
&self.prompting_message,
) {
(Some(_), _, _) => {
// this request expects response, so receives any response
// make sure to use the real source, not a fake injected-by-kernel source
Some(self.metadata.our.clone())
}
(None, true, Some(ref prompt)) => {
// this request inherits, so response will be routed to prompting message
prompt.rsvp.clone()
}
_ => None,
},
)
.message(t::Message::Request(request))
.lazy_load_blob(blob)
.build()
.unwrap()
.send(&self.send_to_loop)
.await;
Ok(request_id)
}
@ -328,11 +328,11 @@ impl process::ProcessState {
// the process requires a prompting_message in order to issue a response
let Some(ref prompting_message) = self.prompting_message else {
process::print(
&self.send_to_terminal,
t::Printout::new(
0,
format!("kernel: need non-None prompting_message to handle Response {response:?}"),
)
.send(&self.send_to_terminal)
.await;
return;
};
@ -372,21 +372,20 @@ impl process::ProcessState {
};
}
self.send_to_loop
.send(t::KernelMessage {
id,
source: self.metadata.our.clone(),
target,
rsvp: None,
message: t::Message::Response((
response,
// the context will be set by the process receiving this Response.
None,
)),
lazy_load_blob: blob,
})
.await
.expect("fatal: kernel couldn't send response");
t::KernelMessage::builder()
.id(id)
.source(self.metadata.our.clone())
.target(target)
.message(t::Message::Response((
response,
// the context will be set by the process receiving this Response.
None,
)))
.lazy_load_blob(blob)
.build()
.unwrap()
.send(&self.send_to_loop)
.await;
}
}
@ -654,7 +653,25 @@ impl StandardHost for process::ProcessWasi {
self.process.metadata.our.process.package(),
self.process.metadata.our.process.publisher(),
);
// TODO I think we need to kill this process first in case it already exists
let request_capabilities_filtered = {
let (tx, rx) = tokio::sync::oneshot::channel();
self.process
.caps_oracle
.send(t::CapMessage::FilterCaps {
on: self.process.metadata.our.process.clone(),
caps: request_capabilities
.into_iter()
.map(|cap| t::de_wit_capability(cap).0)
.collect(),
responder: tx,
})
.await
.expect("fatal: process couldn't access capabilities oracle");
rx.await
.expect("fatal: process couldn't receive capabilities")
};
let Ok(Ok((_, _response))) = send_and_await_response(
self,
Some(t::Address {
@ -673,12 +690,9 @@ impl StandardHost for process::ProcessWasi {
wasm_bytes_handle: wasm_path,
wit_version: self.process.metadata.wit_version,
on_exit: t::OnExit::de_wit(on_exit),
initial_capabilities: request_capabilities
.iter()
.map(|cap| t::Capability {
issuer: t::Address::de_wit(cap.clone().issuer),
params: cap.clone().params,
})
initial_capabilities: request_capabilities_filtered
.into_iter()
.map(|(cap, _sig)| cap)
.collect(),
public,
})
@ -704,14 +718,11 @@ impl StandardHost for process::ProcessWasi {
.caps_oracle
.send(t::CapMessage::Add {
on: t::ProcessId::de_wit(process),
caps: vec![t::Capability {
issuer: t::Address {
node: self.process.metadata.our.node.clone(),
process: new_process_id.clone(),
},
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging((
self.process.metadata.our.node.clone(),
new_process_id.clone(),
))],
responder: Some(tx),
})
.await
.unwrap();
@ -758,15 +769,12 @@ impl StandardHost for process::ProcessWasi {
.caps_oracle
.send(t::CapMessage::Add {
on: new_process_id.clone(),
caps: vec![t::Capability {
issuer: self.process.metadata.our.clone(),
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging(self.process.metadata.our.clone())],
responder: Some(tx),
})
.await
.unwrap();
let _ = rx.await.unwrap();
rx.await.unwrap();
// parent process is always able to Message child
let (tx, rx) = tokio::sync::oneshot::channel();
@ -774,18 +782,15 @@ impl StandardHost for process::ProcessWasi {
.caps_oracle
.send(t::CapMessage::Add {
on: self.process.metadata.our.process.clone(),
caps: vec![t::Capability {
issuer: t::Address {
node: self.process.metadata.our.node.clone(),
process: new_process_id.clone(),
},
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging((
self.process.metadata.our.node.clone(),
new_process_id.clone(),
))],
responder: Some(tx),
})
.await
.unwrap();
let _ = rx.await.unwrap();
rx.await.unwrap();
print_debug(&self.process, "spawned a new process").await;
Ok(Ok(new_process_id.en_wit().to_owned()))
}
@ -805,7 +810,7 @@ impl StandardHost for process::ProcessWasi {
.iter()
.map(|cap| t::de_wit_capability(cap.clone()).0)
.collect(),
responder: tx,
responder: Some(tx),
})
.await?;
let _ = rx.await?;
@ -823,7 +828,7 @@ impl StandardHost for process::ProcessWasi {
.iter()
.map(|cap| t::de_wit_capability(cap.clone()).0)
.collect(),
responder: tx,
responder: Some(tx),
})
.await?;
let _ = rx.await?;
@ -843,10 +848,7 @@ impl StandardHost for process::ProcessWasi {
let caps = rx.await?;
Ok(caps
.into_iter()
.map(|cap| wit::Capability {
issuer: t::Address::en_wit(&cap.0.issuer),
params: cap.0.params,
})
.map(|cap| t::en_wit_capability(cap))
.collect())
}

View File

@ -1,10 +1,8 @@
use crate::kernel::process;
use crate::KERNEL_PROCESS_ID;
use crate::VFS_PROCESS_ID;
use anyhow::Result;
use lib::types::core::{self as t, STATE_PROCESS_ID};
pub use lib::v0::wit;
pub use lib::v0::wit::Host as StandardHost;
use lib::types::core::{self as t, KERNEL_PROCESS_ID, STATE_PROCESS_ID, VFS_PROCESS_ID};
use lib::v0::wit;
use lib::v0::wit::Host as StandardHost;
use ring::signature::{self, KeyPair};
async fn print_debug(proc: &process::ProcessState, content: &str) {
@ -101,10 +99,16 @@ impl process::ProcessState {
) -> Result<(wit::Address, wit::Message), (wit::SendError, Option<wit::Context>)> {
let (mut km, context) = match incoming {
Ok(mut km) => match km.message {
t::Message::Request(_) => {
t::Message::Request(t::Request {
ref expects_response,
..
}) => {
self.last_blob = km.lazy_load_blob;
km.lazy_load_blob = None;
self.prompting_message = Some(km.clone());
if expects_response.is_some() || km.rsvp.is_some() {
// update prompting_message iff there is someone to reply to
self.prompting_message = Some(km.clone());
}
(km, None)
}
t::Message::Response(_) => match self.contexts.remove(&km.id) {
@ -330,11 +334,11 @@ impl process::ProcessState {
// the process requires a prompting_message in order to issue a response
let Some(ref prompting_message) = self.prompting_message else {
process::print(
&self.send_to_terminal,
t::Printout::new(
0,
format!("kernel: need non-None prompting_message to handle Response {response:?}"),
)
.send(&self.send_to_terminal)
.await;
return;
};
@ -660,7 +664,25 @@ impl StandardHost for process::ProcessWasiV0 {
self.process.metadata.our.process.package(),
self.process.metadata.our.process.publisher(),
);
// TODO I think we need to kill this process first in case it already exists
let request_capabilities_filtered = {
let (tx, rx) = tokio::sync::oneshot::channel();
self.process
.caps_oracle
.send(t::CapMessage::FilterCaps {
on: self.process.metadata.our.process.clone(),
caps: request_capabilities
.into_iter()
.map(|cap| t::de_wit_capability_v0(cap).0)
.collect(),
responder: tx,
})
.await
.expect("fatal: process couldn't access capabilities oracle");
rx.await
.expect("fatal: process couldn't receive capabilities")
};
let Ok(Ok((_, _response))) = send_and_await_response(
self,
Some(t::Address {
@ -679,12 +701,9 @@ impl StandardHost for process::ProcessWasiV0 {
wasm_bytes_handle: wasm_path,
wit_version: self.process.metadata.wit_version,
on_exit: t::OnExit::de_wit_v0(on_exit),
initial_capabilities: request_capabilities
.iter()
.map(|cap| t::Capability {
issuer: t::Address::de_wit_v0(cap.clone().issuer),
params: cap.clone().params,
})
initial_capabilities: request_capabilities_filtered
.into_iter()
.map(|(cap, _sig)| cap)
.collect(),
public,
})
@ -710,14 +729,11 @@ impl StandardHost for process::ProcessWasiV0 {
.caps_oracle
.send(t::CapMessage::Add {
on: t::ProcessId::de_wit_v0(process),
caps: vec![t::Capability {
issuer: t::Address {
node: self.process.metadata.our.node.clone(),
process: new_process_id.clone(),
},
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging((
self.process.metadata.our.node.clone(),
new_process_id.clone(),
))],
responder: Some(tx),
})
.await
.unwrap();
@ -764,15 +780,12 @@ impl StandardHost for process::ProcessWasiV0 {
.caps_oracle
.send(t::CapMessage::Add {
on: new_process_id.clone(),
caps: vec![t::Capability {
issuer: self.process.metadata.our.clone(),
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging(self.process.metadata.our.clone())],
responder: Some(tx),
})
.await
.unwrap();
let _ = rx.await.unwrap();
rx.await.unwrap();
// parent process is always able to Message child
let (tx, rx) = tokio::sync::oneshot::channel();
@ -780,18 +793,15 @@ impl StandardHost for process::ProcessWasiV0 {
.caps_oracle
.send(t::CapMessage::Add {
on: self.process.metadata.our.process.clone(),
caps: vec![t::Capability {
issuer: t::Address {
node: self.process.metadata.our.node.clone(),
process: new_process_id.clone(),
},
params: "\"messaging\"".into(),
}],
responder: tx,
caps: vec![t::Capability::messaging((
self.process.metadata.our.node.clone(),
new_process_id.clone(),
))],
responder: Some(tx),
})
.await
.unwrap();
let _ = rx.await.unwrap();
rx.await.unwrap();
print_debug(&self.process, "spawned a new process").await;
Ok(Ok(new_process_id.en_wit_v0().to_owned()))
}
@ -811,7 +821,7 @@ impl StandardHost for process::ProcessWasiV0 {
.iter()
.map(|cap| t::de_wit_capability_v0(cap.clone()).0)
.collect(),
responder: tx,
responder: Some(tx),
})
.await?;
let _ = rx.await?;
@ -829,7 +839,7 @@ impl StandardHost for process::ProcessWasiV0 {
.iter()
.map(|cap| t::de_wit_capability_v0(cap.clone()).0)
.collect(),
responder: tx,
responder: Some(tx),
})
.await?;
let _ = rx.await?;
@ -849,10 +859,7 @@ impl StandardHost for process::ProcessWasiV0 {
let caps = rx.await?;
Ok(caps
.into_iter()
.map(|cap| wit::Capability {
issuer: t::Address::en_wit_v0(&cap.0.issuer),
params: cap.0.params,
})
.map(|cap| t::en_wit_capability_v0(cap))
.collect())
}

View File

@ -9,7 +9,6 @@ use hmac::Hmac;
use jwt::SignWithKey;
use lib::types::core::Keyfile;
use ring::pbkdf2;
use ring::pkcs8::Document;
use ring::rand::SystemRandom;
use ring::signature::{self, KeyPair};
use ring::{digest as ring_digest, rand::SecureRandom};
@ -21,7 +20,7 @@ use std::{
type DiskKey = [u8; CREDENTIAL_LEN];
pub const CREDENTIAL_LEN: usize = ring_digest::SHA256_OUTPUT_LEN;
pub const CREDENTIAL_LEN: usize = ring::digest::SHA256_OUTPUT_LEN;
pub const ITERATIONS: u32 = 1_000_000;
pub static PBKDF2_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256; // TODO maybe look into Argon2
@ -33,8 +32,9 @@ pub fn encode_keyfile(
jwt: &[u8],
file_key: &[u8],
) -> Vec<u8> {
let mut disk_key: DiskKey = [0u8; CREDENTIAL_LEN];
use ring::rand::SecureRandom;
let mut disk_key: DiskKey = [0u8; CREDENTIAL_LEN];
let rng = SystemRandom::new();
let mut salt = [0u8; 32]; // generate a unique salt
rng.fill(&mut salt).unwrap();
@ -70,6 +70,8 @@ pub fn encode_keyfile(
}
pub fn decode_keyfile(keyfile: &[u8], password: &str) -> Result<Keyfile, &'static str> {
use generic_array::GenericArray;
let (username, routers, salt, key_enc, jwt_enc, file_enc) =
bincode::deserialize::<(String, Vec<String>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)>(keyfile)
.map_err(|_| "failed to deserialize keyfile")?;
@ -120,8 +122,11 @@ pub fn generate_jwt(
username: &str,
subdomain: &Option<String>,
) -> Option<String> {
let jwt_secret: Hmac<Sha256> = Hmac::new_from_slice(jwt_secret_bytes).ok()?;
use hmac::Hmac;
use jwt::SignWithKey;
use sha2::Sha256;
let jwt_secret: Hmac<Sha256> = Hmac::new_from_slice(jwt_secret_bytes).ok()?;
let subdomain = match subdomain.clone().unwrap_or_default().as_str() {
"" => None,
subdomain => Some(subdomain.to_string()),
@ -206,6 +211,8 @@ pub fn port_to_bytes(port: u16) -> [u8; 2] {
/// randomly generated key to encrypt file chunks,
pub fn generate_file_key() -> Vec<u8> {
use ring::rand::SecureRandom;
let mut key = [0u8; 32];
let rng = SystemRandom::new();
rng.fill(&mut key).unwrap();
@ -214,7 +221,7 @@ pub fn generate_file_key() -> Vec<u8> {
/// # Returns
/// a pair of (public key (encoded as a hex string), serialized key as a pkcs8 Document)
pub fn generate_networking_key() -> (String, Document) {
pub fn generate_networking_key() -> (String, ring::pkcs8::Document) {
let seed = SystemRandom::new();
let doc = signature::Ed25519KeyPair::generate_pkcs8(&seed).unwrap();
let keys = signature::Ed25519KeyPair::from_pkcs8(doc.as_ref()).unwrap();

View File

@ -1,101 +1,120 @@
use anyhow::Result;
use dashmap::DashMap;
// use rocksdb::checkpoint::Checkpoint;
use lib::types::core::{
Address, CapMessage, CapMessageSender, Capability, KernelMessage, KvAction, KvError, KvRequest,
KvResponse, LazyLoadBlob, Message, MessageReceiver, MessageSender, PackageId, PrintSender,
Printout, ProcessId, Request, Response, KV_PROCESS_ID,
};
use rocksdb::OptimisticTransactionDB;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use tokio::fs;
use tokio::sync::Mutex;
use lib::types::core::*;
use std::{
collections::{HashMap, VecDeque},
sync::Arc,
};
use tokio::{fs, sync::Mutex};
pub async fn kv(
our_node: String,
our_node: Arc<String>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
mut recv_from_loop: MessageReceiver,
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
) -> anyhow::Result<()> {
let kv_path = format!("{}/kv", &home_directory_path);
if let Err(e) = fs::create_dir_all(&kv_path).await {
panic!("failed creating kv dir! {:?}", e);
let kv_path = Arc::new(format!("{home_directory_path}/kv"));
if let Err(e) = fs::create_dir_all(&*kv_path).await {
panic!("failed creating kv dir! {e:?}");
}
let open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>> =
Arc::new(DashMap::new());
let txs: Arc<DashMap<u64, Vec<(KvAction, Option<Vec<u8>>)>>> = Arc::new(DashMap::new());
let mut process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> =
HashMap::new();
let process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> = HashMap::new();
loop {
tokio::select! {
Some(km) = recv_from_loop.recv() => {
if our_node.clone() != km.source.node {
println!(
"kv: request must come from our_node={}, got: {}",
our_node,
km.source.node,
);
continue;
}
let queue = process_queues
.entry(km.source.process.clone())
.or_insert_with(|| Arc::new(Mutex::new(VecDeque::new())))
.clone();
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km.clone());
}
// clone Arcs
let our_node = our_node.clone();
let send_to_caps_oracle = send_to_caps_oracle.clone();
let send_to_terminal = send_to_terminal.clone();
let send_to_loop = send_to_loop.clone();
let open_kvs = open_kvs.clone();
let txs = txs.clone();
let kv_path = kv_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
if let Err(e) = handle_request(
our_node.clone(),
km.clone(),
open_kvs.clone(),
txs.clone(),
send_to_loop.clone(),
send_to_terminal.clone(),
send_to_caps_oracle.clone(),
kv_path.clone(),
)
.await
{
let _ = send_to_loop
.send(make_error_message(our_node.clone(), &km, e))
.await;
}
}
});
}
while let Some(km) = recv_from_loop.recv().await {
if *our_node != km.source.node {
Printout::new(
1,
format!(
"kv: got request from {}, but requests must come from our node {our_node}",
km.source.node
),
)
.send(&send_to_terminal)
.await;
continue;
}
let queue = process_queues
.get(&km.source.process)
.cloned()
.unwrap_or_else(|| Arc::new(Mutex::new(VecDeque::new())));
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km);
}
// clone Arcs
let our_node = our_node.clone();
let send_to_loop = send_to_loop.clone();
let send_to_terminal = send_to_terminal.clone();
let send_to_caps_oracle = send_to_caps_oracle.clone();
let open_kvs = open_kvs.clone();
let txs = txs.clone();
let kv_path = kv_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
let (km_id, km_rsvp) =
(km.id.clone(), km.rsvp.clone().unwrap_or(km.source.clone()));
if let Err(e) = handle_request(
&our_node,
km,
open_kvs,
txs,
&send_to_loop,
&send_to_caps_oracle,
&kv_path,
)
.await
{
Printout::new(1, format!("kv: {e}"))
.send(&send_to_terminal)
.await;
KernelMessage::builder()
.id(km_id)
.source((our_node.as_str(), KV_PROCESS_ID.clone()))
.target(km_rsvp)
.message(Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&KvResponse::Err { error: e }).unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)))
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
}
});
}
Ok(())
}
async fn handle_request(
our_node: String,
our_node: &str,
km: KernelMessage,
open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
txs: Arc<DashMap<u64, Vec<(KvAction, Option<Vec<u8>>)>>>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
send_to_caps_oracle: CapMessageSender,
kv_path: String,
send_to_loop: &MessageSender,
send_to_caps_oracle: &CapMessageSender,
kv_path: &str,
) -> Result<(), KvError> {
let KernelMessage {
id,
@ -103,13 +122,13 @@ async fn handle_request(
message,
lazy_load_blob: blob,
..
} = km.clone();
} = km;
let Message::Request(Request {
body,
expects_response,
metadata,
..
}) = message.clone()
}) = message
else {
return Err(KvError::InputError {
error: "not a request".into(),
@ -127,12 +146,12 @@ async fn handle_request(
};
check_caps(
our_node.clone(),
source.clone(),
open_kvs.clone(),
send_to_caps_oracle.clone(),
our_node,
&source,
&open_kvs,
send_to_caps_oracle,
&request,
kv_path.clone(),
kv_path,
)
.await?;
@ -280,21 +299,12 @@ async fn handle_request(
}
};
if let Some(target) = km.rsvp.or_else(|| {
expects_response.map(|_| Address {
node: our_node.clone(),
process: source.process.clone(),
})
}) {
let response = KernelMessage {
id,
source: Address {
node: our_node.clone(),
process: KV_PROCESS_ID.clone(),
},
target,
rsvp: None,
message: Message::Response((
if let Some(target) = km.rsvp.or_else(|| expects_response.map(|_| source)) {
KernelMessage::builder()
.id(id)
.source((our_node, KV_PROCESS_ID.clone()))
.target(target)
.message(Message::Response((
Response {
inherit: false,
body,
@ -302,37 +312,27 @@ async fn handle_request(
capabilities: vec![],
},
None,
)),
lazy_load_blob: bytes.map(|bytes| LazyLoadBlob {
)))
.lazy_load_blob(bytes.map(|bytes| LazyLoadBlob {
mime: Some("application/octet-stream".into()),
bytes,
}),
};
let _ = send_to_loop.send(response).await;
} else {
send_to_terminal
.send(Printout {
verbosity: 2,
content: format!(
"kv: not sending response: {:?}",
serde_json::from_slice::<KvResponse>(&body)
),
})
.await
.unwrap();
}))
.build()
.unwrap()
.send(send_to_loop)
.await;
}
Ok(())
}
async fn check_caps(
our_node: String,
source: Address,
open_kvs: Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
mut send_to_caps_oracle: CapMessageSender,
our_node: &str,
source: &Address,
open_kvs: &Arc<DashMap<(PackageId, String), OptimisticTransactionDB>>,
send_to_caps_oracle: &CapMessageSender,
request: &KvRequest,
kv_path: String,
kv_path: &str,
) -> Result<(), KvError> {
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@ -347,14 +347,14 @@ async fn check_caps(
on: source.process.clone(),
cap: Capability {
issuer: Address {
node: our_node.clone(),
node: our_node.to_string(),
process: KV_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
params: serde_json::json!({
"kind": "write",
"db": request.db.to_string(),
}))
.unwrap(),
})
.to_string(),
},
responder: send_cap_bool,
})
@ -373,14 +373,14 @@ async fn check_caps(
on: source.process.clone(),
cap: Capability {
issuer: Address {
node: our_node.clone(),
node: our_node.to_string(),
process: KV_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
params: serde_json::json!({
"kind": "read",
"db": request.db.to_string(),
}))
.unwrap(),
})
.to_string(),
},
responder: send_cap_bool,
})
@ -405,7 +405,7 @@ async fn check_caps(
&request.db.to_string(),
&our_node,
&source,
&mut send_to_caps_oracle,
send_to_caps_oracle,
)
.await?;
add_capability(
@ -413,7 +413,7 @@ async fn check_caps(
&request.db.to_string(),
&our_node,
&source,
&mut send_to_caps_oracle,
send_to_caps_oracle,
)
.await?;
@ -451,52 +451,27 @@ async fn add_capability(
db: &str,
our_node: &str,
source: &Address,
send_to_caps_oracle: &mut CapMessageSender,
send_to_caps_oracle: &CapMessageSender,
) -> Result<(), KvError> {
let cap = Capability {
issuer: Address {
node: our_node.to_string(),
process: KV_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({ "kind": kind, "db": db })).unwrap(),
params: serde_json::json!({ "kind": kind, "db": db }).to_string(),
};
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
send_to_caps_oracle
.send(CapMessage::Add {
on: source.process.clone(),
caps: vec![cap],
responder: send_cap_bool,
responder: Some(send_cap_bool),
})
.await?;
let _ = recv_cap_bool.await?;
Ok(())
}
fn make_error_message(our_name: String, km: &KernelMessage, error: KvError) -> KernelMessage {
KernelMessage {
id: km.id,
source: Address {
node: our_name.clone(),
process: KV_PROCESS_ID.clone(),
},
target: match &km.rsvp {
None => km.source.clone(),
Some(rsvp) => rsvp.clone(),
},
rsvp: None,
message: Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&KvResponse::Err { error }).unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: None,
}
}
fn rocks_to_kv_err(error: rocksdb::Error) -> KvError {
KvError::RocksDBError {
action: "".into(),

View File

@ -2,7 +2,12 @@
#![feature(btree_extract_if)]
use anyhow::Result;
use clap::{arg, value_parser, Command};
use lib::types::core::*;
use lib::types::core::{
CapMessageReceiver, CapMessageSender, DebugReceiver, DebugSender, Identity, KernelCommand,
KernelMessage, Keyfile, Message, MessageReceiver, MessageSender, NetworkErrorReceiver,
NetworkErrorSender, NodeRouting, PrintReceiver, PrintSender, ProcessId, Request,
KERNEL_PROCESS_ID,
};
#[cfg(feature = "simulation-mode")]
use ring::{rand::SystemRandom, signature, signature::KeyPair};
use std::env;
@ -38,6 +43,9 @@ const CAP_CHANNEL_CAPACITY: usize = 1_000;
const KV_CHANNEL_CAPACITY: usize = 1_000;
const SQLITE_CHANNEL_CAPACITY: usize = 1_000;
const VERSION: &str = env!("CARGO_PKG_VERSION");
const WS_MIN_PORT: u16 = 9_000;
const TCP_MIN_PORT: u16 = 10_000;
const MAX_PORT: u16 = 65_535;
/// default routers as a eth-provider fallback
const DEFAULT_ETH_PROVIDERS: &str = include_str!("eth/default_providers_mainnet.json");
#[cfg(not(feature = "simulation-mode"))]
@ -169,9 +177,9 @@ async fn main() {
mpsc::channel(TERMINAL_CHANNEL_CAPACITY);
let our_ip = find_public_ip().await;
let (ws_tcp_handle, ws_flag_used) = setup_networking(ws_networking_port).await;
let (ws_tcp_handle, ws_flag_used) = setup_networking("ws", ws_networking_port).await;
#[cfg(not(feature = "simulation-mode"))]
let (tcp_tcp_handle, tcp_flag_used) = setup_networking(tcp_networking_port).await;
let (tcp_tcp_handle, tcp_flag_used) = setup_networking("tcp", tcp_networking_port).await;
#[cfg(feature = "simulation-mode")]
let (our, encoded_keyfile, decoded_keyfile) = simulate_node(
@ -179,7 +187,7 @@ async fn main() {
password.cloned(),
home_directory_path,
(
ws_tcp_handle.expect("fakenode ws setup failed"),
ws_tcp_handle.expect("need ws networking for simulation mode"),
ws_flag_used,
),
// NOTE: fakenodes only using WS protocol at the moment
@ -279,6 +287,7 @@ async fn main() {
* if any of these modules fail, the program exits with an error.
*/
let networking_keypair_arc = Arc::new(decoded_keyfile.networking_keypair);
let our_name_arc = Arc::new(our.name.clone());
let (kernel_process_map, db, reverse_cap_index) = state::load_state(
our.name.clone(),
@ -331,7 +340,7 @@ async fn main() {
*matches.get_one::<bool>("reveal-ip").unwrap_or(&true),
));
tasks.spawn(state::state_sender(
our.name.clone(),
our_name_arc.clone(),
kernel_message_sender.clone(),
print_sender.clone(),
state_receiver,
@ -339,7 +348,7 @@ async fn main() {
home_directory_path.clone(),
));
tasks.spawn(kv::kv(
our.name.clone(),
our_name_arc.clone(),
kernel_message_sender.clone(),
print_sender.clone(),
kv_receiver,
@ -347,7 +356,7 @@ async fn main() {
home_directory_path.clone(),
));
tasks.spawn(sqlite::sqlite(
our.name.clone(),
our_name_arc.clone(),
kernel_message_sender.clone(),
print_sender.clone(),
sqlite_receiver,
@ -386,7 +395,7 @@ async fn main() {
print_sender.clone(),
));
tasks.spawn(vfs::vfs(
our.name.clone(),
our_name_arc,
kernel_message_sender.clone(),
print_sender.clone(),
vfs_message_receiver,
@ -419,34 +428,24 @@ async fn main() {
verbose_mode,
) => {
match quit {
Ok(_) => match kernel_message_sender
.send(KernelMessage {
id: rand::random(),
source: Address {
node: our.name.clone(),
process: KERNEL_PROCESS_ID.clone(),
},
target: Address {
node: our.name.clone(),
process: KERNEL_PROCESS_ID.clone(),
},
rsvp: None,
message: Message::Request(Request {
Ok(()) => {
KernelMessage::builder()
.id(rand::random())
.source((our.name.as_str(), KERNEL_PROCESS_ID.clone()))
.target((our.name.as_str(), KERNEL_PROCESS_ID.clone()))
.message(Message::Request(Request {
inherit: false,
expects_response: None,
body: serde_json::to_vec(&KernelCommand::Shutdown).unwrap(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})
.await
{
Ok(()) => "graceful exit".into(),
Err(_) => {
"failed to gracefully shut down kernel".into()
}
},
}))
.build()
.unwrap()
.send(&kernel_message_sender)
.await;
"graceful exit".into()
}
Err(e) => e.to_string(),
}
}
@ -454,17 +453,8 @@ async fn main() {
// abort all remaining tasks
tasks.shutdown().await;
let stdout = std::io::stdout();
let mut stdout = stdout.lock();
crossterm::execute!(
stdout,
crossterm::event::DisableBracketedPaste,
crossterm::terminal::SetTitle(""),
crossterm::style::SetForegroundColor(crossterm::style::Color::Red),
crossterm::style::Print(format!("\r\n{quit_msg}\r\n")),
crossterm::style::ResetColor,
)
.expect("failed to clean up terminal visual state! your terminal window might be funky now");
// reset all modified aspects of terminal -- clean ourselves up
terminal::utils::cleanup(&quit_msg);
}
async fn set_http_server_port(set_port: Option<&u16>) -> u16 {
@ -505,9 +495,10 @@ async fn set_http_server_port(set_port: Option<&u16>) -> u16 {
/// If no port is provided, it searches for the first available port between 9000 and 65535.
/// Returns a tuple containing the TcpListener and a boolean indicating if a specific port was used.
async fn setup_networking(
protocol: &str,
networking_port: Option<&u16>,
) -> (Option<tokio::net::TcpListener>, bool) {
if let Some(0) = networking_port {
if let Some(&0) = networking_port {
return (None, true);
}
match networking_port {
@ -518,7 +509,12 @@ async fn setup_networking(
(Some(listener), true)
}
None => {
let listener = http::utils::find_open_port(9000, 65535)
let min_port = if protocol == "ws" {
WS_MIN_PORT
} else {
TCP_MIN_PORT
};
let listener = http::utils::find_open_port(min_port, MAX_PORT)
.await
.expect("no ports found in range 9000-65535 for kinode networking");
(Some(listener), false)
@ -648,7 +644,7 @@ fn build_command() -> Command {
.value_parser(value_parser!(u16)),
)
.arg(
arg!(--"tcp-port" <PORT> "Kinode internal TCP protocol port [default: first unbound at or above 9000]")
arg!(--"tcp-port" <PORT> "Kinode internal TCP protocol port [default: first unbound at or above 10000]")
.alias("--tcp-port")
.value_parser(value_parser!(u16)),
)
@ -663,7 +659,7 @@ fn build_command() -> Command {
.value_parser(value_parser!(bool)),
)
.arg(arg!(--rpc <RPC> "Add a WebSockets RPC URL at boot"))
.arg(arg!(--password <PASSWORD> "Node password"));
.arg(arg!(--password <PASSWORD> "Node password (in double quotes)"));
#[cfg(feature = "simulation-mode")]
let app = app
@ -736,8 +732,8 @@ async fn serve_register_fe(
tx,
kill_rx,
our_ip,
ws_networking,
tcp_networking,
(ws_networking.0.as_ref(), ws_networking.1),
(tcp_networking.0.as_ref(), tcp_networking.1),
http_server_port,
disk_keyfile,
maybe_rpc) => {
@ -748,15 +744,15 @@ async fn serve_register_fe(
}
};
tokio::fs::write(
format!("{}/.keys", home_directory_path),
encoded_keyfile.clone(),
)
.await
.unwrap();
tokio::fs::write(format!("{}/.keys", home_directory_path), &encoded_keyfile)
.await
.unwrap();
let _ = kill_tx.send(true);
drop(ws_networking.0);
drop(tcp_networking.0);
(our, encoded_keyfile, decoded_keyfile)
}
@ -769,13 +765,17 @@ async fn login_with_password(
maybe_rpc: Option<String>,
password: &str,
) -> (Identity, Vec<u8>, Keyfile) {
use {alloy_primitives::Address as EthAddress, digest::Digest, ring::signature::KeyPair};
use {
alloy_primitives::Address as EthAddress,
ring::signature::KeyPair,
sha2::{Digest, Sha256},
};
let disk_keyfile: Vec<u8> = tokio::fs::read(format!("{}/.keys", home_directory_path))
.await
.expect("could not read keyfile");
let password_hash = format!("0x{}", hex::encode(sha2::Sha256::digest(password)));
let password_hash = format!("0x{}", hex::encode(Sha256::digest(password)));
let provider = Arc::new(register::connect_to_provider(maybe_rpc).await);
@ -813,12 +813,9 @@ async fn login_with_password(
.await
.expect("information used to boot does not match information onchain");
tokio::fs::write(
format!("{}/.keys", home_directory_path),
disk_keyfile.clone(),
)
.await
.unwrap();
tokio::fs::write(format!("{}/.keys", home_directory_path), &disk_keyfile)
.await
.unwrap();
(our, disk_keyfile, k)
}

View File

@ -1,6 +1,6 @@
use lib::types::core::{
Address, Identity, KernelMessage, MessageReceiver, MessageSender, NetAction, NetResponse,
NetworkErrorSender, NodeRouting, PrintSender, ProcessId,
Identity, KernelMessage, MessageReceiver, MessageSender, NetAction, NetResponse,
NetworkErrorSender, NodeRouting, PrintSender,
};
use types::{
IdentityExt, NetData, OnchainPKI, PKINames, Peers, PendingPassthroughs, TCP_PROTOCOL,
@ -83,10 +83,10 @@ pub async fn networking(
"net: fatal error: need at least one networking protocol"
));
}
if ports.contains_key(WS_PROTOCOL) {
if ext.our.ws_routing().is_some() {
tasks.spawn(ws::receiver(ext.clone(), net_data.clone()));
}
if ports.contains_key(TCP_PROTOCOL) {
if ext.our.tcp_routing().is_some() {
tasks.spawn(tcp::receiver(ext.clone(), net_data.clone()));
}
}
@ -274,29 +274,25 @@ async fn handle_local_request(
return;
}
};
ext.kernel_message_tx
.send(KernelMessage {
id: km.id,
source: Address {
node: ext.our.name.clone(),
process: ProcessId::new(Some("net"), "distro", "sys"),
KernelMessage::builder()
.id(km.id)
.source((ext.our.name.as_str(), "net", "distro", "sys"))
.target(km.rsvp.as_ref().unwrap_or(&km.source).clone())
.message(lib::core::Message::Response((
lib::core::Response {
inherit: false,
body: rmp_serde::to_vec(&response_body)
.expect("net: failed to serialize response"),
metadata: None,
capabilities: vec![],
},
target: km.rsvp.as_ref().unwrap_or(&km.source).clone(),
rsvp: None,
message: lib::core::Message::Response((
lib::core::Response {
inherit: false,
body: rmp_serde::to_vec(&response_body)
.expect("net: failed to serialize response"),
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: response_blob,
})
.await
.expect("net: kernel channel was dropped");
None,
)))
.lazy_load_blob(response_blob)
.build()
.unwrap()
.send(&ext.kernel_message_tx)
.await;
}
}
}

View File

@ -21,7 +21,10 @@ pub struct PeerConnection {
}
pub async fn receiver(ext: IdentityExt, data: NetData) -> anyhow::Result<()> {
let tcp_port = ext.our.get_protocol_port(TCP_PROTOCOL).unwrap();
let tcp_port = ext
.our
.get_protocol_port(TCP_PROTOCOL)
.expect("tcp port not found");
let tcp = match TcpListener::bind(format!("0.0.0.0:{tcp_port}")).await {
Ok(tcp) => tcp,
Err(_e) => {

View File

@ -3,9 +3,9 @@ use crate::net::types::{
RoutingRequest, TCP_PROTOCOL, WS_PROTOCOL,
};
use lib::types::core::{
Address, Identity, KernelMessage, KnsUpdate, Message, MessageSender, NetAction,
NetworkErrorSender, NodeRouting, PrintSender, Printout, ProcessId, Request, Response,
SendError, SendErrorKind, WrappedSendError,
Identity, KernelMessage, KnsUpdate, Message, MessageSender, NetAction, NetworkErrorSender,
NodeRouting, PrintSender, Printout, Request, Response, SendError, SendErrorKind,
WrappedSendError,
};
use {
futures::{SinkExt, StreamExt},
@ -94,26 +94,21 @@ pub async fn create_passthrough(
}
// send their net:distro:sys process a message, notifying it to create a *matching*
// passthrough request, which we can pair with this pending one.
target_peer.sender.send(KernelMessage {
id: rand::random(),
source: Address {
node: our.name.clone(),
process: ProcessId::new(Some("net"), "distro", "sys"),
},
target: Address {
node: target_id.name.clone(),
process: ProcessId::new(Some("net"), "distro", "sys"),
},
rsvp: None,
message: Message::Request(Request {
inherit: false,
expects_response: Some(5),
body: rmp_serde::to_vec(&NetAction::ConnectionRequest(from_id.name.clone()))?,
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
})?;
target_peer.sender.send(
KernelMessage::builder()
.id(rand::random())
.source((our.name.as_str(), "net", "distro", "sys"))
.target((target_id.name.as_str(), "net", "distro", "sys"))
.message(Message::Request(Request {
inherit: false,
expects_response: Some(5),
body: rmp_serde::to_vec(&NetAction::ConnectionRequest(from_id.name.clone()))?,
metadata: None,
capabilities: vec![],
}))
.build()
.unwrap(),
)?;
// we'll remove this either if the above message gets a negative response,
// or if the target node connects to us with a matching passthrough.
// TODO it is currently possible to have dangling passthroughs in the map
@ -335,46 +330,31 @@ pub async fn parse_hello_message(
),
)
.await;
kernel_message_tx
.send(KernelMessage {
id: km.id,
source: Address {
node: our.name.clone(),
process: ProcessId::new(Some("net"), "distro", "sys"),
KernelMessage::builder()
.id(km.id)
.source((our.name.as_str(), "net", "distro", "sys"))
.target(km.rsvp.as_ref().unwrap_or(&km.source).clone())
.message(Message::Response((
Response {
inherit: false,
body: "delivered".as_bytes().to_vec(),
metadata: None,
capabilities: vec![],
},
target: km.rsvp.as_ref().unwrap_or(&km.source).clone(),
rsvp: None,
message: Message::Response((
Response {
inherit: false,
body: "delivered".as_bytes().to_vec(),
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: None,
})
.await
.expect("net: kernel_message_tx was dropped");
None,
)))
.build()
.unwrap()
.send(kernel_message_tx)
.await;
}
/// Create a terminal printout at verbosity level 0.
pub async fn print_loud(print_tx: &PrintSender, content: &str) {
let _ = print_tx
.send(Printout {
verbosity: 0,
content: content.into(),
})
.await;
Printout::new(0, content).send(print_tx).await;
}
/// Create a terminal printout at verbosity level 2.
pub async fn print_debug(print_tx: &PrintSender, content: &str) {
let _ = print_tx
.send(Printout {
verbosity: 2,
content: content.into(),
})
.await;
Printout::new(2, content).send(print_tx).await;
}

View File

@ -27,7 +27,10 @@ pub struct PeerConnection {
pub type WebSocket = WebSocketStream<MaybeTlsStream<tokio::net::TcpStream>>;
pub async fn receiver(ext: IdentityExt, data: NetData) -> Result<()> {
let ws_port = ext.our.get_protocol_port(WS_PROTOCOL).unwrap();
let ws_port = ext
.our
.get_protocol_port(WS_PROTOCOL)
.expect("ws port not found");
let ws = match TcpListener::bind(format!("0.0.0.0:{ws_port}")).await {
Ok(ws) => ws,
Err(_e) => {

View File

@ -9,14 +9,16 @@ use alloy_primitives::{Address as EthAddress, Bytes, FixedBytes, U256};
use alloy_sol_macro::sol;
use alloy_sol_types::{eip712_domain, SolCall, SolStruct, SolValue};
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
use lib::types::core::*;
use ring::rand::SystemRandom;
use ring::signature;
use ring::signature::KeyPair;
use static_dir::static_dir;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use lib::types::core::{
BootInfo, Identity, ImportKeyfileInfo, Keyfile, KeyfileVet, KeyfileVetted, LoginAndResetInfo,
LoginInfo, NodeRouting, UnencryptedIdentity,
};
use ring::{rand::SystemRandom, signature, signature::KeyPair};
use std::{
str::FromStr,
sync::Arc,
time::{SystemTime, UNIX_EPOCH},
};
use tokio::sync::{mpsc, oneshot};
use warp::{
http::{
@ -33,8 +35,8 @@ pub async fn register(
tx: RegistrationSender,
kill_rx: oneshot::Receiver<bool>,
ip: String,
ws_networking: (Option<tokio::net::TcpListener>, bool),
tcp_networking: (Option<tokio::net::TcpListener>, bool),
ws_networking: (Option<&tokio::net::TcpListener>, bool),
tcp_networking: (Option<&tokio::net::TcpListener>, bool),
http_port: u16,
keyfile: Option<Vec<u8>>,
maybe_rpc: Option<String>,
@ -90,29 +92,29 @@ pub async fn register(
let ws_port = warp::any().map(move || (ws_port, ws_flag_used));
let tcp_port = warp::any().map(move || (tcp_port, tcp_flag_used));
let static_files = warp::path("assets").and(static_dir!("src/register-ui/build/assets/"));
let static_files =
warp::path("assets").and(static_dir::static_dir!("src/register-ui/build/assets/"));
let react_app = warp::path::end()
.or(warp::path("login"))
.or(warp::path("register-name"))
.or(warp::path("claim-invite"))
.or(warp::path("reset"))
.or(warp::path("import-keyfile"))
.or(warp::path("set-password"))
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html")))
.or(warp::path("login")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.or(warp::path("register-name")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.or(warp::path("claim-invite")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.or(warp::path("reset")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.or(warp::path("import-keyfile")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.or(warp::path("set-password")
.and(warp::get())
.map(move || warp::reply::html(include_str!("register-ui/build/index.html"))))
.map(move |_| warp::reply::html(include_str!("register-ui/build/index.html")));
let boot_provider = provider.clone();
let login_provider = provider.clone();
let import_provider = provider.clone();
let api = warp::path("info")
.and(
warp::get()
.and(keyfile.clone())
.and_then(get_unencrypted_info),
)
.or(warp::path("current-chain")
.and(warp::get())
.map(move || warp::reply::json(&"0xa")))
@ -133,18 +135,7 @@ pub async fn register(
}
warp::reply::html(String::new())
},
));
let boot_provider = provider.clone();
let login_provider = provider.clone();
let import_provider = provider.clone();
let api = warp::path("info")
.and(
warp::get()
.and(keyfile.clone())
.and_then(get_unencrypted_info),
)
))
.or(warp::path("generate-networking-info").and(
warp::post()
.and(our_temp_id.clone())
@ -236,7 +227,6 @@ pub async fn register(
}
pub async fn connect_to_provider(maybe_rpc: Option<String>) -> RootProvider<PubSubFrontend> {
// This ETH provider uses public rpc endpoints to verify registration signatures.
let url = if let Some(rpc_url) = maybe_rpc {
rpc_url
} else {
@ -246,19 +236,20 @@ pub async fn connect_to_provider(maybe_rpc: Option<String>) -> RootProvider<PubS
"Connecting to Optimism RPC at {url}\n\
Specify a different RPC URL with the --rpc flag."
);
let ws = WsConnect::new(url);
// this fails occasionally in certain networking environments. i'm not sure why.
// frustratingly, the exact same call does not fail in the eth module. more investigation needed.
let Ok(client) = ProviderBuilder::new().on_ws(ws).await else {
panic!(
"Error: runtime could not connect to ETH RPC.\n\
This is necessary in order to verify node identity onchain.\n\
Please make sure you are using a valid WebSockets URL if using \
the --rpc flag, and you are connected to the internet."
);
};
println!("Connected to Optimism RPC");
let client = match ProviderBuilder::new().on_ws(WsConnect::new(url)).await {
Ok(client) => client,
Err(e) => {
panic!(
"Error: runtime could not connect to ETH RPC: {e}\n\
This is necessary in order to verify node identity onchain.\n\
Please make sure you are using a valid WebSockets URL if using \
the --rpc flag, and you are connected to the internet."
);
}
};
println!("Connected to Optimism RPC");
client
}

View File

@ -1,121 +1,129 @@
use anyhow::Result;
use base64::{engine::general_purpose::STANDARD as base64_standard, Engine};
use dashmap::DashMap;
use lib::types::core::{
Address, CapMessage, CapMessageSender, Capability, KernelMessage, LazyLoadBlob, Message,
MessageReceiver, MessageSender, PackageId, PrintSender, Printout, ProcessId, Request, Response,
SqlValue, SqliteAction, SqliteError, SqliteRequest, SqliteResponse, SQLITE_PROCESS_ID,
};
use rusqlite::Connection;
use std::collections::{HashMap, HashSet, VecDeque};
use std::sync::Arc;
use tokio::fs;
use tokio::sync::Mutex;
use lib::types::core::*;
use std::{
collections::{HashMap, HashSet, VecDeque},
sync::Arc,
};
use tokio::{fs, sync::Mutex};
lazy_static::lazy_static! {
static ref READ_KEYWORDS: HashSet<String> = {
let mut set = HashSet::new();
let keywords = ["ANALYZE", "ATTACH", "BEGIN", "EXPLAIN", "PRAGMA", "SELECT", "VALUES", "WITH"];
for &keyword in &keywords {
set.insert(keyword.to_string());
}
set
};
static ref READ_KEYWORDS: HashSet<&'static str> =
HashSet::from(["ANALYZE", "ATTACH", "BEGIN", "EXPLAIN", "PRAGMA", "SELECT", "VALUES", "WITH"]);
static ref WRITE_KEYWORDS: HashSet<String> = {
let mut set = HashSet::new();
let keywords = ["ALTER", "ANALYZE", "COMMIT", "CREATE", "DELETE", "DETACH", "DROP", "END", "INSERT", "REINDEX", "RELEASE", "RENAME", "REPLACE", "ROLLBACK", "SAVEPOINT", "UPDATE", "VACUUM"];
for &keyword in &keywords {
set.insert(keyword.to_string());
}
set
};
static ref WRITE_KEYWORDS: HashSet<&'static str> =
HashSet::from(["ALTER", "ANALYZE", "COMMIT", "CREATE", "DELETE", "DETACH", "DROP", "END", "INSERT", "REINDEX", "RELEASE", "RENAME", "REPLACE", "ROLLBACK", "SAVEPOINT", "UPDATE", "VACUUM"]);
}
pub async fn sqlite(
our_node: String,
our_node: Arc<String>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
mut recv_from_loop: MessageReceiver,
send_to_caps_oracle: CapMessageSender,
home_directory_path: String,
) -> anyhow::Result<()> {
let sqlite_path = format!("{}/sqlite", &home_directory_path);
if let Err(e) = fs::create_dir_all(&sqlite_path).await {
panic!("failed creating sqlite dir! {:?}", e);
let sqlite_path = Arc::new(format!("{home_directory_path}/sqlite"));
if let Err(e) = fs::create_dir_all(&*sqlite_path).await {
panic!("failed creating sqlite dir! {e:?}");
}
let open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>> = Arc::new(DashMap::new());
let txs: Arc<DashMap<u64, Vec<(String, Vec<SqlValue>)>>> = Arc::new(DashMap::new());
let mut process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> =
HashMap::new();
let process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> = HashMap::new();
loop {
tokio::select! {
Some(km) = recv_from_loop.recv() => {
if our_node.clone() != km.source.node {
println!(
"sqlite: request must come from our_node={}, got: {}",
our_node,
km.source.node,
);
continue;
}
let queue = process_queues
.entry(km.source.process.clone())
.or_insert_with(|| Arc::new(Mutex::new(VecDeque::new())))
.clone();
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km.clone());
}
// clone Arcs
let our_node = our_node.clone();
let send_to_caps_oracle = send_to_caps_oracle.clone();
let send_to_terminal = send_to_terminal.clone();
let send_to_loop = send_to_loop.clone();
let open_dbs = open_dbs.clone();
let txs = txs.clone();
let sqlite_path = sqlite_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
if let Err(e) = handle_request(
our_node.clone(),
km.clone(),
open_dbs.clone(),
txs.clone(),
send_to_loop.clone(),
send_to_terminal.clone(),
send_to_caps_oracle.clone(),
sqlite_path.clone(),
)
.await
{
let _ = send_to_loop
.send(make_error_message(our_node.clone(), &km, e))
.await;
}
}
});
}
while let Some(km) = recv_from_loop.recv().await {
if *our_node != km.source.node {
Printout::new(
1,
format!(
"sqlite: got request from {}, but requests must come from our node {our_node}",
km.source.node
),
)
.send(&send_to_terminal)
.await;
continue;
}
let queue = process_queues
.get(&km.source.process)
.cloned()
.unwrap_or_else(|| Arc::new(Mutex::new(VecDeque::new())));
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km);
}
// clone Arcs
let our_node = our_node.clone();
let send_to_loop = send_to_loop.clone();
let send_to_terminal = send_to_terminal.clone();
let send_to_caps_oracle = send_to_caps_oracle.clone();
let open_dbs = open_dbs.clone();
let txs = txs.clone();
let sqlite_path = sqlite_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
let (km_id, km_rsvp) =
(km.id.clone(), km.rsvp.clone().unwrap_or(km.source.clone()));
if let Err(e) = handle_request(
&our_node,
km,
open_dbs,
txs,
&send_to_loop,
&send_to_caps_oracle,
&sqlite_path,
)
.await
{
Printout::new(1, format!("sqlite: {e}"))
.send(&send_to_terminal)
.await;
KernelMessage::builder()
.id(km_id)
.source((our_node.as_str(), SQLITE_PROCESS_ID.clone()))
.target(km_rsvp)
.message(Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&SqliteResponse::Err { error: e })
.unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)))
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
}
});
}
Ok(())
}
async fn handle_request(
our_node: String,
our_node: &str,
km: KernelMessage,
open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
txs: Arc<DashMap<u64, Vec<(String, Vec<SqlValue>)>>>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
send_to_caps_oracle: CapMessageSender,
sqlite_path: String,
send_to_loop: &MessageSender,
send_to_caps_oracle: &CapMessageSender,
sqlite_path: &str,
) -> Result<(), SqliteError> {
let KernelMessage {
id,
@ -123,13 +131,13 @@ async fn handle_request(
message,
lazy_load_blob: blob,
..
} = km.clone();
} = km;
let Message::Request(Request {
body,
expects_response,
metadata,
..
}) = message.clone()
}) = message
else {
return Err(SqliteError::InputError {
error: "not a request".into(),
@ -147,12 +155,12 @@ async fn handle_request(
};
check_caps(
our_node.clone(),
source.clone(),
open_dbs.clone(),
send_to_caps_oracle.clone(),
our_node,
&source,
&open_dbs,
send_to_caps_oracle,
&request,
sqlite_path.clone(),
sqlite_path,
)
.await?;
@ -178,7 +186,7 @@ async fn handle_request(
.next()
.map(|word| word.to_uppercase())
.unwrap_or("".to_string());
if !READ_KEYWORDS.contains(&first_word) {
if !READ_KEYWORDS.contains(first_word.as_str()) {
return Err(SqliteError::NotAReadKeyword);
}
@ -236,7 +244,7 @@ async fn handle_request(
.map(|word| word.to_uppercase())
.unwrap_or("".to_string());
if !WRITE_KEYWORDS.contains(&first_word) {
if !WRITE_KEYWORDS.contains(first_word.as_str()) {
return Err(SqliteError::NotAWriteKeyword);
}
@ -304,21 +312,12 @@ async fn handle_request(
}
};
if let Some(target) = km.rsvp.or_else(|| {
expects_response.map(|_| Address {
node: our_node.clone(),
process: source.process.clone(),
})
}) {
let response = KernelMessage {
id,
source: Address {
node: our_node.clone(),
process: SQLITE_PROCESS_ID.clone(),
},
target,
rsvp: None,
message: Message::Response((
if let Some(target) = km.rsvp.or_else(|| expects_response.map(|_| source)) {
KernelMessage::builder()
.id(id)
.source((our_node, SQLITE_PROCESS_ID.clone()))
.target(target)
.message(Message::Response((
Response {
inherit: false,
body,
@ -326,37 +325,27 @@ async fn handle_request(
capabilities: vec![],
},
None,
)),
lazy_load_blob: bytes.map(|bytes| LazyLoadBlob {
)))
.lazy_load_blob(bytes.map(|bytes| LazyLoadBlob {
mime: Some("application/octet-stream".into()),
bytes,
}),
};
let _ = send_to_loop.send(response).await;
} else {
send_to_terminal
.send(Printout {
verbosity: 2,
content: format!(
"sqlite: not sending response: {:?}",
serde_json::from_slice::<SqliteResponse>(&body)
),
})
.await
.unwrap();
}))
.build()
.unwrap()
.send(send_to_loop)
.await;
}
Ok(())
}
async fn check_caps(
our_node: String,
source: Address,
open_dbs: Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
mut send_to_caps_oracle: CapMessageSender,
our_node: &str,
source: &Address,
open_dbs: &Arc<DashMap<(PackageId, String), Mutex<Connection>>>,
send_to_caps_oracle: &CapMessageSender,
request: &SqliteRequest,
sqlite_path: String,
sqlite_path: &str,
) -> Result<(), SqliteError> {
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
let src_package_id = PackageId::new(source.process.package(), source.process.publisher());
@ -366,17 +355,14 @@ async fn check_caps(
send_to_caps_oracle
.send(CapMessage::Has {
on: source.process.clone(),
cap: Capability {
issuer: Address {
node: our_node.clone(),
process: SQLITE_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
cap: Capability::new(
(our_node, SQLITE_PROCESS_ID.clone()),
serde_json::json!({
"kind": "write",
"db": request.db.to_string(),
}))
.unwrap(),
},
})
.to_string(),
),
responder: send_cap_bool,
})
.await?;
@ -392,17 +378,14 @@ async fn check_caps(
send_to_caps_oracle
.send(CapMessage::Has {
on: source.process.clone(),
cap: Capability {
issuer: Address {
node: our_node.clone(),
process: SQLITE_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
cap: Capability::new(
(our_node, SQLITE_PROCESS_ID.clone()),
serde_json::json!({
"kind": "read",
"db": request.db.to_string(),
}))
.unwrap(),
},
})
.to_string(),
),
responder: send_cap_bool,
})
.await?;
@ -426,7 +409,7 @@ async fn check_caps(
&request.db.to_string(),
&our_node,
&source,
&mut send_to_caps_oracle,
send_to_caps_oracle,
)
.await?;
add_capability(
@ -434,7 +417,7 @@ async fn check_caps(
&request.db.to_string(),
&our_node,
&source,
&mut send_to_caps_oracle,
send_to_caps_oracle,
)
.await?;
@ -481,21 +464,21 @@ async fn add_capability(
db: &str,
our_node: &str,
source: &Address,
send_to_caps_oracle: &mut CapMessageSender,
send_to_caps_oracle: &CapMessageSender,
) -> Result<(), SqliteError> {
let cap = Capability {
issuer: Address {
node: our_node.to_string(),
process: SQLITE_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({ "kind": kind, "db": db })).unwrap(),
params: serde_json::json!({ "kind": kind, "db": db }).to_string(),
};
let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel();
send_to_caps_oracle
.send(CapMessage::Add {
on: source.process.clone(),
caps: vec![cap],
responder: send_cap_bool,
responder: Some(send_cap_bool),
})
.await?;
let _ = recv_cap_bool.await?;
@ -543,28 +526,3 @@ fn get_json_params(blob: Option<LazyLoadBlob>) -> Result<Vec<SqlValue>, SqliteEr
},
}
}
fn make_error_message(our_name: String, km: &KernelMessage, error: SqliteError) -> KernelMessage {
KernelMessage {
id: km.id,
source: Address {
node: our_name.clone(),
process: SQLITE_PROCESS_ID.clone(),
},
target: match &km.rsvp {
None => km.source.clone(),
Some(rsvp) => rsvp.clone(),
},
rsvp: None,
message: Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&SqliteResponse::Err { error }).unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: None,
}
}

View File

@ -1,16 +1,18 @@
use anyhow::Result;
use lib::types::core::{
Address, Capability, Erc721Metadata, KernelMessage, LazyLoadBlob, Message, MessageReceiver,
MessageSender, NetworkErrorSender, OnExit, PackageManifestEntry, PersistedProcess, PrintSender,
Printout, ProcessId, ProcessMap, Request, Response, ReverseCapIndex, StateAction, StateError,
StateResponse, KERNEL_PROCESS_ID, STATE_PROCESS_ID, VFS_PROCESS_ID,
};
use ring::signature;
use rocksdb::checkpoint::Checkpoint;
use rocksdb::{Options, DB};
use std::collections::{HashMap, VecDeque};
use std::io::Read;
use std::path::Path;
use std::sync::Arc;
use tokio::fs;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use lib::types::core::*;
use rocksdb::{checkpoint::Checkpoint, Options, DB};
use std::{
collections::{HashMap, VecDeque},
io::Read,
path::Path,
sync::Arc,
};
use tokio::{fs, io::AsyncWriteExt, sync::Mutex};
include!("bootstrapped_processes.rs");
@ -20,29 +22,22 @@ pub async fn load_state(
home_directory_path: String,
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
) -> Result<(ProcessMap, DB, ReverseCapIndex), StateError> {
let state_path = format!("{}/kernel", &home_directory_path);
let state_path = format!("{home_directory_path}/kernel");
if let Err(e) = fs::create_dir_all(&state_path).await {
panic!("failed creating kernel state dir! {:?}", e);
panic!("failed creating kernel state dir! {e:?}");
}
// more granular kernel_state in column families
// let mut options = Option::default().unwrap();
// options.create_if_missing(true);
//let db = DB::open_default(&state_directory_path_str).unwrap();
let mut opts = Options::default();
opts.create_if_missing(true);
// let cf_name = "kernel_state";
// let cf_descriptor = ColumnFamilyDescriptor::new(cf_name, Options::default());
let db = DB::open_default(state_path).unwrap();
let mut process_map: ProcessMap = HashMap::new();
let mut reverse_cap_index: ReverseCapIndex = HashMap::new();
let kernel_id = process_to_vec(KERNEL_PROCESS_ID.clone());
match db.get(&kernel_id) {
let kernel_id_vec = process_to_vec(KERNEL_PROCESS_ID.clone());
match db.get(&kernel_id_vec) {
Ok(Some(value)) => {
process_map = bincode::deserialize::<ProcessMap>(&value).unwrap();
process_map = bincode::deserialize::<ProcessMap>(&value)
.expect("failed to deserialize kernel process map");
// if our networking key changed, we need to re-sign all local caps
process_map.iter_mut().for_each(|(_id, process)| {
process.capabilities.iter_mut().for_each(|(cap, sig)| {
@ -56,11 +51,11 @@ pub async fn load_state(
});
}
Ok(None) => {
db.put(&kernel_id, bincode::serialize(&process_map).unwrap())
db.put(&kernel_id_vec, bincode::serialize(&process_map).unwrap())
.unwrap();
}
Err(e) => {
panic!("failed to load kernel state from db: {:?}", e);
panic!("failed to load kernel state from db: {e:?}");
}
}
@ -71,7 +66,7 @@ pub async fn load_state(
bootstrap(
&our_name,
keypair,
home_directory_path.clone(),
home_directory_path,
runtime_extensions,
&mut process_map,
&mut reverse_cap_index,
@ -83,7 +78,7 @@ pub async fn load_state(
}
pub async fn state_sender(
our_name: String,
our_node: Arc<String>,
send_to_loop: MessageSender,
send_to_terminal: PrintSender,
mut recv_state: MessageReceiver,
@ -91,68 +86,79 @@ pub async fn state_sender(
home_directory_path: String,
) -> Result<(), anyhow::Error> {
let db = Arc::new(db);
let home_directory_path = Arc::new(home_directory_path);
let mut process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> =
HashMap::new();
let process_queues: HashMap<ProcessId, Arc<Mutex<VecDeque<KernelMessage>>>> = HashMap::new();
loop {
tokio::select! {
Some(km) = recv_state.recv() => {
if our_name != km.source.node {
println!(
"state: request must come from our_name={}, got: {}",
our_name, &km,
);
continue;
}
let queue = process_queues
.entry(km.source.process.clone())
.or_insert_with(|| Arc::new(Mutex::new(VecDeque::new())))
.clone();
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km.clone());
}
let db_clone = db.clone();
let send_to_loop = send_to_loop.clone();
let send_to_terminal = send_to_terminal.clone();
let our_name = our_name.clone();
let home_directory_path = home_directory_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
if let Err(e) = handle_request(
our_name.clone(),
km.clone(),
db_clone,
send_to_loop.clone(),
send_to_terminal,
home_directory_path,
)
.await
{
let _ = send_to_loop
.send(make_error_message(our_name.clone(), &km, e))
.await;
}
}
});
}
while let Some(km) = recv_state.recv().await {
if *our_node != km.source.node {
Printout::new(
1,
format!(
"state: got request from {}, but requests must come from our node {our_node}",
km.source.node
),
)
.send(&send_to_terminal)
.await;
continue;
}
let queue = process_queues
.get(&km.source.process)
.cloned()
.unwrap_or_else(|| Arc::new(Mutex::new(VecDeque::new())));
{
let mut queue_lock = queue.lock().await;
queue_lock.push_back(km);
}
let our_node = our_node.clone();
let db_clone = db.clone();
let send_to_loop = send_to_loop.clone();
let home_directory_path = home_directory_path.clone();
tokio::spawn(async move {
let mut queue_lock = queue.lock().await;
if let Some(km) = queue_lock.pop_front() {
let (km_id, km_rsvp) =
(km.id.clone(), km.rsvp.clone().unwrap_or(km.source.clone()));
if let Err(e) =
handle_request(&our_node, km, db_clone, &send_to_loop, &home_directory_path)
.await
{
KernelMessage::builder()
.id(km_id)
.source((our_node.as_str(), STATE_PROCESS_ID.clone()))
.target(km_rsvp)
.message(Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&StateResponse::Err(e)).unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)))
.build()
.unwrap()
.send(&send_to_loop)
.await;
}
}
});
}
Ok(())
}
async fn handle_request(
our_name: String,
our_node: &str,
kernel_message: KernelMessage,
db: Arc<DB>,
send_to_loop: MessageSender,
_send_to_terminal: PrintSender,
home_directory_path: String,
send_to_loop: &MessageSender,
home_directory_path: &str,
) -> Result<(), StateError> {
let KernelMessage {
id,
@ -178,7 +184,7 @@ async fn handle_request(
Ok(r) => r,
Err(e) => {
return Err(StateError::BadJson {
error: format!("parse into StateAction failed: {:?}", e),
error: format!("parse into StateAction failed: {e:?}"),
})
}
};
@ -214,7 +220,6 @@ async fn handle_request(
});
}
Err(e) => {
println!("get state error: {:?}", e);
return Err(StateError::RocksDBError {
action: "GetState".into(),
error: e.to_string(),
@ -230,7 +235,6 @@ async fn handle_request(
None,
),
Err(e) => {
println!("delete state error: {:?}", e);
return Err(StateError::RocksDBError {
action: "DeleteState".into(),
error: e.to_string(),
@ -239,7 +243,7 @@ async fn handle_request(
}
}
StateAction::Backup => {
let checkpoint_dir = format!("{}/kernel/backup", &home_directory_path);
let checkpoint_dir = format!("{home_directory_path}/kernel/backup");
if Path::new(&checkpoint_dir).exists() {
fs::remove_dir_all(&checkpoint_dir).await?;
@ -260,21 +264,12 @@ async fn handle_request(
}
};
if let Some(target) = rsvp.or_else(|| {
expects_response.map(|_| Address {
node: our_name.clone(),
process: source.process.clone(),
})
}) {
let response = KernelMessage {
id,
source: Address {
node: our_name.clone(),
process: STATE_PROCESS_ID.clone(),
},
target,
rsvp: None,
message: Message::Response((
if let Some(target) = rsvp.or_else(|| expects_response.map(|_| source)) {
KernelMessage::builder()
.id(id)
.source((our_node, STATE_PROCESS_ID.clone()))
.target(target)
.message(Message::Response((
Response {
inherit: false,
body,
@ -282,14 +277,15 @@ async fn handle_request(
capabilities: vec![],
},
None,
)),
lazy_load_blob: bytes.map(|bytes| LazyLoadBlob {
)))
.lazy_load_blob(bytes.map(|bytes| LazyLoadBlob {
mime: Some("application/octet-stream".into()),
bytes,
}),
};
let _ = send_to_loop.send(response).await;
}))
.build()
.unwrap()
.send(send_to_loop)
.await;
};
Ok(())
@ -310,9 +306,7 @@ async fn bootstrap(
runtime_extensions: Vec<(ProcessId, MessageSender, Option<NetworkErrorSender>, bool)>,
process_map: &mut ProcessMap,
reverse_cap_index: &mut ReverseCapIndex,
) -> Result<()> {
// println!("bootstrapping node...\r");
) -> anyhow::Result<()> {
let mut runtime_caps: HashMap<Capability, Vec<u8>> = HashMap::new();
// kernel is a special case
let k_cap = Capability {
@ -562,11 +556,11 @@ async fn bootstrap(
node: our_name.into(),
process: VFS_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
params: serde_json::json!({
"kind": "read",
"drive": drive_path,
}))
.unwrap(),
})
.to_string(),
};
requested_caps.insert(read_cap.clone(), sign_cap(read_cap, keypair.clone()));
let write_cap = Capability {
@ -574,11 +568,11 @@ async fn bootstrap(
node: our_name.into(),
process: VFS_PROCESS_ID.clone(),
},
params: serde_json::to_string(&serde_json::json!({
params: serde_json::json!({
"kind": "write",
"drive": drive_path,
}))
.unwrap(),
})
.to_string(),
};
requested_caps.insert(write_cap.clone(), sign_cap(write_cap, keypair.clone()));
@ -730,10 +724,7 @@ async fn get_zipped_packages() -> Vec<(
if let Ok(metadata) = serde_json::from_slice::<Erc721Metadata>(metadata_bytes) {
packages.push((metadata, zip));
} else {
println!(
"fs: metadata for package {} is not valid Erc721Metadata\r",
package_name
);
println!("fs: metadata for package {package_name} is not valid Erc721Metadata!\r",);
}
}
}
@ -741,31 +732,6 @@ async fn get_zipped_packages() -> Vec<(
packages
}
fn make_error_message(our_name: String, km: &KernelMessage, error: StateError) -> KernelMessage {
KernelMessage {
id: km.id,
source: Address {
node: our_name.clone(),
process: STATE_PROCESS_ID.clone(),
},
target: match &km.rsvp {
None => km.source.clone(),
Some(rsvp) => rsvp.clone(),
},
rsvp: None,
message: Message::Response((
Response {
inherit: false,
body: serde_json::to_vec(&StateResponse::Err(error)).unwrap(),
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: None,
}
}
fn process_to_vec(process: ProcessId) -> Vec<u8> {
process.to_string().as_bytes().to_vec()
}

View File

@ -1,41 +1,23 @@
use anyhow::Result;
use chrono::{Datelike, Local, Timelike};
use crossterm::{
cursor,
event::{
DisableBracketedPaste, EnableBracketedPaste, Event, EventStream, KeyCode, KeyEvent,
KeyModifiers,
},
event::{Event, EventStream, KeyCode, KeyEvent, KeyModifiers},
execute, style,
style::Print,
terminal::{self, disable_raw_mode, enable_raw_mode, ClearType},
terminal::{self, ClearType},
};
use futures::{future::FutureExt, StreamExt};
use std::fs::{read_to_string, OpenOptions};
use std::io::{stdout, BufWriter, Write};
use lib::types::core::{
DebugCommand, DebugSender, Identity, KernelMessage, Message, MessageSender, PrintReceiver,
PrintSender, Printout, Request, TERMINAL_PROCESS_ID,
};
use std::{
fs::{read_to_string, OpenOptions},
io::{BufWriter, Write},
};
use tokio::signal::unix::{signal, SignalKind};
use lib::types::core::*;
mod utils;
struct RawMode;
impl RawMode {
fn new() -> anyhow::Result<Self> {
enable_raw_mode()?;
Ok(RawMode)
}
}
impl Drop for RawMode {
fn drop(&mut self) {
match disable_raw_mode() {
Ok(_) => {}
Err(e) => {
println!("terminal: failed to disable raw mode: {e:?}\r");
}
}
}
}
pub mod utils;
/*
* terminal driver
@ -50,122 +32,50 @@ pub async fn terminal(
mut print_rx: PrintReceiver,
is_detached: bool,
mut verbose_mode: u8,
) -> Result<()> {
let mut stdout = stdout();
execute!(
stdout,
EnableBracketedPaste,
terminal::SetTitle(format!("{}", our.name))
)?;
) -> anyhow::Result<()> {
let (stdout, _maybe_raw_mode) = utils::startup(&our, version, is_detached)?;
let (mut win_cols, mut win_rows) = terminal::size().unwrap();
// print initial splash screen, large if there's room, small otherwise
if win_cols >= 90 {
execute!(
stdout,
style::SetForegroundColor(style::Color::Magenta),
Print(format!(
r#"
.`
`@@,, ,* 888 d8P d8b 888
`@%@@@, ,~-##` 888 d8P Y8P 888
~@@#@%#@@, ##### 888 d8P 888
~-%######@@@, ##### 888d88K 888 88888b. .d88b. .d88888 .d88b.
-%%#######@#####, 8888888b 888 888 "88b d88""88b d88" 888 d8P Y8b
~^^%##########@ 888 Y88b 888 888 888 888 888 888 888 88888888
>^#########@ 888 Y88b 888 888 888 Y88..88P Y88b 888 Y8b.
`>#######` 888 Y88b 888 888 888 "Y88P" "Y88888 "Y8888
.>######%
/###%^#% {} ({})
/##%@# ` runtime version {}
./######` a general purpose sovereign cloud computer
/.^`.#^#^`
` ,#`#`#,
,/ /` `
.*`
networking public key: {}
"#,
our.name,
if our.is_direct() {
"direct"
} else {
"indirect"
},
version,
our.networking_key,
)),
style::ResetColor
)?;
} else {
execute!(
stdout,
style::SetForegroundColor(style::Color::Magenta),
Print(format!(
r#"
888 d8P d8b 888
888 d8P Y8P 888
888 d8P 888
888d88K 888 88888b. .d88b. .d88888 .d88b.
8888888b 888 888 "88b d88""88b d88" 888 d8P Y8b
888 Y88b 888 888 888 888 888 888 888 88888888
888 Y88b 888 888 888 Y88..88P Y88b 888 Y8b.
888 Y88b 888 888 888 "Y88P" "Y88888 "Y8888
// mutable because we adjust them on window resize events
let (mut win_cols, mut win_rows) =
crossterm::terminal::size().expect("terminal: couldn't fetch size");
{} ({})
version {}
a general purpose sovereign cloud computer
net pubkey: {}
"#,
our.name,
if our.is_direct() {
"direct"
} else {
"indirect"
},
version,
our.networking_key,
)),
style::ResetColor
)?;
}
let _raw_mode = if is_detached {
None
} else {
Some(RawMode::new()?)
};
let mut reader = EventStream::new();
let mut current_line = format!("{} > ", our.name);
let prompt_len: usize = our.name.len() + 3;
let mut cursor_col: u16 = prompt_len.try_into().unwrap();
let mut cursor_col: u16 = prompt_len as u16;
let mut line_col: usize = cursor_col as usize;
let mut in_step_through: bool = false;
let mut search_mode: bool = false;
let mut search_depth: usize = 0;
let mut logging_mode: bool = false;
// the terminal stores the most recent 1000 lines entered by user
// in history. TODO should make history size adjustable.
let history_path = std::fs::canonicalize(&home_directory_path)
.unwrap()
.expect("terminal: could not get path for .terminal_history file")
.join(".terminal_history");
let history = read_to_string(&history_path).unwrap_or_default();
let history_handle = OpenOptions::new()
.append(true)
.create(true)
.open(&history_path)
.unwrap();
.expect("terminal: could not open/create .terminal_history");
let history_writer = BufWriter::new(history_handle);
// TODO make adjustable max history length
let mut command_history = utils::CommandHistory::new(1000, history, history_writer);
// if CTRL+L is used to turn on logging, all prints to terminal
// will also be written with their full timestamp to the .terminal_log file.
// logging mode is always off by default. TODO add a boot flag to change this.
let log_path = std::fs::canonicalize(&home_directory_path)
.unwrap()
.expect("terminal: could not get path for .terminal_log file")
.join(".terminal_log");
let log_handle = OpenOptions::new()
.append(true)
.create(true)
.open(&log_path)
.unwrap();
.expect("terminal: could not open/create .terminal_log");
let mut log_writer = BufWriter::new(log_handle);
// use to trigger cleanup if receive signal to kill process
@ -186,21 +96,32 @@ pub async fn terminal(
let mut sigusr2 =
signal(SignalKind::user_defined2()).expect("terminal: failed to set up SIGUSR2 handler");
loop {
let event = reader.next().fuse();
// if the verbosity boot flag was **not** set to "full event loop", tell kernel
// the kernel will try and print all events by default so that booting with
// verbosity mode 3 guarantees all events from boot are shown.
if verbose_mode != 3 {
let _ = debug_event_loop.send(DebugCommand::ToggleEventLoop).await;
}
let mut reader = EventStream::new();
let mut stdout = stdout.lock();
loop {
tokio::select! {
Some(printout) = print_rx.recv() => {
let now = Local::now();
// always write print to log if in logging mode
if logging_mode {
let _ = writeln!(log_writer, "[{}] {}", now.to_rfc2822(), printout.content);
writeln!(log_writer, "[{}] {}", now.to_rfc2822(), printout.content)?;
}
// skip writing print to terminal if it's of a greater
// verbosity level than our current mode
if printout.verbosity > verbose_mode {
continue;
}
let mut stdout = stdout.lock();
execute!(
stdout,
// print goes immediately above the dedicated input line at bottom
cursor::MoveTo(0, win_rows - 1),
terminal::Clear(ClearType::CurrentLine),
Print(format!("{} {:02}:{:02} ",
@ -208,41 +129,45 @@ pub async fn terminal(
now.hour(),
now.minute(),
)),
)?;
let color = match printout.verbosity {
style::SetForegroundColor(match printout.verbosity {
0 => style::Color::Reset,
1 => style::Color::Green,
2 => style::Color::Magenta,
_ => style::Color::Red,
};
}),
)?;
for line in printout.content.lines() {
execute!(
stdout,
style::SetForegroundColor(color),
Print(format!("{}\r\n", line)),
style::ResetColor,
)?;
}
// reset color and re-display the current input line
// re-place cursor where user had it at input line
execute!(
stdout,
style::ResetColor,
cursor::MoveTo(0, win_rows),
Print(utils::truncate_in_place(&current_line, prompt_len, win_cols, (line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
}
Some(Ok(event)) = event => {
let mut stdout = stdout.lock();
Some(Ok(event)) = reader.next().fuse() => {
match event {
// resize is super annoying because this event trigger often
//
// RESIZE: resize is super annoying because this event trigger often
// comes "too late" to stop terminal from messing with the
// already-printed lines. TODO figure out the right way
// to compensate for this cross-platform and do this in a
// generally stable way.
//
Event::Resize(width, height) => {
win_cols = width;
win_rows = height;
},
// handle pasting of text from outside
//
// PASTE: handle pasting of text from outside
//
Event::Paste(pasted) => {
// strip out control characters and newlines
let pasted = pasted.chars().filter(|c| !c.is_control() && !c.is_ascii_control()).collect::<String>();
@ -256,7 +181,9 @@ pub async fn terminal(
cursor::MoveTo(cursor_col, win_rows),
)?;
}
//
// CTRL+C, CTRL+D: turn off the node
//
Event::Key(KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
@ -267,10 +194,18 @@ pub async fn terminal(
modifiers: KeyModifiers::CONTROL,
..
}) => {
execute!(stdout, DisableBracketedPaste, terminal::SetTitle(""))?;
execute!(
stdout,
// print goes immediately above the dedicated input line at bottom
cursor::MoveTo(0, win_rows - 1),
terminal::Clear(ClearType::CurrentLine),
Print("exit code received"),
)?;
break;
},
//
// CTRL+V: toggle through verbosity modes
//
Event::Key(KeyEvent {
code: KeyCode::Char('v'),
modifiers: KeyModifiers::CONTROL,
@ -283,37 +218,37 @@ pub async fn terminal(
2 => verbose_mode = 3,
_ => verbose_mode = 0,
}
let _ = print_tx.send(
Printout {
verbosity: 0,
content: match verbose_mode {
0 => "verbose mode: off".into(),
1 => "verbose mode: debug".into(),
2 => "verbose mode: super-debug".into(),
_ => "verbose mode: full event loop".into(),
}
}
).await;
Printout::new(0, format!("verbose mode: {}", match verbose_mode {
0 => "off",
1 => "debug",
2 => "super-debug",
_ => "full event loop",
})).send(&print_tx).await;
if verbose_mode == 3 {
let _ = debug_event_loop.send(DebugCommand::ToggleEventLoop).await;
}
},
//
// CTRL+J: toggle debug mode -- makes system-level event loop step-through
// CTRL+S: step through system-level event loop
//
Event::Key(KeyEvent {
code: KeyCode::Char('j'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
let _ = print_tx.send(
Printout {
verbosity: 0,
content: match in_step_through {
true => "debug mode off".into(),
false => "debug mode on: use CTRL+S to step through events".into(),
}
}
).await;
let _ = debug_event_loop.send(DebugCommand::Toggle).await;
let _ = debug_event_loop.send(DebugCommand::ToggleStepthrough).await;
in_step_through = !in_step_through;
Printout::new(0, format!("debug mode {}", match in_step_through {
false => "off",
true => "on: use CTRL+S to step through events",
}))
.send(&print_tx)
.await;
},
//
// CTRL+S: step through system-level event loop (when in step-through mode)
//
Event::Key(KeyEvent {
code: KeyCode::Char('s'),
modifiers: KeyModifiers::CONTROL,
@ -330,19 +265,15 @@ pub async fn terminal(
..
}) => {
logging_mode = !logging_mode;
let _ = print_tx.send(
Printout {
verbosity: 0,
content: match logging_mode {
true => "logging mode: on".into(),
false => "logging mode: off".into(),
}
}
).await;
Printout::new(
0,
format!("logging mode: {}", if logging_mode { "on" } else { "off" })
)
.send(&print_tx)
.await;
},
//
// UP / CTRL+P: go up one command in history
// DOWN / CTRL+N: go down one command in history
//
Event::Key(KeyEvent { code: KeyCode::Up, .. }) |
Event::Key(KeyEvent {
@ -357,6 +288,7 @@ pub async fn terminal(
line_col = current_line.len();
},
None => {
// the "no-no" ding
print!("\x07");
},
}
@ -368,6 +300,9 @@ pub async fn terminal(
Print(utils::truncate_rightward(&current_line, prompt_len, win_cols)),
)?;
},
//
// DOWN / CTRL+N: go down one command in history
//
Event::Key(KeyEvent { code: KeyCode::Down, .. }) |
Event::Key(KeyEvent {
code: KeyCode::Char('n'),
@ -381,6 +316,7 @@ pub async fn terminal(
line_col = current_line.len();
},
None => {
// the "no-no" ding
print!("\x07");
},
}
@ -401,7 +337,7 @@ pub async fn terminal(
..
}) => {
line_col = prompt_len;
cursor_col = prompt_len.try_into().unwrap();
cursor_col = prompt_len as u16;
execute!(
stdout,
cursor::MoveTo(0, win_rows),
@ -438,32 +374,16 @@ pub async fn terminal(
search_depth += 1;
}
search_mode = true;
let search_query = &current_line[prompt_len..];
if search_query.is_empty() {
continue;
}
if let Some(result) = command_history.search(search_query, search_depth) {
let result_underlined = utils::underline(result, search_query);
execute!(
stdout,
cursor::MoveTo(0, win_rows),
terminal::Clear(ClearType::CurrentLine),
Print(utils::truncate_in_place(
&format!("{} * {}", our.name, result_underlined),
prompt_len,
win_cols,
(line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
} else {
execute!(
stdout,
cursor::MoveTo(0, win_rows),
terminal::Clear(ClearType::CurrentLine),
Print(utils::truncate_in_place(&current_line, prompt_len, win_cols, (line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
}
utils::execute_search(
&our,
&mut stdout,
&current_line,
prompt_len,
(win_cols, win_rows),
(line_col, cursor_col),
&mut command_history,
search_depth,
)?;
},
//
// CTRL+G: exit search mode
@ -480,15 +400,22 @@ pub async fn terminal(
stdout,
cursor::MoveTo(0, win_rows),
terminal::Clear(ClearType::CurrentLine),
Print(utils::truncate_in_place(&current_line, prompt_len, win_cols, (line_col, cursor_col))),
Print(utils::truncate_in_place(
&format!("{} > {}", our.name, &current_line[prompt_len..]),
prompt_len,
win_cols,
(line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
},
//
// handle keypress events
// KEY: handle keypress events
//
Event::Key(k) => {
match k.code {
//
// CHAR: write a single character
//
KeyCode::Char(c) => {
current_line.insert(line_col, c);
if cursor_col < win_cols {
@ -496,22 +423,17 @@ pub async fn terminal(
}
line_col += 1;
if search_mode {
let search_query = &current_line[prompt_len..];
if let Some(result) = command_history.search(search_query, search_depth) {
let result_underlined = utils::underline(result, search_query);
execute!(
stdout,
cursor::MoveTo(0, win_rows),
terminal::Clear(ClearType::CurrentLine),
Print(utils::truncate_in_place(
&format!("{} * {}", our.name, result_underlined),
prompt_len,
win_cols,
(line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
continue;
}
utils::execute_search(
&our,
&mut stdout,
&current_line,
prompt_len,
(win_cols, win_rows),
(line_col, cursor_col),
&mut command_history,
search_depth,
)?;
continue;
}
execute!(
stdout,
@ -521,6 +443,9 @@ pub async fn terminal(
cursor::MoveTo(cursor_col, win_rows),
)?;
},
//
// BACKSPACE or DELETE: delete a single character at cursor
//
KeyCode::Backspace | KeyCode::Delete => {
if line_col == prompt_len {
continue;
@ -531,22 +456,17 @@ pub async fn terminal(
line_col -= 1;
current_line.remove(line_col);
if search_mode {
let search_query = &current_line[prompt_len..];
if let Some(result) = command_history.search(search_query, search_depth) {
let result_underlined = utils::underline(result, search_query);
execute!(
stdout,
cursor::MoveTo(0, win_rows),
terminal::Clear(ClearType::CurrentLine),
Print(utils::truncate_in_place(
&format!("{} * {}", our.name, result_underlined),
prompt_len,
win_cols,
(line_col, cursor_col))),
cursor::MoveTo(cursor_col, win_rows),
)?;
continue;
}
utils::execute_search(
&our,
&mut stdout,
&current_line,
prompt_len,
(win_cols, win_rows),
(line_col, cursor_col),
&mut command_history,
search_depth,
)?;
continue;
}
execute!(
stdout,
@ -556,6 +476,9 @@ pub async fn terminal(
cursor::MoveTo(cursor_col, win_rows),
)?;
},
//
// LEFT: move cursor one spot left
//
KeyCode::Left => {
if cursor_col as usize == prompt_len {
if line_col == prompt_len {
@ -581,6 +504,9 @@ pub async fn terminal(
line_col -= 1;
}
},
//
// RIGHT: move cursor one spot right
//
KeyCode::Right => {
if line_col == current_line.len() {
// at the very end of the current typed line
@ -604,6 +530,9 @@ pub async fn terminal(
)?;
}
},
//
// ENTER: send current input to terminal process, clearing input line
//
KeyCode::Enter => {
// if we were in search mode, pull command from that
let command = if !search_mode {
@ -612,7 +541,7 @@ pub async fn terminal(
command_history.search(
&current_line[prompt_len..],
search_depth
).unwrap_or(&current_line[prompt_len..]).to_string()
).unwrap_or_default().to_string()
};
let next = format!("{} > ", our.name);
execute!(
@ -627,35 +556,32 @@ pub async fn terminal(
search_depth = 0;
current_line = next;
command_history.add(command.clone());
cursor_col = prompt_len.try_into().unwrap();
cursor_col = prompt_len as u16;
line_col = prompt_len;
event_loop.send(
KernelMessage {
id: rand::random(),
source: Address {
node: our.name.clone(),
process: TERMINAL_PROCESS_ID.clone(),
},
target: Address {
node: our.name.clone(),
process: TERMINAL_PROCESS_ID.clone(),
},
rsvp: None,
message: Message::Request(Request {
inherit: false,
expects_response: None,
body: command.into_bytes(),
metadata: None,
capabilities: vec![],
}),
lazy_load_blob: None,
}
).await.expect("terminal: couldn't execute command!");
KernelMessage::builder()
.id(rand::random())
.source((our.name.as_str(), TERMINAL_PROCESS_ID.clone()))
.target((our.name.as_str(), TERMINAL_PROCESS_ID.clone()))
.message(Message::Request(Request {
inherit: false,
expects_response: None,
body: command.into_bytes(),
metadata: None,
capabilities: vec![],
}))
.build()
.unwrap()
.send(&event_loop)
.await;
},
_ => {
// some keycode we don't care about, yet
},
_ => {},
}
},
_ => {},
_ => {
// some terminal event we don't care about, yet
},
}
}
_ = sigalrm.recv() => return Err(anyhow::anyhow!("exiting due to SIGALRM")),
@ -668,6 +594,5 @@ pub async fn terminal(
_ = sigusr2.recv() => return Err(anyhow::anyhow!("exiting due to SIGUSR2")),
}
}
execute!(stdout.lock(), DisableBracketedPaste, terminal::SetTitle(""))?;
Ok(())
}

View File

@ -1,6 +1,137 @@
use std::collections::VecDeque;
use std::fs::File;
use std::io::{BufWriter, Write};
use crossterm::terminal::{disable_raw_mode, enable_raw_mode};
use lib::types::core::Identity;
use std::{
collections::VecDeque,
fs::File,
io::{BufWriter, Stdout, Write},
};
/// RAII guard for the terminal's raw mode: raw mode is enabled on
/// construction and disabled again when the guard is dropped.
pub struct RawMode;

impl RawMode {
    /// Enable raw mode and return a guard that undoes it on drop.
    fn new() -> std::io::Result<Self> {
        enable_raw_mode()?;
        Ok(Self)
    }
}

impl Drop for RawMode {
    fn drop(&mut self) {
        // best-effort: we are likely shutting down, so just report failure
        if let Err(e) = disable_raw_mode() {
            println!("terminal: failed to disable raw mode: {e:?}\r");
        }
    }
}
/// One-time terminal initialization, run at node boot.
///
/// Enables bracketed paste, sets the window title to `kinode {name}`,
/// prints the splash screen (the large ASCII-art banner if the window
/// is at least 90 columns wide, a compact one otherwise), and — unless
/// the node runs detached — switches the terminal into raw mode.
///
/// Returns the `Stdout` handle along with an optional [`RawMode`]
/// guard; the guard is `None` when `is_detached` is set, and raw mode
/// is disabled again when the guard is dropped.
pub fn startup(
    our: &Identity,
    version: &str,
    is_detached: bool,
) -> std::io::Result<(Stdout, Option<RawMode>)> {
    let mut stdout = std::io::stdout();
    crossterm::execute!(
        stdout,
        crossterm::event::EnableBracketedPaste,
        crossterm::terminal::SetTitle(format!("kinode {}", our.name))
    )?;
    // only the width matters here: it decides which splash screen fits
    let (win_cols, _) = crossterm::terminal::size().expect("terminal: couldn't fetch size");
    // print initial splash screen, large if there's room, small otherwise
    if win_cols >= 90 {
        crossterm::execute!(
            stdout,
            crossterm::style::SetForegroundColor(crossterm::style::Color::Magenta),
            crossterm::style::Print(format!(
                r#"
 .`
 `@@,, ,* 888 d8P d8b 888
 `@%@@@, ,~-##` 888 d8P Y8P 888
 ~@@#@%#@@, ##### 888 d8P 888
 ~-%######@@@, ##### 888d88K 888 88888b. .d88b. .d88888 .d88b.
 -%%#######@#####, 8888888b 888 888 "88b d88""88b d88" 888 d8P Y8b
 ~^^%##########@ 888 Y88b 888 888 888 888 888 888 888 88888888
 >^#########@ 888 Y88b 888 888 888 Y88..88P Y88b 888 Y8b.
 `>#######` 888 Y88b 888 888 888 "Y88P" "Y88888 "Y8888
 .>######%
 /###%^#% {} ({})
 /##%@# ` runtime version {}
 ./######` a general purpose sovereign cloud computer
 /.^`.#^#^`
 ` ,#`#`#,
 ,/ /` `
 .*`
 networking public key: {}
 "#,
                our.name,
                if our.is_direct() {
                    "direct"
                } else {
                    "indirect"
                },
                version,
                our.networking_key,
            )),
            crossterm::style::ResetColor
        )
        .expect("terminal: couldn't print splash");
    } else {
        crossterm::execute!(
            stdout,
            crossterm::style::SetForegroundColor(crossterm::style::Color::Magenta),
            crossterm::style::Print(format!(
                r#"
 888 d8P d8b 888
 888 d8P Y8P 888
 888 d8P 888
 888d88K 888 88888b. .d88b. .d88888 .d88b.
 8888888b 888 888 "88b d88""88b d88" 888 d8P Y8b
 888 Y88b 888 888 888 888 888 888 888 88888888
 888 Y88b 888 888 888 Y88..88P Y88b 888 Y8b.
 888 Y88b 888 888 888 "Y88P" "Y88888 "Y8888
 {} ({})
 version {}
 a general purpose sovereign cloud computer
 net pubkey: {}
 "#,
                our.name,
                if our.is_direct() {
                    "direct"
                } else {
                    "indirect"
                },
                version,
                our.networking_key,
            )),
            crossterm::style::ResetColor
        )?;
    }
    // detached nodes (e.g. running under a supervisor) keep the terminal
    // in its normal cooked mode; interactive nodes get the raw-mode guard
    Ok((
        stdout,
        if is_detached {
            None
        } else {
            Some(RawMode::new()?)
        },
    ))
}
/// Restore the terminal to a sane state on shutdown and print the
/// reason for quitting, in red, on its own line.
///
/// This undoes what `startup` set up: bracketed paste is disabled and
/// the window title is cleared. Raw mode is restored separately by the
/// `RawMode` drop guard.
pub fn cleanup(quit_msg: &str) {
    let mut out = std::io::stdout().lock();
    let message = format!("\r\n{quit_msg}\r\n");
    crossterm::execute!(
        out,
        crossterm::event::DisableBracketedPaste,
        crossterm::terminal::SetTitle(""),
        crossterm::style::SetForegroundColor(crossterm::style::Color::Red),
        crossterm::style::Print(message),
        crossterm::style::ResetColor,
    )
    .expect("failed to clean up terminal visual state! your terminal window might be funky now");
}
#[derive(Debug)]
pub struct CommandHistory {
@ -70,6 +201,9 @@ impl CommandHistory {
/// yes this is O(n) to provide desired ordering, can revisit if slow
pub fn search(&mut self, find: &str, depth: usize) -> Option<&str> {
let mut skips = 0;
if find.is_empty() {
return None;
}
// if there is at least one match, and we've skipped past it, return oldest match
let mut last_match: Option<&str> = None;
for line in self.lines.iter() {
@ -86,14 +220,56 @@ impl CommandHistory {
}
}
pub fn underline(s: &str, to_underline: &str) -> String {
/// Redraw the bottom input line while in history-search mode.
///
/// Looks up the current query (everything after the prompt) in command
/// history at the given search depth. On a hit, the matching line is
/// shown with the query portion underlined and the cursor is placed at
/// the end of the underlined region; on a miss, the query is echoed
/// back followed by ": no results" and the cursor stays where it was.
pub fn execute_search(
    our: &Identity,
    stdout: &mut std::io::StdoutLock,
    current_line: &str,
    prompt_len: usize,
    (win_cols, win_rows): (u16, u16),
    (line_col, cursor_col): (usize, u16),
    command_history: &mut CommandHistory,
    search_depth: usize,
) -> Result<(), std::io::Error> {
    let search_query = &current_line[prompt_len..];
    // decide what to display and where the cursor should land
    let (display, col) = match command_history.search(search_query, search_depth) {
        Some(result) => {
            let (result_underlined, u_end) = underline(result, search_query);
            (
                format!("{} * {}", our.name, result_underlined),
                u_end + prompt_len as u16,
            )
        }
        None => (
            format!("{} * {}: no results", our.name, search_query),
            cursor_col,
        ),
    };
    crossterm::execute!(
        stdout,
        crossterm::cursor::MoveTo(0, win_rows),
        crossterm::terminal::Clear(crossterm::terminal::ClearType::CurrentLine),
        crossterm::style::Print(truncate_in_place(
            &display,
            prompt_len,
            win_cols,
            (line_col, col)
        )),
        crossterm::cursor::MoveTo(col, win_rows),
    )
}
/// Format `s` so the first occurrence of `to_underline` is wrapped in
/// ANSI underline escape codes (`\x1b[4m` ... `\x1b[24m`).
///
/// Returns the formatted string together with the byte offset (within
/// the *original* string) of the end of the underlined region, which
/// the caller uses to position the cursor.
///
/// If `to_underline` does not occur in `s`, the string is returned
/// unmodified with the offset at the end of `s`, instead of panicking
/// as the previous `unwrap()` on `find` did. (Callers in this module
/// pass queries produced by a successful history search, so the miss
/// path is defensive.)
pub fn underline(s: &str, to_underline: &str) -> (String, u16) {
    let Some(u_start) = s.find(to_underline) else {
        // no match: nothing to underline; park the cursor at the end
        return (s.to_string(), s.len() as u16);
    };
    let u_end = u_start + to_underline.len();
    let mut result = s.to_string();
    // insert the closing code first so the opening insert doesn't shift it
    result.insert_str(u_end, "\x1b[24m");
    result.insert_str(u_start, "\x1b[4m");
    (result, u_end as u16)
}
pub fn truncate_rightward(s: &str, prompt_len: usize, width: u16) -> String {

View File

@ -1,103 +1,9 @@
use anyhow::Result;
use lib::types::core::{
Address, KernelMessage, Message, MessageReceiver, MessageSender, PrintSender, Printout,
Response, TimerAction, TIMER_PROCESS_ID,
};
use serde::{Deserialize, Serialize};
/// A runtime module that allows processes to set timers. Interacting with the
/// timer is done with a simple Request/Response pattern, and the timer module
/// is public, so it can be used by any local process. It will not respond to
/// requests made by other nodes.
///
/// The interface of the timer module is as follows:
/// One kind of request is accepted: TimerAction::SetTimer(u64), where the u64 is the time to wait
/// in milliseconds. This request should always expect a Response.
/// If the request does not expect a Response, the timer will not be set.
///
/// A proper Request will trigger the timer module to send a Response. The Response will be
/// empty, so the user should either `send_and_await` the Request, or attach a `context` so
/// they can match the Response with their purpose.
///
pub async fn timer_service(
    our: String,
    kernel_message_sender: MessageSender,
    mut timer_message_receiver: MessageReceiver,
    print_tx: PrintSender,
) -> Result<()> {
    // in-memory map of pending timers: pop-time (unix millis) -> waiting requests
    // (note: this map lives only in memory; nothing is loaded from or saved to disk)
    let mut timer_map = TimerMap {
        timers: nohash_hasher::IntMap::default(),
    };
    // joinset holds 1 active timer per expiration-time
    let mut timer_tasks = tokio::task::JoinSet::<u64>::new();
    loop {
        tokio::select! {
            Some(km) = timer_message_receiver.recv() => {
                // ignore Requests sent from other nodes
                if km.source.node != our { continue };
                // we only handle Requests
                let Message::Request(req) = km.message else { continue };
                let Ok(timer_action) = serde_json::from_slice::<TimerAction>(&req.body) else {
                    let _ = print_tx.send(Printout {
                        verbosity: 1,
                        content: "timer service received a request with an invalid body".to_string(),
                    }).await;
                    continue
                };
                match timer_action {
                    TimerAction::Debug => {
                        // diagnostic: print every pending timer, then wait for next message
                        let _ = print_tx.send(Printout {
                            verbosity: 0,
                            content: format!("timer service active timers ({}):", timer_map.timers.len()),
                        }).await;
                        for (k, v) in timer_map.timers.iter() {
                            let _ = print_tx.send(Printout {
                                verbosity: 0,
                                content: format!("{}: {:?}", k, v),
                            }).await;
                        }
                        continue
                    }
                    TimerAction::SetTimer(timer_millis) => {
                        // if the timer is set to pop in 0 millis, we immediately respond
                        // otherwise, store in our map, and spawn a task that
                        // sleeps for the given time, then sends the response
                        let now = std::time::SystemTime::now()
                            .duration_since(std::time::UNIX_EPOCH)
                            .unwrap()
                            .as_millis() as u64;
                        let pop_time = now + timer_millis;
                        if timer_millis == 0 {
                            send_response(&our, km.id, km.rsvp.unwrap_or(km.source), &kernel_message_sender).await;
                            continue
                        }
                        let _ = print_tx.send(Printout {
                            verbosity: 3,
                            content: format!("set timer to pop in {}ms", timer_millis),
                        }).await;
                        // one sleeper task per distinct pop-time; concurrent timers
                        // landing on the same millisecond share it
                        if !timer_map.contains(pop_time) {
                            timer_tasks.spawn(async move {
                                // NOTE(review): sleeps 1ms short of the requested duration —
                                // presumably to offset wake-up latency; confirm intent
                                tokio::time::sleep(std::time::Duration::from_millis(timer_millis - 1)).await;
                                pop_time
                            });
                        }
                        timer_map.insert(pop_time, km.id, km.rsvp.unwrap_or(km.source));
                    }
                }
            }
            Some(Ok(time)) = timer_tasks.join_next() => {
                // when a timer pops, we send the response to the process(es) that set
                // the timer(s), and then remove it from our map
                let Some(timers) = timer_map.remove(time) else { continue };
                for (id, addr) in timers {
                    send_response(&our, id, addr, &kernel_message_sender).await;
                }
            }
        }
    }
}
#[derive(Serialize, Deserialize, Debug)]
struct TimerMap {
// key: the unix timestamp in milliseconds at which the timer pops
@ -120,26 +26,112 @@ impl TimerMap {
}
}
async fn send_response(our_node: &str, id: u64, target: Address, send_to_loop: &MessageSender) {
let _ = send_to_loop
.send(KernelMessage {
id,
source: Address {
node: our_node.to_string(),
process: TIMER_PROCESS_ID.clone(),
},
target,
rsvp: None,
message: Message::Response((
Response {
inherit: false,
body: vec![],
metadata: None,
capabilities: vec![],
},
None,
)),
lazy_load_blob: None,
})
.await;
/// A runtime module that allows processes to set timers. Interacting with the
/// timer is done with a simple Request/Response pattern, and the timer module
/// is public, so it can be used by any local process. It will not respond to
/// requests made by other nodes.
///
/// The interface of the timer module is as follows:
/// One kind of request is accepted: TimerAction::SetTimer(u64), where the u64 is the
/// time to wait in milliseconds. This request should always expect a Response.
/// If the request does not expect a Response, the timer will not be set.
///
/// A proper Request will trigger the timer module to send a Response. The Response will be
/// empty, so the user should either `send_and_await` the Request, or attach a `context` so
/// they can match the Response with their purpose.
///
pub async fn timer_service(
    our: String,
    kernel_message_sender: MessageSender,
    mut timer_message_receiver: MessageReceiver,
    print_tx: PrintSender,
) -> anyhow::Result<()> {
    // in-memory map of pending timers: pop-time (unix millis) -> waiting requests
    let mut timer_map = TimerMap {
        timers: nohash_hasher::IntMap::default(),
    };
    // joinset holds 1 active timer per expiration-time
    let mut timer_tasks = tokio::task::JoinSet::<u64>::new();
    loop {
        tokio::select! {
            Some(km) = timer_message_receiver.recv() => {
                // ignore Requests sent from other nodes
                if km.source.node != our { continue };
                // we only handle Requests
                let Message::Request(req) = km.message else { continue };
                let Ok(timer_action) = serde_json::from_slice::<TimerAction>(&req.body) else {
                    Printout::new(1, "timer service received a request with an invalid body").send(&print_tx).await;
                    continue
                };
                match timer_action {
                    TimerAction::Debug => {
                        // diagnostic: print every pending timer, then wait for next message
                        Printout::new(0, format!("timer service active timers ({}):", timer_map.timers.len())).send(&print_tx).await;
                        for (k, v) in timer_map.timers.iter() {
                            Printout::new(0, format!("{k}: {v:?}")).send(&print_tx).await;
                        }
                        continue
                    }
                    TimerAction::SetTimer(timer_millis) => {
                        // if the timer is set to pop in 0 millis, we immediately respond
                        // otherwise, store in our map, and spawn a task that
                        // sleeps for the given time, then sends the response
                        let now = std::time::SystemTime::now()
                            .duration_since(std::time::UNIX_EPOCH)
                            .unwrap()
                            .as_millis() as u64;
                        let pop_time = now + timer_millis;
                        if timer_millis == 0 {
                            // zero-length timer: respond right away, nothing to store.
                            // the empty Response goes to the rsvp when set, else the sender
                            KernelMessage::builder()
                                .id(km.id)
                                .source((our.as_str(), TIMER_PROCESS_ID.clone()))
                                .target(km.rsvp.unwrap_or(km.source))
                                .message(Message::Response((
                                    Response {
                                        inherit: false,
                                        body: vec![],
                                        metadata: None,
                                        capabilities: vec![],
                                    },
                                    None,
                                )))
                                .build()
                                .unwrap()
                                .send(&kernel_message_sender).await;
                            continue
                        }
                        Printout::new(3, format!("set timer to pop in {timer_millis}ms")).send(&print_tx).await;
                        // one sleeper task per distinct pop-time; concurrent timers
                        // landing on the same millisecond share it
                        if !timer_map.contains(pop_time) {
                            timer_tasks.spawn(async move {
                                // NOTE(review): sleeps 1ms short of the requested duration —
                                // presumably to offset wake-up latency; confirm intent
                                tokio::time::sleep(std::time::Duration::from_millis(timer_millis - 1)).await;
                                pop_time
                            });
                        }
                        timer_map.insert(pop_time, km.id, km.rsvp.unwrap_or(km.source));
                    }
                }
            }
            Some(Ok(time)) = timer_tasks.join_next() => {
                // when a timer pops, we send the response to the process(es) that set
                // the timer(s), and then remove it from our map
                let Some(timers) = timer_map.remove(time) else { continue };
                for (id, addr) in timers {
                    KernelMessage::builder()
                        .id(id)
                        .source((our.as_str(), TIMER_PROCESS_ID.clone()))
                        .target(addr)
                        .message(Message::Response((
                            Response {
                                inherit: false,
                                body: vec![],
                                metadata: None,
                                capabilities: vec![],
                            },
                            None,
                        )))
                        .build()
                        .unwrap()
                        .send(&kernel_message_sender).await;
                }
            }
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
[package]
name = "lib"
authors = ["KinodeDAO"]
version = "0.8.0"
version = "0.8.2"
edition = "2021"
description = "A general-purpose sovereign cloud computing platform"
homepage = "https://kinode.org"
@ -11,14 +11,15 @@ license = "Apache-2.0"
[lib]
[build-dependencies]
anyhow = "1.0.71"
kit = { git = "https://github.com/kinode-dao/kit", rev = "d319c5b" }
reqwest = { version = "0.12.4", features = ["blocking"] }
kit = { git = "https://github.com/kinode-dao/kit", tag = "v0.6.2" }
tokio = "1.28"
[dependencies]
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "05f8162" }
alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "05f8162" }
alloy = { version = "0.1.3", features = [
"json-rpc",
"rpc-types",
"rpc-types-eth",
] }
lazy_static = "1.4.0"
rand = "0.8.4"
ring = "0.17.8"

View File

@ -4,13 +4,13 @@ const KINODE_WIT_0_7_0_URL: &str =
const KINODE_WIT_0_8_0_URL: &str =
"https://raw.githubusercontent.com/kinode-dao/kinode-wit/v0.8/kinode.wit";
fn main() -> anyhow::Result<()> {
fn main() {
if std::env::var("SKIP_BUILD_SCRIPT").is_ok() {
println!("Skipping build script");
return Ok(());
return;
}
let pwd = std::env::current_dir()?;
let pwd = std::env::current_dir().expect("Failed to get current directory");
let wit_file = pwd.join("wit-v0.7.0").join("kinode.wit");
@ -18,9 +18,8 @@ fn main() -> anyhow::Result<()> {
rt.block_on(async {
kit::build::download_file(KINODE_WIT_0_7_0_URL, &wit_file)
.await
.map_err(|e| anyhow::anyhow!("{:?}", e))?;
Ok::<(), anyhow::Error>(())
})?;
.expect("Failed to download WIT 0.7");
});
let wit_file = pwd.join("wit-v0.8.0").join("kinode.wit");
@ -28,7 +27,6 @@ fn main() -> anyhow::Result<()> {
rt.block_on(async {
kit::build::download_file(KINODE_WIT_0_8_0_URL, &wit_file)
.await
.map_err(|e| anyhow::anyhow!("{:?}", e))?;
Ok(())
.expect("Failed to download WIT 0.8");
})
}

View File

@ -279,12 +279,13 @@ pub struct Address {
}
impl Address {
pub fn new<T>(node: &str, process: T) -> Address
pub fn new<T, U>(node: T, process: U) -> Address
where
T: Into<ProcessId>,
T: Into<String>,
U: Into<ProcessId>,
{
Address {
node: node.to_string(),
node: node.into(),
process: process.into(),
}
}
@ -399,11 +400,12 @@ impl From<(&str, &str, &str, &str)> for Address {
}
}
impl<T> From<(&str, T)> for Address
impl<T, U> From<(T, U)> for Address
where
T: Into<ProcessId>,
T: Into<String>,
U: Into<ProcessId>,
{
fn from(input: (&str, T)) -> Self {
fn from(input: (T, U)) -> Self {
Address::new(input.0, input.1)
}
}
@ -468,10 +470,45 @@ pub enum Message {
Response((Response, Option<Context>)),
}
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, Hash, Serialize, Deserialize)]
pub struct Capability {
pub issuer: Address,
pub params: String, // JSON-string
pub params: String,
}
impl Eq for Capability {}

impl PartialEq for Capability {
    /// Capabilities are equal when their issuers match and their
    /// `params` are *semantically* equal as JSON, so formatting
    /// differences (whitespace, key order) do not break equality.
    ///
    /// Previously both sides were `unwrap_or_default()`-ed, which
    /// collapsed every unparseable `params` string to `Value::Null` —
    /// making any two capabilities with invalid (or `"null"`) params
    /// compare equal. We now fall back to raw string comparison when
    /// either side fails to parse.
    ///
    /// NOTE(review): `Hash` is derived on the raw `params` string while
    /// `eq` compares parsed JSON, so two values that are `==` can hash
    /// differently (e.g. different key order). Verify no hash-map
    /// lookups rely on semantic equality.
    fn eq(&self, other: &Self) -> bool {
        let params_eq = match (
            serde_json::from_str::<serde_json::Value>(&self.params),
            serde_json::from_str::<serde_json::Value>(&other.params),
        ) {
            (Ok(a), Ok(b)) => a == b,
            // at least one side is not valid JSON: compare verbatim
            _ => self.params == other.params,
        };
        self.issuer == other.issuer && params_eq
    }
}
impl Capability {
pub fn new<T, U>(issuer: T, params: U) -> Self
where
T: Into<Address>,
U: Into<String>,
{
Capability {
issuer: issuer.into(),
params: params.into(),
}
}
pub fn messaging<T>(issuer: T) -> Self
where
T: Into<Address>,
{
Capability {
issuer: issuer.into(),
params: "\"messaging\"".into(),
}
}
}
impl std::fmt::Display for Capability {
@ -480,8 +517,7 @@ impl std::fmt::Display for Capability {
f,
"{}({})",
self.issuer,
serde_json::from_str::<serde_json::Value>(&self.params)
.unwrap_or(serde_json::json!("invalid JSON in capability"))
serde_json::from_str::<serde_json::Value>(&self.params).unwrap_or_default()
)
}
}
@ -597,6 +633,20 @@ impl OnExit {
}
}
/// Render the exit policy as its bare variant name; the payload of
/// `Requests` is intentionally elided.
impl std::fmt::Display for OnExit {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let label = match self {
            OnExit::None => "None",
            OnExit::Restart => "Restart",
            OnExit::Requests(_) => "Requests",
        };
        f.write_str(label)
    }
}
impl std::fmt::Display for Message {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", display_message(self, "\n "))
@ -865,14 +915,14 @@ pub fn de_wit_capability_v0(wit: crate::v0::wit::Capability) -> (Capability, Vec
pub fn en_wit_capability(cap: (Capability, Vec<u8>)) -> wit::Capability {
wit::Capability {
issuer: cap.0.issuer.en_wit(),
params: cap.0.params,
params: cap.0.params.to_string(),
}
}
pub fn en_wit_capability_v0(cap: (Capability, Vec<u8>)) -> crate::v0::wit::Capability {
crate::v0::wit::Capability {
issuer: cap.0.issuer.en_wit_v0(),
params: cap.0.params,
params: cap.0.params.to_string(),
}
}
@ -1138,6 +1188,75 @@ pub struct KernelMessage {
pub lazy_load_blob: Option<LazyLoadBlob>,
}
impl KernelMessage {
    /// Entry point for the fluent builder API; equivalent to
    /// `KernelMessageBuilder::default()`.
    pub fn builder() -> KernelMessageBuilder {
        KernelMessageBuilder::default()
    }

    /// Push this message onto the kernel's channel, consuming it.
    ///
    /// Panics if the receiving side has been dropped — the kernel cannot
    /// function without its message loop, so this is treated as fatal.
    pub async fn send(self, sender: &MessageSender) {
        sender
            .send(self)
            .await
            .expect("kernel message sender died");
    }
}
/// Builder for `KernelMessage`, obtained via `KernelMessage::builder()`.
///
/// `source`, `target`, and `message` are required and validated in `build()`;
/// `id` defaults to 0, and `rsvp` / `lazy_load_blob` default to `None`.
#[derive(Default)]
pub struct KernelMessageBuilder {
    // message id; 0 unless set with `id()`
    id: u64,
    // required — set with `source()`
    source: Option<Address>,
    // required — set with `target()`
    target: Option<Address>,
    // optional reply-to address (`Rsvp` is `Option<Address>`)
    rsvp: Rsvp,
    // required — set with `message()`
    message: Option<Message>,
    // optional payload attached to the message
    lazy_load_blob: Option<LazyLoadBlob>,
}
impl KernelMessageBuilder {
    /// Set the message id (defaults to 0).
    pub fn id(mut self, id: u64) -> Self {
        self.id = id;
        self
    }

    /// Set the required source address.
    pub fn source<T>(mut self, source: T) -> Self
    where
        T: Into<Address>,
    {
        self.source = Some(source.into());
        self
    }

    /// Set the required target address.
    pub fn target<T>(mut self, target: T) -> Self
    where
        T: Into<Address>,
    {
        self.target = Some(target.into());
        self
    }

    /// Set the optional reply-to (rsvp) address.
    pub fn rsvp(mut self, rsvp: Rsvp) -> Self {
        self.rsvp = rsvp;
        self
    }

    /// Set the required message body.
    pub fn message(mut self, message: Message) -> Self {
        self.message = Some(message);
        self
    }

    /// Set (or clear) the optional lazy-load blob.
    pub fn lazy_load_blob(mut self, blob: Option<LazyLoadBlob>) -> Self {
        self.lazy_load_blob = blob;
        self
    }

    /// Finalize the builder, erroring if any required field is missing.
    pub fn build(self) -> Result<KernelMessage, String> {
        let Some(source) = self.source else {
            return Err("Source address is required".to_string());
        };
        let Some(target) = self.target else {
            return Err("Target address is required".to_string());
        };
        let Some(message) = self.message else {
            return Err("Message is required".to_string());
        };
        Ok(KernelMessage {
            id: self.id,
            source,
            target,
            rsvp: self.rsvp,
            message,
            lazy_load_blob: self.lazy_load_blob,
        })
    }
}
impl std::fmt::Display for KernelMessage {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
@ -1173,6 +1292,22 @@ pub struct Printout {
pub content: String,
}
impl Printout {
pub fn new<T>(verbosity: u8, content: T) -> Self
where
T: Into<String>,
{
Self {
verbosity,
content: content.into(),
}
}
pub async fn send(self, sender: &PrintSender) {
sender.send(self).await.expect("print sender died");
}
}
// kernel sets in case, e.g.,
// A requests response from B does not request response from C
// -> kernel sets `Some(A) = Rsvp` for B's request to C
@ -1180,8 +1315,9 @@ pub type Rsvp = Option<Address>;
#[derive(Debug, Serialize, Deserialize)]
pub enum DebugCommand {
Toggle,
ToggleStepthrough,
Step,
ToggleEventLoop,
}
/// IPC format for requests sent to kernel runtime module
@ -1261,13 +1397,13 @@ pub enum CapMessage {
Add {
on: ProcessId,
caps: Vec<Capability>,
responder: tokio::sync::oneshot::Sender<bool>,
responder: Option<tokio::sync::oneshot::Sender<bool>>,
},
/// root delete: uncritically remove all `caps` from `on`
Drop {
on: ProcessId,
caps: Vec<Capability>,
responder: tokio::sync::oneshot::Sender<bool>,
responder: Option<tokio::sync::oneshot::Sender<bool>>,
},
/// does `on` have `cap` in its store?
Has {
@ -1284,7 +1420,7 @@ pub enum CapMessage {
/// Remove all caps issued by `on` from every process on the entire system
RevokeAll {
on: ProcessId,
responder: tokio::sync::oneshot::Sender<bool>,
responder: Option<tokio::sync::oneshot::Sender<bool>>,
},
/// before `on` sends a message, filter out any bogus caps it may have attached, sign any new
/// caps it may have created, and retreive the signature for the caps in its store.
@ -1295,6 +1431,42 @@ pub enum CapMessage {
},
}
impl std::fmt::Display for CapMessage {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
CapMessage::Add { on, caps, .. } => write!(
f,
"caps: add {} on {on}",
caps.iter()
.map(|c| c.to_string())
.collect::<Vec<String>>()
.join(", ")
),
CapMessage::Drop { on, caps, .. } => write!(
f,
"caps: drop {} on {on}",
caps.iter()
.map(|c| c.to_string())
.collect::<Vec<String>>()
.join(", ")
),
CapMessage::Has { on, cap, .. } => write!(f, "caps: has {} on {on}", cap),
CapMessage::GetAll { on, .. } => write!(f, "caps: get all on {on}"),
CapMessage::RevokeAll { on, .. } => write!(f, "caps: revoke all on {on}"),
CapMessage::FilterCaps { on, caps, .. } => {
write!(
f,
"caps: filter for {} on {on}",
caps.iter()
.map(|c| c.to_string())
.collect::<Vec<String>>()
.join(", ")
)
}
}
}
}
pub type ReverseCapIndex = HashMap<ProcessId, HashMap<ProcessId, Vec<Capability>>>;
pub type ProcessMap = HashMap<ProcessId, PersistedProcess>;
@ -1406,19 +1578,19 @@ pub enum StateResponse {
#[derive(Error, Debug, Serialize, Deserialize)]
pub enum StateError {
#[error("kernel_state: rocksdb internal error: {error}")]
#[error("rocksdb internal error: {error}")]
RocksDBError { action: String, error: String },
#[error("kernel_state: startup error")]
#[error("startup error")]
StartupError { action: String },
#[error("kernel_state: bytes blob required for {action}")]
#[error("bytes blob required for {action}")]
BadBytes { action: String },
#[error("kernel_state: bad request error: {error}")]
#[error("bad request error: {error}")]
BadRequest { error: String },
#[error("kernel_state: Bad JSON blob: {error}")]
#[error("Bad JSON blob: {error}")]
BadJson { error: String },
#[error("kernel_state: state not found for ProcessId {process_id}")]
#[error("state not found for ProcessId {process_id}")]
NotFound { process_id: ProcessId },
#[error("kernel_state: IO error: {error}")]
#[error("IO error: {error}")]
IOError { error: String },
}
@ -1515,23 +1687,23 @@ pub enum VfsResponse {
#[derive(Error, Debug, Serialize, Deserialize)]
pub enum VfsError {
#[error("vfs: No capability for action {action} at path {path}")]
#[error("No capability for action {action} at path {path}")]
NoCap { action: String, path: String },
#[error("vfs: Bytes blob required for {action} at path {path}")]
#[error("Bytes blob required for {action} at path {path}")]
BadBytes { action: String, path: String },
#[error("vfs: bad request error: {error}")]
#[error("bad request error: {error}")]
BadRequest { error: String },
#[error("vfs: error parsing path: {path}: {error}")]
#[error("error parsing path: {path}: {error}")]
ParseError { error: String, path: String },
#[error("vfs: IO error: {error}, at path {path}")]
#[error("IO error: {error}, at path {path}")]
IOError { error: String, path: String },
#[error("vfs: kernel capability channel error: {error}")]
#[error("kernel capability channel error: {error}")]
CapChannelFail { error: String },
#[error("vfs: Bad JSON blob: {error}")]
#[error("Bad JSON blob: {error}")]
BadJson { error: String },
#[error("vfs: File not found at path {path}")]
#[error("File not found at path {path}")]
NotFound { path: String },
#[error("vfs: Creating directory failed at path: {path}: {error}")]
#[error("Creating directory failed at path: {path}: {error}")]
CreateDirError { path: String, error: String },
}
@ -1581,19 +1753,19 @@ pub enum KvResponse {
#[derive(Debug, Serialize, Deserialize, Error)]
pub enum KvError {
#[error("kv: DbDoesNotExist")]
#[error("DbDoesNotExist")]
NoDb,
#[error("kv: KeyNotFound")]
#[error("KeyNotFound")]
KeyNotFound,
#[error("kv: no Tx found")]
#[error("no Tx found")]
NoTx,
#[error("kv: No capability: {error}")]
#[error("No capability: {error}")]
NoCap { error: String },
#[error("kv: rocksdb internal error: {error}")]
#[error("rocksdb internal error: {error}")]
RocksDBError { action: String, error: String },
#[error("kv: input bytes/json/key error: {error}")]
#[error("input bytes/json/key error: {error}")]
InputError { error: String },
#[error("kv: IO error: {error}")]
#[error("IO error: {error}")]
IOError { error: String },
}

View File

@ -1,5 +1,5 @@
use alloy_json_rpc::ErrorPayload;
use alloy_rpc_types::pubsub::{Params, SubscriptionKind, SubscriptionResult};
use alloy::rpc::json_rpc::ErrorPayload;
use alloy::rpc::types::eth::pubsub::{Params, SubscriptionKind, SubscriptionResult};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
@ -84,9 +84,6 @@ pub enum EthError {
/// The action type used for configuring eth:distro:sys. Only processes which have the "root"
/// capability from eth:distro:sys can successfully send this action.
///
/// NOTE: changes to config will not be persisted between boots, they must be saved in .env
/// to be reflected between boots. TODO: can change this
#[derive(Debug, Serialize, Deserialize)]
pub enum EthConfigAction {
/// Add a new provider to the list of providers.

View File

@ -0,0 +1 @@
[{"chain_id":1,"trusted":false,"provider":{"Node":{"kns_update":{"name":"providerfren.os","owner":"","node":"","public_key":"0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451","ips":["147.135.114.167"],"ports":{"ws":9999},"routers":[]},"use_as_provider":true}}},{"chain_id":11155111,"trusted":false,"provider":{"Node":{"kns_update":{"name":"providerfren.os","owner":"","node":"","public_key":"0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451","ips":["147.135.114.167"],"ports":{"ws":9999},"routers":[]},"use_as_provider":true}}},{"chain_id":10,"trusted":true,"provider":{"RpcUrl":"wss://opt-mainnet.g.alchemy.com/v2/phOnE7X9A3mnzAVjfyR1idu1yYX1mqSL"}},{"chain_id":10,"trusted":false,"provider":{"Node":{"kns_update":{"name":"providerfren.os","owner":"","node":"","public_key":"0x54f5a8a4c625d5925e63ed3f0203b63e007e3f822d7858bd98b1fd9704c99451","ips":["147.135.114.167"],"ports":{"ws":9999},"routers":[]},"use_as_provider":true}}}]

View File

@ -68,7 +68,8 @@ def main():
for feature in features:
build_and_move(feature, tmp_dir, architecture, os_name)
print(f"Build and move process completed.\nFind release in {tmp_dir}.")
linked_dir = f"\033]8;;file://{tmp_dir}\033\\{tmp_dir}\033]8;;\033\\"
print(f"Build and move process completed.\nFind release in {linked_dir}.")
if __name__ == "__main__":
main()