mirror of
https://github.com/zed-industries/zed.git
synced 2024-12-24 06:12:25 +03:00
Include contents of the zed-server repo
We're going full monorepo. Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
This commit is contained in:
parent
34abda3a04
commit
1537500fcb
3
.dockerignore
Normal file
3
.dockerignore
Normal file
@ -0,0 +1,3 @@
|
||||
/target
|
||||
/manifest.yml
|
||||
/migrate.yml
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -1,3 +1,6 @@
|
||||
/target
|
||||
/zed.xcworkspace
|
||||
.DS_Store
|
||||
/script/node_modules
|
||||
/server/.env.toml
|
||||
/server/static/styles.css
|
||||
|
1476
Cargo.lock
generated
1476
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
11
Cargo.toml
11
Cargo.toml
@ -1,10 +1,17 @@
|
||||
[workspace]
|
||||
members = ["zed", "zed-rpc", "gpui", "gpui_macros", "fsevent", "scoped_pool"]
|
||||
members = [
|
||||
"fsevent",
|
||||
"gpui",
|
||||
"gpui_macros",
|
||||
"scoped_pool",
|
||||
"server",
|
||||
"zed",
|
||||
"zed-rpc"
|
||||
]
|
||||
|
||||
[patch.crates-io]
|
||||
async-task = { git = "https://github.com/zed-industries/async-task", rev = "341b57d6de98cdfd7b418567b8de2022ca993a6e" }
|
||||
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "d72771a19f4143530b1cfd23808e344f1276e176" }
|
||||
|
||||
# TODO - Remove when a version is released with this PR: https://github.com/servo/core-foundation-rs/pull/457
|
||||
cocoa = { git = "https://github.com/servo/core-foundation-rs", rev = "025dcb3c0d1ef01530f57ef65f3b1deb948f5737" }
|
||||
cocoa-foundation = { git = "https://github.com/servo/core-foundation-rs", rev = "025dcb3c0d1ef01530f57ef65f3b1deb948f5737" }
|
||||
|
33
Dockerfile
Normal file
33
Dockerfile
Normal file
@ -0,0 +1,33 @@
|
||||
# syntax = docker/dockerfile:1.2
|
||||
|
||||
FROM rust as builder
|
||||
WORKDIR app
|
||||
RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash -
|
||||
RUN apt-get install -y nodejs
|
||||
COPY . .
|
||||
|
||||
# Install script dependencies
|
||||
RUN --mount=type=cache,target=./script/node_modules \
|
||||
cd ./script && npm install --quiet
|
||||
|
||||
# Build CSS
|
||||
RUN --mount=type=cache,target=./script/node_modules \
|
||||
script/build-css --release
|
||||
|
||||
# Compile server
|
||||
RUN --mount=type=cache,target=./script/node_modules \
|
||||
--mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=./target \
|
||||
cargo build --release --bin zed-server
|
||||
|
||||
# Copy server binary out of cached directory
|
||||
RUN --mount=type=cache,target=./target \
|
||||
cp /app/target/release/zed-server /app/zed-server
|
||||
|
||||
# Copy server binary to the runtime image
|
||||
FROM debian:buster-slim as runtime
|
||||
RUN apt-get update; \
|
||||
apt-get install -y --no-install-recommends libcurl4-openssl-dev ca-certificates
|
||||
WORKDIR app
|
||||
COPY --from=builder /app/zed-server /app
|
||||
ENTRYPOINT ["/app/zed-server"]
|
15
Dockerfile.migrator
Normal file
15
Dockerfile.migrator
Normal file
@ -0,0 +1,15 @@
|
||||
# syntax = docker/dockerfile:1.2
|
||||
|
||||
FROM rust as builder
|
||||
WORKDIR app
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=./target \
|
||||
cargo install sqlx-cli --root=/app --target-dir=/app/target --version 0.5.5
|
||||
|
||||
FROM debian:buster-slim as runtime
|
||||
RUN apt-get update; \
|
||||
apt-get install -y --no-install-recommends libssl1.1
|
||||
WORKDIR app
|
||||
COPY --from=builder /app/bin/sqlx /app
|
||||
COPY ./server/migrations /app/migrations
|
||||
ENTRYPOINT ["/app/sqlx", "migrate", "run"]
|
@ -8,22 +8,22 @@ version = "0.1.0"
|
||||
async-task = "4.0.3"
|
||||
ctor = "0.1"
|
||||
etagere = "0.2"
|
||||
gpui_macros = {path = "../gpui_macros"}
|
||||
gpui_macros = { path = "../gpui_macros" }
|
||||
log = "0.4"
|
||||
num_cpus = "1.13"
|
||||
ordered-float = "2.1.1"
|
||||
parking_lot = "0.11.1"
|
||||
pathfinder_color = "0.5"
|
||||
pathfinder_geometry = "0.5"
|
||||
postage = {version = "0.4.1", features = ["futures-traits"]}
|
||||
postage = { version = "0.4.1", features = ["futures-traits"] }
|
||||
rand = "0.8.3"
|
||||
replace_with = "0.1.7"
|
||||
resvg = "0.14"
|
||||
scoped-pool = {path = "../scoped_pool"}
|
||||
scoped-pool = { path = "../scoped_pool" }
|
||||
seahash = "4.1"
|
||||
serde = {version = "1.0.125", features = ["derive"]}
|
||||
serde = { version = "1.0.125", features = ["derive"] }
|
||||
serde_json = "1.0.64"
|
||||
smallvec = {version = "1.6", features = ["union"]}
|
||||
smallvec = { version = "1.6", features = ["union"] }
|
||||
smol = "1.2"
|
||||
tiny-skia = "0.5"
|
||||
tree-sitter = "0.19"
|
||||
@ -45,7 +45,7 @@ cocoa = "0.24"
|
||||
core-foundation = "0.9"
|
||||
core-graphics = "0.22.2"
|
||||
core-text = "19.2"
|
||||
font-kit = {git = "https://github.com/zed-industries/font-kit", rev = "8eaf7a918eafa28b0a37dc759e2e0e7683fa24f1"}
|
||||
font-kit = { git = "https://github.com/zed-industries/font-kit", rev = "8eaf7a918eafa28b0a37dc759e2e0e7683fa24f1" }
|
||||
foreign-types = "0.3"
|
||||
log = "0.4"
|
||||
metal = "0.21.0"
|
||||
|
@ -7,14 +7,8 @@ categories = ["parsing", "text-editors"]
|
||||
repository = "https://github.com/tree-sitter/tree-sitter-javascript"
|
||||
edition = "2018"
|
||||
license = "MIT"
|
||||
|
||||
build = "bindings/rust/build.rs"
|
||||
include = [
|
||||
"bindings/rust/*",
|
||||
"grammar.js",
|
||||
"queries/*",
|
||||
"src/*",
|
||||
]
|
||||
include = ["bindings/rust/*", "grammar.js", "queries/*", "src/*"]
|
||||
|
||||
[lib]
|
||||
path = "bindings/rust/lib.rs"
|
||||
|
@ -9,4 +9,4 @@ proc-macro = true
|
||||
[dependencies]
|
||||
syn = "1.0"
|
||||
quote = "1.0"
|
||||
proc-macro2 = "1.0"
|
||||
proc-macro2 = "1.0"
|
||||
|
10
script/build-css
Executable file
10
script/build-css
Executable file
@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
cd ./script
|
||||
[ -d node_modules ] || npm install
|
||||
if [[ $1 == --release ]]; then
|
||||
export NODE_ENV=production # Purge unused styles in --release mode
|
||||
fi
|
||||
npx tailwindcss build ../server/styles.css --output ../server/static/styles.css
|
17
script/deploy
Executable file
17
script/deploy
Executable file
@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Prerequisites:
|
||||
#
|
||||
# - Log in to the DigitalOcean docker registry
|
||||
# doctl registry login
|
||||
#
|
||||
# - Set the default K8s context to production
|
||||
# doctl kubernetes cluster kubeconfig save zed-1
|
||||
|
||||
set -e
|
||||
|
||||
IMAGE_ID=registry.digitalocean.com/zed/zed-server
|
||||
|
||||
docker build . --tag $IMAGE_ID
|
||||
docker push $IMAGE_ID
|
||||
kubectl rollout restart deployment zed
|
11
script/deploy-migration
Executable file
11
script/deploy-migration
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
IMAGE_ID=registry.digitalocean.com/zed/zed-migrator
|
||||
|
||||
docker build . \
|
||||
--file ./Dockerfile.migrator \
|
||||
--tag $IMAGE_ID
|
||||
docker push $IMAGE_ID
|
||||
kubectl apply -f ./server/migrate.yml
|
2452
script/package-lock.json
generated
Normal file
2452
script/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
6
script/package.json
Normal file
6
script/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"devDependencies": {
|
||||
"@tailwindcss/typography": "^0.4.0",
|
||||
"tailwindcss-cli": "^0.1.2"
|
||||
}
|
||||
}
|
6
script/server
Executable file
6
script/server
Executable file
@ -0,0 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
cd server
|
||||
cargo run
|
12
script/sqlx
Executable file
12
script/sqlx
Executable file
@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
# Install sqlx-cli if needed
|
||||
[[ "$(sqlx --version)" == "sqlx-cli 0.5.5" ]] || cargo install sqlx-cli --version 0.5.5
|
||||
|
||||
# Export contents of .env.toml
|
||||
eval "$(cargo run --bin dotenv)"
|
||||
|
||||
# Run sqlx command
|
||||
sqlx $@
|
44
script/tailwind.config.js
Normal file
44
script/tailwind.config.js
Normal file
@ -0,0 +1,44 @@
|
||||
module.exports = {
|
||||
theme: {
|
||||
fontFamily: {
|
||||
display: [
|
||||
"Visby CF", "ui-sans-serif", "system-ui", "-apple-system", "BlinkMacSystemFont", "Segoe UI", "Roboto",
|
||||
"Helvetica Neue", "Arial", "Noto Sans", "sans-serif", "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol",
|
||||
"Noto Color Emoji"
|
||||
],
|
||||
body: [
|
||||
"Open Sans", "ui-sans-serif", "system-ui", "-apple-system", "BlinkMacSystemFont", "Segoe UI", "Roboto",
|
||||
"Helvetica Neue", "Arial", "Noto Sans", "sans-serif", "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol",
|
||||
"Noto Color Emoji"
|
||||
],
|
||||
},
|
||||
extend: {
|
||||
typography: (theme) => ({
|
||||
DEFAULT: {
|
||||
css: {
|
||||
h1: {
|
||||
fontFamily: theme("fontFamily.display").join(", ")
|
||||
},
|
||||
h2: {
|
||||
fontFamily: theme("fontFamily.display").join(", ")
|
||||
},
|
||||
h3: {
|
||||
fontFamily: theme("fontFamily.display").join(", ")
|
||||
},
|
||||
h4: {
|
||||
fontFamily: theme("fontFamily.display").join(", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
},
|
||||
variants: {
|
||||
},
|
||||
plugins: [
|
||||
require('@tailwindcss/typography'),
|
||||
],
|
||||
purge: [
|
||||
"../templates/**/*.hbs"
|
||||
]
|
||||
}
|
11
server/.env.template.toml
Normal file
11
server/.env.template.toml
Normal file
@ -0,0 +1,11 @@
|
||||
DATABASE_URL = "postgres://postgres@localhost/zed"
|
||||
SESSION_SECRET = "6E1GS6IQNOLIBKWMEVWF1AFO4H78KNU8"
|
||||
|
||||
HTTP_PORT = 8080
|
||||
|
||||
# Available at https://github.com/organizations/zed-industries/settings/apps/zed-local-development
|
||||
GITHUB_APP_ID = 115633
|
||||
GITHUB_CLIENT_ID = "Iv1.768076c9becc75c4"
|
||||
GITHUB_CLIENT_SECRET = ""
|
||||
GITHUB_PRIVATE_KEY = """\
|
||||
"""
|
50
server/Cargo.toml
Normal file
50
server/Cargo.toml
Normal file
@ -0,0 +1,50 @@
|
||||
[package]
|
||||
authors = ["Nathan Sobo <nathan@warp.dev>"]
|
||||
default-run = "zed-server"
|
||||
edition = "2018"
|
||||
name = "zed-server"
|
||||
version = "0.1.0"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.40"
|
||||
async-std = { version = "1.8.0", features = ["attributes"] }
|
||||
async-trait = "0.1.50"
|
||||
async-tungstenite = "0.14"
|
||||
base64 = "0.13"
|
||||
clap = "=3.0.0-beta.2"
|
||||
comrak = "0.10"
|
||||
either = "1.6"
|
||||
envy = "0.4.2"
|
||||
futures = "0.3"
|
||||
handlebars = "3.5"
|
||||
http-auth-basic = "0.1.3"
|
||||
jwt-simple = "0.10.0"
|
||||
oauth2 = { version = "4.0.0", default_features = false }
|
||||
oauth2-surf = "0.1.1"
|
||||
parking_lot = "0.11.1"
|
||||
postage = { version = "0.4.1", features = ["futures-traits"] }
|
||||
rand = "0.8"
|
||||
rust-embed = "5.9.0"
|
||||
scrypt = "0.7"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
sha-1 = "0.9"
|
||||
surf = "2.2.0"
|
||||
tide = "0.16.0"
|
||||
tide-compress = "0.9.0"
|
||||
toml = "0.5.8"
|
||||
zed-rpc = { path = "../zed-rpc" }
|
||||
|
||||
[dependencies.async-sqlx-session]
|
||||
version = "0.3.0"
|
||||
features = ["pg", "rustls"]
|
||||
default-features = false
|
||||
|
||||
[dependencies.sqlx]
|
||||
version = "0.5.2"
|
||||
features = ["runtime-async-std-rustls", "postgres"]
|
||||
|
||||
[dev-dependencies]
|
||||
gpui = { path = "../gpui" }
|
||||
zed = { path = "../zed", features = ["test-support"] }
|
||||
lazy_static = "1.4"
|
||||
serde_json = { version = "1.0.64", features = ["preserve_order"] }
|
2
server/Procfile
Normal file
2
server/Procfile
Normal file
@ -0,0 +1,2 @@
|
||||
web: ./target/release/zed-server
|
||||
release: ./target/release/sqlx migrate run
|
17
server/README.md
Normal file
17
server/README.md
Normal file
@ -0,0 +1,17 @@
|
||||
# Zed Server
|
||||
|
||||
This crate is what we run at https://zed.dev.
|
||||
|
||||
It contains our web presence as well as the backend logic for collaboration, to which we connect from the Zed client via a websocket.
|
||||
|
||||
## Templates
|
||||
|
||||
We use handlebars templates that are interpreted at runtime. When running in debug mode, you can change templates and see the latest content without restarting the server. This is enabled by the `rust-embed` crate, which we use to access the contents of the `/templates` folder at runtime. In debug mode it reads contents from the file system, but in release the templates will be embedded in the server binary.
|
||||
|
||||
## Static assets
|
||||
|
||||
We also use `rust-embed` to access the contents of the `/static` folder via the `/static/*` route. The app will pick up changes to the contents of this folder when running in debug mode.
|
||||
|
||||
## CSS
|
||||
|
||||
This site uses Tailwind CSS, which means our stylesheets don't need to change very frequently. We check `static/styles.css` into the repository, but it's actually compiled from `/styles.css` via `script/build-css`. This script runs the Tailwind compilation flow to regenerate `static/styles.css` via PostCSS.
|
12
server/basic.conf
Normal file
12
server/basic.conf
Normal file
@ -0,0 +1,12 @@
|
||||
|
||||
[Interface]
|
||||
PrivateKey = B5Fp/yVfP0QYlb+YJv9ea+EMI1mWODPD3akh91cVjvc=
|
||||
Address = fdaa:0:2ce3:a7b:bea:0:a:2/120
|
||||
DNS = fdaa:0:2ce3::3
|
||||
|
||||
[Peer]
|
||||
PublicKey = RKAYPljEJiuaELNDdQIEJmQienT9+LRISfIHwH45HAw=
|
||||
AllowedIPs = fdaa:0:2ce3::/48
|
||||
Endpoint = ord1.gateway.6pn.dev:51820
|
||||
PersistentKeepalive = 15
|
||||
|
71
server/manifest.yml
Normal file
71
server/manifest.yml
Normal file
@ -0,0 +1,71 @@
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: zed
|
||||
annotations:
|
||||
service.beta.kubernetes.io/do-loadbalancer-tls-ports: "443"
|
||||
service.beta.kubernetes.io/do-loadbalancer-certificate-id: "606e2db9-2b58-4ae7-b12c-a0c7d56af49b"
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
selector:
|
||||
app: zed
|
||||
ports:
|
||||
- name: web
|
||||
protocol: TCP
|
||||
port: 443
|
||||
targetPort: 8080
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: zed
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: zed
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: zed
|
||||
spec:
|
||||
containers:
|
||||
- name: zed
|
||||
image: registry.digitalocean.com/zed/zed-server
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: HTTP_PORT
|
||||
value: "8080"
|
||||
- name: DATABASE_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: database
|
||||
key: url
|
||||
- name: SESSION_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: session
|
||||
key: secret
|
||||
- name: GITHUB_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: github
|
||||
key: appId
|
||||
- name: GITHUB_CLIENT_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: github
|
||||
key: clientId
|
||||
- name: GITHUB_CLIENT_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: github
|
||||
key: clientSecret
|
||||
- name: GITHUB_PRIVATE_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: github
|
||||
key: privateKey
|
17
server/migrate.yml
Normal file
17
server/migrate.yml
Normal file
@ -0,0 +1,17 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: migrate
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: migrator
|
||||
image: registry.digitalocean.com/zed/zed-migrator
|
||||
env:
|
||||
- name: DATABASE_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: database
|
||||
key: url
|
26
server/migrations/20210527024318_initial_schema.sql
Normal file
26
server/migrations/20210527024318_initial_schema.sql
Normal file
@ -0,0 +1,26 @@
|
||||
CREATE TABLE IF NOT EXISTS "sessions" (
|
||||
"id" VARCHAR NOT NULL PRIMARY KEY,
|
||||
"expires" TIMESTAMP WITH TIME ZONE NULL,
|
||||
"session" TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "users" (
|
||||
"id" SERIAL PRIMARY KEY,
|
||||
"github_login" VARCHAR,
|
||||
"admin" BOOLEAN
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "signups" (
|
||||
"id" SERIAL PRIMARY KEY,
|
||||
"github_login" VARCHAR,
|
||||
"email_address" VARCHAR,
|
||||
"about" TEXT
|
||||
);
|
||||
|
||||
INSERT INTO users (github_login, admin)
|
||||
VALUES
|
||||
('nathansobo', true),
|
||||
('maxbrunsfeld', true),
|
||||
('as-cii', true);
|
@ -0,0 +1,7 @@
|
||||
CREATE TABLE IF NOT EXISTS "access_tokens" (
|
||||
"id" SERIAL PRIMARY KEY,
|
||||
"user_id" INTEGER REFERENCES users (id),
|
||||
"hash" VARCHAR(128)
|
||||
);
|
||||
|
||||
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");
|
160
server/src/admin.rs
Normal file
160
server/src/admin.rs
Normal file
@ -0,0 +1,160 @@
|
||||
use crate::{auth::RequestExt as _, AppState, DbPool, LayoutData, Request, RequestExt as _};
|
||||
use async_trait::async_trait;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{Executor, FromRow};
|
||||
use std::sync::Arc;
|
||||
use surf::http::mime;
|
||||
|
||||
#[async_trait]
|
||||
pub trait RequestExt {
|
||||
async fn require_admin(&self) -> tide::Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RequestExt for Request {
|
||||
async fn require_admin(&self) -> tide::Result<()> {
|
||||
let current_user = self
|
||||
.current_user()
|
||||
.await?
|
||||
.ok_or_else(|| tide::Error::from_str(401, "not logged in"))?;
|
||||
|
||||
if current_user.is_admin {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(tide::Error::from_str(
|
||||
403,
|
||||
"authenticated user is not an admin",
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_routes(app: &mut tide::Server<Arc<AppState>>) {
|
||||
app.at("/admin").get(get_admin_page);
|
||||
app.at("/users").post(post_user);
|
||||
app.at("/users/:id").put(put_user);
|
||||
app.at("/users/:id/delete").post(delete_user);
|
||||
app.at("/signups/:id/delete").post(delete_signup);
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct AdminData {
|
||||
#[serde(flatten)]
|
||||
layout: Arc<LayoutData>,
|
||||
users: Vec<User>,
|
||||
signups: Vec<Signup>,
|
||||
}
|
||||
|
||||
#[derive(Debug, FromRow, Serialize)]
|
||||
pub struct User {
|
||||
pub id: i32,
|
||||
pub github_login: String,
|
||||
pub admin: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, FromRow, Serialize)]
|
||||
pub struct Signup {
|
||||
pub id: i32,
|
||||
pub github_login: String,
|
||||
pub email_address: String,
|
||||
pub about: String,
|
||||
}
|
||||
|
||||
async fn get_admin_page(mut request: Request) -> tide::Result {
|
||||
request.require_admin().await?;
|
||||
|
||||
let data = AdminData {
|
||||
layout: request.layout_data().await?,
|
||||
users: sqlx::query_as("SELECT * FROM users ORDER BY github_login ASC")
|
||||
.fetch_all(request.db())
|
||||
.await?,
|
||||
signups: sqlx::query_as("SELECT * FROM signups ORDER BY id DESC")
|
||||
.fetch_all(request.db())
|
||||
.await?,
|
||||
};
|
||||
|
||||
Ok(tide::Response::builder(200)
|
||||
.body(request.state().render_template("admin.hbs", &data)?)
|
||||
.content_type(mime::HTML)
|
||||
.build())
|
||||
}
|
||||
|
||||
async fn post_user(mut request: Request) -> tide::Result {
|
||||
request.require_admin().await?;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Form {
|
||||
github_login: String,
|
||||
#[serde(default)]
|
||||
admin: bool,
|
||||
}
|
||||
|
||||
let form = request.body_form::<Form>().await?;
|
||||
let github_login = form
|
||||
.github_login
|
||||
.strip_prefix("@")
|
||||
.unwrap_or(&form.github_login);
|
||||
|
||||
if !github_login.is_empty() {
|
||||
create_user(request.db(), github_login, form.admin).await?;
|
||||
}
|
||||
|
||||
Ok(tide::Redirect::new("/admin").into())
|
||||
}
|
||||
|
||||
async fn put_user(mut request: Request) -> tide::Result {
|
||||
request.require_admin().await?;
|
||||
|
||||
let user_id = request.param("id")?.parse::<i32>()?;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Body {
|
||||
admin: bool,
|
||||
}
|
||||
|
||||
let body: Body = request.body_json().await?;
|
||||
|
||||
request
|
||||
.db()
|
||||
.execute(
|
||||
sqlx::query("UPDATE users SET admin = $1 WHERE id = $2;")
|
||||
.bind(body.admin)
|
||||
.bind(user_id),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(tide::Response::builder(200).build())
|
||||
}
|
||||
|
||||
async fn delete_user(request: Request) -> tide::Result {
|
||||
request.require_admin().await?;
|
||||
|
||||
let user_id = request.param("id")?.parse::<i32>()?;
|
||||
request
|
||||
.db()
|
||||
.execute(sqlx::query("DELETE FROM users WHERE id = $1;").bind(user_id))
|
||||
.await?;
|
||||
|
||||
Ok(tide::Redirect::new("/admin").into())
|
||||
}
|
||||
|
||||
pub async fn create_user(db: &DbPool, github_login: &str, admin: bool) -> tide::Result<i32> {
|
||||
let id: i32 =
|
||||
sqlx::query_scalar("INSERT INTO users (github_login, admin) VALUES ($1, $2) RETURNING id;")
|
||||
.bind(github_login)
|
||||
.bind(admin)
|
||||
.fetch_one(db)
|
||||
.await?;
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
async fn delete_signup(request: Request) -> tide::Result {
|
||||
request.require_admin().await?;
|
||||
let signup_id = request.param("id")?.parse::<i32>()?;
|
||||
request
|
||||
.db()
|
||||
.execute(sqlx::query("DELETE FROM signups WHERE id = $1;").bind(signup_id))
|
||||
.await?;
|
||||
|
||||
Ok(tide::Redirect::new("/admin").into())
|
||||
}
|
31
server/src/assets.rs
Normal file
31
server/src/assets.rs
Normal file
@ -0,0 +1,31 @@
|
||||
use crate::{AppState, Request};
|
||||
use anyhow::anyhow;
|
||||
use rust_embed::RustEmbed;
|
||||
use std::sync::Arc;
|
||||
use tide::{http::mime, Server};
|
||||
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "static"]
|
||||
struct Static;
|
||||
|
||||
pub fn add_routes(app: &mut Server<Arc<AppState>>) {
|
||||
app.at("/static/*path").get(get_static_asset);
|
||||
}
|
||||
|
||||
async fn get_static_asset(request: Request) -> tide::Result {
|
||||
let path = request.param("path").unwrap();
|
||||
let content = Static::get(path).ok_or_else(|| anyhow!("asset not found at {}", path))?;
|
||||
|
||||
let content_type = if path.starts_with("svg") {
|
||||
mime::SVG
|
||||
} else if path.starts_with("styles") {
|
||||
mime::CSS
|
||||
} else {
|
||||
mime::BYTE_STREAM
|
||||
};
|
||||
|
||||
Ok(tide::Response::builder(200)
|
||||
.content_type(content_type)
|
||||
.body(content.as_ref())
|
||||
.build())
|
||||
}
|
336
server/src/auth.rs
Normal file
336
server/src/auth.rs
Normal file
@ -0,0 +1,336 @@
|
||||
use super::errors::TideResultExt;
|
||||
use crate::{github, rpc, AppState, DbPool, Request, RequestExt as _};
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_std::stream::StreamExt;
|
||||
use async_trait::async_trait;
|
||||
pub use oauth2::basic::BasicClient as Client;
|
||||
use oauth2::{
|
||||
AuthUrl, AuthorizationCode, ClientId, CsrfToken, PkceCodeChallenge, RedirectUrl,
|
||||
TokenResponse as _, TokenUrl,
|
||||
};
|
||||
use rand::thread_rng;
|
||||
use scrypt::{
|
||||
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
|
||||
Scrypt,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::FromRow;
|
||||
use std::{borrow::Cow, convert::TryFrom, sync::Arc};
|
||||
use surf::Url;
|
||||
use tide::Server;
|
||||
use zed_rpc::{auth as zed_auth, proto, Peer};
|
||||
|
||||
static CURRENT_GITHUB_USER: &'static str = "current_github_user";
|
||||
static GITHUB_AUTH_URL: &'static str = "https://github.com/login/oauth/authorize";
|
||||
static GITHUB_TOKEN_URL: &'static str = "https://github.com/login/oauth/access_token";
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct User {
|
||||
pub github_login: String,
|
||||
pub avatar_url: String,
|
||||
pub is_insider: bool,
|
||||
pub is_admin: bool,
|
||||
}
|
||||
|
||||
pub struct VerifyToken;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct UserId(pub i32);
|
||||
|
||||
#[async_trait]
|
||||
impl tide::Middleware<Arc<AppState>> for VerifyToken {
|
||||
async fn handle(
|
||||
&self,
|
||||
mut request: Request,
|
||||
next: tide::Next<'_, Arc<AppState>>,
|
||||
) -> tide::Result {
|
||||
let mut auth_header = request
|
||||
.header("Authorization")
|
||||
.ok_or_else(|| anyhow!("no authorization header"))?
|
||||
.last()
|
||||
.as_str()
|
||||
.split_whitespace();
|
||||
|
||||
let user_id: i32 = auth_header
|
||||
.next()
|
||||
.ok_or_else(|| anyhow!("missing user id in authorization header"))?
|
||||
.parse()?;
|
||||
let access_token = auth_header
|
||||
.next()
|
||||
.ok_or_else(|| anyhow!("missing access token in authorization header"))?;
|
||||
|
||||
let state = request.state().clone();
|
||||
|
||||
let mut password_hashes =
|
||||
sqlx::query_scalar::<_, String>("SELECT hash FROM access_tokens WHERE user_id = $1")
|
||||
.bind(&user_id)
|
||||
.fetch_many(&state.db);
|
||||
|
||||
let mut credentials_valid = false;
|
||||
while let Some(password_hash) = password_hashes.next().await {
|
||||
if let either::Either::Right(password_hash) = password_hash? {
|
||||
if verify_access_token(&access_token, &password_hash)? {
|
||||
credentials_valid = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if credentials_valid {
|
||||
request.set_ext(UserId(user_id));
|
||||
Ok(next.run(request).await)
|
||||
} else {
|
||||
Err(anyhow!("invalid credentials").into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait RequestExt {
|
||||
async fn current_user(&self) -> tide::Result<Option<User>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RequestExt for Request {
|
||||
async fn current_user(&self) -> tide::Result<Option<User>> {
|
||||
if let Some(details) = self.session().get::<github::User>(CURRENT_GITHUB_USER) {
|
||||
#[derive(FromRow)]
|
||||
struct UserRow {
|
||||
admin: bool,
|
||||
}
|
||||
|
||||
let user_row: Option<UserRow> =
|
||||
sqlx::query_as("SELECT admin FROM users WHERE github_login = $1")
|
||||
.bind(&details.login)
|
||||
.fetch_optional(self.db())
|
||||
.await?;
|
||||
|
||||
let is_insider = user_row.is_some();
|
||||
let is_admin = user_row.map_or(false, |row| row.admin);
|
||||
|
||||
Ok(Some(User {
|
||||
github_login: details.login,
|
||||
avatar_url: details.avatar_url,
|
||||
is_insider,
|
||||
is_admin,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait PeerExt {
|
||||
async fn sign_out(
|
||||
self: &Arc<Self>,
|
||||
connection_id: zed_rpc::ConnectionId,
|
||||
state: &AppState,
|
||||
) -> tide::Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PeerExt for Peer {
|
||||
async fn sign_out(
|
||||
self: &Arc<Self>,
|
||||
connection_id: zed_rpc::ConnectionId,
|
||||
state: &AppState,
|
||||
) -> tide::Result<()> {
|
||||
self.disconnect(connection_id).await;
|
||||
let worktree_ids = state.rpc.write().await.remove_connection(connection_id);
|
||||
for worktree_id in worktree_ids {
|
||||
let state = state.rpc.read().await;
|
||||
if let Some(worktree) = state.worktrees.get(&worktree_id) {
|
||||
rpc::broadcast(connection_id, worktree.connection_ids(), |conn_id| {
|
||||
self.send(
|
||||
conn_id,
|
||||
proto::RemovePeer {
|
||||
worktree_id,
|
||||
peer_id: connection_id.0,
|
||||
},
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_client(client_id: &str, client_secret: &str) -> Client {
|
||||
Client::new(
|
||||
ClientId::new(client_id.to_string()),
|
||||
Some(oauth2::ClientSecret::new(client_secret.to_string())),
|
||||
AuthUrl::new(GITHUB_AUTH_URL.into()).unwrap(),
|
||||
Some(TokenUrl::new(GITHUB_TOKEN_URL.into()).unwrap()),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn add_routes(app: &mut Server<Arc<AppState>>) {
|
||||
app.at("/sign_in").get(get_sign_in);
|
||||
app.at("/sign_out").post(post_sign_out);
|
||||
app.at("/auth_callback").get(get_auth_callback);
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct NativeAppSignInParams {
|
||||
native_app_port: String,
|
||||
native_app_public_key: String,
|
||||
}
|
||||
|
||||
async fn get_sign_in(mut request: Request) -> tide::Result {
|
||||
let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();
|
||||
|
||||
request
|
||||
.session_mut()
|
||||
.insert("pkce_verifier", pkce_verifier)?;
|
||||
|
||||
let mut redirect_url = Url::parse(&format!(
|
||||
"{}://{}/auth_callback",
|
||||
request
|
||||
.header("X-Forwarded-Proto")
|
||||
.and_then(|values| values.get(0))
|
||||
.map(|value| value.as_str())
|
||||
.unwrap_or("http"),
|
||||
request.host().unwrap()
|
||||
))?;
|
||||
|
||||
let app_sign_in_params: Option<NativeAppSignInParams> = request.query().ok();
|
||||
if let Some(query) = app_sign_in_params {
|
||||
redirect_url
|
||||
.query_pairs_mut()
|
||||
.clear()
|
||||
.append_pair("native_app_port", &query.native_app_port)
|
||||
.append_pair("native_app_public_key", &query.native_app_public_key);
|
||||
}
|
||||
|
||||
let (auth_url, csrf_token) = request
|
||||
.state()
|
||||
.auth_client
|
||||
.authorize_url(CsrfToken::new_random)
|
||||
.set_redirect_uri(Cow::Owned(RedirectUrl::from_url(redirect_url)))
|
||||
.set_pkce_challenge(pkce_challenge)
|
||||
.url();
|
||||
|
||||
request
|
||||
.session_mut()
|
||||
.insert("auth_csrf_token", csrf_token)?;
|
||||
|
||||
Ok(tide::Redirect::new(auth_url).into())
|
||||
}
|
||||
|
||||
async fn get_auth_callback(mut request: Request) -> tide::Result {
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Query {
|
||||
code: String,
|
||||
state: String,
|
||||
|
||||
#[serde(flatten)]
|
||||
native_app_sign_in_params: Option<NativeAppSignInParams>,
|
||||
}
|
||||
|
||||
let query: Query = request.query()?;
|
||||
|
||||
let pkce_verifier = request
|
||||
.session()
|
||||
.get("pkce_verifier")
|
||||
.ok_or_else(|| anyhow!("could not retrieve pkce_verifier from session"))?;
|
||||
|
||||
let csrf_token = request
|
||||
.session()
|
||||
.get::<CsrfToken>("auth_csrf_token")
|
||||
.ok_or_else(|| anyhow!("could not retrieve auth_csrf_token from session"))?;
|
||||
|
||||
if &query.state != csrf_token.secret() {
|
||||
return Err(anyhow!("csrf token does not match").into());
|
||||
}
|
||||
|
||||
let github_access_token = request
|
||||
.state()
|
||||
.auth_client
|
||||
.exchange_code(AuthorizationCode::new(query.code))
|
||||
.set_pkce_verifier(pkce_verifier)
|
||||
.request_async(oauth2_surf::http_client)
|
||||
.await
|
||||
.context("failed to exchange oauth code")?
|
||||
.access_token()
|
||||
.secret()
|
||||
.clone();
|
||||
|
||||
let user_details = request
|
||||
.state()
|
||||
.github_client
|
||||
.user(github_access_token)
|
||||
.details()
|
||||
.await
|
||||
.context("failed to fetch user")?;
|
||||
|
||||
let user_id: Option<i32> = sqlx::query_scalar("SELECT id from users where github_login = $1")
|
||||
.bind(&user_details.login)
|
||||
.fetch_optional(request.db())
|
||||
.await?;
|
||||
|
||||
request
|
||||
.session_mut()
|
||||
.insert(CURRENT_GITHUB_USER, user_details.clone())?;
|
||||
|
||||
// When signing in from the native app, generate a new access token for the current user. Return
|
||||
// a redirect so that the user's browser sends this access token to the locally-running app.
|
||||
if let Some((user_id, app_sign_in_params)) = user_id.zip(query.native_app_sign_in_params) {
|
||||
let access_token = create_access_token(request.db(), user_id).await?;
|
||||
let native_app_public_key =
|
||||
zed_auth::PublicKey::try_from(app_sign_in_params.native_app_public_key.clone())
|
||||
.context("failed to parse app public key")?;
|
||||
let encrypted_access_token = native_app_public_key
|
||||
.encrypt_string(&access_token)
|
||||
.context("failed to encrypt access token with public key")?;
|
||||
|
||||
return Ok(tide::Redirect::new(&format!(
|
||||
"http://127.0.0.1:{}?user_id={}&access_token={}",
|
||||
app_sign_in_params.native_app_port, user_id, encrypted_access_token,
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
Ok(tide::Redirect::new("/").into())
|
||||
}
|
||||
|
||||
/// Signs the current user out by clearing their GitHub identity from the
/// session, then redirects back to the home page.
async fn post_sign_out(mut request: Request) -> tide::Result {
    request.session_mut().remove(CURRENT_GITHUB_USER);
    Ok(tide::Redirect::new("/").into())
}
|
||||
|
||||
pub async fn create_access_token(db: &DbPool, user_id: i32) -> tide::Result<String> {
|
||||
let access_token = zed_auth::random_token();
|
||||
let access_token_hash =
|
||||
hash_access_token(&access_token).context("failed to hash access token")?;
|
||||
sqlx::query("INSERT INTO access_tokens (user_id, hash) values ($1, $2)")
|
||||
.bind(user_id)
|
||||
.bind(access_token_hash)
|
||||
.fetch_optional(db)
|
||||
.await?;
|
||||
Ok(access_token)
|
||||
}
|
||||
|
||||
fn hash_access_token(token: &str) -> tide::Result<String> {
|
||||
// Avoid slow hashing in debug mode.
|
||||
let params = if cfg!(debug_assertions) {
|
||||
scrypt::Params::new(1, 1, 1).unwrap()
|
||||
} else {
|
||||
scrypt::Params::recommended()
|
||||
};
|
||||
|
||||
Ok(Scrypt
|
||||
.hash_password(
|
||||
token.as_bytes(),
|
||||
None,
|
||||
params,
|
||||
&SaltString::generate(thread_rng()),
|
||||
)?
|
||||
.to_string())
|
||||
}
|
||||
|
||||
pub fn verify_access_token(token: &str, hash: &str) -> tide::Result<bool> {
|
||||
let hash = PasswordHash::new(hash)?;
|
||||
Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok())
|
||||
}
|
20
server/src/bin/dotenv.rs
Normal file
20
server/src/bin/dotenv.rs
Normal file
@ -0,0 +1,20 @@
|
||||
use anyhow::anyhow;
|
||||
use std::fs;
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
|
||||
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
|
||||
)?;
|
||||
|
||||
for (key, value) in env {
|
||||
let value = match value {
|
||||
toml::Value::String(value) => value,
|
||||
toml::Value::Integer(value) => value.to_string(),
|
||||
toml::Value::Float(value) => value.to_string(),
|
||||
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
|
||||
};
|
||||
println!("export {}=\"{}\"", key, value);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
20
server/src/env.rs
Normal file
20
server/src/env.rs
Normal file
@ -0,0 +1,20 @@
|
||||
use anyhow::anyhow;
|
||||
use std::fs;
|
||||
|
||||
pub fn load_dotenv() -> anyhow::Result<()> {
|
||||
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
|
||||
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
|
||||
)?;
|
||||
|
||||
for (key, value) in env {
|
||||
let value = match value {
|
||||
toml::Value::String(value) => value,
|
||||
toml::Value::Integer(value) => value.to_string(),
|
||||
toml::Value::Float(value) => value.to_string(),
|
||||
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
|
||||
};
|
||||
std::env::set_var(key, value);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
73
server/src/errors.rs
Normal file
73
server/src/errors.rs
Normal file
@ -0,0 +1,73 @@
|
||||
use crate::{AppState, LayoutData, Request, RequestExt};
|
||||
use async_trait::async_trait;
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
use tide::http::mime;
|
||||
|
||||
/// Tide middleware that re-renders any non-success response body with the
/// shared `error.hbs` template, so error pages match the rest of the site.
pub struct Middleware;

#[async_trait]
impl tide::Middleware<Arc<AppState>> for Middleware {
    async fn handle(
        &self,
        mut request: Request,
        next: tide::Next<'_, Arc<AppState>>,
    ) -> tide::Result {
        // Capture everything the error template needs *before* `next`
        // consumes the request.
        let app = request.state().clone();
        let layout_data = request.layout_data().await?;

        let mut response = next.run(request).await;

        // Template context for the error page: the site layout data flattened
        // together with the HTTP status and its reason phrase.
        #[derive(Serialize)]
        struct ErrorData {
            #[serde(flatten)]
            layout: Arc<LayoutData>,
            status: u16,
            reason: &'static str,
        }

        if !response.status().is_success() {
            response.set_body(app.render_template(
                "error.hbs",
                &ErrorData {
                    layout: layout_data,
                    status: response.status().into(),
                    reason: response.status().canonical_reason(),
                },
            )?);
            response.set_content_type(mime::HTML);
        }

        Ok(response)
    }
}
|
||||
|
||||
// Allow tide Results to accept context like other Results do when
// using anyhow. The tide status code of the original error is preserved;
// only the underlying anyhow error gains context.
pub trait TideResultExt {
    /// Wraps the error (if any) with a static context message.
    fn context<C>(self, cx: C) -> Self
    where
        C: std::fmt::Display + Send + Sync + 'static;

    /// Wraps the error (if any) with a lazily computed context message.
    fn with_context<C, F>(self, f: F) -> Self
    where
        C: std::fmt::Display + Send + Sync + 'static,
        F: FnOnce() -> C;
}

impl<T> TideResultExt for tide::Result<T> {
    fn context<C>(self, cx: C) -> Self
    where
        C: std::fmt::Display + Send + Sync + 'static,
    {
        // Unwrap to the inner anyhow error, attach context, and rebuild the
        // tide error with the same status code.
        self.map_err(|e| tide::Error::new(e.status(), e.into_inner().context(cx)))
    }

    fn with_context<C, F>(self, f: F) -> Self
    where
        C: std::fmt::Display + Send + Sync + 'static,
        F: FnOnce() -> C,
    {
        self.map_err(|e| tide::Error::new(e.status(), e.into_inner().context(f())))
    }
}
|
43
server/src/expiring.rs
Normal file
43
server/src/expiring.rs
Normal file
@ -0,0 +1,43 @@
|
||||
use std::{future::Future, time::Instant};
|
||||
|
||||
use async_std::sync::Mutex;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Expiring<T>(Mutex<Option<ExpiringState<T>>>);
|
||||
|
||||
pub struct ExpiringState<T> {
|
||||
value: T,
|
||||
expires_at: Instant,
|
||||
}
|
||||
|
||||
impl<T: Clone> Expiring<T> {
|
||||
pub async fn get_or_refresh<F, G>(&self, f: F) -> tide::Result<T>
|
||||
where
|
||||
F: FnOnce() -> G,
|
||||
G: Future<Output = tide::Result<(T, Instant)>>,
|
||||
{
|
||||
let mut state = self.0.lock().await;
|
||||
|
||||
if let Some(state) = state.as_mut() {
|
||||
if Instant::now() >= state.expires_at {
|
||||
let (value, expires_at) = f().await?;
|
||||
state.value = value.clone();
|
||||
state.expires_at = expires_at;
|
||||
Ok(value)
|
||||
} else {
|
||||
Ok(state.value.clone())
|
||||
}
|
||||
} else {
|
||||
let (value, expires_at) = f().await?;
|
||||
*state = Some(ExpiringState {
|
||||
value: value.clone(),
|
||||
expires_at,
|
||||
});
|
||||
Ok(value)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn clear(&self) {
|
||||
self.0.lock().await.take();
|
||||
}
|
||||
}
|
265
server/src/github.rs
Normal file
265
server/src/github.rs
Normal file
@ -0,0 +1,265 @@
|
||||
use crate::expiring::Expiring;
|
||||
use anyhow::{anyhow, Context};
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use std::{
|
||||
future::Future,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use surf::{http::Method, RequestBuilder, Url};
|
||||
|
||||
/// Subset of GitHub's "release" API object that this server consumes.
#[derive(Debug, Deserialize, Serialize)]
pub struct Release {
    pub tag_name: String,
    pub name: String,
    // Release notes; rendered from Markdown to HTML before display.
    pub body: String,
    pub draft: bool,
    pub assets: Vec<Asset>,
}

/// A downloadable file attached to a GitHub release.
#[derive(Debug, Deserialize, Serialize)]
pub struct Asset {
    pub name: String,
    pub url: String,
}
|
||||
|
||||
/// Client for the GitHub App API, authenticating as the app itself via a
/// short-lived JWT signed with the app's private key.
pub struct AppClient {
    id: usize,
    // PEM-encoded RSA private key for the GitHub App.
    private_key: String,
    // Cached "Bearer <jwt>" header value; rebuilt when it expires.
    jwt_bearer_header: Expiring<String>,
}

/// Minimal deserialization target for GitHub's repo-installation endpoint.
#[derive(Deserialize)]
struct Installation {
    #[allow(unused)]
    id: usize,
}
|
||||
|
||||
impl AppClient {
    /// Test-only client with empty credentials; any real request made through
    /// it will fail.
    #[cfg(test)]
    pub fn test() -> Arc<Self> {
        Arc::new(Self {
            id: Default::default(),
            private_key: Default::default(),
            jwt_bearer_header: Default::default(),
        })
    }

    /// Creates a client for the GitHub App with the given app `id` and PEM
    /// `private_key`.
    pub fn new(id: usize, private_key: String) -> Arc<Self> {
        Arc::new(Self {
            id,
            private_key,
            jwt_bearer_header: Default::default(),
        })
    }

    /// Looks up this app's installation on the `owner/name` repo (`nwo`) and
    /// returns a client scoped to that repository.
    pub async fn repo(self: &Arc<Self>, nwo: String) -> tide::Result<RepoClient> {
        let installation: Installation = self
            .request(
                Method::Get,
                &format!("/repos/{}/installation", &nwo),
                |refresh| self.bearer_header(refresh),
            )
            .await?;

        Ok(RepoClient {
            app: self.clone(),
            nwo,
            installation_id: installation.id,
            installation_token_header: Default::default(),
        })
    }

    /// Returns a client acting on behalf of an end user via their OAuth
    /// `access_token`.
    pub fn user(self: &Arc<Self>, access_token: String) -> UserClient {
        UserClient {
            app: self.clone(),
            access_token,
        }
    }

    /// Performs a GitHub API request, retrying once with a freshly built auth
    /// header if the first attempt is rejected with a 401.
    ///
    /// `get_auth_header` receives `true` when the cached header should be
    /// discarded and rebuilt (i.e. on the retry).
    async fn request<T, F, G>(
        &self,
        method: Method,
        path: &str,
        get_auth_header: F,
    ) -> tide::Result<T>
    where
        T: DeserializeOwned,
        F: Fn(bool) -> G,
        G: Future<Output = tide::Result<String>>,
    {
        let mut retried = false;

        loop {
            let response = RequestBuilder::new(
                method,
                Url::parse(&format!("https://api.github.com{}", path))?,
            )
            .header("Accept", "application/vnd.github.v3+json")
            .header("Authorization", get_auth_header(retried).await?)
            .recv_json()
            .await;

            // A 401 most likely means the cached credential expired; retry
            // exactly once with a refreshed header.
            if let Err(error) = response.as_ref() {
                if error.status() == 401 && !retried {
                    retried = true;
                    continue;
                }
            }

            return response;
        }
    }

    /// Returns (building and caching if needed) the `Bearer` header carrying
    /// the app's signed JWT. Pass `refresh = true` to force a rebuild.
    async fn bearer_header(&self, refresh: bool) -> tide::Result<String> {
        if refresh {
            self.jwt_bearer_header.clear().await;
        }

        self.jwt_bearer_header
            .get_or_refresh(|| async {
                use jwt_simple::{algorithms::RS256KeyPair, prelude::*};
                use std::time;

                let key_pair = RS256KeyPair::from_pem(&self.private_key)
                    .with_context(|| format!("invalid private key {:?}", self.private_key))?;
                // Claims are valid for 10 minutes; back-date `iat` by one
                // minute to tolerate clock skew between us and GitHub.
                let mut claims = Claims::create(Duration::from_mins(10));
                claims.issued_at = Some(Clock::now_since_epoch() - Duration::from_mins(1));
                claims.issuer = Some(self.id.to_string());
                let token = key_pair.sign(claims).context("failed to sign claims")?;
                // Treat the token as expired locally after 9 minutes — one
                // minute before the claims themselves expire.
                let expires_at = time::Instant::now() + time::Duration::from_secs(9 * 60);

                Ok((format!("Bearer {}", token), expires_at))
            })
            .await
    }

    /// Returns (building and caching if needed) the installation `Token`
    /// header used for repository-scoped requests. The cache (`header`) is
    /// owned by the calling `RepoClient`, one per installation.
    async fn installation_token_header(
        &self,
        header: &Expiring<String>,
        installation_id: usize,
        refresh: bool,
    ) -> tide::Result<String> {
        if refresh {
            header.clear().await;
        }

        header
            .get_or_refresh(|| async {
                #[derive(Debug, Deserialize)]
                struct AccessToken {
                    token: String,
                }

                let access_token: AccessToken = self
                    .request(
                        Method::Post,
                        &format!("/app/installations/{}/access_tokens", installation_id),
                        |refresh| self.bearer_header(refresh),
                    )
                    .await?;

                let header = format!("Token {}", access_token.token);
                // Refresh after 30 minutes; assumes the token outlives that
                // window (GitHub installation tokens — confirm).
                let expires_at = Instant::now() + Duration::from_secs(60 * 30);

                Ok((header, expires_at))
            })
            .await
    }
}
|
||||
|
||||
pub struct RepoClient {
|
||||
app: Arc<AppClient>,
|
||||
nwo: String,
|
||||
installation_id: usize,
|
||||
installation_token_header: Expiring<String>,
|
||||
}
|
||||
|
||||
impl RepoClient {
|
||||
#[cfg(test)]
|
||||
pub fn test(app_client: &Arc<AppClient>) -> Self {
|
||||
Self {
|
||||
app: app_client.clone(),
|
||||
nwo: String::new(),
|
||||
installation_id: 0,
|
||||
installation_token_header: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn releases(&self) -> tide::Result<Vec<Release>> {
|
||||
self.get(&format!("/repos/{}/releases?per_page=100", self.nwo))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn release_asset(&self, tag: &str, name: &str) -> tide::Result<surf::Body> {
|
||||
let release: Release = self
|
||||
.get(&format!("/repos/{}/releases/tags/{}", self.nwo, tag))
|
||||
.await?;
|
||||
|
||||
let asset = release
|
||||
.assets
|
||||
.iter()
|
||||
.find(|asset| asset.name == name)
|
||||
.ok_or_else(|| anyhow!("no asset found with name {}", name))?;
|
||||
|
||||
let request = surf::get(&asset.url)
|
||||
.header("Accept", "application/octet-stream'")
|
||||
.header(
|
||||
"Authorization",
|
||||
self.installation_token_header(false).await?,
|
||||
);
|
||||
let client = surf::client().with(surf::middleware::Redirect::new(5));
|
||||
let mut response = client.send(request).await?;
|
||||
|
||||
Ok(response.take_body())
|
||||
}
|
||||
|
||||
async fn get<T: DeserializeOwned>(&self, path: &str) -> tide::Result<T> {
|
||||
self.request::<T>(Method::Get, path).await
|
||||
}
|
||||
|
||||
async fn request<T: DeserializeOwned>(&self, method: Method, path: &str) -> tide::Result<T> {
|
||||
Ok(self
|
||||
.app
|
||||
.request(method, path, |refresh| {
|
||||
self.installation_token_header(refresh)
|
||||
})
|
||||
.await?)
|
||||
}
|
||||
|
||||
async fn installation_token_header(&self, refresh: bool) -> tide::Result<String> {
|
||||
self.app
|
||||
.installation_token_header(
|
||||
&self.installation_token_header,
|
||||
self.installation_id,
|
||||
refresh,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// GitHub client acting on behalf of a signed-in user via their OAuth token.
pub struct UserClient {
    app: Arc<AppClient>,
    access_token: String,
}

/// The fields of the authenticated user's GitHub profile that the server uses.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct User {
    pub login: String,
    pub avatar_url: String,
}

impl UserClient {
    /// Fetches the authenticated user's profile.
    pub async fn details(&self) -> tide::Result<User> {
        Ok(self
            .app
            .request(Method::Get, "/user", |_| async {
                // User tokens are not refreshable here, so the `refresh` flag
                // is ignored.
                Ok(self.access_token_header())
            })
            .await?)
    }

    fn access_token_header(&self) -> String {
        format!("Token {}", self.access_token)
    }
}
|
112
server/src/home.rs
Normal file
112
server/src/home.rs
Normal file
@ -0,0 +1,112 @@
|
||||
use crate::{
|
||||
auth::RequestExt as _, github::Release, AppState, LayoutData, Request, RequestExt as _,
|
||||
};
|
||||
use comrak::ComrakOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::Executor as _;
|
||||
use std::sync::Arc;
|
||||
use tide::{http::mime, log, Server};
|
||||
|
||||
/// Registers the public-facing routes: home page, signup submission, and
/// release-asset downloads.
pub fn add_routes(app: &mut Server<Arc<AppState>>) {
    app.at("/").get(get_home);
    app.at("/signups").post(post_signup);
    app.at("/releases/:tag_name/:name").get(get_release_asset);
}
|
||||
|
||||
/// Renders the home page. Signed-in insiders additionally see the list of
/// published releases, with Markdown release notes rendered to HTML.
async fn get_home(mut request: Request) -> tide::Result {
    // Template context: the shared layout plus an optional releases section.
    #[derive(Serialize)]
    struct HomeData {
        #[serde(flatten)]
        layout: Arc<LayoutData>,
        releases: Option<Vec<Release>>,
    }

    let mut data = HomeData {
        layout: request.layout_data().await?,
        releases: None,
    };

    if let Some(user) = request.current_user().await? {
        if user.is_insider {
            data.releases = Some(
                request
                    .state()
                    .repo_client
                    .releases()
                    .await?
                    .into_iter()
                    .filter_map(|mut release| {
                        if release.draft {
                            // Hide unpublished drafts from the listing.
                            None
                        } else {
                            let mut options = ComrakOptions::default();
                            options.render.unsafe_ = true; // Allow raw HTML in the markup. We control these release notes anyway.
                            release.body = comrak::markdown_to_html(&release.body, &options);
                            Some(release)
                        }
                    })
                    .collect(),
            );
        }
    }

    Ok(tide::Response::builder(200)
        .body(request.state().render_template("home.hbs", &data)?)
        .content_type(mime::HTML)
        .build())
}
|
||||
|
||||
/// Handles the signup form: normalizes the GitHub login, records the signup
/// in the database, and renders a confirmation page.
async fn post_signup(mut request: Request) -> tide::Result {
    #[derive(Debug, Deserialize)]
    struct Form {
        github_login: String,
        email_address: String,
        about: String,
    }

    let mut form: Form = request.body_form().await?;
    // Tolerate logins entered with a leading "@" (e.g. "@alice").
    form.github_login = form
        .github_login
        .strip_prefix("@")
        .map(str::to_string)
        .unwrap_or(form.github_login);

    log::info!("Signup submitted: {:?}", form);

    // Save signup in the database
    request
        .db()
        .execute(
            sqlx::query(
                "INSERT INTO signups (github_login, email_address, about) VALUES ($1, $2, $3);",
            )
            .bind(&form.github_login)
            .bind(&form.email_address)
            .bind(&form.about),
        )
        .await?;

    let layout_data = request.layout_data().await?;
    Ok(tide::Response::builder(200)
        .body(
            request
                .state()
                .render_template("signup.hbs", &layout_data)?,
        )
        .content_type(mime::HTML)
        .build())
}
|
||||
|
||||
async fn get_release_asset(request: Request) -> tide::Result {
|
||||
let body = request
|
||||
.state()
|
||||
.repo_client
|
||||
.release_asset(request.param("tag_name")?, request.param("name")?)
|
||||
.await?;
|
||||
|
||||
Ok(tide::Response::builder(200)
|
||||
.header("Cache-Control", "no-transform")
|
||||
.content_type(mime::BYTE_STREAM)
|
||||
.body(body)
|
||||
.build())
|
||||
}
|
197
server/src/main.rs
Normal file
197
server/src/main.rs
Normal file
@ -0,0 +1,197 @@
|
||||
mod admin;
|
||||
mod assets;
|
||||
mod auth;
|
||||
mod env;
|
||||
mod errors;
|
||||
mod expiring;
|
||||
mod github;
|
||||
mod home;
|
||||
mod rpc;
|
||||
mod team;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use self::errors::TideResultExt as _;
|
||||
use anyhow::{Context, Result};
|
||||
use async_sqlx_session::PostgresSessionStore;
|
||||
use async_std::{net::TcpListener, sync::RwLock as AsyncRwLock};
|
||||
use async_trait::async_trait;
|
||||
use auth::RequestExt as _;
|
||||
use handlebars::{Handlebars, TemplateRenderError};
|
||||
use parking_lot::RwLock;
|
||||
use rust_embed::RustEmbed;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::postgres::{PgPool, PgPoolOptions};
|
||||
use std::sync::Arc;
|
||||
use surf::http::cookies::SameSite;
|
||||
use tide::{log, sessions::SessionMiddleware};
|
||||
use tide_compress::CompressMiddleware;
|
||||
use zed_rpc::Peer;
|
||||
|
||||
type Request = tide::Request<Arc<AppState>>;
|
||||
type DbPool = PgPool;
|
||||
|
||||
/// Handlebars templates embedded into the binary at compile time.
#[derive(RustEmbed)]
#[folder = "templates"]
struct Templates;

/// Server configuration, loaded from the environment via `envy` in `main`
/// (each field maps to the environment variable of the same name).
#[derive(Default, Deserialize)]
pub struct Config {
    pub http_port: u16,
    pub database_url: String,
    // Secret used for the session cookie middleware.
    pub session_secret: String,
    pub github_app_id: usize,
    pub github_client_id: String,
    pub github_client_secret: String,
    // PEM-encoded private key for the GitHub App.
    pub github_private_key: String,
}
|
||||
|
||||
/// Shared application state, handed to every request handler via `Arc`.
pub struct AppState {
    db: sqlx::PgPool,
    // Registered Handlebars partials; behind a lock so debug builds can
    // re-register them on each render (see `render_template`).
    handlebars: RwLock<Handlebars<'static>>,
    auth_client: auth::Client,
    github_client: Arc<github::AppClient>,
    // Client scoped to the zed-industries/zed repository.
    repo_client: github::RepoClient,
    // State shared by all RPC (websocket) connections.
    rpc: AsyncRwLock<rpc::State>,
    config: Config,
}
|
||||
|
||||
impl AppState {
    /// Builds the shared state: connects to Postgres, initializes the GitHub
    /// clients, and registers template partials.
    async fn new(config: Config) -> tide::Result<Arc<Self>> {
        let db = PgPoolOptions::new()
            .max_connections(5)
            .connect(&config.database_url)
            .await
            .context("failed to connect to postgres database")?;

        let github_client =
            github::AppClient::new(config.github_app_id, config.github_private_key.clone());
        let repo_client = github_client
            .repo("zed-industries/zed".into())
            .await
            .context("failed to initialize github client")?;

        let this = Self {
            db,
            handlebars: Default::default(),
            auth_client: auth::build_client(&config.github_client_id, &config.github_client_secret),
            github_client,
            repo_client,
            rpc: Default::default(),
            config,
        };
        this.register_partials();
        Ok(Arc::new(this))
    }

    /// Registers every embedded template under `partials/` as a Handlebars
    /// partial, keyed by its file stem (e.g. `partials/nav.hbs` -> `nav`).
    fn register_partials(&self) {
        for path in Templates::iter() {
            if let Some(partial_name) = path
                .strip_prefix("partials/")
                .and_then(|path| path.strip_suffix(".hbs"))
            {
                let partial = Templates::get(path.as_ref()).unwrap();
                self.handlebars
                    .write()
                    .register_partial(partial_name, std::str::from_utf8(partial.as_ref()).unwrap())
                    .unwrap()
            }
        }
    }

    /// Renders the embedded template at `path` with `data`.
    fn render_template(
        &self,
        path: &'static str,
        data: &impl Serialize,
    ) -> Result<String, TemplateRenderError> {
        // Re-register partials on every render in debug builds — presumably
        // so on-disk template edits are picked up without a restart (rust-embed
        // debug behavior — confirm).
        #[cfg(debug_assertions)]
        self.register_partials();

        self.handlebars.read().render_template(
            std::str::from_utf8(Templates::get(path).unwrap().as_ref()).unwrap(),
            data,
        )
    }
}
|
||||
|
||||
/// Request helpers shared across route handlers.
#[async_trait]
trait RequestExt {
    /// Data needed by the shared page layout, memoized per request.
    async fn layout_data(&mut self) -> tide::Result<Arc<LayoutData>>;
    /// The application's database pool.
    fn db(&self) -> &DbPool;
}

#[async_trait]
impl RequestExt for Request {
    async fn layout_data(&mut self) -> tide::Result<Arc<LayoutData>> {
        // Compute once and stash in the request extensions so repeated calls
        // within one request reuse the same data.
        if self.ext::<Arc<LayoutData>>().is_none() {
            self.set_ext(Arc::new(LayoutData {
                current_user: self.current_user().await?,
            }));
        }
        Ok(self.ext::<Arc<LayoutData>>().unwrap().clone())
    }

    fn db(&self) -> &DbPool {
        &self.state().db
    }
}

/// Fields available to the site-wide layout template.
#[derive(Serialize)]
struct LayoutData {
    current_user: Option<auth::User>,
}
|
||||
|
||||
#[async_std::main]
async fn main() -> tide::Result<()> {
    log::start();

    // In development, populate the process environment from .env.toml.
    // Production supplies real environment variables, so a missing file is
    // only logged, not fatal.
    if let Err(error) = env::load_dotenv() {
        log::error!(
            "error loading .env.toml (this is expected in production): {}",
            error
        );
    }

    // Build Config from the environment, then start the combined web + RPC
    // server on the configured port.
    let config = envy::from_env::<Config>().expect("error loading config");
    let state = AppState::new(config).await?;
    let rpc = Peer::new();
    run_server(
        state.clone(),
        rpc,
        TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)).await?,
    )
    .await?;
    Ok(())
}
|
||||
|
||||
/// Configures the web application (compression, Postgres-backed sessions,
/// error pages, all route modules), nests it under the RPC server, and
/// listens on `listener`. Split out from `main` so tests can run the server
/// against their own listener.
pub async fn run_server(
    state: Arc<AppState>,
    rpc: Arc<Peer>,
    listener: TcpListener,
) -> tide::Result<()> {
    let mut web = tide::with_state(state.clone());
    web.with(CompressMiddleware::new());
    web.with(
        SessionMiddleware::new(
            PostgresSessionStore::new_with_table_name(&state.config.database_url, "sessions")
                .await
                .unwrap(),
            state.config.session_secret.as_bytes(),
        )
        .with_same_site_policy(SameSite::Lax), // Required to obtain our session in /auth_callback
    );
    web.with(errors::Middleware);
    home::add_routes(&mut web);
    team::add_routes(&mut web);
    admin::add_routes(&mut web);
    auth::add_routes(&mut web);
    assets::add_routes(&mut web);

    // The RPC endpoint lives on the outer app so it bypasses the web
    // middleware stack; the web app is nested beneath it at "/".
    let mut app = tide::with_state(state.clone());
    rpc::add_routes(&mut app, &rpc);
    app.at("/").nest(web);

    app.listen(listener).await?;

    Ok(())
}
|
652
server/src/rpc.rs
Normal file
652
server/src/rpc.rs
Normal file
@ -0,0 +1,652 @@
|
||||
use crate::auth::{self, UserId};
|
||||
|
||||
use super::{auth::PeerExt as _, AppState};
|
||||
use anyhow::anyhow;
|
||||
use async_std::task;
|
||||
use async_tungstenite::{
|
||||
tungstenite::{protocol::Role, Error as WebSocketError, Message as WebSocketMessage},
|
||||
WebSocketStream,
|
||||
};
|
||||
use sha1::{Digest as _, Sha1};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::Future,
|
||||
mem,
|
||||
sync::Arc,
|
||||
time::Instant,
|
||||
};
|
||||
use surf::StatusCode;
|
||||
use tide::log;
|
||||
use tide::{
|
||||
http::headers::{HeaderName, CONNECTION, UPGRADE},
|
||||
Request, Response,
|
||||
};
|
||||
use zed_rpc::{
|
||||
auth::random_token,
|
||||
proto::{self, EnvelopedMessage},
|
||||
ConnectionId, Peer, Router, TypedEnvelope,
|
||||
};
|
||||
|
||||
type ReplicaId = u16;

/// In-memory state for all live RPC connections and shared worktrees.
#[derive(Default)]
pub struct State {
    connections: HashMap<ConnectionId, ConnectionState>,
    pub worktrees: HashMap<u64, WorktreeState>,
    // Counter for assigning ids to newly shared worktrees.
    next_worktree_id: u64,
}

/// Per-connection bookkeeping: the owning user and the worktrees the
/// connection participates in.
struct ConnectionState {
    _user_id: i32,
    worktrees: HashSet<u64>,
}

/// A worktree shared by a host connection with zero or more guests.
pub struct WorktreeState {
    // `None` once the host has disconnected.
    host_connection_id: Option<ConnectionId>,
    guest_connection_ids: HashMap<ConnectionId, ReplicaId>,
    // Replica ids currently in use; consulted to hand out the lowest free id.
    active_replica_ids: HashSet<ReplicaId>,
    // Shared secret a guest must present to join.
    access_token: String,
    root_name: String,
    entries: HashMap<u64, proto::Entry>,
}
|
||||
|
||||
impl WorktreeState {
|
||||
pub fn connection_ids(&self) -> Vec<ConnectionId> {
|
||||
self.guest_connection_ids
|
||||
.keys()
|
||||
.copied()
|
||||
.chain(self.host_connection_id)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn host_connection_id(&self) -> tide::Result<ConnectionId> {
|
||||
Ok(self
|
||||
.host_connection_id
|
||||
.ok_or_else(|| anyhow!("host disconnected from worktree"))?)
|
||||
}
|
||||
}
|
||||
|
||||
impl State {
    // Add a new connection associated with a given user.
    pub fn add_connection(&mut self, connection_id: ConnectionId, _user_id: i32) {
        self.connections.insert(
            connection_id,
            ConnectionState {
                _user_id,
                worktrees: Default::default(),
            },
        );
    }

    // Remove the given connection and its association with any worktrees.
    // Returns the ids of every worktree the connection participated in, so
    // callers can notify the remaining participants.
    pub fn remove_connection(&mut self, connection_id: ConnectionId) -> Vec<u64> {
        let mut worktree_ids = Vec::new();
        if let Some(connection_state) = self.connections.remove(&connection_id) {
            for worktree_id in connection_state.worktrees {
                if let Some(worktree) = self.worktrees.get_mut(&worktree_id) {
                    if worktree.host_connection_id == Some(connection_id) {
                        // NOTE(review): the host connection id is reported but
                        // not cleared here — presumably the caller tears the
                        // worktree down; confirm.
                        worktree_ids.push(worktree_id);
                    } else if let Some(replica_id) =
                        worktree.guest_connection_ids.remove(&connection_id)
                    {
                        // Free the guest's replica id for reuse.
                        worktree.active_replica_ids.remove(&replica_id);
                        worktree_ids.push(worktree_id);
                    }
                }
            }
        }
        worktree_ids
    }

    // Add the given connection as a guest of the given worktree, provided the
    // access token matches. Returns the newly assigned replica id and the
    // worktree state, or `None` when the worktree is missing or the token is
    // wrong.
    pub fn join_worktree(
        &mut self,
        connection_id: ConnectionId,
        worktree_id: u64,
        access_token: &str,
    ) -> Option<(ReplicaId, &WorktreeState)> {
        if let Some(worktree_state) = self.worktrees.get_mut(&worktree_id) {
            if access_token == worktree_state.access_token {
                if let Some(connection_state) = self.connections.get_mut(&connection_id) {
                    connection_state.worktrees.insert(worktree_id);
                }

                // Assign the lowest replica id not currently in use, starting
                // at 1 (0 presumably denotes the host — confirm).
                let mut replica_id = 1;
                while worktree_state.active_replica_ids.contains(&replica_id) {
                    replica_id += 1;
                }
                worktree_state.active_replica_ids.insert(replica_id);
                worktree_state
                    .guest_connection_ids
                    .insert(connection_id, replica_id);
                Some((replica_id, worktree_state))
            } else {
                None
            }
        } else {
            None
        }
    }

    // Returns the worktree if `connection_id` is a participant (host or
    // guest); errors otherwise.
    fn read_worktree(
        &self,
        worktree_id: u64,
        connection_id: ConnectionId,
    ) -> tide::Result<&WorktreeState> {
        let worktree = self
            .worktrees
            .get(&worktree_id)
            .ok_or_else(|| anyhow!("worktree not found"))?;

        if worktree.host_connection_id == Some(connection_id)
            || worktree.guest_connection_ids.contains_key(&connection_id)
        {
            Ok(worktree)
        } else {
            Err(anyhow!(
                "{} is not a member of worktree {}",
                connection_id,
                worktree_id
            ))?
        }
    }

    // Mutable counterpart of `read_worktree`, with the same membership check.
    fn write_worktree(
        &mut self,
        worktree_id: u64,
        connection_id: ConnectionId,
    ) -> tide::Result<&mut WorktreeState> {
        let worktree = self
            .worktrees
            .get_mut(&worktree_id)
            .ok_or_else(|| anyhow!("worktree not found"))?;

        if worktree.host_connection_id == Some(connection_id)
            || worktree.guest_connection_ids.contains_key(&connection_id)
        {
            Ok(worktree)
        } else {
            Err(anyhow!(
                "{} is not a member of worktree {}",
                connection_id,
                worktree_id
            ))?
        }
    }
}
|
||||
|
||||
/// A handler for one RPC message type, borrowing the peer and app state for
/// the duration of the returned future.
trait MessageHandler<'a, M: proto::EnvelopedMessage> {
    type Output: 'a + Send + Future<Output = tide::Result<()>>;

    fn handle(
        &self,
        message: TypedEnvelope<M>,
        rpc: &'a Arc<Peer>,
        app_state: &'a Arc<AppState>,
    ) -> Self::Output;
}

// Blanket impl so any async fn with the right signature can be passed to
// `on_message` directly.
impl<'a, M, F, Fut> MessageHandler<'a, M> for F
where
    M: proto::EnvelopedMessage,
    F: Fn(TypedEnvelope<M>, &'a Arc<Peer>, &'a Arc<AppState>) -> Fut,
    Fut: 'a + Send + Future<Output = tide::Result<()>>,
{
    type Output = Fut;

    fn handle(
        &self,
        message: TypedEnvelope<M>,
        rpc: &'a Arc<Peer>,
        app_state: &'a Arc<AppState>,
    ) -> Self::Output {
        (self)(message, rpc, app_state)
    }
}
|
||||
|
||||
/// Registers `handler` for messages of type `M`, wrapping it with logging and
/// error reporting. Handler errors are logged rather than propagated, so one
/// failed message does not tear down the connection.
fn on_message<M, H>(router: &mut Router, rpc: &Arc<Peer>, app_state: &Arc<AppState>, handler: H)
where
    M: EnvelopedMessage,
    H: 'static + Clone + Send + Sync + for<'a> MessageHandler<'a, M>,
{
    let rpc = rpc.clone();
    let handler = handler.clone();
    let app_state = app_state.clone();
    router.add_message_handler(move |message| {
        // Clone per message so each invocation yields an independent future.
        let rpc = rpc.clone();
        let handler = handler.clone();
        let app_state = app_state.clone();
        async move {
            let sender_id = message.sender_id;
            let message_id = message.message_id;
            let start_time = Instant::now();
            log::info!(
                "RPC message received. id: {}.{}, type:{}",
                sender_id,
                message_id,
                M::NAME
            );
            if let Err(err) = handler.handle(message, &rpc, &app_state).await {
                log::error!("error handling message: {:?}", err);
            } else {
                log::info!(
                    "RPC message handled. id:{}.{}, duration:{:?}",
                    sender_id,
                    message_id,
                    start_time.elapsed()
                );
            }

            Ok(())
        }
    });
}
|
||||
|
||||
/// Wires up every RPC message handler on the given router.
pub fn add_rpc_routes(router: &mut Router, state: &Arc<AppState>, rpc: &Arc<Peer>) {
    on_message(router, rpc, state, share_worktree);
    on_message(router, rpc, state, join_worktree);
    on_message(router, rpc, state, update_worktree);
    on_message(router, rpc, state, close_worktree);
    on_message(router, rpc, state, open_buffer);
    on_message(router, rpc, state, close_buffer);
    on_message(router, rpc, state, update_buffer);
    on_message(router, rpc, state, buffer_saved);
    on_message(router, rpc, state, save_buffer);
}
|
||||
|
||||
/// Mounts the `/rpc` endpoint, which upgrades authenticated HTTP requests to
/// websocket connections and hands them to the RPC peer.
pub fn add_routes(app: &mut tide::Server<Arc<AppState>>, rpc: &Arc<Peer>) {
    let mut router = Router::new();
    add_rpc_routes(&mut router, app.state(), rpc);
    let router = Arc::new(router);

    let rpc = rpc.clone();
    app.at("/rpc").with(auth::VerifyToken).get(move |request: Request<Arc<AppState>>| {
        // Populated by the VerifyToken middleware.
        let user_id = request.ext::<UserId>().copied();
        let rpc = rpc.clone();
        let router = router.clone();
        async move {
            // Fixed GUID from RFC 6455, used to derive Sec-Websocket-Accept.
            const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";

            // Only proceed when the client asked for a websocket upgrade.
            let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade");
            let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket");
            let upgrade_requested = connection_upgrade && upgrade_to_websocket;

            if !upgrade_requested {
                return Ok(Response::new(StatusCode::UpgradeRequired));
            }

            let header = match request.header("Sec-Websocket-Key") {
                Some(h) => h.as_str(),
                None => return Err(anyhow!("expected sec-websocket-key"))?,
            };

            // Complete the websocket handshake: 101 + Accept hash of
            // the client's key concatenated with the RFC 6455 GUID.
            let mut response = Response::new(StatusCode::SwitchingProtocols);
            response.insert_header(UPGRADE, "websocket");
            response.insert_header(CONNECTION, "Upgrade");
            let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize();
            response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..]));
            response.insert_header("Sec-Websocket-Version", "13");

            let http_res: &mut tide::http::Response = response.as_mut();
            let upgrade_receiver = http_res.recv_upgrade().await;
            let addr = request.remote().unwrap_or("unknown").to_string();
            let state = request.state().clone();
            let user_id = user_id.ok_or_else(|| anyhow!("user_id is not present on request. ensure auth::VerifyToken middleware is present"))?.0;
            // Handle the upgraded connection on a background task so this
            // handler can return the 101 response immediately.
            task::spawn(async move {
                if let Some(stream) = upgrade_receiver.await {
                    let stream = WebSocketStream::from_raw_socket(stream, Role::Server, None).await;
                    handle_connection(rpc, router, state, addr, stream, user_id).await;
                }
            });

            Ok(response)
        }
    });
}
|
||||
|
||||
/// Services a single client's RPC connection for its entire lifetime.
///
/// Registers the connection in shared server state, drives its I/O and
/// message-handling futures until either fails or the stream closes, then
/// signs the connection out so worktree membership is cleaned up.
pub async fn handle_connection<Conn>(
    rpc: Arc<Peer>,
    router: Arc<Router>,
    state: Arc<AppState>,
    addr: String,
    stream: Conn,
    user_id: i32,
) where
    Conn: 'static
        + futures::Sink<WebSocketMessage, Error = WebSocketError>
        + futures::Stream<Item = Result<WebSocketMessage, WebSocketError>>
        + Send
        + Unpin,
{
    log::info!("accepted rpc connection: {:?}", addr);
    let (connection_id, handle_io, handle_messages) = rpc.add_connection(stream, router).await;
    state
        .rpc
        .write()
        .await
        .add_connection(connection_id, user_id);

    // Adapt the message-handling future (which yields `()`) so it can be
    // joined with the fallible I/O future below.
    let handle_messages = async move {
        handle_messages.await;
        Ok(())
    };

    // Run both futures concurrently; bail out when either errors.
    if let Err(e) = futures::try_join!(handle_messages, handle_io) {
        log::error!("error handling rpc connection {:?} - {:?}", addr, e);
    }

    log::info!("closing connection to {:?}", addr);
    // Detach the connection from any worktrees it was participating in.
    if let Err(e) = rpc.sign_out(connection_id, &state).await {
        log::error!("error signing out connection {:?} - {:?}", addr, e);
    }
}
|
||||
|
||||
/// Handles a host's request to share a local worktree.
///
/// Allocates a fresh worktree id and random access token, snapshots the
/// worktree's entries into server state, and replies with the id/token pair
/// the host passes on to prospective guests.
async fn share_worktree(
    mut request: TypedEnvelope<proto::ShareWorktree>,
    rpc: &Arc<Peer>,
    state: &Arc<AppState>,
) -> tide::Result<()> {
    // NOTE(review): this write lock remains held across the respond().await
    // at the end of the function — confirm that is acceptable here.
    let mut state = state.rpc.write().await;
    let worktree_id = state.next_worktree_id;
    state.next_worktree_id += 1;
    let access_token = random_token();
    let worktree = request
        .payload
        .worktree
        .as_mut()
        .ok_or_else(|| anyhow!("missing worktree"))?;
    // Index entries by id; `mem::take` moves the list out without cloning.
    let entries = mem::take(&mut worktree.entries)
        .into_iter()
        .map(|entry| (entry.id, entry))
        .collect();
    state.worktrees.insert(
        worktree_id,
        WorktreeState {
            host_connection_id: Some(request.sender_id),
            guest_connection_ids: Default::default(),
            active_replica_ids: Default::default(),
            access_token: access_token.clone(),
            root_name: mem::take(&mut worktree.root_name),
            entries,
        },
    );

    rpc.respond(
        request.receipt(),
        proto::ShareWorktreeResponse {
            worktree_id,
            access_token,
        },
    )
    .await?;
    Ok(())
}
|
||||
|
||||
/// Handles a guest's request to join a shared worktree.
///
/// On success (worktree exists and the access token matches, per
/// `state.join_worktree`): notifies existing participants of the new peer,
/// then responds with the worktree snapshot, the guest's replica id, and the
/// current peer list. On failure: responds with an empty `worktree: None`
/// payload rather than an error.
async fn join_worktree(
    request: TypedEnvelope<proto::OpenWorktree>,
    rpc: &Arc<Peer>,
    state: &Arc<AppState>,
) -> tide::Result<()> {
    let worktree_id = request.payload.worktree_id;
    let access_token = &request.payload.access_token;

    // NOTE(review): the write lock is held for the rest of the function,
    // including the broadcast/respond awaits below.
    let mut state = state.rpc.write().await;
    if let Some((peer_replica_id, worktree)) =
        state.join_worktree(request.sender_id, worktree_id, access_token)
    {
        // Build the peer list for the response: the host (replica 0, if still
        // connected) plus every other guest.
        let mut peers = Vec::new();
        if let Some(host_connection_id) = worktree.host_connection_id {
            peers.push(proto::Peer {
                peer_id: host_connection_id.0,
                replica_id: 0,
            });
        }
        for (peer_conn_id, peer_replica_id) in &worktree.guest_connection_ids {
            if *peer_conn_id != request.sender_id {
                peers.push(proto::Peer {
                    peer_id: peer_conn_id.0,
                    replica_id: *peer_replica_id as u32,
                });
            }
        }

        // Tell every other participant about the newly-joined peer.
        broadcast(request.sender_id, worktree.connection_ids(), |conn_id| {
            rpc.send(
                conn_id,
                proto::AddPeer {
                    worktree_id,
                    peer: Some(proto::Peer {
                        peer_id: request.sender_id.0,
                        replica_id: peer_replica_id as u32,
                    }),
                },
            )
        })
        .await?;
        rpc.respond(
            request.receipt(),
            proto::OpenWorktreeResponse {
                worktree_id,
                worktree: Some(proto::Worktree {
                    root_name: worktree.root_name.clone(),
                    entries: worktree.entries.values().cloned().collect(),
                }),
                replica_id: peer_replica_id as u32,
                peers,
            },
        )
        .await?;
    } else {
        // Join failed: respond with an empty payload so the client can
        // distinguish "not found / bad token" from a transport error.
        rpc.respond(
            request.receipt(),
            proto::OpenWorktreeResponse {
                worktree_id,
                worktree: None,
                replica_id: 0,
                peers: Vec::new(),
            },
        )
        .await?;
    }

    Ok(())
}
|
||||
|
||||
/// Applies a host's filesystem updates to the server's copy of a worktree,
/// then relays the same update to all other participants.
async fn update_worktree(
    request: TypedEnvelope<proto::UpdateWorktree>,
    rpc: &Arc<Peer>,
    state: &Arc<AppState>,
) -> tide::Result<()> {
    {
        // Scope the write lock so it is released before broadcasting below.
        let mut state = state.rpc.write().await;
        let worktree = state.write_worktree(request.payload.worktree_id, request.sender_id)?;
        // Deletions first, then upserts.
        for entry_id in &request.payload.removed_entries {
            worktree.entries.remove(&entry_id);
        }

        for entry in &request.payload.updated_entries {
            worktree.entries.insert(entry.id, entry.clone());
        }
    }

    broadcast_in_worktree(request.payload.worktree_id, request, rpc, state).await?;
    Ok(())
}
|
||||
|
||||
/// Removes the sender from a worktree (as host or guest) and notifies the
/// remaining participants that the peer has left.
async fn close_worktree(
    request: TypedEnvelope<proto::CloseWorktree>,
    rpc: &Arc<Peer>,
    state: &Arc<AppState>,
) -> tide::Result<()> {
    let connection_ids;
    {
        // Scope the write lock so it is released before broadcasting below.
        let mut state = state.rpc.write().await;
        let worktree = state.write_worktree(request.payload.worktree_id, request.sender_id)?;
        // Capture the participant list before removing the sender, so the
        // broadcast below still reaches everyone else.
        connection_ids = worktree.connection_ids();
        if worktree.host_connection_id == Some(request.sender_id) {
            worktree.host_connection_id = None;
        } else if let Some(replica_id) = worktree.guest_connection_ids.remove(&request.sender_id) {
            // Free the replica id for reuse by future guests.
            worktree.active_replica_ids.remove(&replica_id);
        }
    }

    broadcast(request.sender_id, connection_ids, |conn_id| {
        rpc.send(
            conn_id,
            proto::RemovePeer {
                worktree_id: request.payload.worktree_id,
                peer_id: request.sender_id.0,
            },
        )
    })
    .await?;

    Ok(())
}
|
||||
|
||||
async fn open_buffer(
|
||||
request: TypedEnvelope<proto::OpenBuffer>,
|
||||
rpc: &Arc<Peer>,
|
||||
state: &Arc<AppState>,
|
||||
) -> tide::Result<()> {
|
||||
let receipt = request.receipt();
|
||||
let worktree_id = request.payload.worktree_id;
|
||||
let host_connection_id = state
|
||||
.rpc
|
||||
.read()
|
||||
.await
|
||||
.read_worktree(worktree_id, request.sender_id)?
|
||||
.host_connection_id()?;
|
||||
|
||||
let response = rpc
|
||||
.forward_request(request.sender_id, host_connection_id, request.payload)
|
||||
.await?;
|
||||
rpc.respond(receipt, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close_buffer(
|
||||
request: TypedEnvelope<proto::CloseBuffer>,
|
||||
rpc: &Arc<Peer>,
|
||||
state: &Arc<AppState>,
|
||||
) -> tide::Result<()> {
|
||||
let host_connection_id = state
|
||||
.rpc
|
||||
.read()
|
||||
.await
|
||||
.read_worktree(request.payload.worktree_id, request.sender_id)?
|
||||
.host_connection_id()?;
|
||||
|
||||
rpc.forward_send(request.sender_id, host_connection_id, request.payload)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles a save request: forwards it to the worktree host, then fans the
/// host's response out — as a reply to the requesting connection, and as a
/// forwarded message to every other guest.
async fn save_buffer(
    request: TypedEnvelope<proto::SaveBuffer>,
    rpc: &Arc<Peer>,
    state: &Arc<AppState>,
) -> tide::Result<()> {
    let host;
    let guests;
    {
        // Scope the read lock: capture the host and guest list, then release
        // before any network awaits.
        let state = state.rpc.read().await;
        let worktree = state.read_worktree(request.payload.worktree_id, request.sender_id)?;
        host = worktree.host_connection_id()?;
        guests = worktree
            .guest_connection_ids
            .keys()
            .copied()
            .collect::<Vec<_>>();
    }

    let sender = request.sender_id;
    let receipt = request.receipt();
    // Ask the host to actually perform the save.
    let response = rpc
        .forward_request(sender, host, request.payload.clone())
        .await?;

    // Broadcast "from" the host so the host itself is skipped; the original
    // requester gets an RPC response, everyone else a forwarded message.
    broadcast(host, guests, |conn_id| {
        let response = response.clone();
        async move {
            if conn_id == sender {
                rpc.respond(receipt, response).await
            } else {
                rpc.forward_send(host, conn_id, response).await
            }
        }
    })
    .await?;

    Ok(())
}
|
||||
|
||||
async fn update_buffer(
|
||||
request: TypedEnvelope<proto::UpdateBuffer>,
|
||||
rpc: &Arc<Peer>,
|
||||
state: &Arc<AppState>,
|
||||
) -> tide::Result<()> {
|
||||
broadcast_in_worktree(request.payload.worktree_id, request, rpc, state).await
|
||||
}
|
||||
|
||||
async fn buffer_saved(
|
||||
request: TypedEnvelope<proto::BufferSaved>,
|
||||
rpc: &Arc<Peer>,
|
||||
state: &Arc<AppState>,
|
||||
) -> tide::Result<()> {
|
||||
broadcast_in_worktree(request.payload.worktree_id, request, rpc, state).await
|
||||
}
|
||||
|
||||
async fn broadcast_in_worktree<T: proto::EnvelopedMessage>(
|
||||
worktree_id: u64,
|
||||
request: TypedEnvelope<T>,
|
||||
rpc: &Arc<Peer>,
|
||||
state: &Arc<AppState>,
|
||||
) -> tide::Result<()> {
|
||||
let connection_ids = state
|
||||
.rpc
|
||||
.read()
|
||||
.await
|
||||
.read_worktree(worktree_id, request.sender_id)?
|
||||
.connection_ids();
|
||||
|
||||
broadcast(request.sender_id, connection_ids, |conn_id| {
|
||||
rpc.forward_send(request.sender_id, conn_id, request.payload.clone())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn broadcast<F, T>(
|
||||
sender_id: ConnectionId,
|
||||
receiver_ids: Vec<ConnectionId>,
|
||||
mut f: F,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
F: FnMut(ConnectionId) -> T,
|
||||
T: Future<Output = anyhow::Result<()>>,
|
||||
{
|
||||
let futures = receiver_ids
|
||||
.into_iter()
|
||||
.filter(|id| *id != sender_id)
|
||||
.map(|id| f(id));
|
||||
futures::future::try_join_all(futures).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn header_contains_ignore_case<T>(
|
||||
request: &tide::Request<T>,
|
||||
header_name: HeaderName,
|
||||
value: &str,
|
||||
) -> bool {
|
||||
request
|
||||
.header(header_name)
|
||||
.map(|h| {
|
||||
h.as_str()
|
||||
.split(',')
|
||||
.any(|s| s.trim().eq_ignore_ascii_case(value.trim()))
|
||||
})
|
||||
.unwrap_or(false)
|
||||
}
|
15
server/src/team.rs
Normal file
15
server/src/team.rs
Normal file
@ -0,0 +1,15 @@
|
||||
use crate::{AppState, Request, RequestExt};
|
||||
use std::sync::Arc;
|
||||
use tide::http::mime;
|
||||
|
||||
/// Registers the `/team` page route on the HTTP server.
pub fn add_routes(app: &mut tide::Server<Arc<AppState>>) {
    app.at("/team").get(get_team);
}
|
||||
|
||||
/// Renders the team page from the `team.hbs` handlebars template, using the
/// shared layout data.
async fn get_team(mut request: Request) -> tide::Result {
    // `mut` binding: layout_data() appears to need mutable access to the
    // request — presumably to cache data in its extensions; confirm.
    let data = request.layout_data().await?;
    Ok(tide::Response::builder(200)
        .body(request.state().render_template("team.hbs", &data)?)
        .content_type(mime::HTML)
        .build())
}
|
538
server/src/tests.rs
Normal file
538
server/src/tests.rs
Normal file
@ -0,0 +1,538 @@
|
||||
use crate::{
|
||||
admin, auth, github,
|
||||
rpc::{self, add_rpc_routes},
|
||||
AppState, Config,
|
||||
};
|
||||
use async_std::task;
|
||||
use gpui::TestAppContext;
|
||||
use rand::prelude::*;
|
||||
use serde_json::json;
|
||||
use sqlx::{
|
||||
migrate::{MigrateDatabase, Migrator},
|
||||
postgres::PgPoolOptions,
|
||||
Executor as _, Postgres,
|
||||
};
|
||||
use std::{fs, path::Path, sync::Arc};
|
||||
use zed::{
|
||||
editor::Editor,
|
||||
language::LanguageRegistry,
|
||||
rpc::Client,
|
||||
settings,
|
||||
test::{temp_tree, Channel},
|
||||
worktree::{Fs, InMemoryFs, Worktree},
|
||||
};
|
||||
use zed_rpc::{ForegroundRouter, Peer, Router};
|
||||
|
||||
/// End-to-end test: a host (A) shares a worktree, a guest (B) joins it,
/// both open the same buffer, edits and selection sets propagate in both
/// directions, and dropping the guest's handles cleans up peers/buffers on
/// the host side.
#[gpui::test]
async fn test_share_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
    let (window_b, _) = cx_b.add_window(|_| EmptyView);
    let settings = settings::channel(&cx_b.font_cache()).unwrap().1;
    let lang_registry = Arc::new(LanguageRegistry::new());

    // Connect to a server as 2 clients.
    let mut server = TestServer::start().await;
    let client_a = server.create_client(&mut cx_a, "user_a").await;
    let client_b = server.create_client(&mut cx_b, "user_b").await;

    // Share a local worktree as client A
    let dir = temp_tree(json!({
        "a.txt": "a-contents",
        "b.txt": "b-contents",
    }));
    let worktree_a = cx_a.add_model(|cx| Worktree::local(dir.path(), lang_registry.clone(), cx));
    // Wait for the initial filesystem scan before sharing.
    worktree_a
        .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
        .await;
    let (worktree_id, worktree_token) = worktree_a
        .update(&mut cx_a, |tree, cx| {
            tree.as_local_mut().unwrap().share(client_a.clone(), cx)
        })
        .await
        .unwrap();

    // Join that worktree as client B, and see that a guest has joined as client A.
    let worktree_b = Worktree::open_remote(
        client_b.clone(),
        worktree_id,
        worktree_token,
        lang_registry.clone(),
        &mut cx_b.to_async(),
    )
    .await
    .unwrap();
    let replica_id_b = worktree_b.read_with(&cx_b, |tree, _| tree.replica_id());
    worktree_a
        .condition(&cx_a, |tree, _| {
            tree.peers()
                .values()
                .any(|replica_id| *replica_id == replica_id_b)
        })
        .await;

    // Open the same file as client B and client A.
    let buffer_b = worktree_b
        .update(&mut cx_b, |worktree, cx| worktree.open_buffer("b.txt", cx))
        .await
        .unwrap();
    buffer_b.read_with(&cx_b, |buf, _| assert_eq!(buf.text(), "b-contents"));
    // The guest opening the buffer causes the host to open it too.
    worktree_a.read_with(&cx_a, |tree, cx| assert!(tree.has_open_buffer("b.txt", cx)));
    let buffer_a = worktree_a
        .update(&mut cx_a, |tree, cx| tree.open_buffer("b.txt", cx))
        .await
        .unwrap();

    // Create a selection set as client B and see that selection set as client A.
    let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, settings, cx));
    buffer_a
        .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1)
        .await;

    // Edit the buffer as client B and see that edit as client A.
    editor_b.update(&mut cx_b, |editor, cx| {
        editor.insert(&"ok, ".to_string(), cx)
    });
    buffer_a
        .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents")
        .await;

    // Remove the selection set as client B, see those selections disappear as client A.
    cx_b.update(move |_| drop(editor_b));
    buffer_a
        .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0)
        .await;

    // Close the buffer as client A, see that the buffer is closed.
    drop(buffer_a);
    worktree_a
        .condition(&cx_a, |tree, cx| !tree.has_open_buffer("b.txt", cx))
        .await;

    // Dropping the worktree removes client B from client A's peers.
    cx_b.update(move |_| drop(worktree_b));
    worktree_a
        .condition(&cx_a, |tree, _| tree.peers().is_empty())
        .await;
}
|
||||
|
||||
/// Three-client test: host A shares a worktree, guests B and C join;
/// concurrent edits from all three converge, a guest-initiated save writes
/// the merged contents to disk and clears dirty flags everywhere, and
/// host-side filesystem changes (rename/create) are reflected in both
/// guests' worktrees.
#[gpui::test]
async fn test_propagate_saves_and_fs_changes_in_shared_worktree(
    mut cx_a: TestAppContext,
    mut cx_b: TestAppContext,
    mut cx_c: TestAppContext,
) {
    let lang_registry = Arc::new(LanguageRegistry::new());

    // Connect to a server as 3 clients.
    let mut server = TestServer::start().await;
    let client_a = server.create_client(&mut cx_a, "user_a").await;
    let client_b = server.create_client(&mut cx_b, "user_b").await;
    let client_c = server.create_client(&mut cx_c, "user_c").await;

    // Share a worktree as client A.
    let dir = temp_tree(json!({
        "file1": "",
        "file2": ""
    }));
    let worktree_a = cx_a.add_model(|cx| Worktree::local(dir.path(), lang_registry.clone(), cx));
    worktree_a
        .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
        .await;
    let (worktree_id, worktree_token) = worktree_a
        .update(&mut cx_a, |tree, cx| {
            tree.as_local_mut().unwrap().share(client_a.clone(), cx)
        })
        .await
        .unwrap();

    // Join that worktree as clients B and C.
    let worktree_b = Worktree::open_remote(
        client_b.clone(),
        worktree_id,
        worktree_token.clone(),
        lang_registry.clone(),
        &mut cx_b.to_async(),
    )
    .await
    .unwrap();
    let worktree_c = Worktree::open_remote(
        client_c.clone(),
        worktree_id,
        worktree_token,
        lang_registry.clone(),
        &mut cx_c.to_async(),
    )
    .await
    .unwrap();

    // Open and edit a buffer as both guests B and C.
    let buffer_b = worktree_b
        .update(&mut cx_b, |tree, cx| tree.open_buffer("file1", cx))
        .await
        .unwrap();
    let buffer_c = worktree_c
        .update(&mut cx_c, |tree, cx| tree.open_buffer("file1", cx))
        .await
        .unwrap();
    buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx));
    buffer_c.update(&mut cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx));

    // Open and edit that buffer as the host.
    let buffer_a = worktree_a
        .update(&mut cx_a, |tree, cx| tree.open_buffer("file1", cx))
        .await
        .unwrap();
    buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "i-am-a", cx));

    // Wait for edits to propagate — all three replicas must converge on the
    // same merged text.
    buffer_a
        .condition(&mut cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
        .await;
    buffer_b
        .condition(&mut cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
        .await;
    buffer_c
        .condition(&mut cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
        .await;

    // Edit the buffer as the host and concurrently save as guest B.
    let save_b = buffer_b.update(&mut cx_b, |buf, cx| buf.save(cx).unwrap());
    buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx));
    save_b.await.unwrap();
    // The save must include the host's concurrent edit.
    assert_eq!(
        fs::read_to_string(dir.path().join("file1")).unwrap(),
        "hi-a, i-am-c, i-am-b, i-am-a"
    );
    buffer_a.read_with(&cx_a, |buf, _| assert!(!buf.is_dirty()));
    buffer_b.read_with(&cx_b, |buf, _| assert!(!buf.is_dirty()));
    buffer_c.condition(&cx_c, |buf, _| !buf.is_dirty()).await;

    // Make changes on host's file system, see those changes on the guests.
    fs::rename(dir.path().join("file2"), dir.path().join("file3")).unwrap();
    fs::write(dir.path().join("file4"), "4").unwrap();
    worktree_b
        .condition(&cx_b, |tree, _| tree.file_count() == 3)
        .await;
    worktree_c
        .condition(&cx_c, |tree, _| tree.file_count() == 3)
        .await;
    worktree_b.read_with(&cx_b, |tree, _| {
        assert_eq!(
            tree.paths()
                .map(|p| p.to_string_lossy())
                .collect::<Vec<_>>(),
            &["file1", "file3", "file4"]
        )
    });
    worktree_c.read_with(&cx_c, |tree, _| {
        assert_eq!(
            tree.paths()
                .map(|p| p.to_string_lossy())
                .collect::<Vec<_>>(),
            &["file1", "file3", "file4"]
        )
    });
}
|
||||
|
||||
/// Verifies that after a guest saves a dirty buffer, subsequent edits mark
/// the buffer dirty again WITHOUT flagging a conflict — i.e. the guest's
/// mtime tracking is updated by its own save.
#[gpui::test]
async fn test_buffer_conflict_after_save(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
    let lang_registry = Arc::new(LanguageRegistry::new());

    // Connect to a server as 2 clients.
    let mut server = TestServer::start().await;
    let client_a = server.create_client(&mut cx_a, "user_a").await;
    let client_b = server.create_client(&mut cx_b, "user_b").await;

    // Share a local worktree as client A
    let fs = Arc::new(InMemoryFs::new());
    fs.save(Path::new("/a.txt"), &"a-contents".into())
        .await
        .unwrap();
    let worktree_a =
        cx_a.add_model(|cx| Worktree::test(Path::new("/"), lang_registry.clone(), fs.clone(), cx));
    worktree_a
        .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
        .await;
    let (worktree_id, worktree_token) = worktree_a
        .update(&mut cx_a, |tree, cx| {
            tree.as_local_mut().unwrap().share(client_a.clone(), cx)
        })
        .await
        .unwrap();

    // Join that worktree as client B, and see that a guest has joined as client A.
    let worktree_b = Worktree::open_remote(
        client_b.clone(),
        worktree_id,
        worktree_token,
        lang_registry.clone(),
        &mut cx_b.to_async(),
    )
    .await
    .unwrap();

    let buffer_b = worktree_b
        .update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx))
        .await
        .unwrap();
    // Remember the pre-save mtime so we can detect when the save lands.
    let mtime = buffer_b.read_with(&cx_b, |buf, _| buf.file().unwrap().mtime);

    buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "world ", cx));
    buffer_b.read_with(&cx_b, |buf, _| {
        assert!(buf.is_dirty());
        assert!(!buf.has_conflict());
    });

    buffer_b
        .update(&mut cx_b, |buf, cx| buf.save(cx))
        .unwrap()
        .await
        .unwrap();
    // Wait until the guest observes the new mtime produced by the save.
    worktree_b
        .condition(&cx_b, |_, cx| {
            buffer_b.read(cx).file().unwrap().mtime != mtime
        })
        .await;
    buffer_b.read_with(&cx_b, |buf, _| {
        assert!(!buf.is_dirty());
        assert!(!buf.has_conflict());
    });

    // Editing after the save should dirty the buffer without a conflict.
    buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "hello ", cx));
    buffer_b.read_with(&cx_b, |buf, _| {
        assert!(buf.is_dirty());
        assert!(!buf.has_conflict());
    });
}
|
||||
|
||||
/// Races a host edit against a guest's in-flight open_buffer request and
/// verifies the guest's buffer still converges to the host's text.
#[gpui::test]
async fn test_editing_while_guest_opens_buffer(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
    let lang_registry = Arc::new(LanguageRegistry::new());

    // Connect to a server as 2 clients.
    let mut server = TestServer::start().await;
    let client_a = server.create_client(&mut cx_a, "user_a").await;
    let client_b = server.create_client(&mut cx_b, "user_b").await;

    // Share a local worktree as client A
    let fs = Arc::new(InMemoryFs::new());
    fs.save(Path::new("/a.txt"), &"a-contents".into())
        .await
        .unwrap();
    let worktree_a =
        cx_a.add_model(|cx| Worktree::test(Path::new("/"), lang_registry.clone(), fs.clone(), cx));
    worktree_a
        .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
        .await;
    let (worktree_id, worktree_token) = worktree_a
        .update(&mut cx_a, |tree, cx| {
            tree.as_local_mut().unwrap().share(client_a.clone(), cx)
        })
        .await
        .unwrap();

    // Join that worktree as client B, and see that a guest has joined as client A.
    let worktree_b = Worktree::open_remote(
        client_b.clone(),
        worktree_id,
        worktree_token,
        lang_registry.clone(),
        &mut cx_b.to_async(),
    )
    .await
    .unwrap();

    let buffer_a = worktree_a
        .update(&mut cx_a, |tree, cx| tree.open_buffer("a.txt", cx))
        .await
        .unwrap();
    // Start the guest's open_buffer but do NOT await it yet, so the host
    // edit below races with the open in flight.
    let buffer_b = cx_b
        .background()
        .spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx)));

    task::yield_now().await;
    buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "z", cx));

    let text = buffer_a.read_with(&cx_a, |buf, _| buf.text());
    let buffer_b = buffer_b.await.unwrap();
    buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await;
}
|
||||
|
||||
/// Verifies that dropping a guest's connection removes it from the host's
/// peer list for a shared worktree.
#[gpui::test]
async fn test_peer_disconnection(mut cx_a: TestAppContext, cx_b: TestAppContext) {
    let lang_registry = Arc::new(LanguageRegistry::new());

    // Connect to a server as 2 clients.
    let mut server = TestServer::start().await;
    let client_a = server.create_client(&mut cx_a, "user_a").await;
    // NOTE(review): client_b is created against cx_a here, unlike the
    // sibling tests which pass cx_b — confirm this is intentional.
    let client_b = server.create_client(&mut cx_a, "user_b").await;

    // Share a local worktree as client A
    let dir = temp_tree(json!({
        "a.txt": "a-contents",
        "b.txt": "b-contents",
    }));
    let worktree_a = cx_a.add_model(|cx| Worktree::local(dir.path(), lang_registry.clone(), cx));
    worktree_a
        .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
        .await;
    let (worktree_id, worktree_token) = worktree_a
        .update(&mut cx_a, |tree, cx| {
            tree.as_local_mut().unwrap().share(client_a.clone(), cx)
        })
        .await
        .unwrap();

    // Join that worktree as client B, and see that a guest has joined as client A.
    let _worktree_b = Worktree::open_remote(
        client_b.clone(),
        worktree_id,
        worktree_token,
        lang_registry.clone(),
        &mut cx_b.to_async(),
    )
    .await
    .unwrap();
    worktree_a
        .condition(&cx_a, |tree, _| tree.peers().len() == 1)
        .await;

    // Drop client B's connection and ensure client A observes client B leaving the worktree.
    client_b.disconnect().await.unwrap();
    worktree_a
        .condition(&cx_a, |tree, _| tree.peers().len() == 0)
        .await;
}
|
||||
|
||||
/// A self-contained server-under-test backed by a freshly created,
/// uniquely-named Postgres database (torn down in `Drop`).
struct TestServer {
    peer: Arc<Peer>,          // server-side RPC peer shared by all test connections
    app_state: Arc<AppState>, // db pool, config, and shared rpc state
    db_name: String,          // random per-test database name, used for cleanup
    router: Arc<Router>,      // the RPC routes under test
}
|
||||
|
||||
impl TestServer {
    /// Boots a server against a freshly created, randomly named database
    /// with all RPC routes registered.
    async fn start() -> Self {
        let mut rng = StdRng::from_entropy();
        // Random suffix keeps parallel test runs from colliding on db names.
        let db_name = format!("zed-test-{}", rng.gen::<u128>());
        let app_state = Self::build_app_state(&db_name).await;
        let peer = Peer::new();
        let mut router = Router::new();
        add_rpc_routes(&mut router, &app_state, &peer);
        Self {
            peer,
            router: Arc::new(router),
            app_state,
            db_name,
        }
    }

    /// Creates a db user named `name`, wires an in-memory bidirectional
    /// channel between a new `Client` and this server, and starts servicing
    /// the server end in the background.
    async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> Client {
        let user_id = admin::create_user(&self.app_state.db, name, false)
            .await
            .unwrap();
        let lang_registry = Arc::new(LanguageRegistry::new());
        let client = Client::new(lang_registry.clone());
        let mut client_router = ForegroundRouter::new();
        cx.update(|cx| zed::worktree::init(cx, &client, &mut client_router));

        // In-memory transport standing in for a real websocket.
        let (client_conn, server_conn) = Channel::bidirectional();
        cx.background()
            .spawn(rpc::handle_connection(
                self.peer.clone(),
                self.router.clone(),
                self.app_state.clone(),
                name.to_string(),
                server_conn,
                user_id,
            ))
            .detach();
        client
            .add_connection(client_conn, Arc::new(client_router), cx.to_async())
            .await
            .unwrap();

        // Reset the executor because running SQL queries has a non-deterministic impact on it.
        cx.foreground().reset();
        client
    }

    /// Builds an `AppState` pointing at a new database named `db_name`,
    /// creating it and running migrations first.
    async fn build_app_state(db_name: &str) -> Arc<AppState> {
        let mut config = Config::default();
        config.session_secret = "a".repeat(32);
        config.database_url = format!("postgres://postgres@localhost/{}", db_name);

        Self::create_db(&config.database_url).await;
        let db = PgPoolOptions::new()
            .max_connections(5)
            .connect(&config.database_url)
            .await
            .expect("failed to connect to postgres database");
        let migrator = Migrator::new(Path::new("./migrations")).await.unwrap();
        migrator.run(&db).await.unwrap();

        // Stubbed external clients — no real GitHub/auth traffic in tests.
        let github_client = github::AppClient::test();
        Arc::new(AppState {
            db,
            handlebars: Default::default(),
            auth_client: auth::build_client("", ""),
            repo_client: github::RepoClient::test(&github_client),
            github_client,
            rpc: Default::default(),
            config,
        })
    }

    /// Creates the test database at `url`.
    async fn create_db(url: &str) {
        // Enable tests to run in parallel by serializing the creation of each test database.
        lazy_static::lazy_static! {
            static ref DB_CREATION: async_std::sync::Mutex<()> = async_std::sync::Mutex::new(());
        }

        let _lock = DB_CREATION.lock().await;
        Postgres::create_database(url)
            .await
            .expect("failed to create test database");
    }
}
|
||||
|
||||
impl Drop for TestServer {
    /// Tears down the per-test database: resets the peer, force-terminates
    /// any lingering Postgres backends on the test db, closes the pool, and
    /// drops the database itself.
    fn drop(&mut self) {
        task::block_on(async {
            self.peer.reset().await;
            // Kill other sessions on the test db so DROP DATABASE can succeed.
            self.app_state
                .db
                .execute(
                    format!(
                        "
                        SELECT pg_terminate_backend(pg_stat_activity.pid)
                        FROM pg_stat_activity
                        WHERE pg_stat_activity.datname = '{}' AND pid <> pg_backend_pid();",
                        self.db_name,
                    )
                    .as_str(),
                )
                .await
                .unwrap();
            self.app_state.db.close().await;
            Postgres::drop_database(&self.app_state.config.database_url)
                .await
                .unwrap();
        });
    }
}
|
||||
|
||||
/// A view that renders nothing; used as minimal window content in tests.
struct EmptyView;

impl gpui::Entity for EmptyView {
    // Emits no events.
    type Event = ();
}

impl gpui::View for EmptyView {
    fn ui_name() -> &'static str {
        "empty view"
    }

    // Render an empty element tree.
    fn render<'a>(&self, _: &gpui::AppContext) -> gpui::ElementBox {
        gpui::Element::boxed(gpui::elements::Empty)
    }
}
|
BIN
server/static/fonts/VisbyCF-Bold.eot
Normal file
BIN
server/static/fonts/VisbyCF-Bold.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Bold.woff
Normal file
BIN
server/static/fonts/VisbyCF-Bold.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Bold.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Bold.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-BoldOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-BoldOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-BoldOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-BoldOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-BoldOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-BoldOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBold.eot
Normal file
BIN
server/static/fonts/VisbyCF-DemiBold.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBold.woff
Normal file
BIN
server/static/fonts/VisbyCF-DemiBold.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBold.woff2
Normal file
BIN
server/static/fonts/VisbyCF-DemiBold.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-DemiBoldOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBold.eot
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBold.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBold.woff
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBold.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBold.woff2
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBold.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-ExtraBoldOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Heavy.eot
Normal file
BIN
server/static/fonts/VisbyCF-Heavy.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Heavy.woff
Normal file
BIN
server/static/fonts/VisbyCF-Heavy.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Heavy.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Heavy.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-HeavyOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-HeavyOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-HeavyOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-HeavyOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-HeavyOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-HeavyOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Light.eot
Normal file
BIN
server/static/fonts/VisbyCF-Light.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Light.woff
Normal file
BIN
server/static/fonts/VisbyCF-Light.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Light.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Light.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-LightOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-LightOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-LightOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-LightOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-LightOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-LightOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Medium.eot
Normal file
BIN
server/static/fonts/VisbyCF-Medium.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Medium.woff
Normal file
BIN
server/static/fonts/VisbyCF-Medium.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Medium.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Medium.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-MediumOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-MediumOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-MediumOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-MediumOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-MediumOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-MediumOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Regular.eot
Normal file
BIN
server/static/fonts/VisbyCF-Regular.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Regular.woff
Normal file
BIN
server/static/fonts/VisbyCF-Regular.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Regular.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Regular.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-RegularOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-RegularOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-RegularOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-RegularOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-RegularOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-RegularOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Thin.eot
Normal file
BIN
server/static/fonts/VisbyCF-Thin.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Thin.woff
Normal file
BIN
server/static/fonts/VisbyCF-Thin.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-Thin.woff2
Normal file
BIN
server/static/fonts/VisbyCF-Thin.woff2
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ThinOblique.eot
Normal file
BIN
server/static/fonts/VisbyCF-ThinOblique.eot
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ThinOblique.woff
Normal file
BIN
server/static/fonts/VisbyCF-ThinOblique.woff
Normal file
Binary file not shown.
BIN
server/static/fonts/VisbyCF-ThinOblique.woff2
Normal file
BIN
server/static/fonts/VisbyCF-ThinOblique.woff2
Normal file
Binary file not shown.
BIN
server/static/images/favicon.png
Normal file
BIN
server/static/images/favicon.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.8 KiB |
6
server/static/svg/hero.svg
Normal file
6
server/static/svg/hero.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 11 KiB |
108
server/styles.css
Normal file
108
server/styles.css
Normal file
@ -0,0 +1,108 @@
|
||||
/* This file is compiled to /assets/styles/tailwind.css via script/tailwind */
|
||||
|
||||
@import url('https://fonts.googleapis.com/css2?family=Open+Sans:ital,wght@0,400;0,700;1,400;1,700&display=swap');
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Thin.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Thin.woff') format('woff');
|
||||
font-weight: 100;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Light.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Light.woff') format('woff');
|
||||
font-weight: 300;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Regular.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Regular.woff') format('woff');
|
||||
font-weight: 400;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Medium.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Medium.woff') format('woff');
|
||||
font-weight: 500;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-DemiBold.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-DemiBold.woff') format('woff');
|
||||
font-weight: 600;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Bold.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Bold.woff') format('woff');
|
||||
font-weight: 700;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-ExtraBold.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-ExtraBold.woff') format('woff');
|
||||
font-weight: 800;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'Visby CF';
|
||||
src:
|
||||
url('/static/fonts/VisbyCF-Heavy.woff2') format('woff2'),
|
||||
url('/static/fonts/VisbyCF-Heavy.woff') format('woff');
|
||||
font-weight: 900;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
@layer utilities {
|
||||
@responsive {
|
||||
.bg-dotgrid-sm {
|
||||
background:
|
||||
linear-gradient(90deg, theme('colors.gray.50') 38px, transparent 1%) center,
|
||||
linear-gradient(theme('colors.gray.50') 38px, transparent 1%) center,
|
||||
theme('colors.gray.600');
|
||||
background-size: 40px 40px;
|
||||
}
|
||||
|
||||
.bg-dotgrid-md {
|
||||
background:
|
||||
linear-gradient(90deg, theme('colors.gray.50') 58px, transparent 1%) center,
|
||||
linear-gradient(theme('colors.gray.50') 58px, transparent 1%) center,
|
||||
theme('colors.gray.600');
|
||||
background-size: 60px 60px;
|
||||
}
|
||||
|
||||
.bg-dotgrid-lg {
|
||||
background:
|
||||
linear-gradient(90deg, theme('colors.gray.50') 88px, transparent 1%) center,
|
||||
linear-gradient(theme('colors.gray.50') 88px, transparent 1%) center,
|
||||
theme('colors.gray.600');
|
||||
background-size: 90px 90px;
|
||||
}
|
||||
}
|
||||
}
|
81
server/templates/admin.hbs
Normal file
81
server/templates/admin.hbs
Normal file
@ -0,0 +1,81 @@
|
||||
{{#> layout }}
|
||||
<script>
|
||||
window.addEventListener("DOMContentLoaded", function () {
|
||||
let users = document.getElementById("users");
|
||||
if (users) {
|
||||
users.addEventListener("change", async function (event) {
|
||||
const action = event.target.getAttribute("action");
|
||||
if (action) {
|
||||
console.log(action, event.target.checked);
|
||||
const response = await fetch(action, {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({ admin: event.target.checked })
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
</script>
|
||||
|
||||
<div class="bg-white">
|
||||
<div class="container py-4 px-8 md:px-12 mx-auto">
|
||||
<h1 class="text-xl font-bold border-b border-gray-300 mb-4">Users</h1>
|
||||
<table class="table" id="users">
|
||||
<tr>
|
||||
<th class="text-left pr-2">GitHub Login</th>
|
||||
<th class="text-left pr-2">Admin</th>
|
||||
<th></th>
|
||||
</tr>
|
||||
<form action="/users" method="post" class="m-0 mb-4">
|
||||
<tr>
|
||||
<td>
|
||||
<input name="github_login" type="text" class="border border-gray-300 p-1 mr-2 w-48"
|
||||
placeholder="@github_handle">
|
||||
</td>
|
||||
<td>
|
||||
<input type="checkbox" id="admin" name="admin" value="true">
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<button class="p-1 w-20 text-white rounded-md bg-gray-600 hover:bg-black">Add</button>
|
||||
</td>
|
||||
</tr>
|
||||
</form>
|
||||
|
||||
{{#each users}}
|
||||
<tr>
|
||||
<form action="/users/{{id}}/delete" method="post">
|
||||
<td class="py-1">
|
||||
{{github_login}}
|
||||
</td>
|
||||
<td>
|
||||
<input action="/users/{{id}}" type="checkbox" {{#if admin}}checked{{/if}}>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<button class="p-1 w-20 rounded-md bg-gray-600 hover:bg-black text-white">Remove</button>
|
||||
</td>
|
||||
</form>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
|
||||
<h1 class="text-xl font-bold border-b border-gray-300 mb-4 mt-8">Signups</h1>
|
||||
<table class="table">
|
||||
{{#each signups}}
|
||||
<tr>
|
||||
<form action="/signups/{{id}}/delete" method="post">
|
||||
<td class="align-top">{{github_login}}</td>
|
||||
<td class="pl-4 align-top">{{email_address}}</td>
|
||||
<td class="pl-4 align-top">{{about}}</td>
|
||||
<td class="text-right">
|
||||
<button class="p-1 w-20 rounded-md bg-gray-600 hover:bg-black text-white">Remove</button>
|
||||
</td>
|
||||
</form>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
{{/layout}}
|
41
server/templates/docs.hbs
Normal file
41
server/templates/docs.hbs
Normal file
@ -0,0 +1,41 @@
|
||||
{{#> layout }}
|
||||
|
||||
<div class="bg-white">
|
||||
<div class="container mx-auto py-12 px-8 md:px-12">
|
||||
<h1 class="text-4xl font-black font-display mb-8">Bypassing code signing restrictions</h1>
|
||||
<div class="lg:flex lg:flex-row items-start">
|
||||
<div class="prose xl:prose-xl lg:mr-12">
|
||||
<p>
|
||||
We haven't yet applied to Apple for the required certificate to sign our application bundle, which
|
||||
means there's a small speed bump when you run our app.
|
||||
</p>
|
||||
<p>
|
||||
Instead of double-clicking the app, right click it and choose Open.
|
||||
</p>
|
||||
<p>
|
||||
You need to attempt open the app <b>twice</b>. On the second attempt, you should see the option
|
||||
to open the application anyway in the dialog.
|
||||
</p>
|
||||
</div>
|
||||
<img class="float-1 lg:w-1/3 object-contain mt-8 lg:mt-0" alt="Screen Shot 2021-06-02 at 2 38 12 PM"
|
||||
src="https://user-images.githubusercontent.com/1789/120550754-86514480-c3b2-11eb-8995-32f5eea79664.png">
|
||||
<img class="float-1 lg:w-1/3 object-contain -ml-10 lg:ml-0 lg:-mt-10"
|
||||
alt="Screen Shot 2021-06-02 at 2 38 19 PM"
|
||||
src="https://user-images.githubusercontent.com/1789/120550759-88b39e80-c3b2-11eb-88e2-ddfc1b1c7a03.png">
|
||||
</div>
|
||||
|
||||
<h1 class="text-4xl font-black font-display my-8">Key bindings</h1>
|
||||
<div class="prose">
|
||||
<dl>
|
||||
<dt>
|
||||
<pre>cmd-shift-L</pre>
|
||||
</dt>
|
||||
<dd>
|
||||
Split selection into lines
|
||||
</dd>
|
||||
</dl>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{/layout}}
|
7
server/templates/error.hbs
Normal file
7
server/templates/error.hbs
Normal file
@ -0,0 +1,7 @@
|
||||
{{#> layout }}
|
||||
<div class="bg-white py-8">
|
||||
<div class="container mx-auto my-16 px-8 md:px-12 text-2xl md:text-4xl">
|
||||
Sorry, we encountered a {{status}} error: {{reason}}.
|
||||
</div>
|
||||
</div>
|
||||
{{/layout}}
|
69
server/templates/home.hbs
Normal file
69
server/templates/home.hbs
Normal file
@ -0,0 +1,69 @@
|
||||
{{#> layout }}
|
||||
{{#if releases}}
|
||||
|
||||
<div class="bg-white">
|
||||
<div class="container mx-auto py-12 px-8 md:px-12 lg:flex lg:flex-row">
|
||||
{{#each releases}}
|
||||
<div class="md:flex md:flex-row">
|
||||
<div class="font-display mb-8 md:mb-0 md:text-right">
|
||||
<div class="text-2xl font-bold whitespace-nowrap">
|
||||
VERSION {{name}}
|
||||
</div>
|
||||
<a class="text-md underline text-yellow-600 hover:text-yellow-700"
|
||||
href="/releases/{{tag_name}}/{{assets.0.name}}">
|
||||
DOWNLOAD
|
||||
</a>
|
||||
</div>
|
||||
<div
|
||||
class="prose prose-lg xl:prose-xl border-t md:border-t-0 pt-8 md:border-l border-gray-400 md:ml-8 md:pl-8 md:pt-0 xl:ml-16 xl:pl-16 max-w-5xl font-body">
|
||||
{{{body}}}
|
||||
</div>
|
||||
</div>
|
||||
{{/each}}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{else}}
|
||||
|
||||
<div class="bg-dotgrid-sm md:bg-dotgrid-md lg:bg-dotgrid-lg">
|
||||
<img src="/static/svg/hero.svg" class="container mx-auto px-8 md:px-12 py-16 md:py-24 lg:py-32" />
|
||||
</div>
|
||||
|
||||
<div class="container mx-auto py-24 lg:py-32 px-8 md:px-12 lg:flex lg:flex-row lg:items-center">
|
||||
<div class="prose prose-xl md:prose-2xl text-gray-50 prose-gray-50 w-full lg:w-1/2">
|
||||
<p>
|
||||
We’re the team behind GitHub’s Atom text editor, and we’re building something new:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<b>Zed</b> is a fully-native desktop code editor focused on high performance,
|
||||
clean design, and seamless collaboration.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
We’re in early development, but we’d like to build a small community of developers who care deeply about
|
||||
their tools and are willing to give us feedback. We'll be sharing alpha builds with community members and
|
||||
telling our story along the way.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If you’re interested in joining us, please let us know.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<form class="my-16 lg:my-0 lg:ml-16 flex-1 text-xl md:text-2xl" action="/signups" method="post">
|
||||
<input name="github_login" placeholder="@github_handle"
|
||||
class="w-3/5 xl:w-1/2 p-3 mb-8 block bg-gray-50 placeholder-gray-500">
|
||||
<input name="email_address" placeholder="email@addre.ss"
|
||||
class="w-4/5 xl:w-3/4 p-3 my-8 block bg-gray-50 placeholder-gray-500">
|
||||
<textarea name="about" class="block w-full xl:w-full h-48 p-3 my-8 bg-gray-50 placeholder-gray-500 my-6"
|
||||
placeholder="Please tell us a bit about you and why you're interested in Zed. What code editor do you use today? What do you love and hate about it?"></textarea>
|
||||
<button
|
||||
class="p-4 rounded-md text-gray-50 bg-gray-500 inline-block cursor-pointer hover:bg-gray-400 font-display">
|
||||
ENGAGE
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
{{/if}}
|
||||
{{/layout}}
|
62
server/templates/partials/layout.hbs
Normal file
62
server/templates/partials/layout.hbs
Normal file
@ -0,0 +1,62 @@
|
||||
<html>
|
||||
|
||||
<head>
|
||||
<link rel="icon" href="/static/images/favicon.png">
|
||||
<link rel="stylesheet" href="/static/styles.css">
|
||||
<title>Zed Industries</title>
|
||||
|
||||
|
||||
<script>
|
||||
window.addEventListener("DOMContentLoaded", function () {
|
||||
let avatar = document.getElementById("avatar");
|
||||
let sign_out = document.getElementById("sign_out");
|
||||
if (avatar && sign_out) {
|
||||
avatar.addEventListener("click", function (event) {
|
||||
sign_out.classList.toggle("hidden");
|
||||
event.stopPropagation();
|
||||
});
|
||||
document.addEventListener("click", function (event) {
|
||||
sign_out.classList.add("hidden");
|
||||
});
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body class="font-body bg-black">
|
||||
<div class="text-lg text-gray-50">
|
||||
<div class="container mx-auto flex flex-row items-center py-4 px-8 md:px-12 font-display">
|
||||
<a href="/" class="font-display">
|
||||
<span class="font-black">ZED</span><span class="font-light" style="padding-left: 1px">INDUSTRIES</span>
|
||||
</a>
|
||||
<div class="flex-1"></div>
|
||||
<a href="/team" class="text-sm mr-4 hover:underline">
|
||||
Team
|
||||
</a>
|
||||
{{#if current_user}}
|
||||
{{#if current_user.is_admin }}
|
||||
<a href="/admin" class="text-sm mr-4 hover:underline">
|
||||
Admin
|
||||
</a>
|
||||
{{/if}}
|
||||
<div class="relative">
|
||||
<img id="avatar" src="{{current_user.avatar_url}}"
|
||||
class="w-8 rounded-full border-gray-400 border cursor-pointer" />
|
||||
<form id="sign_out" action="/sign_out" method="post"
|
||||
class="hidden absolute mt-1 right-0 bg-black rounded border border-gray-400 text-center text-sm p-2 px-4 whitespace-nowrap">
|
||||
<button class="hover:underline">Sign out</button>
|
||||
</form>
|
||||
</div>
|
||||
{{else}}
|
||||
<a href=" /sign_in"
|
||||
class="text-sm align-middle p-1 px-2 rounded-md border border-gray-50 cursor-pointer hover:bg-gray-800">
|
||||
Log in
|
||||
</a>
|
||||
{{/if}}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{> @partial-block}}
|
||||
</body>
|
||||
|
||||
</html>
|
19
server/templates/signup.hbs
Normal file
19
server/templates/signup.hbs
Normal file
@ -0,0 +1,19 @@
|
||||
{{#> layout }}
|
||||
<div class="bg-gray-50 py-10 text-black">
|
||||
<div class="container mx-auto px-8 md:px-12">
|
||||
<div class=" text-6xl font-black mb-8">
|
||||
THANKS
|
||||
</div>
|
||||
<div class="text-xl max-w-md">
|
||||
<p class="mb-8">
|
||||
Thanks a ton for your interest! We'll add you to our list and let you know when we have something ready
|
||||
for you to try out.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<a href="/" class="font-bold text-yellow-600 hover:text-yellow-700">Back to /</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{/layout}}
|
62
server/templates/team.hbs
Normal file
62
server/templates/team.hbs
Normal file
@ -0,0 +1,62 @@
|
||||
{{#> layout }}
|
||||
|
||||
<div class="bg-white">
|
||||
<div class="container mx-auto py-12 px-8 md:px-12 lg:flex lg:flex-row">
|
||||
<div class="mb-16 lg:mb-0 lg:flex-1 lg:mr-8 xl:mr-16">
|
||||
<img src="https://github.com/nathansobo.png?size=200" class="mx-auto mb-4 h-28 rounded-full">
|
||||
<div>
|
||||
<a href="https://github.com/nathansobo"
|
||||
class="block text-center mb-4 font-display text-2xl font-bold whitespace-nowrap hover:underline">
|
||||
NATHAN SOBO
|
||||
</a>
|
||||
<div class="prose md:prose-lg lg:prose xl:prose-lg">
|
||||
Nathan joined GitHub in late 2011 to build the <a href="https://atom.io">Atom text editor</a>, and
|
||||
he led the Atom team until 2018. He also co-led development of <a
|
||||
href="https://teletype.atom.io">Teletype for Atom</a>, pioneering one of the first production
|
||||
uses of conflict-free replicated data types for collaborative text editing. He's been dreaming about
|
||||
building the world’s best text editor since he graduated from college, and is excited to finally
|
||||
have
|
||||
the knowledge, tools, and resources to achieve this vision.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-16 lg:mb-0 lg:flex-1 lg:mr-8 xl:mr-16">
|
||||
<img src="https://github.com/as-cii.png?size=200" class="mx-auto mb-4 h-28 rounded-full">
|
||||
<div>
|
||||
<a href="https://github.com/as-cii"
|
||||
class="block text-center mb-4 font-display text-2xl font-bold whitespace-nowrap hover:underline">
|
||||
ANTONIO SCANDURRA
|
||||
</a>
|
||||
<div class="prose md:prose-lg lg:prose xl:prose-lg">
|
||||
Antonio joined the Atom team in 2014 while still in university after his outstanding open source
|
||||
contributions caught the attention of the team. He later joined Nathan in architecting <a
|
||||
href="https://teletype.atom.io">Teletype for
|
||||
Atom</a> and researching the foundations of what has turned into Zed. For the last two years,
|
||||
he’s
|
||||
become an expert in distributed systems and conflict-free replicated data types through the
|
||||
development of a real-time, distributed, conflict-free database implemented in Rust for <a
|
||||
href="https://ditto.live">Ditto</a>.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-16 lg:mb-0 lg:flex-1">
|
||||
<img src="https://github.com/maxbrunsfeld.png?size=200" class="mx-auto mb-4 h-28 rounded-full">
|
||||
<div>
|
||||
<a href="https://github.com/maxbrunsfeld"
|
||||
class="block text-center mb-4 font-display text-2xl font-bold whitespace-nowrap hover:underline">
|
||||
MAX BRUNSFELD
|
||||
</a>
|
||||
<div class="prose md:prose-lg lg:prose xl:prose-lg">
|
||||
Max joined the Atom team in 2013 after working at Pivotal Labs. While driving Atom towards its 1.0
|
||||
launch during the day, Max spent nights and weekends building <a
|
||||
href="https://tree-sitter.github.io">Tree-sitter</a>, a blazing-fast and
|
||||
expressive incremental parsing framework that currently powers all code analysis at GitHub. Before
|
||||
leaving to start Zed, Max helped GitHub's semantic analysis team integrate Tree-sitter to support
|
||||
syntax highlighting and code navigation on <a href="https://github.com">github.com</a>.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{/layout}}
|
@ -15,14 +15,14 @@ base64 = "0.13"
|
||||
futures = "0.3"
|
||||
log = "0.4"
|
||||
parking_lot = "0.11.1"
|
||||
postage = {version = "0.4.1", features = ["futures-traits"]}
|
||||
postage = { version = "0.4.1", features = ["futures-traits"] }
|
||||
prost = "0.7"
|
||||
rand = "0.8"
|
||||
rsa = "0.4"
|
||||
serde = {version = "1", features = ["derive"]}
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = {git = "https://github.com/tokio-rs/prost", rev = "6cf97ea422b09d98de34643c4dda2d4f8b7e23e6"}
|
||||
prost-build = { git = "https://github.com/tokio-rs/prost", rev = "6cf97ea422b09d98de34643c4dda2d4f8b7e23e6" }
|
||||
|
||||
[dev-dependencies]
|
||||
smol = "1.2.5"
|
||||
|
@ -20,14 +20,14 @@ test-support = ["tempdir", "serde_json", "zed-rpc/test-support"]
|
||||
anyhow = "1.0.38"
|
||||
arrayvec = "0.5.2"
|
||||
async-trait = "0.1"
|
||||
async-tungstenite = { version="0.14", features=["async-tls"] }
|
||||
async-tungstenite = { version = "0.14", features = ["async-tls"] }
|
||||
crossbeam-channel = "0.5.0"
|
||||
ctor = "0.1.20"
|
||||
dirs = "3.0"
|
||||
easy-parallel = "3.1.0"
|
||||
fsevent = { path="../fsevent" }
|
||||
fsevent = { path = "../fsevent" }
|
||||
futures = "0.3"
|
||||
gpui = { path="../gpui" }
|
||||
gpui = { path = "../gpui" }
|
||||
http-auth-basic = "0.1.3"
|
||||
ignore = "0.4"
|
||||
lazy_static = "1.4.0"
|
||||
@ -35,31 +35,33 @@ libc = "0.2"
|
||||
log = "0.4"
|
||||
num_cpus = "1.13.0"
|
||||
parking_lot = "0.11.1"
|
||||
postage = { version="0.4.1", features=["futures-traits"] }
|
||||
postage = { version = "0.4.1", features = ["futures-traits"] }
|
||||
rand = "0.8.3"
|
||||
rsa = "0.4"
|
||||
rust-embed = "5.9.0"
|
||||
seahash = "4.1"
|
||||
serde = { version="1", features=["derive"] }
|
||||
serde_json = { version="1.0.64", features=["preserve_order"], optional=true }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = { version = "1.0.64", features = [
|
||||
"preserve_order"
|
||||
], optional = true }
|
||||
similar = "1.3"
|
||||
simplelog = "0.9"
|
||||
smallvec = { version="1.6", features=["union"] }
|
||||
smallvec = { version = "1.6", features = ["union"] }
|
||||
smol = "1.2.5"
|
||||
surf = "2.2"
|
||||
tempdir = { version="0.3.7", optional=true }
|
||||
tempdir = { version = "0.3.7", optional = true }
|
||||
tiny_http = "0.8"
|
||||
toml = "0.5"
|
||||
tree-sitter = "0.19.5"
|
||||
tree-sitter-rust = "0.19.0"
|
||||
url = "2.2"
|
||||
zed-rpc = { path="../zed-rpc" }
|
||||
zed-rpc = { path = "../zed-rpc" }
|
||||
|
||||
[dev-dependencies]
|
||||
cargo-bundle = "0.5.0"
|
||||
env_logger = "0.8"
|
||||
serde_json = { version="1.0.64", features=["preserve_order"] }
|
||||
tempdir = { version="0.3.7" }
|
||||
serde_json = { version = "1.0.64", features = ["preserve_order"] }
|
||||
tempdir = { version = "0.3.7" }
|
||||
unindent = "0.1.7"
|
||||
|
||||
[package.metadata.bundle]
|
||||
|
Loading…
Reference in New Issue
Block a user