Merge branch 'main' into go-to-line2

Conrad Irwin 2023-11-08 14:46:52 -07:00
commit cbdd4aca89
147 changed files with 32668 additions and 79 deletions

Cargo.lock (generated)

@@ -1710,6 +1710,78 @@ dependencies = [
"workspace",
]
[[package]]
name = "collab2"
version = "0.28.0"
dependencies = [
"anyhow",
"async-trait",
"async-tungstenite",
"audio2",
"axum",
"axum-extra",
"base64 0.13.1",
"call2",
"channel2",
"clap 3.2.25",
"client2",
"clock",
"collab_ui",
"collections",
"ctor",
"dashmap",
"editor2",
"env_logger 0.9.3",
"envy",
"fs2",
"futures 0.3.28",
"git3",
"gpui2",
"hyper",
"indoc",
"language2",
"lazy_static",
"lipsum",
"live_kit_client2",
"live_kit_server",
"log",
"lsp2",
"nanoid",
"node_runtime",
"parking_lot 0.11.2",
"pretty_assertions",
"project2",
"prometheus",
"prost 0.8.0",
"rand 0.8.5",
"reqwest",
"rpc2",
"scrypt",
"sea-orm",
"serde",
"serde_derive",
"serde_json",
"settings2",
"sha-1 0.9.8",
"smallvec",
"sqlx",
"text2",
"theme2",
"time",
"tokio",
"tokio-tungstenite",
"toml 0.5.11",
"tonic",
"tower",
"tracing",
"tracing-log",
"tracing-subscriber",
"unindent",
"util",
"uuid 1.4.1",
"workspace2",
]
[[package]]
name = "collab_ui"
version = "0.1.0"
@@ -2262,9 +2334,8 @@ dependencies = [
[[package]]
name = "ctor"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
version = "0.1.20"
source = "git+https://github.com/zed-industries/rust-ctor?rev=7f824cf6a7943885a649b579f33f9ac53f0d1db6#7f824cf6a7943885a649b579f33f9ac53f0d1db6"
dependencies = [
"quote",
"syn 1.0.109",
@@ -7241,8 +7312,10 @@ dependencies = [
"rsa 0.4.0",
"serde",
"serde_derive",
"serde_json",
"smol",
"smol-timeout",
"strum",
"tempdir",
"tracing",
"util",

@@ -16,6 +16,7 @@ members = [
"crates/client2",
"crates/clock",
"crates/collab",
"crates/collab2",
"crates/collab_ui",
"crates/collections",
"crates/command_palette",
@@ -116,7 +117,10 @@ resolver = "2"
[workspace.dependencies]
anyhow = { version = "1.0.57" }
async-trait = { version = "0.1" }
ctor = { version = "0.1" }
# TODO: Switch back to the published version of `ctor` once:
# 1. A new version of `ctor` is published with this change: https://github.com/mmastrac/rust-ctor/pull/295
# 2. We've confirmed it's fine to update to the latest version of `ctor` (we're currently on v0.1.20).
ctor = { git = "https://github.com/zed-industries/rust-ctor", rev = "7f824cf6a7943885a649b579f33f9ac53f0d1db6" }
derive_more = { version = "0.99.17" }
env_logger = { version = "0.9" }
futures = { version = "0.3" }

@@ -10,7 +10,7 @@ use client::{
ZED_ALWAYS_ACTIVE,
};
use collections::HashSet;
use futures::{future::Shared, FutureExt};
use futures::{channel::oneshot, future::Shared, Future, FutureExt};
use gpui::{
AppContext, AsyncAppContext, Context, EventEmitter, Model, ModelContext, Subscription, Task,
WeakModel,
@@ -30,6 +30,37 @@ pub fn init(client: Arc<Client>, user_store: Model<UserStore>, cx: &mut AppContext
cx.set_global(active_call);
}
pub struct OneAtATime {
cancel: Option<oneshot::Sender<()>>,
}
impl OneAtATime {
/// Spawns a task in the given context.
/// If another task is spawned before this one resolves, or if the OneAtATime itself is dropped, the first task is cancelled and returns Ok(None);
/// otherwise you'll get the result of the task.
fn spawn<F, Fut, R>(&mut self, cx: &mut AppContext, f: F) -> Task<Result<Option<R>>>
where
F: 'static + FnOnce(AsyncAppContext) -> Fut,
Fut: Future<Output = Result<R>>,
R: 'static,
{
let (tx, rx) = oneshot::channel();
self.cancel.replace(tx);
cx.spawn(|cx| async move {
futures::select_biased! {
_ = rx.fuse() => Ok(None),
result = f(cx).fuse() => result.map(Some),
}
})
}
fn running(&self) -> bool {
self.cancel
.as_ref()
.is_some_and(|cancel| !cancel.is_canceled())
}
}
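For context, a minimal sketch of how this debouncer behaves (hypothetical usage, mirroring the test module at the bottom of this file; assumes an &mut AppContext named cx, e.g. obtained inside cx.update):
// Hypothetical sketch, not part of the diff: only the most recent spawn
// can resolve to Some; superseded spawns resolve to Ok(None).
let mut debouncer = OneAtATime { cancel: None };
let first = debouncer.spawn(cx, |_cx| async { Ok(1) });
let second = debouncer.spawn(cx, |_cx| async { Ok(2) });
// Spawning the second task replaces (and thereby drops) the oneshot Sender
// guarding the first, so the first task's rx arm in select_biased! wins:
// first.await == Ok(None), while second.await == Ok(Some(2)).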
#[derive(Clone)]
pub struct IncomingCall {
pub room_id: u64,
@@ -43,6 +74,7 @@ pub struct ActiveCall {
room: Option<(Model<Room>, Vec<Subscription>)>,
pending_room_creation: Option<Shared<Task<Result<Model<Room>, Arc<anyhow::Error>>>>>,
location: Option<WeakModel<Project>>,
_join_debouncer: OneAtATime,
pending_invites: HashSet<u64>,
incoming_call: (
watch::Sender<Option<IncomingCall>>,
@@ -65,7 +97,7 @@ impl ActiveCall {
location: None,
pending_invites: Default::default(),
incoming_call: watch::channel(),
_join_debouncer: OneAtATime { cancel: None },
_subscriptions: vec![
client.add_request_handler(cx.weak_model(), Self::handle_incoming_call),
client.add_message_handler(cx.weak_model(), Self::handle_call_canceled),
@@ -140,6 +172,10 @@ impl ActiveCall {
}
cx.notify();
if self._join_debouncer.running() {
return Task::ready(Ok(()));
}
let room = if let Some(room) = self.room().cloned() {
Some(Task::ready(Ok(room)).shared())
} else {
@@ -256,11 +292,20 @@ impl ActiveCall {
return Task::ready(Err(anyhow!("no incoming call")));
};
let join = Room::join(&call, self.client.clone(), self.user_store.clone(), cx);
if self.pending_room_creation.is_some() {
return Task::ready(Ok(()));
}
let room_id = call.room_id.clone();
let client = self.client.clone();
let user_store = self.user_store.clone();
let join = self
._join_debouncer
.spawn(cx, move |cx| Room::join(room_id, client, user_store, cx));
cx.spawn(|this, mut cx| async move {
let room = join.await?;
this.update(&mut cx, |this, cx| this.set_room(Some(room.clone()), cx))?
this.update(&mut cx, |this, cx| this.set_room(room.clone(), cx))?
.await?;
this.update(&mut cx, |this, cx| {
this.report_call_event("accept incoming", cx)
@@ -287,20 +332,28 @@
&mut self,
channel_id: u64,
cx: &mut ModelContext<Self>,
) -> Task<Result<Model<Room>>> {
) -> Task<Result<Option<Model<Room>>>> {
if let Some(room) = self.room().cloned() {
if room.read(cx).channel_id() == Some(channel_id) {
return Task::ready(Ok(room));
return Task::ready(Ok(Some(room)));
} else {
room.update(cx, |room, cx| room.clear_state(cx));
}
}
let join = Room::join_channel(channel_id, self.client.clone(), self.user_store.clone(), cx);
if self.pending_room_creation.is_some() {
return Task::ready(Ok(None));
}
let client = self.client.clone();
let user_store = self.user_store.clone();
let join = self._join_debouncer.spawn(cx, move |cx| async move {
Room::join_channel(channel_id, client, user_store, cx).await
});
cx.spawn(|this, mut cx| async move {
let room = join.await?;
this.update(&mut cx, |this, cx| this.set_room(Some(room.clone()), cx))?
this.update(&mut cx, |this, cx| this.set_room(room.clone(), cx))?
.await?;
this.update(&mut cx, |this, cx| {
this.report_call_event("join channel", cx)
@@ -459,3 +512,40 @@ pub fn report_call_event_for_channel(
};
telemetry.report_clickhouse_event(event, telemetry_settings);
}
#[cfg(test)]
mod test {
use gpui::TestAppContext;
use crate::OneAtATime;
#[gpui::test]
async fn test_one_at_a_time(cx: &mut TestAppContext) {
let mut one_at_a_time = OneAtATime { cancel: None };
assert_eq!(
cx.update(|cx| one_at_a_time.spawn(cx, |_| async { Ok(1) }))
.await
.unwrap(),
Some(1)
);
let (a, b) = cx.update(|cx| {
(
one_at_a_time.spawn(cx, |_| async {
assert!(false);
Ok(2)
}),
one_at_a_time.spawn(cx, |_| async { Ok(3) }),
)
});
assert_eq!(a.await.unwrap(), None);
assert_eq!(b.await.unwrap(), Some(3));
let promise = cx.update(|cx| one_at_a_time.spawn(cx, |_| async { Ok(4) }));
drop(one_at_a_time);
assert_eq!(promise.await.unwrap(), None);
}
}

@@ -1,7 +1,6 @@
use crate::{
call_settings::CallSettings,
participant::{LocalParticipant, ParticipantLocation, RemoteParticipant},
IncomingCall,
};
use anyhow::{anyhow, Result};
use audio::{Audio, Sound};
@@ -284,37 +283,32 @@ impl Room {
})
}
pub(crate) fn join_channel(
pub(crate) async fn join_channel(
channel_id: u64,
client: Arc<Client>,
user_store: Model<UserStore>,
cx: &mut AppContext,
) -> Task<Result<Model<Self>>> {
cx.spawn(move |cx| async move {
cx: AsyncAppContext,
) -> Result<Model<Self>> {
Self::from_join_response(
client.request(proto::JoinChannel { channel_id }).await?,
client,
user_store,
cx,
)
})
}
pub(crate) fn join(
call: &IncomingCall,
pub(crate) async fn join(
room_id: u64,
client: Arc<Client>,
user_store: Model<UserStore>,
cx: &mut AppContext,
) -> Task<Result<Model<Self>>> {
let id = call.room_id;
cx.spawn(move |cx| async move {
cx: AsyncAppContext,
) -> Result<Model<Self>> {
Self::from_join_response(
client.request(proto::JoinRoom { id }).await?,
client.request(proto::JoinRoom { id: room_id }).await?,
client,
user_store,
cx,
)
})
}
fn released(&mut self, cx: &mut AppContext) {

@@ -124,6 +124,7 @@ impl TestServer {
if cx.has_global::<SettingsStore>() {
panic!("Same cx used to create two test clients")
}
cx.set_global(SettingsStore::test(cx));
});

crates/collab2/.env.toml

@@ -0,0 +1,12 @@
DATABASE_URL = "postgres://postgres@localhost/zed"
DATABASE_MAX_CONNECTIONS = 5
HTTP_PORT = 8080
API_TOKEN = "secret"
INVITE_LINK_PREFIX = "http://localhost:3000/invites/"
ZED_ENVIRONMENT = "development"
LIVE_KIT_SERVER = "http://localhost:7880"
LIVE_KIT_KEY = "devkey"
LIVE_KIT_SECRET = "secret"
# RUST_LOG=info
# LOG_JSON=true

crates/collab2/Cargo.toml

@@ -0,0 +1,101 @@
[package]
authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab2"
edition = "2021"
name = "collab2"
version = "0.28.0"
publish = false
[[bin]]
name = "collab2"
[[bin]]
name = "seed"
required-features = ["seed-support"]
[dependencies]
clock = { path = "../clock" }
collections = { path = "../collections" }
live_kit_server = { path = "../live_kit_server" }
text = { package = "text2", path = "../text2" }
rpc = { package = "rpc2", path = "../rpc2" }
util = { path = "../util" }
anyhow.workspace = true
async-tungstenite = "0.16"
axum = { version = "0.5", features = ["json", "headers", "ws"] }
axum-extra = { version = "0.3", features = ["erased-json"] }
base64 = "0.13"
clap = { version = "3.1", features = ["derive"], optional = true }
dashmap = "5.4"
envy = "0.4.2"
futures.workspace = true
hyper = "0.14"
lazy_static.workspace = true
lipsum = { version = "0.8", optional = true }
log.workspace = true
nanoid = "0.4"
parking_lot.workspace = true
prometheus = "0.13"
prost.workspace = true
rand.workspace = true
reqwest = { version = "0.11", features = ["json"], optional = true }
scrypt = "0.7"
smallvec.workspace = true
sea-orm = { version = "0.12.x", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls", "with-uuid"] }
serde.workspace = true
serde_derive.workspace = true
serde_json.workspace = true
sha-1 = "0.9"
sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid", "any"] }
time.workspace = true
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"
tonic = "0.6"
tower = "0.4"
toml.workspace = true
tracing = "0.1.34"
tracing-log = "0.1.3"
tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] }
uuid.workspace = true
[dev-dependencies]
audio = { package = "audio2", path = "../audio2" }
collections = { path = "../collections", features = ["test-support"] }
gpui = { package = "gpui2", path = "../gpui2", features = ["test-support"] }
call = { package = "call2", path = "../call2", features = ["test-support"] }
client = { package = "client2", path = "../client2", features = ["test-support"] }
channel = { package = "channel2", path = "../channel2" }
editor = { package = "editor2", path = "../editor2", features = ["test-support"] }
language = { package = "language2", path = "../language2", features = ["test-support"] }
fs = { package = "fs2", path = "../fs2", features = ["test-support"] }
git = { package = "git3", path = "../git3", features = ["test-support"] }
live_kit_client = { package = "live_kit_client2", path = "../live_kit_client2", features = ["test-support"] }
lsp = { package = "lsp2", path = "../lsp2", features = ["test-support"] }
node_runtime = { path = "../node_runtime" }
#todo!(notifications)
#notifications = { path = "../notifications", features = ["test-support"] }
project = { package = "project2", path = "../project2", features = ["test-support"] }
rpc = { package = "rpc2", path = "../rpc2", features = ["test-support"] }
settings = { package = "settings2", path = "../settings2", features = ["test-support"] }
theme = { package = "theme2", path = "../theme2" }
workspace = { package = "workspace2", path = "../workspace2", features = ["test-support"] }
collab_ui = { path = "../collab_ui", features = ["test-support"] }
async-trait.workspace = true
pretty_assertions.workspace = true
ctor.workspace = true
env_logger.workspace = true
indoc.workspace = true
util = { path = "../util" }
lazy_static.workspace = true
sea-orm = { version = "0.12.x", features = ["sqlx-sqlite"] }
serde_json.workspace = true
sqlx = { version = "0.7", features = ["sqlite"] }
unindent.workspace = true
[features]
seed-support = ["clap", "lipsum", "reqwest"]

crates/collab2/README.md

@@ -0,0 +1,5 @@
# Zed Server
This crate is what we run at https://collab.zed.dev.
It contains our back-end logic for collaboration, to which we connect from the Zed client via a websocket after authenticating via https://zed.dev, which is a separate repo running on Vercel.

@@ -0,0 +1,4 @@
db-uri = "postgres://postgres@localhost/zed"
server-port = 8081
jwt-secret = "the-postgrest-jwt-secret-for-authorization"
log-level = "info"

crates/collab2/basic.conf

@@ -0,0 +1,12 @@
[Interface]
PrivateKey = B5Fp/yVfP0QYlb+YJv9ea+EMI1mWODPD3akh91cVjvc=
Address = fdaa:0:2ce3:a7b:bea:0:a:2/120
DNS = fdaa:0:2ce3::3
[Peer]
PublicKey = RKAYPljEJiuaELNDdQIEJmQienT9+LRISfIHwH45HAw=
AllowedIPs = fdaa:0:2ce3::/48
Endpoint = ord1.gateway.6pn.dev:51820
PersistentKeepalive = 15

@@ -0,0 +1,4 @@
ZED_ENVIRONMENT=preview
RUST_LOG=info
INVITE_LINK_PREFIX=https://zed.dev/invites/
DATABASE_MAX_CONNECTIONS=10

@@ -0,0 +1,4 @@
ZED_ENVIRONMENT=production
RUST_LOG=info
INVITE_LINK_PREFIX=https://zed.dev/invites/
DATABASE_MAX_CONNECTIONS=85

@@ -0,0 +1,4 @@
ZED_ENVIRONMENT=staging
RUST_LOG=info
INVITE_LINK_PREFIX=https://staging.zed.dev/invites/
DATABASE_MAX_CONNECTIONS=5

@@ -0,0 +1,177 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: ${ZED_KUBE_NAMESPACE}
---
kind: Service
apiVersion: v1
metadata:
namespace: ${ZED_KUBE_NAMESPACE}
name: collab
annotations:
service.beta.kubernetes.io/do-loadbalancer-tls-ports: "443"
service.beta.kubernetes.io/do-loadbalancer-certificate-id: ${ZED_DO_CERTIFICATE_ID}
spec:
type: LoadBalancer
selector:
app: collab
ports:
- name: web
protocol: TCP
port: 443
targetPort: 8080
---
kind: Service
apiVersion: v1
metadata:
namespace: ${ZED_KUBE_NAMESPACE}
name: pgadmin
annotations:
service.beta.kubernetes.io/do-loadbalancer-tls-ports: "443"
service.beta.kubernetes.io/do-loadbalancer-certificate-id: ${ZED_DO_CERTIFICATE_ID}
spec:
type: LoadBalancer
selector:
app: postgrest
ports:
- name: web
protocol: TCP
port: 443
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: ${ZED_KUBE_NAMESPACE}
name: collab
spec:
replicas: 1
selector:
matchLabels:
app: collab
template:
metadata:
labels:
app: collab
annotations:
ad.datadoghq.com/collab.check_names: |
["openmetrics"]
ad.datadoghq.com/collab.init_configs: |
[{}]
ad.datadoghq.com/collab.instances: |
[
{
"openmetrics_endpoint": "http://%%host%%:%%port%%/metrics",
"namespace": "collab_${ZED_KUBE_NAMESPACE}",
"metrics": [".*"]
}
]
spec:
containers:
- name: collab
image: "${ZED_IMAGE_ID}"
args:
- serve
ports:
- containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 1
periodSeconds: 1
env:
- name: HTTP_PORT
value: "8080"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: database
key: url
- name: DATABASE_MAX_CONNECTIONS
value: "${DATABASE_MAX_CONNECTIONS}"
- name: API_TOKEN
valueFrom:
secretKeyRef:
name: api
key: token
- name: LIVE_KIT_SERVER
valueFrom:
secretKeyRef:
name: livekit
key: server
- name: LIVE_KIT_KEY
valueFrom:
secretKeyRef:
name: livekit
key: key
- name: LIVE_KIT_SECRET
valueFrom:
secretKeyRef:
name: livekit
key: secret
- name: INVITE_LINK_PREFIX
value: ${INVITE_LINK_PREFIX}
- name: RUST_BACKTRACE
value: "1"
- name: RUST_LOG
value: ${RUST_LOG}
- name: LOG_JSON
value: "true"
- name: ZED_ENVIRONMENT
value: ${ZED_ENVIRONMENT}
securityContext:
capabilities:
# FIXME - Switch to the more restrictive `PERFMON` capability.
# This capability isn't yet available in a stable version of Debian.
add: ["SYS_ADMIN"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: ${ZED_KUBE_NAMESPACE}
name: postgrest
spec:
replicas: 1
selector:
matchLabels:
app: postgrest
template:
metadata:
labels:
app: postgrest
spec:
containers:
- name: postgrest
image: "postgrest/postgrest"
ports:
- containerPort: 8080
protocol: TCP
env:
- name: PGRST_SERVER_PORT
value: "8080"
- name: PGRST_DB_URI
valueFrom:
secretKeyRef:
name: database
key: url
- name: PGRST_JWT_SECRET
valueFrom:
secretKeyRef:
name: postgrest
key: jwt_secret

@@ -0,0 +1,21 @@
apiVersion: batch/v1
kind: Job
metadata:
namespace: ${ZED_KUBE_NAMESPACE}
name: ${ZED_MIGRATE_JOB_NAME}
spec:
template:
spec:
restartPolicy: Never
containers:
- name: migrator
imagePullPolicy: Always
image: ${ZED_IMAGE_ID}
args:
- migrate
env:
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: database
key: url

@@ -0,0 +1,344 @@
CREATE TABLE "users" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"github_login" VARCHAR,
"admin" BOOLEAN,
"email_address" VARCHAR(255) DEFAULT NULL,
"invite_code" VARCHAR(64),
"invite_count" INTEGER NOT NULL DEFAULT 0,
"inviter_id" INTEGER REFERENCES users (id),
"connected_once" BOOLEAN NOT NULL DEFAULT false,
"created_at" TIMESTAMP NOT NULL DEFAULT now,
"metrics_id" TEXT,
"github_user_id" INTEGER
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");
CREATE TABLE "access_tokens" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");
CREATE TABLE "contacts" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id_a" INTEGER REFERENCES users (id) NOT NULL,
"user_id_b" INTEGER REFERENCES users (id) NOT NULL,
"a_to_b" BOOLEAN NOT NULL,
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");
CREATE TABLE "rooms" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"live_kit_room" VARCHAR NOT NULL,
"enviroment" VARCHAR,
"channel_id" INTEGER REFERENCES channels (id) ON DELETE CASCADE
);
CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");
CREATE TABLE "projects" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER REFERENCES rooms (id) ON DELETE CASCADE NOT NULL,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"host_connection_id" INTEGER,
"host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"unregistered" BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INTEGER NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INTEGER NOT NULL,
"is_complete" BOOL NOT NULL DEFAULT FALSE,
"completed_scan_id" INTEGER NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"scan_id" INTEGER NOT NULL,
"id" INTEGER NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INTEGER NOT NULL,
"mtime_seconds" INTEGER NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_external" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
"is_deleted" BOOL NOT NULL,
"git_status" INTEGER,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_repositories" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"work_directory_id" INTEGER NOT NULL,
"branch" VARCHAR,
"scan_id" INTEGER NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"path" VARCHAR NOT NULL,
"content" TEXT,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_settings_files_on_project_id" ON "worktree_settings_files" ("project_id");
CREATE INDEX "index_worktree_settings_files_on_project_id_and_worktree_id" ON "worktree_settings_files" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INTEGER NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"id" INTEGER NOT NULL,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
CREATE TABLE "room_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"answering_connection_lost" BOOLEAN NOT NULL,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE SET NULL,
"participant_index" INTEGER
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");
CREATE TABLE "servers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"environment" VARCHAR NOT NULL
);
CREATE TABLE "followers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER NOT NULL REFERENCES rooms (id) ON DELETE CASCADE,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"leader_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"leader_connection_id" INTEGER NOT NULL,
"follower_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"follower_connection_id" INTEGER NOT NULL
);
CREATE UNIQUE INDEX
"index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id"
ON "followers" ("project_id", "leader_connection_server_id", "leader_connection_id", "follower_connection_server_id", "follower_connection_id");
CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");
CREATE TABLE "channels" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"visibility" VARCHAR NOT NULL,
"parent_path" TEXT
);
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");
CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER NOT NULL REFERENCES users (id),
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_chat_participants_on_channel_id" ON "channel_chat_participants" ("channel_id");
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"sender_id" INTEGER NOT NULL REFERENCES users (id),
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP,
"nonce" BLOB NOT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
CREATE TABLE "channel_members" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"admin" BOOLEAN NOT NULL DEFAULT false,
"role" VARCHAR,
"accepted" BOOLEAN NOT NULL DEFAULT false,
"updated_at" TIMESTAMP NOT NULL DEFAULT now
);
CREATE UNIQUE INDEX "index_channel_members_on_channel_id_and_user_id" ON "channel_members" ("channel_id", "user_id");
CREATE TABLE "buffers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX "index_buffers_on_channel_id" ON "buffers" ("channel_id");
CREATE TABLE "buffer_operations" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY(buffer_id, epoch, lamport_timestamp, replica_id)
);
CREATE TABLE "buffer_snapshots" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"text" TEXT NOT NULL,
"operation_serialization_version" INTEGER NOT NULL,
PRIMARY KEY(buffer_id, epoch)
);
CREATE TABLE "channel_buffer_collaborators" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"connection_lost" BOOLEAN NOT NULL DEFAULT false,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"replica_id" INTEGER NOT NULL
);
CREATE INDEX "index_channel_buffer_collaborators_on_channel_id" ON "channel_buffer_collaborators" ("channel_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" ("channel_id", "connection_id", "connection_server_id");
CREATE TABLE "feature_flags" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"flag" TEXT NOT NULL UNIQUE
);
CREATE INDEX "index_feature_flags" ON "feature_flags" ("id");
CREATE TABLE "user_features" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"feature_id" INTEGER NOT NULL REFERENCES feature_flags (id) ON DELETE CASCADE,
PRIMARY KEY (user_id, feature_id)
);
CREATE UNIQUE INDEX "index_user_features_user_id_and_feature_id" ON "user_features" ("user_id", "feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");
CREATE TABLE "observed_buffer_edits" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, buffer_id)
);
CREATE UNIQUE INDEX "index_observed_buffers_user_and_buffer_id" ON "observed_buffer_edits" ("user_id", "buffer_id");
CREATE TABLE IF NOT EXISTS "observed_channel_messages" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"channel_message_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, channel_id)
);
CREATE UNIQUE INDEX "index_observed_channel_messages_user_and_channel_id" ON "observed_channel_messages" ("user_id", "channel_id");
CREATE TABLE "notification_kinds" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE "notifications" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"created_at" TIMESTAMP NOT NULL default CURRENT_TIMESTAMP,
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

@@ -0,0 +1,20 @@
CREATE TABLE IF NOT EXISTS "sessions" (
"id" VARCHAR NOT NULL PRIMARY KEY,
"expires" TIMESTAMP WITH TIME ZONE NULL,
"session" TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS "users" (
"id" SERIAL PRIMARY KEY,
"github_login" VARCHAR,
"admin" BOOLEAN
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE TABLE IF NOT EXISTS "signups" (
"id" SERIAL PRIMARY KEY,
"github_login" VARCHAR,
"email_address" VARCHAR,
"about" TEXT
);

@@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS "access_tokens" (
"id" SERIAL PRIMARY KEY,
"user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");

@@ -0,0 +1,46 @@
CREATE TABLE IF NOT EXISTS "orgs" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL,
"slug" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_orgs_slug" ON "orgs" ("slug");
CREATE TABLE IF NOT EXISTS "org_memberships" (
"id" SERIAL PRIMARY KEY,
"org_id" INTEGER REFERENCES orgs (id) NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"admin" BOOLEAN NOT NULL
);
CREATE INDEX "index_org_memberships_user_id" ON "org_memberships" ("user_id");
CREATE UNIQUE INDEX "index_org_memberships_org_id_and_user_id" ON "org_memberships" ("org_id", "user_id");
CREATE TABLE IF NOT EXISTS "channels" (
"id" SERIAL PRIMARY KEY,
"owner_id" INTEGER NOT NULL,
"owner_is_user" BOOLEAN NOT NULL,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_channels_owner_and_name" ON "channels" ("owner_is_user", "owner_id", "name");
CREATE TABLE IF NOT EXISTS "channel_memberships" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER REFERENCES channels (id) NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"admin" BOOLEAN NOT NULL
);
CREATE INDEX "index_channel_memberships_user_id" ON "channel_memberships" ("user_id");
CREATE UNIQUE INDEX "index_channel_memberships_channel_id_and_user_id" ON "channel_memberships" ("channel_id", "user_id");
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER REFERENCES channels (id) NOT NULL,
"sender_id" INTEGER REFERENCES users (id) NOT NULL,
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP
);
CREATE INDEX "index_channel_messages_channel_id" ON "channel_messages" ("channel_id");

@@ -0,0 +1,4 @@
ALTER TABLE "channel_messages"
ADD "nonce" UUID NOT NULL DEFAULT gen_random_uuid();
CREATE UNIQUE INDEX "index_channel_messages_nonce" ON "channel_messages" ("nonce");

@@ -0,0 +1,4 @@
ALTER TABLE "signups"
ADD "wants_releases" BOOLEAN,
ADD "wants_updates" BOOLEAN,
ADD "wants_community" BOOLEAN;

@@ -0,0 +1 @@
DROP TABLE IF EXISTS "signups";

@@ -0,0 +1,2 @@
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE INDEX trigram_index_users_on_github_login ON users USING GIN(github_login gin_trgm_ops);

@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS "contacts" (
"id" SERIAL PRIMARY KEY,
"user_id_a" INTEGER REFERENCES users (id) NOT NULL,
"user_id_b" INTEGER REFERENCES users (id) NOT NULL,
"a_to_b" BOOLEAN NOT NULL,
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");

@@ -0,0 +1,9 @@
ALTER TABLE users
ADD email_address VARCHAR(255) DEFAULT NULL,
ADD invite_code VARCHAR(64),
ADD invite_count INTEGER NOT NULL DEFAULT 0,
ADD inviter_id INTEGER REFERENCES users (id),
ADD connected_once BOOLEAN NOT NULL DEFAULT false,
ADD created_at TIMESTAMP NOT NULL DEFAULT NOW();
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");

@@ -0,0 +1,6 @@
ALTER TABLE contacts DROP CONSTRAINT contacts_user_id_a_fkey;
ALTER TABLE contacts DROP CONSTRAINT contacts_user_id_b_fkey;
ALTER TABLE contacts ADD CONSTRAINT contacts_user_id_a_fkey FOREIGN KEY (user_id_a) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE contacts ADD CONSTRAINT contacts_user_id_b_fkey FOREIGN KEY (user_id_b) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE users DROP CONSTRAINT users_inviter_id_fkey;
ALTER TABLE users ADD CONSTRAINT users_inviter_id_fkey FOREIGN KEY (inviter_id) REFERENCES users(id) ON DELETE SET NULL;

@@ -0,0 +1,24 @@
CREATE TABLE IF NOT EXISTS "projects" (
"id" SERIAL PRIMARY KEY,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"unregistered" BOOLEAN NOT NULL DEFAULT false
);
CREATE TABLE IF NOT EXISTS "worktree_extensions" (
"id" SERIAL PRIMARY KEY,
"project_id" INTEGER REFERENCES projects (id) NOT NULL,
"worktree_id" INTEGER NOT NULL,
"extension" VARCHAR(255),
"count" INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS "project_activity_periods" (
"id" SERIAL PRIMARY KEY,
"duration_millis" INTEGER NOT NULL,
"ended_at" TIMESTAMP NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"project_id" INTEGER REFERENCES projects (id) NOT NULL
);
CREATE INDEX "index_project_activity_periods_on_ended_at" ON "project_activity_periods" ("ended_at");
CREATE UNIQUE INDEX "index_worktree_extensions_on_project_id_and_worktree_id_and_extension" ON "worktree_extensions" ("project_id", "worktree_id", "extension");

@@ -0,0 +1,27 @@
CREATE TABLE IF NOT EXISTS "signups" (
"id" SERIAL PRIMARY KEY,
"email_address" VARCHAR NOT NULL,
"email_confirmation_code" VARCHAR(64) NOT NULL,
"email_confirmation_sent" BOOLEAN NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"device_id" VARCHAR,
"user_id" INTEGER REFERENCES users (id) ON DELETE CASCADE,
"inviting_user_id" INTEGER REFERENCES users (id) ON DELETE SET NULL,
"platform_mac" BOOLEAN NOT NULL,
"platform_linux" BOOLEAN NOT NULL,
"platform_windows" BOOLEAN NOT NULL,
"platform_unknown" BOOLEAN NOT NULL,
"editor_features" VARCHAR[],
"programming_languages" VARCHAR[]
);
CREATE UNIQUE INDEX "index_signups_on_email_address" ON "signups" ("email_address");
CREATE INDEX "index_signups_on_email_confirmation_sent" ON "signups" ("email_confirmation_sent");
ALTER TABLE "users"
ADD "github_user_id" INTEGER;
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");

@@ -0,0 +1,2 @@
ALTER TABLE "users"
ADD "metrics_id" uuid NOT NULL DEFAULT gen_random_uuid();

@@ -0,0 +1,90 @@
CREATE TABLE IF NOT EXISTS "rooms" (
"id" SERIAL PRIMARY KEY,
"live_kit_room" VARCHAR NOT NULL
);
ALTER TABLE "projects"
ADD "room_id" INTEGER REFERENCES rooms (id),
ADD "host_connection_id" INTEGER,
ADD "host_connection_epoch" UUID;
CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INT8 NOT NULL,
"is_complete" BOOL NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"id" INT8 NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INT8 NOT NULL,
"mtime_seconds" INT8 NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INT8 NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" SERIAL PRIMARY KEY,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_epoch" UUID NOT NULL,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch");
CREATE TABLE "room_participants" (
"id" SERIAL PRIMARY KEY,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_epoch" UUID,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_epoch" UUID NOT NULL
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch");
CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch");

@@ -0,0 +1,2 @@
ALTER TABLE "signups"
ADD "added_to_mailing_list" BOOLEAN NOT NULL DEFAULT FALSE;

@@ -0,0 +1,7 @@
ALTER TABLE "room_participants"
ADD "answering_connection_lost" BOOLEAN NOT NULL DEFAULT FALSE;
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch");

@@ -0,0 +1 @@
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");

@@ -0,0 +1,30 @@
CREATE TABLE servers (
id SERIAL PRIMARY KEY,
environment VARCHAR NOT NULL
);
DROP TABLE worktree_extensions;
DROP TABLE project_activity_periods;
DELETE from projects;
ALTER TABLE projects
DROP COLUMN host_connection_epoch,
ADD COLUMN host_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
DELETE FROM project_collaborators;
ALTER TABLE project_collaborators
DROP COLUMN connection_epoch,
ADD COLUMN connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
DELETE FROM room_participants;
ALTER TABLE room_participants
DROP COLUMN answering_connection_epoch,
DROP COLUMN calling_connection_epoch,
ADD COLUMN answering_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE,
ADD COLUMN calling_connection_server_id INTEGER REFERENCES servers (id) ON DELETE SET NULL;
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");

@@ -0,0 +1,3 @@
ALTER TABLE "worktree_entries"
ADD COLUMN "scan_id" INT8,
ADD COLUMN "is_deleted" BOOL;

@@ -0,0 +1,3 @@
ALTER TABLE worktrees
ALTER COLUMN is_complete SET DEFAULT FALSE,
ADD COLUMN completed_scan_id INT8;

@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS "followers" (
"id" SERIAL PRIMARY KEY,
"room_id" INTEGER NOT NULL REFERENCES rooms (id) ON DELETE CASCADE,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"leader_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"leader_connection_id" INTEGER NOT NULL,
"follower_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"follower_connection_id" INTEGER NOT NULL
);
CREATE UNIQUE INDEX
"index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id"
ON "followers" ("project_id", "leader_connection_server_id", "leader_connection_id", "follower_connection_server_id", "follower_connection_id");
CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");

@@ -0,0 +1,13 @@
CREATE TABLE "worktree_repositories" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"scan_id" INT8 NOT NULL,
"branch" VARCHAR,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");

@@ -0,0 +1,15 @@
CREATE TABLE "worktree_repository_statuses" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"repo_path" VARCHAR NOT NULL,
"status" INT8 NOT NULL,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_wt_repos_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id_and_wd_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");

@@ -0,0 +1,10 @@
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"path" VARCHAR NOT NULL,
"content" TEXT NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_settings_files_on_project_id" ON "worktree_settings_files" ("project_id");
CREATE INDEX "index_settings_files_on_project_id_and_wt_id" ON "worktree_settings_files" ("project_id", "worktree_id");

@@ -0,0 +1,2 @@
ALTER TABLE "worktree_entries"
ADD "git_status" INT8;

@@ -0,0 +1,2 @@
ALTER TABLE "worktree_entries"
ADD "is_external" BOOL NOT NULL DEFAULT FALSE;

@@ -0,0 +1,30 @@
DROP TABLE "channel_messages";
DROP TABLE "channel_memberships";
DROP TABLE "org_memberships";
DROP TABLE "orgs";
DROP TABLE "channels";
CREATE TABLE "channels" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT now()
);
CREATE TABLE "channel_paths" (
"id_path" VARCHAR NOT NULL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_paths_on_channel_id" ON "channel_paths" ("channel_id");
CREATE TABLE "channel_members" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"admin" BOOLEAN NOT NULL DEFAULT false,
"accepted" BOOLEAN NOT NULL DEFAULT false,
"updated_at" TIMESTAMP NOT NULL DEFAULT now()
);
CREATE UNIQUE INDEX "index_channel_members_on_channel_id_and_user_id" ON "channel_members" ("channel_id", "user_id");
ALTER TABLE rooms ADD COLUMN "channel_id" INTEGER REFERENCES channels (id) ON DELETE CASCADE;

@@ -0,0 +1,40 @@
CREATE TABLE "buffers" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX "index_buffers_on_channel_id" ON "buffers" ("channel_id");
CREATE TABLE "buffer_operations" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"value" BYTEA NOT NULL,
PRIMARY KEY(buffer_id, epoch, lamport_timestamp, replica_id)
);
CREATE TABLE "buffer_snapshots" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"text" TEXT NOT NULL,
"operation_serialization_version" INTEGER NOT NULL,
PRIMARY KEY(buffer_id, epoch)
);
CREATE TABLE "channel_buffer_collaborators" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"connection_lost" BOOLEAN NOT NULL DEFAULT FALSE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"replica_id" INTEGER NOT NULL
);
CREATE INDEX "index_channel_buffer_collaborators_on_channel_id" ON "channel_buffer_collaborators" ("channel_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" ("channel_id", "connection_id", "connection_server_id");

@@ -0,0 +1,16 @@
CREATE TABLE "feature_flags" (
"id" SERIAL PRIMARY KEY,
"flag" VARCHAR(255) NOT NULL UNIQUE
);
CREATE UNIQUE INDEX "index_feature_flags" ON "feature_flags" ("id");
CREATE TABLE "user_features" (
"user_id" INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
"feature_id" INTEGER NOT NULL REFERENCES feature_flags(id) ON DELETE CASCADE,
PRIMARY KEY (user_id, feature_id)
);
CREATE UNIQUE INDEX "index_user_features_user_id_and_feature_id" ON "user_features" ("user_id", "feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");

@@ -0,0 +1,19 @@
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"sender_id" INTEGER NOT NULL REFERENCES users (id),
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP,
"nonce" UUID NOT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_nonce" ON "channel_messages" ("nonce");
CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"id" SERIAL PRIMARY KEY,
"user_id" INTEGER NOT NULL REFERENCES users (id),
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_chat_participants_on_channel_id" ON "channel_chat_participants" ("channel_id");

@@ -0,0 +1,19 @@
CREATE TABLE IF NOT EXISTS "observed_buffer_edits" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, buffer_id)
);
CREATE UNIQUE INDEX "index_observed_buffer_user_and_buffer_id" ON "observed_buffer_edits" ("user_id", "buffer_id");
CREATE TABLE IF NOT EXISTS "observed_channel_messages" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"channel_message_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, channel_id)
);
CREATE UNIQUE INDEX "index_observed_channel_messages_user_and_channel_id" ON "observed_channel_messages" ("user_id", "channel_id");

@@ -0,0 +1 @@
ALTER TABLE room_participants ADD COLUMN participant_index INTEGER;

@@ -0,0 +1,22 @@
CREATE TABLE "notification_kinds" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE notifications (
"id" SERIAL PRIMARY KEY,
"created_at" TIMESTAMP NOT NULL DEFAULT now(),
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

@@ -0,0 +1 @@
ALTER TABLE rooms ADD COLUMN enviroment TEXT;

@@ -0,0 +1 @@
CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");

@@ -0,0 +1,4 @@
ALTER TABLE channel_members ADD COLUMN role TEXT;
UPDATE channel_members SET role = CASE WHEN admin THEN 'admin' ELSE 'member' END;
ALTER TABLE channels ADD COLUMN visibility TEXT NOT NULL DEFAULT 'members';

@@ -0,0 +1,8 @@
-- Add migration script here
ALTER TABLE projects
DROP CONSTRAINT projects_room_id_fkey,
ADD CONSTRAINT projects_room_id_fkey
FOREIGN KEY (room_id)
REFERENCES rooms (id)
ON DELETE CASCADE;

@@ -0,0 +1,11 @@
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
-- We use 'on conflict update' with this index, so it should be per-user.
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
DROP INDEX "index_channel_messages_on_nonce";

@@ -0,0 +1,12 @@
ALTER TABLE channels ADD COLUMN parent_path TEXT;
UPDATE channels
SET parent_path = substr(
channel_paths.id_path,
2,
length(channel_paths.id_path) - length('/' || channel_paths.channel_id::text || '/')
)
FROM channel_paths
WHERE channel_paths.channel_id = channels.id;
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");

crates/collab2/src/api.rs

@@ -0,0 +1,184 @@
use crate::{
auth,
db::{User, UserId},
rpc, AppState, Error, Result,
};
use anyhow::anyhow;
use axum::{
body::Body,
extract::{Path, Query},
http::{self, Request, StatusCode},
middleware::{self, Next},
response::IntoResponse,
routing::{get, post},
Extension, Json, Router,
};
use axum_extra::response::ErasedJson;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tower::ServiceBuilder;
use tracing::instrument;
pub fn routes(rpc_server: Arc<rpc::Server>, state: Arc<AppState>) -> Router<Body> {
Router::new()
.route("/user", get(get_authenticated_user))
.route("/users/:id/access_tokens", post(create_access_token))
.route("/panic", post(trace_panic))
.route("/rpc_server_snapshot", get(get_rpc_server_snapshot))
.layer(
ServiceBuilder::new()
.layer(Extension(state))
.layer(Extension(rpc_server))
.layer(middleware::from_fn(validate_api_token)),
)
}
pub async fn validate_api_token<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
let token = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing authorization header".to_string(),
)
})?
.strip_prefix("token ")
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"invalid authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
if token != state.config.api_token {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid authorization token".to_string(),
))?
}
Ok::<_, Error>(next.run(req).await)
}
#[derive(Debug, Deserialize)]
struct AuthenticatedUserParams {
github_user_id: Option<i32>,
github_login: String,
github_email: Option<String>,
}
#[derive(Debug, Serialize)]
struct AuthenticatedUserResponse {
user: User,
metrics_id: String,
}
async fn get_authenticated_user(
Query(params): Query<AuthenticatedUserParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<AuthenticatedUserResponse>> {
let user = app
.db
.get_or_create_user_by_github_account(
&params.github_login,
params.github_user_id,
params.github_email.as_deref(),
)
.await?
.ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "user not found".into()))?;
let metrics_id = app.db.get_user_metrics_id(user.id).await?;
return Ok(Json(AuthenticatedUserResponse { user, metrics_id }));
}
#[derive(Deserialize, Debug)]
struct CreateUserParams {
github_user_id: i32,
github_login: String,
email_address: String,
email_confirmation_code: Option<String>,
#[serde(default)]
admin: bool,
#[serde(default)]
invite_count: i32,
}
#[derive(Serialize, Debug)]
struct CreateUserResponse {
user: User,
signup_device_id: Option<String>,
metrics_id: String,
}
#[derive(Debug, Deserialize)]
struct Panic {
version: String,
text: String,
}
#[instrument(skip(panic))]
async fn trace_panic(panic: Json<Panic>) -> Result<()> {
tracing::error!(version = %panic.version, text = %panic.text, "panic report");
Ok(())
}
async fn get_rpc_server_snapshot(
Extension(rpc_server): Extension<Arc<rpc::Server>>,
) -> Result<ErasedJson> {
Ok(ErasedJson::pretty(rpc_server.snapshot().await))
}
#[derive(Deserialize)]
struct CreateAccessTokenQueryParams {
public_key: String,
impersonate: Option<String>,
}
#[derive(Serialize)]
struct CreateAccessTokenResponse {
user_id: UserId,
encrypted_access_token: String,
}
async fn create_access_token(
Path(user_id): Path<UserId>,
Query(params): Query<CreateAccessTokenQueryParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<CreateAccessTokenResponse>> {
let user = app
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user not found"))?;
let mut user_id = user.id;
if let Some(impersonate) = params.impersonate {
if user.admin {
if let Some(impersonated_user) = app.db.get_user_by_github_login(&impersonate).await? {
user_id = impersonated_user.id;
} else {
return Err(Error::Http(
StatusCode::UNPROCESSABLE_ENTITY,
format!("user {impersonate} does not exist"),
));
}
} else {
return Err(Error::Http(
StatusCode::UNAUTHORIZED,
"you do not have permission to impersonate other users".to_string(),
));
}
}
let access_token = auth::create_access_token(app.db.as_ref(), user_id).await?;
let encrypted_access_token =
auth::encrypt_access_token(&access_token, params.public_key.clone())?;
Ok(Json(CreateAccessTokenResponse {
user_id,
encrypted_access_token,
}))
}
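
Every route above sits behind `validate_api_token`, which requires an `Authorization: token <api_token>` header matching `config.api_token`. A hedged example of exercising the `/user` route; the host, token, and login are placeholders:

# Hypothetical invocation of the authenticated-user endpoint.
curl -H "Authorization: token $API_TOKEN" \
  "https://$COLLAB_HOST/user?github_login=octocat"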

151
crates/collab2/src/auth.rs Normal file
View File

@@ -0,0 +1,151 @@
use crate::{
db::{self, AccessTokenId, Database, UserId},
AppState, Error, Result,
};
use anyhow::{anyhow, Context};
use axum::{
http::{self, Request, StatusCode},
middleware::Next,
response::IntoResponse,
};
use lazy_static::lazy_static;
use prometheus::{exponential_buckets, register_histogram, Histogram};
use rand::thread_rng;
use scrypt::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Scrypt,
};
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
lazy_static! {
static ref METRIC_ACCESS_TOKEN_HASHING_TIME: Histogram = register_histogram!(
"access_token_hashing_time",
"time spent hashing access tokens",
exponential_buckets(10.0, 2.0, 10).unwrap(),
)
.unwrap();
}
pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
let mut auth_header = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::UNAUTHORIZED,
"missing authorization header".to_string(),
)
})?
.split_whitespace();
let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing user id in authorization header".to_string(),
)
})?);
let access_token = auth_header.next().ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing access token in authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
let credentials_valid = if let Some(admin_token) = access_token.strip_prefix("ADMIN_TOKEN:") {
state.config.api_token == admin_token
} else {
verify_access_token(&access_token, user_id, &state.db)
.await
.unwrap_or(false)
};
if credentials_valid {
let user = state
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user {} not found", user_id))?;
req.extensions_mut().insert(user);
Ok::<_, Error>(next.run(req).await)
} else {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid credentials".to_string(),
))
}
}
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;
#[derive(Serialize, Deserialize)]
struct AccessTokenJson {
version: usize,
id: AccessTokenId,
token: String,
}
pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
const VERSION: usize = 1;
let access_token = rpc::auth::random_token();
let access_token_hash =
hash_access_token(&access_token).context("failed to hash access token")?;
let id = db
.create_access_token(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE)
.await?;
Ok(serde_json::to_string(&AccessTokenJson {
version: VERSION,
id,
token: access_token,
})?)
}
fn hash_access_token(token: &str) -> Result<String> {
// Avoid slow hashing in debug mode.
let params = if cfg!(debug_assertions) {
scrypt::Params::new(1, 1, 1).unwrap()
} else {
scrypt::Params::new(14, 8, 1).unwrap()
};
Ok(Scrypt
.hash_password(
token.as_bytes(),
None,
params,
&SaltString::generate(thread_rng()),
)
.map_err(anyhow::Error::new)?
.to_string())
}
pub fn encrypt_access_token(access_token: &str, public_key: String) -> Result<String> {
let native_app_public_key =
rpc::auth::PublicKey::try_from(public_key).context("failed to parse app public key")?;
let encrypted_access_token = native_app_public_key
.encrypt_string(access_token)
.context("failed to encrypt access token with public key")?;
Ok(encrypted_access_token)
}
pub async fn verify_access_token(token: &str, user_id: UserId, db: &Arc<Database>) -> Result<bool> {
let token: AccessTokenJson = serde_json::from_str(token)?;
let db_token = db.get_access_token(token.id).await?;
if db_token.user_id != user_id {
return Err(anyhow!("no such access token"))?;
}
let db_hash = PasswordHash::new(&db_token.hash).map_err(anyhow::Error::new)?;
let t0 = Instant::now();
let is_valid = Scrypt
.verify_password(token.token.as_bytes(), &db_hash)
.is_ok();
let duration = t0.elapsed();
log::info!("hashed access token in {:?}", duration);
METRIC_ACCESS_TOKEN_HASHING_TIME.observe(duration.as_millis() as f64);
Ok(is_valid)
}
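
The opaque token handed back from `create_access_token` (and later re-parsed by `verify_access_token`) is just the JSON encoding of `AccessTokenJson`, which the caller then encrypts with the client's public key. A minimal sketch of that envelope, usable within this module; the id and token bytes are invented:

// Hypothetical token envelope; the values are made up for illustration.
let json = r#"{"version":1,"id":42,"token":"<random-bytes>"}"#;
let parsed: AccessTokenJson = serde_json::from_str(json).unwrap();
assert_eq!(parsed.version, 1); // verify_access_token re-parses this same shape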

View File

@@ -0,0 +1,20 @@
use anyhow::anyhow;
use std::fs;
fn main() -> anyhow::Result<()> {
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
)?;
for (key, value) in env {
let value = match value {
toml::Value::String(value) => value,
toml::Value::Integer(value) => value.to_string(),
toml::Value::Float(value) => value.to_string(),
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
};
println!("export {}=\"{}\"", key, value);
}
Ok(())
}
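
A worked example of the transformation this script performs, assuming a hypothetical `.env.toml`; `#` marks comments in both the TOML input and the shell output:

# .env.toml (hypothetical input)
DATABASE_URL = "postgres://postgres@localhost/zed"
HTTP_PORT = 8080

# emitted on stdout, ready to be eval'd by a shell
export DATABASE_URL="postgres://postgres@localhost/zed"
export HTTP_PORT="8080"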

View File

@@ -0,0 +1,107 @@
use collab2::{db, executor::Executor};
use db::{ConnectOptions, Database};
use serde::{de::DeserializeOwned, Deserialize};
use std::fmt::Write;
#[derive(Debug, Deserialize)]
struct GitHubUser {
id: i32,
login: String,
email: Option<String>,
}
#[tokio::main]
async fn main() {
let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var");
let db = Database::new(ConnectOptions::new(database_url), Executor::Production)
.await
.expect("failed to connect to postgres database");
let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var");
let client = reqwest::Client::new();
let mut current_user =
fetch_github::<GitHubUser>(&client, &github_token, "https://api.github.com/user").await;
current_user
.email
.get_or_insert_with(|| "placeholder@example.com".to_string());
let staff_users = fetch_github::<Vec<GitHubUser>>(
&client,
&github_token,
"https://api.github.com/orgs/zed-industries/teams/staff/members",
)
.await;
let mut zed_users = Vec::new();
zed_users.push((current_user, true));
zed_users.extend(staff_users.into_iter().map(|user| (user, true)));
let user_count = db
.get_all_users(0, 200)
.await
.expect("failed to load users from db")
.len();
if user_count < 100 {
let mut last_user_id = None;
for _ in 0..10 {
let mut uri = "https://api.github.com/users?per_page=100".to_string();
if let Some(last_user_id) = last_user_id {
write!(&mut uri, "&since={}", last_user_id).unwrap();
}
let users = fetch_github::<Vec<GitHubUser>>(&client, &github_token, &uri).await;
if let Some(last_user) = users.last() {
last_user_id = Some(last_user.id);
zed_users.extend(users.into_iter().map(|user| (user, false)));
} else {
break;
}
}
}
for (github_user, admin) in zed_users {
if db
.get_user_by_github_login(&github_user.login)
.await
.expect("failed to fetch user")
.is_none()
{
if admin {
db.create_user(
&format!("{}@zed.dev", github_user.login),
admin,
db::NewUserParams {
github_login: github_user.login,
github_user_id: github_user.id,
},
)
.await
.expect("failed to insert user");
} else {
db.get_or_create_user_by_github_account(
&github_user.login,
Some(github_user.id),
github_user.email.as_deref(),
)
.await
.expect("failed to insert user");
}
}
}
}
async fn fetch_github<T: DeserializeOwned>(
client: &reqwest::Client,
access_token: &str,
url: &str,
) -> T {
let response = client
.get(url)
.bearer_auth(&access_token)
.header("user-agent", "zed")
.send()
.await
.expect(&format!("failed to fetch '{}'", url));
response
.json()
.await
.expect(&format!("failed to deserialize github user from '{}'", url))
}
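
The script is driven entirely by environment variables. A hedged usage sketch; the `--bin seed` name is an assumption about how this file is registered in the crate's Cargo.toml:

DATABASE_URL="postgres://postgres@localhost/zed" \
GITHUB_TOKEN="<personal access token>" \
cargo run -p collab2 --bin seed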

672
crates/collab2/src/db.rs Normal file
View File

@@ -0,0 +1,672 @@
#[cfg(test)]
pub mod tests;
#[cfg(test)]
pub use tests::TestDb;
mod ids;
mod queries;
mod tables;
use crate::{executor::Executor, Error, Result};
use anyhow::anyhow;
use collections::{BTreeMap, HashMap, HashSet};
use dashmap::DashMap;
use futures::StreamExt;
use rand::{prelude::StdRng, Rng, SeedableRng};
use rpc::{
proto::{self},
ConnectionId,
};
use sea_orm::{
entity::prelude::*,
sea_query::{Alias, Expr, OnConflict},
ActiveValue, Condition, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbErr,
FromQueryResult, IntoActiveModel, IsolationLevel, JoinType, QueryOrder, QuerySelect, Statement,
TransactionTrait,
};
use serde::{Deserialize, Serialize};
use sqlx::{
migrate::{Migrate, Migration, MigrationSource},
Connection,
};
use std::{
fmt::Write as _,
future::Future,
marker::PhantomData,
ops::{Deref, DerefMut},
path::Path,
rc::Rc,
sync::Arc,
time::Duration,
};
use tables::*;
use tokio::sync::{Mutex, OwnedMutexGuard};
pub use ids::*;
pub use sea_orm::ConnectOptions;
pub use tables::user::Model as User;
pub struct Database {
options: ConnectOptions,
pool: DatabaseConnection,
rooms: DashMap<RoomId, Arc<Mutex<()>>>,
rng: Mutex<StdRng>,
executor: Executor,
notification_kinds_by_id: HashMap<NotificationKindId, &'static str>,
notification_kinds_by_name: HashMap<String, NotificationKindId>,
#[cfg(test)]
runtime: Option<tokio::runtime::Runtime>,
}
// The `Database` type has so many methods that its impl blocks are split into
// separate files in the `queries` folder.
impl Database {
pub async fn new(options: ConnectOptions, executor: Executor) -> Result<Self> {
sqlx::any::install_default_drivers();
Ok(Self {
options: options.clone(),
pool: sea_orm::Database::connect(options).await?,
rooms: DashMap::with_capacity(16384),
rng: Mutex::new(StdRng::seed_from_u64(0)),
notification_kinds_by_id: HashMap::default(),
notification_kinds_by_name: HashMap::default(),
executor,
#[cfg(test)]
runtime: None,
})
}
#[cfg(test)]
pub fn reset(&self) {
self.rooms.clear();
}
pub async fn migrate(
&self,
migrations_path: &Path,
ignore_checksum_mismatch: bool,
) -> anyhow::Result<Vec<(Migration, Duration)>> {
let migrations = MigrationSource::resolve(migrations_path)
.await
.map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?;
connection.ensure_migrations_table().await?;
let applied_migrations: HashMap<_, _> = connection
.list_applied_migrations()
.await?
.into_iter()
.map(|m| (m.version, m))
.collect();
let mut new_migrations = Vec::new();
for migration in migrations {
match applied_migrations.get(&migration.version) {
Some(applied_migration) => {
if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch
{
Err(anyhow!(
"checksum mismatch for applied migration {}",
migration.description
))?;
}
}
None => {
let elapsed = connection.apply(&migration).await?;
new_migrations.push((migration, elapsed));
}
}
}
Ok(new_migrations)
}
pub async fn initialize_static_data(&mut self) -> Result<()> {
self.initialize_notification_kinds().await?;
Ok(())
}
pub async fn transaction<F, Fut, T>(&self, f: F) -> Result<T>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(result) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(result),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn optional_room_transaction<F, Fut, T>(&self, f: F) -> Result<Option<RoomGuard<T>>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<Option<(RoomId, T)>>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(Some((room_id, data))) => {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(Some(RoomGuard {
data,
_guard,
_not_send: PhantomData,
}));
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
}
Ok(None) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(None),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn room_transaction<F, Fut, T>(&self, room_id: RoomId, f: F) -> Result<RoomGuard<T>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(data) => match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(RoomGuard {
data,
_guard,
_not_send: PhantomData,
});
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn with_transaction<F, Fut, T>(&self, f: &F) -> Result<(DatabaseTransaction, Result<T>)>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let tx = self
.pool
.begin_with_config(Some(IsolationLevel::Serializable), None)
.await?;
let mut tx = Arc::new(Some(tx));
let result = f(TransactionHandle(tx.clone())).await;
let Some(tx) = Arc::get_mut(&mut tx).and_then(|tx| tx.take()) else {
return Err(anyhow!(
"couldn't complete transaction because it's still in use"
))?;
};
Ok((tx, result))
}
async fn run<F, T>(&self, future: F) -> Result<T>
where
F: Future<Output = Result<T>>,
{
#[cfg(test)]
{
if let Executor::Deterministic(executor) = &self.executor {
executor.simulate_random_delay().await;
}
self.runtime.as_ref().unwrap().block_on(future)
}
#[cfg(not(test))]
{
future.await
}
}
async fn retry_on_serialization_error(&self, error: &Error, prev_attempt_count: u32) -> bool {
// If the error is due to a failure to serialize concurrent transactions, then retry
// this transaction after a delay. With each subsequent retry, double the delay duration.
// Also vary the delay randomly in order to ensure different database connections retry
// at different times.
if is_serialization_error(error) {
let base_delay = 4_u64 << prev_attempt_count.min(16);
let randomized_delay = base_delay as f32 * self.rng.lock().await.gen_range(0.5..=2.0);
log::info!(
"retrying transaction after serialization error. delay: {} ms.",
randomized_delay
);
self.executor
.sleep(Duration::from_millis(randomized_delay as u64))
.await;
true
} else {
false
}
}
}
fn is_serialization_error(error: &Error) -> bool {
const SERIALIZATION_FAILURE_CODE: &'static str = "40001";
match error {
Error::Database(
DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
| DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
) if error
.as_database_error()
.and_then(|error| error.code())
.as_deref()
== Some(SERIALIZATION_FAILURE_CODE) =>
{
true
}
_ => false,
}
}
pub struct TransactionHandle(Arc<Option<DatabaseTransaction>>);
impl Deref for TransactionHandle {
type Target = DatabaseTransaction;
fn deref(&self) -> &Self::Target {
self.0.as_ref().as_ref().unwrap()
}
}
pub struct RoomGuard<T> {
data: T,
_guard: OwnedMutexGuard<()>,
_not_send: PhantomData<Rc<()>>,
}
impl<T> Deref for RoomGuard<T> {
type Target = T;
fn deref(&self) -> &T {
&self.data
}
}
impl<T> DerefMut for RoomGuard<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.data
}
}
impl<T> RoomGuard<T> {
pub fn into_inner(self) -> T {
self.data
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Contact {
Accepted { user_id: UserId, busy: bool },
Outgoing { user_id: UserId },
Incoming { user_id: UserId },
}
impl Contact {
pub fn user_id(&self) -> UserId {
match self {
Contact::Accepted { user_id, .. } => *user_id,
Contact::Outgoing { user_id } => *user_id,
Contact::Incoming { user_id, .. } => *user_id,
}
}
}
pub type NotificationBatch = Vec<(UserId, proto::Notification)>;
pub struct CreatedChannelMessage {
pub message_id: MessageId,
pub participant_connection_ids: Vec<ConnectionId>,
pub channel_members: Vec<UserId>,
pub notifications: NotificationBatch,
}
#[derive(Clone, Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)]
pub struct Invite {
pub email_address: String,
pub email_confirmation_code: String,
}
#[derive(Clone, Debug, Deserialize)]
pub struct NewSignup {
pub email_address: String,
pub platform_mac: bool,
pub platform_windows: bool,
pub platform_linux: bool,
pub editor_features: Vec<String>,
pub programming_languages: Vec<String>,
pub device_id: Option<String>,
pub added_to_mailing_list: bool,
pub created_at: Option<DateTime>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)]
pub struct WaitlistSummary {
pub count: i64,
pub linux_count: i64,
pub mac_count: i64,
pub windows_count: i64,
pub unknown_count: i64,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct NewUserParams {
pub github_login: String,
pub github_user_id: i32,
}
#[derive(Debug)]
pub struct NewUserResult {
pub user_id: UserId,
pub metrics_id: String,
pub inviting_user_id: Option<UserId>,
pub signup_device_id: Option<String>,
}
#[derive(Debug)]
pub struct MoveChannelResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub moved_channels: HashSet<ChannelId>,
}
#[derive(Debug)]
pub struct RenameChannelResult {
pub channel: Channel,
pub participants_to_update: HashMap<UserId, Channel>,
}
#[derive(Debug)]
pub struct CreateChannelResult {
pub channel: Channel,
pub participants_to_update: Vec<(UserId, ChannelsForUser)>,
}
#[derive(Debug)]
pub struct SetChannelVisibilityResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub channels_to_remove: Vec<ChannelId>,
}
#[derive(Debug)]
pub struct MembershipUpdated {
pub channel_id: ChannelId,
pub new_channels: ChannelsForUser,
pub removed_channels: Vec<ChannelId>,
}
#[derive(Debug)]
pub enum SetMemberRoleResult {
InviteUpdated(Channel),
MembershipUpdated(MembershipUpdated),
}
#[derive(Debug)]
pub struct InviteMemberResult {
pub channel: Channel,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RespondToChannelInvite {
pub membership_update: Option<MembershipUpdated>,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RemoveChannelMemberResult {
pub membership_update: MembershipUpdated,
pub notification_id: Option<NotificationId>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Channel {
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub role: ChannelRole,
pub parent_path: Vec<ChannelId>,
}
impl Channel {
fn from_model(value: channel::Model, role: ChannelRole) -> Self {
Channel {
id: value.id,
visibility: value.visibility,
name: value.clone().name,
role,
parent_path: value.ancestors().collect(),
}
}
pub fn to_proto(&self) -> proto::Channel {
proto::Channel {
id: self.id.to_proto(),
name: self.name.clone(),
visibility: self.visibility.into(),
role: self.role.into(),
parent_path: self.parent_path.iter().map(|c| c.to_proto()).collect(),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ChannelMember {
pub role: ChannelRole,
pub user_id: UserId,
pub kind: proto::channel_member::Kind,
}
impl ChannelMember {
pub fn to_proto(&self) -> proto::ChannelMember {
proto::ChannelMember {
role: self.role.into(),
user_id: self.user_id.to_proto(),
kind: self.kind.into(),
}
}
}
#[derive(Debug, PartialEq)]
pub struct ChannelsForUser {
pub channels: Vec<Channel>,
pub channel_participants: HashMap<ChannelId, Vec<UserId>>,
pub unseen_buffer_changes: Vec<proto::UnseenChannelBufferChange>,
pub channel_messages: Vec<proto::UnseenChannelMessage>,
}
#[derive(Debug)]
pub struct RejoinedChannelBuffer {
pub buffer: proto::RejoinedChannelBuffer,
pub old_connection_id: ConnectionId,
}
#[derive(Clone)]
pub struct JoinRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct RejoinedRoom {
pub room: proto::Room,
pub rejoined_projects: Vec<RejoinedProject>,
pub reshared_projects: Vec<ResharedProject>,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct ResharedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<proto::WorktreeMetadata>,
}
pub struct RejoinedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<RejoinedWorktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
#[derive(Debug)]
pub struct RejoinedWorktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub updated_entries: Vec<proto::Entry>,
pub removed_entries: Vec<u64>,
pub updated_repositories: Vec<proto::RepositoryEntry>,
pub removed_repositories: Vec<u64>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
pub struct LeftRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub left_projects: HashMap<ProjectId, LeftProject>,
pub canceled_calls_to_user_ids: Vec<UserId>,
pub deleted: bool,
}
pub struct RefreshedRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub stale_participant_user_ids: Vec<UserId>,
pub canceled_calls_to_user_ids: Vec<UserId>,
}
pub struct RefreshedChannelBuffer {
pub connection_ids: Vec<ConnectionId>,
pub collaborators: Vec<proto::Collaborator>,
}
pub struct Project {
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
pub struct ProjectCollaborator {
pub connection_id: ConnectionId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl ProjectCollaborator {
pub fn to_proto(&self) -> proto::Collaborator {
proto::Collaborator {
peer_id: Some(self.connection_id.into()),
replica_id: self.replica_id.0 as u32,
user_id: self.user_id.to_proto(),
}
}
}
#[derive(Debug)]
pub struct LeftProject {
pub id: ProjectId,
pub host_user_id: UserId,
pub host_connection_id: ConnectionId,
pub connection_ids: Vec<ConnectionId>,
}
pub struct Worktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub entries: Vec<proto::Entry>,
pub repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
#[derive(Debug)]
pub struct WorktreeSettingsFile {
pub path: String,
pub content: String,
}
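
For reference, the retry delay in `retry_on_serialization_error` grows as `4 << attempt` milliseconds (capped at attempt 16) with a random 0.5x-2x jitter. A standalone sketch of that arithmetic:

// Sketch of the backoff schedule used by retry_on_serialization_error.
fn backoff_ms(prev_attempt_count: u32, jitter: f32) -> u64 {
    let base_delay = 4_u64 << prev_attempt_count.min(16); // 4, 8, 16, ... ms
    (base_delay as f32 * jitter) as u64 // jitter is drawn from 0.5..=2.0 above
}
// With jitter 1.0, attempts 0..=4 yield 4, 8, 16, 32, 64 ms.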

View File

@@ -0,0 +1,199 @@
use crate::Result;
use rpc::proto;
use sea_orm::{entity::prelude::*, DbErr};
use serde::{Deserialize, Serialize};
macro_rules! id_type {
($name:ident) => {
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
DeriveValueType,
)]
#[serde(transparent)]
pub struct $name(pub i32);
impl $name {
#[allow(unused)]
pub const MAX: Self = Self(i32::MAX);
#[allow(unused)]
pub fn from_proto(value: u64) -> Self {
Self(value as i32)
}
#[allow(unused)]
pub fn to_proto(self) -> u64 {
self.0 as u64
}
}
impl std::fmt::Display for $name {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl sea_orm::TryFromU64 for $name {
fn try_from_u64(n: u64) -> Result<Self, DbErr> {
Ok(Self(n.try_into().map_err(|_| {
DbErr::ConvertFromU64(concat!(
"error converting ",
stringify!($name),
" to u64"
))
})?))
}
}
impl sea_orm::sea_query::Nullable for $name {
fn null() -> Value {
Value::Int(None)
}
}
};
}
id_type!(BufferId);
id_type!(AccessTokenId);
id_type!(ChannelChatParticipantId);
id_type!(ChannelId);
id_type!(ChannelMemberId);
id_type!(MessageId);
id_type!(ContactId);
id_type!(FollowerId);
id_type!(RoomId);
id_type!(RoomParticipantId);
id_type!(ProjectId);
id_type!(ProjectCollaboratorId);
id_type!(ReplicaId);
id_type!(ServerId);
id_type!(SignupId);
id_type!(UserId);
id_type!(ChannelBufferCollaboratorId);
id_type!(FlagId);
id_type!(NotificationId);
id_type!(NotificationKindId);
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelRole {
#[sea_orm(string_value = "admin")]
Admin,
#[sea_orm(string_value = "member")]
#[default]
Member,
#[sea_orm(string_value = "guest")]
Guest,
#[sea_orm(string_value = "banned")]
Banned,
}
impl ChannelRole {
pub fn should_override(&self, other: Self) -> bool {
use ChannelRole::*;
match self {
Admin => matches!(other, Member | Banned | Guest),
Member => matches!(other, Banned | Guest),
Banned => matches!(other, Guest),
Guest => false,
}
}
pub fn max(&self, other: Self) -> Self {
if self.should_override(other) {
*self
} else {
other
}
}
pub fn can_see_all_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Admin | Member => true,
Guest | Banned => false,
}
}
pub fn can_only_see_public_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Guest => true,
Admin | Member | Banned => false,
}
}
}
impl From<proto::ChannelRole> for ChannelRole {
fn from(value: proto::ChannelRole) -> Self {
match value {
proto::ChannelRole::Admin => ChannelRole::Admin,
proto::ChannelRole::Member => ChannelRole::Member,
proto::ChannelRole::Guest => ChannelRole::Guest,
proto::ChannelRole::Banned => ChannelRole::Banned,
}
}
}
impl Into<proto::ChannelRole> for ChannelRole {
fn into(self) -> proto::ChannelRole {
match self {
ChannelRole::Admin => proto::ChannelRole::Admin,
ChannelRole::Member => proto::ChannelRole::Member,
ChannelRole::Guest => proto::ChannelRole::Guest,
ChannelRole::Banned => proto::ChannelRole::Banned,
}
}
}
impl Into<i32> for ChannelRole {
fn into(self) -> i32 {
let proto: proto::ChannelRole = self.into();
proto.into()
}
}
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelVisibility {
#[sea_orm(string_value = "public")]
Public,
#[sea_orm(string_value = "members")]
#[default]
Members,
}
impl From<proto::ChannelVisibility> for ChannelVisibility {
fn from(value: proto::ChannelVisibility) -> Self {
match value {
proto::ChannelVisibility::Public => ChannelVisibility::Public,
proto::ChannelVisibility::Members => ChannelVisibility::Members,
}
}
}
impl Into<proto::ChannelVisibility> for ChannelVisibility {
fn into(self) -> proto::ChannelVisibility {
match self {
ChannelVisibility::Public => proto::ChannelVisibility::Public,
ChannelVisibility::Members => proto::ChannelVisibility::Members,
}
}
}
impl Into<i32> for ChannelVisibility {
fn into(self) -> i32 {
let proto: proto::ChannelVisibility = self.into();
proto.into()
}
}
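
Each `id_type!` invocation expands to a transparent newtype over `i32` with proto and display conversions. A minimal usage sketch of the generated API:

// Sketch of the API generated by id_type!(UserId).
let id = UserId::from_proto(7);
assert_eq!(id, UserId(7));
assert_eq!(id.to_proto(), 7u64);
assert_eq!(id.to_string(), "7"); // Display delegates to the inner i32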

View File

@@ -0,0 +1,12 @@
use super::*;
pub mod access_tokens;
pub mod buffers;
pub mod channels;
pub mod contacts;
pub mod messages;
pub mod notifications;
pub mod projects;
pub mod rooms;
pub mod servers;
pub mod users;

View File

@@ -0,0 +1,54 @@
use super::*;
use sea_orm::sea_query::Query;
impl Database {
pub async fn create_access_token(
&self,
user_id: UserId,
access_token_hash: &str,
max_access_token_count: usize,
) -> Result<AccessTokenId> {
self.transaction(|tx| async {
let tx = tx; // move the transaction handle into this non-move async block
let token = access_token::ActiveModel {
user_id: ActiveValue::set(user_id),
hash: ActiveValue::set(access_token_hash.into()),
..Default::default()
}
.insert(&*tx)
.await?;
access_token::Entity::delete_many()
.filter(
access_token::Column::Id.in_subquery(
Query::select()
.column(access_token::Column::Id)
.from(access_token::Entity)
.and_where(access_token::Column::UserId.eq(user_id))
.order_by(access_token::Column::Id, sea_orm::Order::Desc)
.limit(10000)
.offset(max_access_token_count as u64)
.to_owned(),
),
)
.exec(&*tx)
.await?;
Ok(token.id)
})
.await
}
pub async fn get_access_token(
&self,
access_token_id: AccessTokenId,
) -> Result<access_token::Model> {
self.transaction(|tx| async move {
Ok(access_token::Entity::find_by_id(access_token_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such access token"))?)
})
.await
}
}
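
The `delete_many` above keeps only the newest `max_access_token_count` tokens per user. Roughly the SQL shape the query builder emits, hedged since SeaORM's exact output (and the table name) may differ:

-- Approximate pruning statement; $2 stands in for max_access_token_count.
DELETE FROM access_tokens
WHERE id IN (
    SELECT id FROM access_tokens
    WHERE user_id = $1
    ORDER BY id DESC
    LIMIT 10000 OFFSET $2
);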

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,353 @@
use super::*;
impl Database {
pub async fn get_contacts(&self, user_id: UserId) -> Result<Vec<Contact>> {
#[derive(Debug, FromQueryResult)]
struct ContactWithUserBusyStatuses {
user_id_a: UserId,
user_id_b: UserId,
a_to_b: bool,
accepted: bool,
user_a_busy: bool,
user_b_busy: bool,
}
self.transaction(|tx| async move {
let user_a_participant = Alias::new("user_a_participant");
let user_b_participant = Alias::new("user_b_participant");
let mut db_contacts = contact::Entity::find()
.column_as(
Expr::col((user_a_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_a_busy",
)
.column_as(
Expr::col((user_b_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_b_busy",
)
.filter(
contact::Column::UserIdA
.eq(user_id)
.or(contact::Column::UserIdB.eq(user_id)),
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserARoomParticipant.def(),
user_a_participant,
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserBRoomParticipant.def(),
user_b_participant,
)
.into_model::<ContactWithUserBusyStatuses>()
.stream(&*tx)
.await?;
let mut contacts = Vec::new();
while let Some(db_contact) = db_contacts.next().await {
let db_contact = db_contact?;
if db_contact.user_id_a == user_id {
if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_b,
busy: db_contact.user_b_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_b,
})
} else {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_b,
});
}
} else if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_a,
busy: db_contact.user_a_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_a,
});
} else {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_a,
});
}
}
contacts.sort_unstable_by_key(|contact| contact.user_id());
Ok(contacts)
})
.await
}
pub async fn is_user_busy(&self, user_id: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let participant = room_participant::Entity::find()
.filter(room_participant::Column::UserId.eq(user_id))
.one(&*tx)
.await?;
Ok(participant.is_some())
})
.await
}
pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let (id_a, id_b) = if user_id_1 < user_id_2 {
(user_id_1, user_id_2)
} else {
(user_id_2, user_id_1)
};
Ok(contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::Accepted.eq(true)),
)
.one(&*tx)
.await?
.is_some())
})
.await
}
pub async fn send_contact_request(
&self,
sender_id: UserId,
receiver_id: UserId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if sender_id < receiver_id {
(sender_id, receiver_id, true)
} else {
(receiver_id, sender_id, false)
};
let rows_affected = contact::Entity::insert(contact::ActiveModel {
user_id_a: ActiveValue::set(id_a),
user_id_b: ActiveValue::set(id_b),
a_to_b: ActiveValue::set(a_to_b),
accepted: ActiveValue::set(false),
should_notify: ActiveValue::set(true),
..Default::default()
})
.on_conflict(
OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB])
.values([
(contact::Column::Accepted, true.into()),
(contact::Column::ShouldNotify, false.into()),
])
.action_and_where(
contact::Column::Accepted.eq(false).and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::UserIdA.eq(id_b))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::UserIdA.eq(id_a))),
),
)
.to_owned(),
)
.exec_without_returning(&*tx)
.await?;
if rows_affected == 0 {
Err(anyhow!("contact already requested"))?;
}
Ok(self
.create_notification(
receiver_id,
rpc::Notification::ContactRequest {
sender_id: sender_id.to_proto(),
},
true,
&*tx,
)
.await?
.into_iter()
.collect())
})
.await
}
/// Deletes the contact identified by the requester and responder ids.
///
/// Returns whether the deleted contact had been accepted (as opposed to
/// still being a pending contact request), along with the id of the
/// contact-request notification that was removed, if any.
///
/// # Arguments
///
/// * `requester_id` - The user that initiates this request
/// * `responder_id` - The user that will be removed
pub async fn remove_contact(
&self,
requester_id: UserId,
responder_id: UserId,
) -> Result<(bool, Option<NotificationId>)> {
self.transaction(|tx| async move {
let (id_a, id_b) = if responder_id < requester_id {
(responder_id, requester_id)
} else {
(requester_id, responder_id)
};
let contact = contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b)),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such contact"))?;
contact::Entity::delete_by_id(contact.id).exec(&*tx).await?;
let mut deleted_notification_id = None;
if !contact.accepted {
deleted_notification_id = self
.remove_notification(
responder_id,
rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
&*tx,
)
.await?;
}
Ok((contact.accepted, deleted_notification_id))
})
.await
}
pub async fn dismiss_contact_notification(
&self,
user_id: UserId,
contact_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if user_id < contact_user_id {
(user_id, contact_user_id, true)
} else {
(contact_user_id, user_id, false)
};
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
should_notify: ActiveValue::set(false),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::Accepted.eq(true))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::Accepted.eq(false))),
),
)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such contact request"))?
} else {
Ok(())
}
})
.await
}
pub async fn respond_to_contact_request(
&self,
responder_id: UserId,
requester_id: UserId,
accept: bool,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if responder_id < requester_id {
(responder_id, requester_id, false)
} else {
(requester_id, responder_id, true)
};
let rows_affected = if accept {
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
accepted: ActiveValue::set(true),
should_notify: ActiveValue::set(true),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b)),
)
.exec(&*tx)
.await?;
result.rows_affected
} else {
let result = contact::Entity::delete_many()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b))
.and(contact::Column::Accepted.eq(false)),
)
.exec(&*tx)
.await?;
result.rows_affected
};
if rows_affected == 0 {
Err(anyhow!("no such contact request"))?
}
let mut notifications = Vec::new();
notifications.extend(
self.mark_notification_as_read_with_response(
responder_id,
&rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
accept,
&*tx,
)
.await?,
);
if accept {
notifications.extend(
self.create_notification(
requester_id,
rpc::Notification::ContactRequestAccepted {
responder_id: responder_id.to_proto(),
},
true,
&*tx,
)
.await?,
);
}
Ok(notifications)
})
.await
}
}
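
Every method above normalizes the pair of user ids so that each contact is stored as a single row, with `a_to_b` recording which side sent the request. A standalone sketch of that encoding:

// Sketch of the canonical contact-row encoding used throughout this file.
fn contact_key(sender: UserId, receiver: UserId) -> (UserId, UserId, bool) {
    if sender < receiver {
        (sender, receiver, true) // a_to_b: user_id_a sent the request
    } else {
        (receiver, sender, false) // a_to_b: user_id_b sent the request
    }
}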

View File

@@ -0,0 +1,505 @@
use super::*;
use rpc::Notification;
use sea_orm::TryInsertResult;
use time::OffsetDateTime;
impl Database {
pub async fn join_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
channel_chat_participant::ActiveModel {
id: ActiveValue::NotSet,
channel_id: ActiveValue::Set(channel_id),
user_id: ActiveValue::Set(user_id),
connection_id: ActiveValue::Set(connection_id.id as i32),
connection_server_id: ActiveValue::Set(ServerId(connection_id.owner_id as i32)),
}
.insert(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn channel_chat_connection_lost(
&self,
connection_id: ConnectionId,
tx: &DatabaseTransaction,
) -> Result<()> {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id)),
)
.exec(tx)
.await?;
Ok(())
}
pub async fn leave_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id))
.add(channel_chat_participant::Column::ChannelId.eq(channel_id)),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn get_channel_messages(
&self,
channel_id: ChannelId,
user_id: UserId,
count: usize,
before_message_id: Option<MessageId>,
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut condition =
Condition::all().add(channel_message::Column::ChannelId.eq(channel_id));
if let Some(before_message_id) = before_message_id {
condition = condition.add(channel_message::Column::Id.lt(before_message_id));
}
let rows = channel_message::Entity::find()
.filter(condition)
.order_by_desc(channel_message::Column::Id)
.limit(count as u64)
.all(&*tx)
.await?;
self.load_channel_messages(rows, &*tx).await
})
.await
}
pub async fn get_channel_messages_by_id(
&self,
user_id: UserId,
message_ids: &[MessageId],
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let rows = channel_message::Entity::find()
.filter(channel_message::Column::Id.is_in(message_ids.iter().copied()))
.order_by_desc(channel_message::Column::Id)
.all(&*tx)
.await?;
let mut channels = HashMap::<ChannelId, channel::Model>::default();
for row in &rows {
channels.insert(
row.channel_id,
self.get_channel_internal(row.channel_id, &*tx).await?,
);
}
for (_, channel) in channels {
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
}
let messages = self.load_channel_messages(rows, &*tx).await?;
Ok(messages)
})
.await
}
async fn load_channel_messages(
&self,
rows: Vec<channel_message::Model>,
tx: &DatabaseTransaction,
) -> Result<Vec<proto::ChannelMessage>> {
let mut messages = rows
.into_iter()
.map(|row| {
let nonce = row.nonce.as_u64_pair();
proto::ChannelMessage {
id: row.id.to_proto(),
sender_id: row.sender_id.to_proto(),
body: row.body,
timestamp: row.sent_at.assume_utc().unix_timestamp() as u64,
mentions: vec![],
nonce: Some(proto::Nonce {
upper_half: nonce.0,
lower_half: nonce.1,
}),
}
})
.collect::<Vec<_>>();
messages.reverse();
let mut mentions = channel_message_mention::Entity::find()
.filter(channel_message_mention::Column::MessageId.is_in(messages.iter().map(|m| m.id)))
.order_by_asc(channel_message_mention::Column::MessageId)
.order_by_asc(channel_message_mention::Column::StartOffset)
.stream(&*tx)
.await?;
let mut message_ix = 0;
while let Some(mention) = mentions.next().await {
let mention = mention?;
let message_id = mention.message_id.to_proto();
while let Some(message) = messages.get_mut(message_ix) {
if message.id < message_id {
message_ix += 1;
} else {
if message.id == message_id {
message.mentions.push(proto::ChatMention {
range: Some(proto::Range {
start: mention.start_offset as u64,
end: mention.end_offset as u64,
}),
user_id: mention.user_id.to_proto(),
});
}
break;
}
}
}
Ok(messages)
}
pub async fn create_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
body: &str,
mentions: &[proto::ChatMention],
timestamp: OffsetDateTime,
nonce: u128,
) -> Result<CreatedChannelMessage> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
let mut participant_user_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_user_ids.push(row.user_id);
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
let timestamp = timestamp.to_offset(time::UtcOffset::UTC);
let timestamp = time::PrimitiveDateTime::new(timestamp.date(), timestamp.time());
let result = channel_message::Entity::insert(channel_message::ActiveModel {
channel_id: ActiveValue::Set(channel_id),
sender_id: ActiveValue::Set(user_id),
body: ActiveValue::Set(body.to_string()),
sent_at: ActiveValue::Set(timestamp),
nonce: ActiveValue::Set(Uuid::from_u128(nonce)),
id: ActiveValue::NotSet,
})
.on_conflict(
OnConflict::columns([
channel_message::Column::SenderId,
channel_message::Column::Nonce,
])
.do_nothing()
.to_owned(),
)
.do_nothing()
.exec(&*tx)
.await?;
let message_id;
let mut notifications = Vec::new();
match result {
TryInsertResult::Inserted(result) => {
message_id = result.last_insert_id;
let mentioned_user_ids =
mentions.iter().map(|m| m.user_id).collect::<HashSet<_>>();
let mentions = mentions
.iter()
.filter_map(|mention| {
let range = mention.range.as_ref()?;
if !body.is_char_boundary(range.start as usize)
|| !body.is_char_boundary(range.end as usize)
{
return None;
}
Some(channel_message_mention::ActiveModel {
message_id: ActiveValue::Set(message_id),
start_offset: ActiveValue::Set(range.start as i32),
end_offset: ActiveValue::Set(range.end as i32),
user_id: ActiveValue::Set(UserId::from_proto(mention.user_id)),
})
})
.collect::<Vec<_>>();
if !mentions.is_empty() {
channel_message_mention::Entity::insert_many(mentions)
.exec(&*tx)
.await?;
}
for mentioned_user in mentioned_user_ids {
notifications.extend(
self.create_notification(
UserId::from_proto(mentioned_user),
rpc::Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: user_id.to_proto(),
channel_id: channel_id.to_proto(),
},
false,
&*tx,
)
.await?,
);
}
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
}
_ => {
message_id = channel_message::Entity::find()
.filter(channel_message::Column::Nonce.eq(Uuid::from_u128(nonce)))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("failed to insert message"))?
.id;
}
}
let mut channel_members = self.get_channel_participants(&channel, &*tx).await?;
channel_members.retain(|member| !participant_user_ids.contains(member));
Ok(CreatedChannelMessage {
message_id,
participant_connection_ids,
channel_members,
notifications,
})
})
.await
}
pub async fn observe_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
let mut batch = NotificationBatch::default();
batch.extend(
self.mark_notification_as_read(
user_id,
&Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: Default::default(),
channel_id: Default::default(),
},
&*tx,
)
.await?,
);
Ok(batch)
})
.await
}
async fn observe_channel_message_internal(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
tx: &DatabaseTransaction,
) -> Result<()> {
observed_channel_messages::Entity::insert(observed_channel_messages::ActiveModel {
user_id: ActiveValue::Set(user_id),
channel_id: ActiveValue::Set(channel_id),
channel_message_id: ActiveValue::Set(message_id),
})
.on_conflict(
OnConflict::columns([
observed_channel_messages::Column::ChannelId,
observed_channel_messages::Column::UserId,
])
.update_column(observed_channel_messages::Column::ChannelMessageId)
.action_cond_where(observed_channel_messages::Column::ChannelMessageId.lt(message_id))
.to_owned(),
)
// TODO: Try to upgrade SeaORM so we don't have to do this hack around their bug
.exec_without_returning(&*tx)
.await?;
Ok(())
}
pub async fn unseen_channel_messages(
&self,
user_id: UserId,
channel_ids: &[ChannelId],
tx: &DatabaseTransaction,
) -> Result<Vec<proto::UnseenChannelMessage>> {
let mut observed_messages_by_channel_id = HashMap::default();
let mut rows = observed_channel_messages::Entity::find()
.filter(observed_channel_messages::Column::UserId.eq(user_id))
.filter(observed_channel_messages::Column::ChannelId.is_in(channel_ids.iter().copied()))
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
observed_messages_by_channel_id.insert(row.channel_id, row);
}
drop(rows);
let mut values = String::new();
for id in channel_ids {
if !values.is_empty() {
values.push_str(", ");
}
write!(&mut values, "({})", id).unwrap();
}
if values.is_empty() {
return Ok(Default::default());
}
let sql = format!(
r#"
SELECT
*
FROM (
SELECT
*,
row_number() OVER (
PARTITION BY channel_id
ORDER BY id DESC
) as row_number
FROM channel_messages
WHERE
channel_id in ({values})
) AS messages
WHERE
row_number = 1
"#,
);
let stmt = Statement::from_string(self.pool.get_database_backend(), sql);
let last_messages = channel_message::Model::find_by_statement(stmt)
.all(&*tx)
.await?;
let mut changes = Vec::new();
for last_message in last_messages {
if let Some(observed_message) =
observed_messages_by_channel_id.get(&last_message.channel_id)
{
if observed_message.channel_message_id == last_message.id {
continue;
}
}
changes.push(proto::UnseenChannelMessage {
channel_id: last_message.channel_id.to_proto(),
message_id: last_message.id.to_proto(),
});
}
Ok(changes)
}
pub async fn remove_channel_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
user_id: UserId,
) -> Result<Vec<ConnectionId>> {
self.transaction(|tx| async move {
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
let result = channel_message::Entity::delete_by_id(message_id)
.filter(channel_message::Column::SenderId.eq(user_id))
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
if self
.check_user_is_channel_admin(&channel, user_id, &*tx)
.await
.is_ok()
{
let result = channel_message::Entity::delete_by_id(message_id)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such message"))?;
}
} else {
Err(anyhow!("operation could not be completed"))?;
}
}
Ok(participant_connection_ids)
})
.await
}
}
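
Message nonces arrive on the wire as a `u128`, are stored as a UUID, and are split back into two `u64` halves for `proto::Nonce`. A minimal sketch of that round trip (uuid 1.x API):

// Round trip between the wire nonce and its stored/proto forms.
let nonce: u128 = 0x0123_4567_89ab_cdef_0123_4567_89ab_cdef;
let uuid = uuid::Uuid::from_u128(nonce);
let (upper_half, lower_half) = uuid.as_u64_pair(); // the proto::Nonce fields
assert_eq!(((upper_half as u128) << 64) | lower_half as u128, nonce);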

View File

@@ -0,0 +1,262 @@
use super::*;
use rpc::Notification;
impl Database {
pub async fn initialize_notification_kinds(&mut self) -> Result<()> {
notification_kind::Entity::insert_many(Notification::all_variant_names().iter().map(
|kind| notification_kind::ActiveModel {
name: ActiveValue::Set(kind.to_string()),
..Default::default()
},
))
.on_conflict(OnConflict::new().do_nothing().to_owned())
.exec_without_returning(&self.pool)
.await?;
let mut rows = notification_kind::Entity::find().stream(&self.pool).await?;
while let Some(row) = rows.next().await {
let row = row?;
self.notification_kinds_by_name.insert(row.name, row.id);
}
for name in Notification::all_variant_names() {
if let Some(id) = self.notification_kinds_by_name.get(*name).copied() {
self.notification_kinds_by_id.insert(id, name);
}
}
Ok(())
}
pub async fn get_notifications(
&self,
recipient_id: UserId,
limit: usize,
before_id: Option<NotificationId>,
) -> Result<Vec<proto::Notification>> {
self.transaction(|tx| async move {
let mut result = Vec::new();
let mut condition =
Condition::all().add(notification::Column::RecipientId.eq(recipient_id));
if let Some(before_id) = before_id {
condition = condition.add(notification::Column::Id.lt(before_id));
}
let mut rows = notification::Entity::find()
.filter(condition)
.order_by_desc(notification::Column::Id)
.limit(limit as u64)
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
let kind = row.kind;
if let Some(proto) = model_to_proto(self, row) {
result.push(proto);
} else {
log::warn!("unknown notification kind {:?}", kind);
}
}
result.reverse();
Ok(result)
})
.await
}
/// Create a notification. If `avoid_duplicates` is set to true, then avoid
/// creating a new notification if the given recipient already has an
/// unread notification with the given kind and entity id.
pub async fn create_notification(
&self,
recipient_id: UserId,
notification: Notification,
avoid_duplicates: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if avoid_duplicates {
if self
.find_notification(recipient_id, &notification, tx)
.await?
.is_some()
{
return Ok(None);
}
}
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
let model = notification::ActiveModel {
recipient_id: ActiveValue::Set(recipient_id),
kind: ActiveValue::Set(kind),
entity_id: ActiveValue::Set(proto.entity_id.map(|id| id as i32)),
content: ActiveValue::Set(proto.content.clone()),
..Default::default()
}
.save(&*tx)
.await?;
Ok(Some((
recipient_id,
proto::Notification {
id: model.id.as_ref().to_proto(),
kind: proto.kind,
timestamp: model.created_at.as_ref().assume_utc().unix_timestamp() as u64,
is_read: false,
response: None,
content: proto.content,
entity_id: proto.entity_id,
},
)))
}
/// Remove an unread notification with the given recipient, kind and
/// entity id.
pub async fn remove_notification(
&self,
recipient_id: UserId,
notification: Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let id = self
.find_notification(recipient_id, &notification, tx)
.await?;
if let Some(id) = id {
notification::Entity::delete_by_id(id).exec(tx).await?;
}
Ok(id)
}
/// Mark the notification with the given kind and entity id as read, and
/// populate its response.
pub async fn mark_notification_as_read_with_response(
&self,
recipient_id: UserId,
notification: &Notification,
response: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, Some(response), tx)
.await
}
pub async fn mark_notification_as_read(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, None, tx)
.await
}
pub async fn mark_notification_as_read_by_id(
&self,
recipient_id: UserId,
notification_id: NotificationId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(notification_id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
..Default::default()
})
.exec(&*tx)
.await?;
Ok(model_to_proto(self, row)
.map(|notification| (recipient_id, notification))
.into_iter()
.collect())
})
.await
}
async fn mark_notification_as_read_internal(
&self,
recipient_id: UserId,
notification: &Notification,
response: Option<bool>,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if let Some(id) = self
.find_notification(recipient_id, notification, &*tx)
.await?
{
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
response: if let Some(response) = response {
ActiveValue::Set(Some(response))
} else {
ActiveValue::NotSet
},
..Default::default()
})
.exec(tx)
.await?;
Ok(model_to_proto(self, row).map(|notification| (recipient_id, notification)))
} else {
Ok(None)
}
}
/// Find an unread notification by its recipient, kind and entity id.
async fn find_notification(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryIds {
Id,
}
Ok(notification::Entity::find()
.select_only()
.column(notification::Column::Id)
.filter(
Condition::all()
.add(notification::Column::RecipientId.eq(recipient_id))
.add(notification::Column::IsRead.eq(false))
.add(notification::Column::Kind.eq(kind))
.add(if proto.entity_id.is_some() {
notification::Column::EntityId.eq(proto.entity_id)
} else {
notification::Column::EntityId.is_null()
}),
)
.into_values::<_, QueryIds>()
.one(&*tx)
.await?)
}
}
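/// Convert a notification row into its protobuf form, returning `None`
/// if the row's kind is unknown.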
fn model_to_proto(this: &Database, row: notification::Model) -> Option<proto::Notification> {
let kind = this.notification_kinds_by_id.get(&row.kind)?;
Some(proto::Notification {
id: row.id.to_proto(),
kind: kind.to_string(),
timestamp: row.created_at.assume_utc().unix_timestamp() as u64,
is_read: row.is_read,
response: row.response,
content: row.content,
entity_id: row.entity_id.map(|id| id as u64),
})
}
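/// Look up the database id for the notification kind named in the proto.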
fn notification_kind_from_proto(
this: &Database,
proto: &proto::Notification,
) -> Result<NotificationKindId> {
Ok(this
.notification_kinds_by_name
.get(&proto.kind)
.copied()
.ok_or_else(|| anyhow!("invalid notification kind {:?}", proto.kind))?)
}

View File

@ -0,0 +1,960 @@
use super::*;
impl Database {
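/// Count shared projects, excluding those hosted by admin users.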
pub async fn project_count_excluding_admins(&self) -> Result<usize> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
Count,
}
self.transaction(|tx| async move {
Ok(project::Entity::find()
.select_only()
.column_as(project::Column::Id.count(), QueryAs::Count)
.inner_join(user::Entity)
.filter(user::Column::Admin.eq(false))
.into_values::<_, QueryAs>()
.one(&*tx)
.await?
.unwrap_or(0i64) as usize)
})
.await
}
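/// Share a project in the given room, creating its worktrees and
/// registering the sharing connection as the host collaborator.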
pub async fn share_project(
&self,
room_id: RoomId,
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
.add(
room_participant::Column::AnsweringConnectionId
.eq(connection.id as i32),
)
.add(
room_participant::Column::AnsweringConnectionServerId
.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("could not find participant"))?;
if participant.room_id != room_id {
return Err(anyhow!("shared project on unexpected room"))?;
}
let project = project::ActiveModel {
room_id: ActiveValue::set(participant.room_id),
host_user_id: ActiveValue::set(participant.user_id),
host_connection_id: ActiveValue::set(Some(connection.id as i32)),
host_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
..Default::default()
}
.insert(&*tx)
.await?;
if !worktrees.is_empty() {
worktree::Entity::insert_many(worktrees.iter().map(|worktree| {
worktree::ActiveModel {
id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project.id),
abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()),
visible: ActiveValue::set(worktree.visible),
scan_id: ActiveValue::set(0),
completed_scan_id: ActiveValue::set(0),
}
}))
.exec(&*tx)
.await?;
}
project_collaborator::ActiveModel {
project_id: ActiveValue::set(project.id),
connection_id: ActiveValue::set(connection.id as i32),
connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)),
user_id: ActiveValue::set(participant.user_id),
replica_id: ActiveValue::set(ReplicaId(0)),
is_host: ActiveValue::set(true),
..Default::default()
}
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((project.id, room))
})
.await
}
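/// Stop sharing a project. Only the host connection may unshare;
/// returns the guest connections that should be notified.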
pub async fn unshare_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project not found"))?;
if project.host_connection()? == connection {
project::Entity::delete(project.into_active_model())
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room, guest_connection_ids))
} else {
Err(anyhow!("cannot unshare a project hosted by another user"))?
}
})
.await
}
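/// Replace the project's worktree metadata. The update must come from
/// the host connection.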
pub async fn update_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id as i32))
.add(
project::Column::HostConnectionServerId.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
self.update_project_worktrees(project.id, worktrees, &tx)
.await?;
let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?;
let room = self.get_room(project.room_id, &tx).await?;
Ok((room, guest_connection_ids))
})
.await
}
pub(in crate::db) async fn update_project_worktrees(
&self,
project_id: ProjectId,
worktrees: &[proto::WorktreeMetadata],
tx: &DatabaseTransaction,
) -> Result<()> {
if !worktrees.is_empty() {
worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project_id),
abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()),
visible: ActiveValue::set(worktree.visible),
scan_id: ActiveValue::set(0),
completed_scan_id: ActiveValue::set(0),
}))
.on_conflict(
OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id])
.update_column(worktree::Column::RootName)
.to_owned(),
)
.exec(&*tx)
.await?;
}
worktree::Entity::delete_many()
.filter(worktree::Column::ProjectId.eq(project_id).and(
worktree::Column::Id.is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)),
))
.exec(&*tx)
.await?;
Ok(())
}
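/// Apply a worktree update from the host: refresh the worktree's
/// metadata and upsert or soft-delete its entries and repositories.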
pub async fn update_worktree(
&self,
update: &proto::UpdateWorktree,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
// Ensure the update comes from the host.
let _project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id as i32))
.add(
project::Column::HostConnectionServerId.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
// Update metadata.
worktree::Entity::update(worktree::ActiveModel {
id: ActiveValue::set(worktree_id),
project_id: ActiveValue::set(project_id),
root_name: ActiveValue::set(update.root_name.clone()),
scan_id: ActiveValue::set(update.scan_id as i64),
completed_scan_id: if update.is_last_update {
ActiveValue::set(update.scan_id as i64)
} else {
ActiveValue::default()
},
abs_path: ActiveValue::set(update.abs_path.clone()),
..Default::default()
})
.exec(&*tx)
.await?;
if !update.updated_entries.is_empty() {
worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| {
let mtime = entry.mtime.clone().unwrap_or_default();
worktree_entry::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
id: ActiveValue::set(entry.id as i64),
is_dir: ActiveValue::set(entry.is_dir),
path: ActiveValue::set(entry.path.clone()),
inode: ActiveValue::set(entry.inode as i64),
mtime_seconds: ActiveValue::set(mtime.seconds as i64),
mtime_nanos: ActiveValue::set(mtime.nanos as i32),
is_symlink: ActiveValue::set(entry.is_symlink),
is_ignored: ActiveValue::set(entry.is_ignored),
is_external: ActiveValue::set(entry.is_external),
git_status: ActiveValue::set(entry.git_status.map(|status| status as i64)),
is_deleted: ActiveValue::set(false),
scan_id: ActiveValue::set(update.scan_id as i64),
}
}))
.on_conflict(
OnConflict::columns([
worktree_entry::Column::ProjectId,
worktree_entry::Column::WorktreeId,
worktree_entry::Column::Id,
])
.update_columns([
worktree_entry::Column::IsDir,
worktree_entry::Column::Path,
worktree_entry::Column::Inode,
worktree_entry::Column::MtimeSeconds,
worktree_entry::Column::MtimeNanos,
worktree_entry::Column::IsSymlink,
worktree_entry::Column::IsIgnored,
worktree_entry::Column::GitStatus,
worktree_entry::Column::ScanId,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
if !update.removed_entries.is_empty() {
worktree_entry::Entity::update_many()
.filter(
worktree_entry::Column::ProjectId
.eq(project_id)
.and(worktree_entry::Column::WorktreeId.eq(worktree_id))
.and(
worktree_entry::Column::Id
.is_in(update.removed_entries.iter().map(|id| *id as i64)),
),
)
.set(worktree_entry::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
if !update.updated_repositories.is_empty() {
worktree_repository::Entity::insert_many(update.updated_repositories.iter().map(
|repository| worktree_repository::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
work_directory_id: ActiveValue::set(repository.work_directory_id as i64),
scan_id: ActiveValue::set(update.scan_id as i64),
branch: ActiveValue::set(repository.branch.clone()),
is_deleted: ActiveValue::set(false),
},
))
.on_conflict(
OnConflict::columns([
worktree_repository::Column::ProjectId,
worktree_repository::Column::WorktreeId,
worktree_repository::Column::WorkDirectoryId,
])
.update_columns([
worktree_repository::Column::ScanId,
worktree_repository::Column::Branch,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
if !update.removed_repositories.is_empty() {
worktree_repository::Entity::update_many()
.filter(
worktree_repository::Column::ProjectId
.eq(project_id)
.and(worktree_repository::Column::WorktreeId.eq(worktree_id))
.and(
worktree_repository::Column::WorkDirectoryId
.is_in(update.removed_repositories.iter().map(|id| *id as i64)),
),
)
.set(worktree_repository::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
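/// Upsert a diagnostic summary reported by the host, returning the
/// guest connections that should be notified.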
pub async fn update_diagnostic_summary(
&self,
update: &proto::UpdateDiagnosticSummary,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let summary = update
.summary
.as_ref()
.ok_or_else(|| anyhow!("invalid summary"))?;
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
// Update summary.
worktree_diagnostic_summary::Entity::insert(worktree_diagnostic_summary::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
path: ActiveValue::set(summary.path.clone()),
language_server_id: ActiveValue::set(summary.language_server_id as i64),
error_count: ActiveValue::set(summary.error_count as i32),
warning_count: ActiveValue::set(summary.warning_count as i32),
..Default::default()
})
.on_conflict(
OnConflict::columns([
worktree_diagnostic_summary::Column::ProjectId,
worktree_diagnostic_summary::Column::WorktreeId,
worktree_diagnostic_summary::Column::Path,
])
.update_columns([
worktree_diagnostic_summary::Column::LanguageServerId,
worktree_diagnostic_summary::Column::ErrorCount,
worktree_diagnostic_summary::Column::WarningCount,
])
.to_owned(),
)
.exec(&*tx)
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
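/// Record a language server that the host started for the project,
/// returning the guest connections that should be notified.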
pub async fn start_language_server(
&self,
update: &proto::StartLanguageServer,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let server = update
.server
.as_ref()
.ok_or_else(|| anyhow!("invalid language server"))?;
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
// Add the newly-started language server.
language_server::Entity::insert(language_server::ActiveModel {
project_id: ActiveValue::set(project_id),
id: ActiveValue::set(server.id as i64),
name: ActiveValue::set(server.name.clone()),
..Default::default()
})
.on_conflict(
OnConflict::columns([
language_server::Column::ProjectId,
language_server::Column::Id,
])
.update_column(language_server::Column::Name)
.to_owned(),
)
.exec(&*tx)
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
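/// Upsert or delete a worktree settings file on behalf of the host,
/// returning the guest connections that should be notified.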
pub async fn update_worktree_settings(
&self,
update: &proto::UpdateWorktreeSettings,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
if let Some(content) = &update.content {
worktree_settings_file::Entity::insert(worktree_settings_file::ActiveModel {
project_id: ActiveValue::Set(project_id),
worktree_id: ActiveValue::Set(update.worktree_id as i64),
path: ActiveValue::Set(update.path.clone()),
content: ActiveValue::Set(content.clone()),
})
.on_conflict(
OnConflict::columns([
worktree_settings_file::Column::ProjectId,
worktree_settings_file::Column::WorktreeId,
worktree_settings_file::Column::Path,
])
.update_column(worktree_settings_file::Column::Content)
.to_owned(),
)
.exec(&*tx)
.await?;
} else {
worktree_settings_file::Entity::delete(worktree_settings_file::ActiveModel {
project_id: ActiveValue::Set(project_id),
worktree_id: ActiveValue::Set(update.worktree_id as i64),
path: ActiveValue::Set(update.path.clone()),
..Default::default()
})
.exec(&*tx)
.await?;
}
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
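/// Join a project as a guest: allocate the lowest unused replica id,
/// record the new collaborator, and load the project's worktrees,
/// entries, repositories, diagnostic summaries, settings files, and
/// language servers.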
pub async fn join_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(Project, ReplicaId)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
.add(
room_participant::Column::AnsweringConnectionId
.eq(connection.id as i32),
)
.add(
room_participant::Column::AnsweringConnectionServerId
.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("must join a room first"))?;
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.room_id != participant.room_id {
return Err(anyhow!("no such project"))?;
}
let mut collaborators = project
.find_related(project_collaborator::Entity)
.all(&*tx)
.await?;
let replica_ids = collaborators
.iter()
.map(|c| c.replica_id)
.collect::<HashSet<_>>();
let mut replica_id = ReplicaId(1);
while replica_ids.contains(&replica_id) {
replica_id.0 += 1;
}
let new_collaborator = project_collaborator::ActiveModel {
project_id: ActiveValue::set(project_id),
connection_id: ActiveValue::set(connection.id as i32),
connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)),
user_id: ActiveValue::set(participant.user_id),
replica_id: ActiveValue::set(replica_id),
is_host: ActiveValue::set(false),
..Default::default()
}
.insert(&*tx)
.await?;
collaborators.push(new_collaborator);
let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?;
let mut worktrees = db_worktrees
.into_iter()
.map(|db_worktree| {
(
db_worktree.id as u64,
Worktree {
id: db_worktree.id as u64,
abs_path: db_worktree.abs_path,
root_name: db_worktree.root_name,
visible: db_worktree.visible,
entries: Default::default(),
repository_entries: Default::default(),
diagnostic_summaries: Default::default(),
settings_files: Default::default(),
scan_id: db_worktree.scan_id as u64,
completed_scan_id: db_worktree.completed_scan_id as u64,
},
)
})
.collect::<BTreeMap<_, _>>();
// Populate worktree entries.
{
let mut db_entries = worktree_entry::Entity::find()
.filter(
Condition::all()
.add(worktree_entry::Column::ProjectId.eq(project_id))
.add(worktree_entry::Column::IsDeleted.eq(false)),
)
.stream(&*tx)
.await?;
while let Some(db_entry) = db_entries.next().await {
let db_entry = db_entry?;
if let Some(worktree) = worktrees.get_mut(&(db_entry.worktree_id as u64)) {
worktree.entries.push(proto::Entry {
id: db_entry.id as u64,
is_dir: db_entry.is_dir,
path: db_entry.path,
inode: db_entry.inode as u64,
mtime: Some(proto::Timestamp {
seconds: db_entry.mtime_seconds as u64,
nanos: db_entry.mtime_nanos as u32,
}),
is_symlink: db_entry.is_symlink,
is_ignored: db_entry.is_ignored,
is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
});
}
}
}
// Populate repository entries.
{
let mut db_repository_entries = worktree_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project_id))
.add(worktree_repository::Column::IsDeleted.eq(false)),
)
.stream(&*tx)
.await?;
while let Some(db_repository_entry) = db_repository_entries.next().await {
let db_repository_entry = db_repository_entry?;
if let Some(worktree) =
worktrees.get_mut(&(db_repository_entry.worktree_id as u64))
{
worktree.repository_entries.insert(
db_repository_entry.work_directory_id as u64,
proto::RepositoryEntry {
work_directory_id: db_repository_entry.work_directory_id as u64,
branch: db_repository_entry.branch,
},
);
}
}
}
// Populate worktree diagnostic summaries.
{
let mut db_summaries = worktree_diagnostic_summary::Entity::find()
.filter(worktree_diagnostic_summary::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
while let Some(db_summary) = db_summaries.next().await {
let db_summary = db_summary?;
if let Some(worktree) = worktrees.get_mut(&(db_summary.worktree_id as u64)) {
worktree
.diagnostic_summaries
.push(proto::DiagnosticSummary {
path: db_summary.path,
language_server_id: db_summary.language_server_id as u64,
error_count: db_summary.error_count as u32,
warning_count: db_summary.warning_count as u32,
});
}
}
}
// Populate worktree settings files
{
let mut db_settings_files = worktree_settings_file::Entity::find()
.filter(worktree_settings_file::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
while let Some(db_settings_file) = db_settings_files.next().await {
let db_settings_file = db_settings_file?;
if let Some(worktree) =
worktrees.get_mut(&(db_settings_file.worktree_id as u64))
{
worktree.settings_files.push(WorktreeSettingsFile {
path: db_settings_file.path,
content: db_settings_file.content,
});
}
}
}
// Populate language servers.
let language_servers = project
.find_related(language_server::Entity)
.all(&*tx)
.await?;
let project = Project {
collaborators: collaborators
.into_iter()
.map(|collaborator| ProjectCollaborator {
connection_id: collaborator.connection(),
user_id: collaborator.user_id,
replica_id: collaborator.replica_id,
is_host: collaborator.is_host,
})
.collect(),
worktrees,
language_servers: language_servers
.into_iter()
.map(|language_server| proto::LanguageServer {
id: language_server.id as u64,
name: language_server.name,
})
.collect(),
};
Ok((project, replica_id))
})
.await
}
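/// Remove the connection's collaborator row and any follow
/// relationships involving it, returning the room and the remaining
/// collaborators' connections.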
pub async fn leave_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(proto::Room, LeftProject)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let result = project_collaborator::Entity::delete_many()
.filter(
Condition::all()
.add(project_collaborator::Column::ProjectId.eq(project_id))
.add(project_collaborator::Column::ConnectionId.eq(connection.id as i32))
.add(
project_collaborator::Column::ConnectionServerId
.eq(connection.owner_id as i32),
),
)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("not a collaborator on this project"))?;
}
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let collaborators = project
.find_related(project_collaborator::Entity)
.all(&*tx)
.await?;
let connection_ids = collaborators
.into_iter()
.map(|collaborator| collaborator.connection())
.collect();
follower::Entity::delete_many()
.filter(
Condition::any()
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(Some(project_id)))
.add(
follower::Column::LeaderConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(connection.id)),
)
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(Some(project_id)))
.add(
follower::Column::FollowerConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(connection.id)),
),
)
.exec(&*tx)
.await?;
let room = self.get_room(project.room_id, &tx).await?;
let left_project = LeftProject {
id: project_id,
host_user_id: project.host_user_id,
host_connection_id: project.host_connection()?,
connection_ids,
};
Ok((room, left_project))
})
.await
}
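/// Return the project's collaborators. Fails unless the requesting
/// connection is itself a collaborator.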
pub async fn project_collaborators(
&self,
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<Vec<ProjectCollaborator>>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.all(&*tx)
.await?
.into_iter()
.map(|collaborator| ProjectCollaborator {
connection_id: collaborator.connection(),
user_id: collaborator.user_id,
replica_id: collaborator.replica_id,
is_host: collaborator.is_host,
})
.collect::<Vec<_>>();
if collaborators
.iter()
.any(|collaborator| collaborator.connection_id == connection_id)
{
Ok(collaborators)
} else {
Err(anyhow!("no such project"))?
}
})
.await
}
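/// Return the connection ids of every collaborator on the project.
/// Fails unless the requesting connection is among them.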
pub async fn project_connection_ids(
&self,
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<HashSet<ConnectionId>>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let mut collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
let mut connection_ids = HashSet::default();
while let Some(collaborator) = collaborators.next().await {
let collaborator = collaborator?;
connection_ids.insert(collaborator.connection());
}
if connection_ids.contains(&connection_id) {
Ok(connection_ids)
} else {
Err(anyhow!("no such project"))?
}
})
.await
}
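/// Return the connection ids of the project's non-host collaborators.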
async fn project_guest_connection_ids(
&self,
project_id: ProjectId,
tx: &DatabaseTransaction,
) -> Result<Vec<ConnectionId>> {
let mut collaborators = project_collaborator::Entity::find()
.filter(
project_collaborator::Column::ProjectId
.eq(project_id)
.and(project_collaborator::Column::IsHost.eq(false)),
)
.stream(tx)
.await?;
let mut guest_connection_ids = Vec::new();
while let Some(collaborator) = collaborators.next().await {
let collaborator = collaborator?;
guest_connection_ids.push(collaborator.connection());
}
Ok(guest_connection_ids)
}
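/// Look up the room in which the given project is shared.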
pub async fn room_id_for_project(&self, project_id: ProjectId) -> Result<RoomId> {
self.transaction(|tx| async move {
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project {} not found", project_id))?;
Ok(project.room_id)
})
.await
}
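/// Verify that both connections are participants in the given room.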
pub async fn check_room_participants(
&self,
room_id: RoomId,
leader_id: ConnectionId,
follower_id: ConnectionId,
) -> Result<()> {
self.transaction(|tx| async move {
use room_participant::Column;
let count = room_participant::Entity::find()
.filter(
Condition::all().add(Column::RoomId.eq(room_id)).add(
Condition::any()
.add(Column::AnsweringConnectionId.eq(leader_id.id as i32).and(
Column::AnsweringConnectionServerId.eq(leader_id.owner_id as i32),
))
.add(Column::AnsweringConnectionId.eq(follower_id.id as i32).and(
Column::AnsweringConnectionServerId.eq(follower_id.owner_id as i32),
)),
),
)
.count(&*tx)
.await?;
if count < 2 {
Err(anyhow!("not room participants"))?;
}
Ok(())
})
.await
}
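/// Record that the follower connection is following the leader
/// connection within the given project.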
pub async fn follow(
&self,
room_id: RoomId,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(room_id, |tx| async move {
follower::ActiveModel {
room_id: ActiveValue::set(room_id),
project_id: ActiveValue::set(project_id),
leader_connection_server_id: ActiveValue::set(ServerId(
leader_connection.owner_id as i32,
)),
leader_connection_id: ActiveValue::set(leader_connection.id as i32),
follower_connection_server_id: ActiveValue::set(ServerId(
follower_connection.owner_id as i32,
)),
follower_connection_id: ActiveValue::set(follower_connection.id as i32),
..Default::default()
}
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
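/// Delete the follow relationship between the two connections in the
/// given room and project.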
pub async fn unfollow(
&self,
room_id: RoomId,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(room_id, |tx| async move {
follower::Entity::delete_many()
.filter(
Condition::all()
.add(follower::Column::RoomId.eq(room_id))
.add(follower::Column::ProjectId.eq(project_id))
.add(
follower::Column::LeaderConnectionServerId
.eq(leader_connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(leader_connection.id))
.add(
follower::Column::FollowerConnectionServerId
.eq(follower_connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(follower_connection.id)),
)
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,99 @@
use super::*;
impl Database {
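/// Record a newly started server for the given environment.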
pub async fn create_server(&self, environment: &str) -> Result<ServerId> {
self.transaction(|tx| async move {
let server = server::ActiveModel {
environment: ActiveValue::set(environment.into()),
..Default::default()
}
.insert(&*tx)
.await?;
Ok(server.id)
})
.await
}
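/// Return the rooms and channels that still reference servers from
/// earlier epochs of this environment.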
pub async fn stale_server_resource_ids(
&self,
environment: &str,
new_server_id: ServerId,
) -> Result<(Vec<RoomId>, Vec<ChannelId>)> {
self.transaction(|tx| async move {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryRoomIds {
RoomId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryChannelIds {
ChannelId,
}
let stale_server_epochs = self
.stale_server_ids(environment, new_server_id, &tx)
.await?;
let room_ids = room_participant::Entity::find()
.select_only()
.column(room_participant::Column::RoomId)
.distinct()
.filter(
room_participant::Column::AnsweringConnectionServerId
.is_in(stale_server_epochs.iter().copied()),
)
.into_values::<_, QueryRoomIds>()
.all(&*tx)
.await?;
let channel_ids = channel_buffer_collaborator::Entity::find()
.select_only()
.column(channel_buffer_collaborator::Column::ChannelId)
.distinct()
.filter(
channel_buffer_collaborator::Column::ConnectionServerId
.is_in(stale_server_epochs.iter().copied()),
)
.into_values::<_, QueryChannelIds>()
.all(&*tx)
.await?;
Ok((room_ids, channel_ids))
})
.await
}
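/// Delete every server in this environment except the new one.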
pub async fn delete_stale_servers(
&self,
environment: &str,
new_server_id: ServerId,
) -> Result<()> {
self.transaction(|tx| async move {
server::Entity::delete_many()
.filter(
Condition::all()
.add(server::Column::Environment.eq(environment))
.add(server::Column::Id.ne(new_server_id)),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
async fn stale_server_ids(
&self,
environment: &str,
new_server_id: ServerId,
tx: &DatabaseTransaction,
) -> Result<Vec<ServerId>> {
let stale_servers = server::Entity::find()
.filter(
Condition::all()
.add(server::Column::Environment.eq(environment))
.add(server::Column::Id.ne(new_server_id)),
)
.all(&*tx)
.await?;
Ok(stale_servers.into_iter().map(|server| server.id).collect())
}
}

View File

@ -0,0 +1,259 @@
use super::*;
impl Database {
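/// Create a user with the given email address, upserting on the
/// github login.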
pub async fn create_user(
&self,
email_address: &str,
admin: bool,
params: NewUserParams,
) -> Result<NewUserResult> {
self.transaction(|tx| async {
let tx = tx;
let user = user::Entity::insert(user::ActiveModel {
email_address: ActiveValue::set(Some(email_address.into())),
github_login: ActiveValue::set(params.github_login.clone()),
github_user_id: ActiveValue::set(Some(params.github_user_id)),
admin: ActiveValue::set(admin),
metrics_id: ActiveValue::set(Uuid::new_v4()),
..Default::default()
})
.on_conflict(
OnConflict::column(user::Column::GithubLogin)
.update_column(user::Column::GithubLogin)
.to_owned(),
)
.exec_with_returning(&*tx)
.await?;
Ok(NewUserResult {
user_id: user.id,
metrics_id: user.metrics_id.to_string(),
signup_device_id: None,
inviting_user_id: None,
})
})
.await
}
pub async fn get_user_by_id(&self, id: UserId) -> Result<Option<user::Model>> {
self.transaction(|tx| async move { Ok(user::Entity::find_by_id(id).one(&*tx).await?) })
.await
}
pub async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<user::Model>> {
self.transaction(|tx| async {
let tx = tx;
Ok(user::Entity::find()
.filter(user::Column::Id.is_in(ids.iter().copied()))
.all(&*tx)
.await?)
})
.await
}
pub async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
self.transaction(|tx| async move {
Ok(user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(&*tx)
.await?)
})
.await
}
pub async fn get_or_create_user_by_github_account(
&self,
github_login: &str,
github_user_id: Option<i32>,
github_email: Option<&str>,
) -> Result<Option<User>> {
self.transaction(|tx| async move {
let tx = &*tx;
if let Some(github_user_id) = github_user_id {
if let Some(user_by_github_user_id) = user::Entity::find()
.filter(user::Column::GithubUserId.eq(github_user_id))
.one(tx)
.await?
{
let mut user_by_github_user_id = user_by_github_user_id.into_active_model();
user_by_github_user_id.github_login = ActiveValue::set(github_login.into());
Ok(Some(user_by_github_user_id.update(tx).await?))
} else if let Some(user_by_github_login) = user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(tx)
.await?
{
let mut user_by_github_login = user_by_github_login.into_active_model();
user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id));
Ok(Some(user_by_github_login.update(tx).await?))
} else {
let user = user::Entity::insert(user::ActiveModel {
email_address: ActiveValue::set(github_email.map(|email| email.into())),
github_login: ActiveValue::set(github_login.into()),
github_user_id: ActiveValue::set(Some(github_user_id)),
admin: ActiveValue::set(false),
invite_count: ActiveValue::set(0),
invite_code: ActiveValue::set(None),
metrics_id: ActiveValue::set(Uuid::new_v4()),
..Default::default()
})
.exec_with_returning(&*tx)
.await?;
Ok(Some(user))
}
} else {
Ok(user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(tx)
.await?)
}
})
.await
}
pub async fn get_all_users(&self, page: u32, limit: u32) -> Result<Vec<User>> {
self.transaction(|tx| async move {
Ok(user::Entity::find()
.order_by_asc(user::Column::GithubLogin)
.limit(limit as u64)
.offset(page as u64 * limit as u64)
.all(&*tx)
.await?)
})
.await
}
pub async fn get_user_metrics_id(&self, id: UserId) -> Result<String> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
MetricsId,
}
self.transaction(|tx| async move {
let metrics_id: Uuid = user::Entity::find_by_id(id)
.select_only()
.column(user::Column::MetricsId)
.into_values::<_, QueryAs>()
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("could not find user"))?;
Ok(metrics_id.to_string())
})
.await
}
pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> {
self.transaction(|tx| async move {
user::Entity::update_many()
.filter(user::Column::Id.eq(id))
.set(user::ActiveModel {
connected_once: ActiveValue::set(connected_once),
..Default::default()
})
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn destroy_user(&self, id: UserId) -> Result<()> {
self.transaction(|tx| async move {
access_token::Entity::delete_many()
.filter(access_token::Column::UserId.eq(id))
.exec(&*tx)
.await?;
user::Entity::delete_by_id(id).exec(&*tx).await?;
Ok(())
})
.await
}
pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result<Vec<User>> {
self.transaction(|tx| async {
let tx = tx;
let like_string = Self::fuzzy_like_string(name_query);
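// Rank matches by trigram distance (the <-> operator from Postgres's
// pg_trgm extension).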
let query = "
SELECT users.*
FROM users
WHERE github_login ILIKE $1
ORDER BY github_login <-> $2
LIMIT $3
";
Ok(user::Entity::find()
.from_raw_sql(Statement::from_sql_and_values(
self.pool.get_database_backend(),
query,
vec![like_string.into(), name_query.into(), limit.into()],
))
.all(&*tx)
.await?)
})
.await
}
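/// Build a LIKE pattern that matches the query's alphanumeric
/// characters in order; e.g. "zed" becomes "%z%e%d%".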
pub fn fuzzy_like_string(string: &str) -> String {
let mut result = String::with_capacity(string.len() * 2 + 1);
for c in string.chars() {
if c.is_alphanumeric() {
result.push('%');
result.push(c);
}
}
result.push('%');
result
}
pub async fn create_user_flag(&self, flag: &str) -> Result<FlagId> {
self.transaction(|tx| async move {
let flag = feature_flag::Entity::insert(feature_flag::ActiveModel {
flag: ActiveValue::set(flag.to_string()),
..Default::default()
})
.exec(&*tx)
.await?
.last_insert_id;
Ok(flag)
})
.await
}
pub async fn add_user_flag(&self, user: UserId, flag: FlagId) -> Result<()> {
self.transaction(|tx| async move {
user_feature::Entity::insert(user_feature::ActiveModel {
user_id: ActiveValue::set(user),
feature_id: ActiveValue::set(flag),
})
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn get_user_flags(&self, user: UserId) -> Result<Vec<String>> {
self.transaction(|tx| async move {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
Flag,
}
let flags = user::Model {
id: user,
..Default::default()
}
.find_linked(user::UserFlags)
.select_only()
.column(feature_flag::Column::Flag)
.into_values::<_, QueryAs>()
.all(&*tx)
.await?;
Ok(flags)
})
.await
}
}

View File

@ -0,0 +1,32 @@
pub mod access_token;
pub mod buffer;
pub mod buffer_operation;
pub mod buffer_snapshot;
pub mod channel;
pub mod channel_buffer_collaborator;
pub mod channel_chat_participant;
pub mod channel_member;
pub mod channel_message;
pub mod channel_message_mention;
pub mod contact;
pub mod feature_flag;
pub mod follower;
pub mod language_server;
pub mod notification;
pub mod notification_kind;
pub mod observed_buffer_edits;
pub mod observed_channel_messages;
pub mod project;
pub mod project_collaborator;
pub mod room;
pub mod room_participant;
pub mod server;
pub mod signup;
pub mod user;
pub mod user_feature;
pub mod worktree;
pub mod worktree_diagnostic_summary;
pub mod worktree_entry;
pub mod worktree_repository;
pub mod worktree_repository_statuses;
pub mod worktree_settings_file;

View File

@ -0,0 +1,29 @@
use crate::db::{AccessTokenId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "access_tokens")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: AccessTokenId,
pub user_id: UserId,
pub hash: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,45 @@
use crate::db::{BufferId, ChannelId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: BufferId,
pub epoch: i32,
pub channel_id: ChannelId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::buffer_operation::Entity")]
Operations,
#[sea_orm(has_many = "super::buffer_snapshot::Entity")]
Snapshots,
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::buffer_operation::Entity> for Entity {
fn to() -> RelationDef {
Relation::Operations.def()
}
}
impl Related<super::buffer_snapshot::Entity> for Entity {
fn to() -> RelationDef {
Relation::Snapshots.def()
}
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,34 @@
use crate::db::BufferId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffer_operations")]
pub struct Model {
#[sea_orm(primary_key)]
pub buffer_id: BufferId,
#[sea_orm(primary_key)]
pub epoch: i32,
#[sea_orm(primary_key)]
pub lamport_timestamp: i32,
#[sea_orm(primary_key)]
pub replica_id: i32,
pub value: Vec<u8>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,31 @@
use crate::db::BufferId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffer_snapshots")]
pub struct Model {
#[sea_orm(primary_key)]
pub buffer_id: BufferId,
#[sea_orm(primary_key)]
pub epoch: i32,
pub text: String,
pub operation_serialization_version: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,79 @@
use crate::db::{ChannelId, ChannelVisibility};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channels")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub parent_path: String,
}
impl Model {
pub fn parent_id(&self) -> Option<ChannelId> {
self.ancestors().last()
}
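/// Parse the ancestor channel ids from the slash-delimited
/// parent_path, e.g. "1/2/" yields channels 1 and 2.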
pub fn ancestors(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.parent_path
.trim_end_matches('/')
.split('/')
.filter_map(|id| Some(ChannelId::from_proto(id.parse().ok()?)))
}
pub fn ancestors_including_self(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.ancestors().chain(Some(self.id))
}
pub fn path(&self) -> String {
format!("{}{}/", self.parent_path, self.id)
}
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_one = "super::room::Entity")]
Room,
#[sea_orm(has_one = "super::buffer::Entity")]
Buffer,
#[sea_orm(has_many = "super::channel_member::Entity")]
Member,
#[sea_orm(has_many = "super::channel_buffer_collaborator::Entity")]
BufferCollaborators,
#[sea_orm(has_many = "super::channel_chat_participant::Entity")]
ChatParticipants,
}
impl Related<super::channel_member::Entity> for Entity {
fn to() -> RelationDef {
Relation::Member.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl Related<super::channel_buffer_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::BufferCollaborators.def()
}
}
impl Related<super::channel_chat_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::ChatParticipants.def()
}
}

View File

@ -0,0 +1,43 @@
use crate::db::{ChannelBufferCollaboratorId, ChannelId, ReplicaId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_buffer_collaborators")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelBufferCollaboratorId,
pub channel_id: ChannelId,
pub connection_id: i32,
pub connection_server_id: ServerId,
pub connection_lost: bool,
pub user_id: UserId,
pub replica_id: ReplicaId,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,41 @@
use crate::db::{ChannelChatParticipantId, ChannelId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_chat_participants")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelChatParticipantId,
pub channel_id: ChannelId,
pub user_id: UserId,
pub connection_id: i32,
pub connection_server_id: ServerId,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,59 @@
use crate::db::{channel_member, ChannelId, ChannelMemberId, ChannelRole, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_members")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelMemberId,
pub channel_id: ChannelId,
pub user_id: UserId,
pub accepted: bool,
pub role: ChannelRole,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
#[derive(Debug)]
pub struct UserToChannel;
impl Linked for UserToChannel {
type FromEntity = super::user::Entity;
type ToEntity = super::channel::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
channel_member::Relation::User.def().rev(),
channel_member::Relation::Channel.def(),
]
}
}

View File

@ -0,0 +1,45 @@
use crate::db::{ChannelId, MessageId, UserId};
use sea_orm::entity::prelude::*;
use time::PrimitiveDateTime;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_messages")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: MessageId,
pub channel_id: ChannelId,
pub sender_id: UserId,
pub body: String,
pub sent_at: PrimitiveDateTime,
pub nonce: Uuid,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::SenderId",
to = "super::user::Column::Id"
)]
Sender,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::Sender.def()
}
}

View File

@ -0,0 +1,43 @@
use crate::db::{MessageId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_message_mentions")]
pub struct Model {
#[sea_orm(primary_key)]
pub message_id: MessageId,
#[sea_orm(primary_key)]
pub start_offset: i32,
pub end_offset: i32,
pub user_id: UserId,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel_message::Entity",
from = "Column::MessageId",
to = "super::channel_message::Column::Id"
)]
Message,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
MentionedUser,
}
impl Related<super::channel_message::Entity> for Entity {
fn to() -> RelationDef {
Relation::Message.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::MentionedUser.def()
}
}

View File

@ -0,0 +1,32 @@
use crate::db::{ContactId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "contacts")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ContactId,
pub user_id_a: UserId,
pub user_id_b: UserId,
pub a_to_b: bool,
pub should_notify: bool,
pub accepted: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdA",
to = "super::room_participant::Column::UserId"
)]
UserARoomParticipant,
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdB",
to = "super::room_participant::Column::UserId"
)]
UserBRoomParticipant,
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,40 @@
use sea_orm::entity::prelude::*;
use crate::db::FlagId;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "feature_flags")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: FlagId,
pub flag: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::user_feature::Entity")]
UserFeature,
}
impl Related<super::user_feature::Entity> for Entity {
fn to() -> RelationDef {
Relation::UserFeature.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
pub struct FlaggedUsers;
impl Linked for FlaggedUsers {
type FromEntity = Entity;
type ToEntity = super::user::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
super::user_feature::Relation::Flag.def().rev(),
super::user_feature::Relation::User.def(),
]
}
}

View File

@ -0,0 +1,50 @@
use crate::db::{FollowerId, ProjectId, RoomId, ServerId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "followers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: FollowerId,
pub room_id: RoomId,
pub project_id: ProjectId,
pub leader_connection_server_id: ServerId,
pub leader_connection_id: i32,
pub follower_connection_server_id: ServerId,
pub follower_connection_id: i32,
}
impl Model {
pub fn leader_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.leader_connection_server_id.0 as u32,
id: self.leader_connection_id as u32,
}
}
pub fn follower_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.follower_connection_server_id.0 as u32,
id: self.follower_connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,30 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "language_servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub id: i64,
pub name: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,29 @@
use crate::db::{NotificationId, NotificationKindId, UserId};
use sea_orm::entity::prelude::*;
use time::PrimitiveDateTime;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notifications")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: NotificationId,
pub created_at: PrimitiveDateTime,
pub recipient_id: UserId,
pub kind: NotificationKindId,
pub entity_id: Option<i32>,
pub content: String,
pub is_read: bool,
pub response: Option<bool>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::RecipientId",
to = "super::user::Column::Id"
)]
Recipient,
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,15 @@
use crate::db::NotificationKindId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notification_kinds")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: NotificationKindId,
pub name: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,43 @@
use crate::db::{BufferId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "observed_buffer_edits")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
pub buffer_id: BufferId,
pub epoch: i32,
pub lamport_timestamp: i32,
pub replica_id: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,41 @@
use crate::db::{ChannelId, MessageId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "observed_channel_messages")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
pub channel_id: ChannelId,
pub channel_message_id: MessageId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,84 @@
use crate::db::{ProjectId, Result, RoomId, ServerId, UserId};
use anyhow::anyhow;
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "projects")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectId,
pub room_id: RoomId,
pub host_user_id: UserId,
pub host_connection_id: Option<i32>,
pub host_connection_server_id: Option<ServerId>,
}
impl Model {
pub fn host_connection(&self) -> Result<ConnectionId> {
let host_connection_server_id = self
.host_connection_server_id
.ok_or_else(|| anyhow!("empty host_connection_server_id"))?;
let host_connection_id = self
.host_connection_id
.ok_or_else(|| anyhow!("empty host_connection_id"))?;
Ok(ConnectionId {
owner_id: host_connection_server_id.0 as u32,
id: host_connection_id as u32,
})
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::HostUserId",
to = "super::user::Column::Id"
)]
HostUser,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
#[sea_orm(has_many = "super::worktree::Entity")]
Worktrees,
#[sea_orm(has_many = "super::project_collaborator::Entity")]
Collaborators,
#[sea_orm(has_many = "super::language_server::Entity")]
LanguageServers,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostUser.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl Related<super::worktree::Entity> for Entity {
fn to() -> RelationDef {
Relation::Worktrees.def()
}
}
impl Related<super::project_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::Collaborators.def()
}
}
impl Related<super::language_server::Entity> for Entity {
fn to() -> RelationDef {
Relation::LanguageServers.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,43 @@
use crate::db::{ProjectCollaboratorId, ProjectId, ReplicaId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "project_collaborators")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectCollaboratorId,
pub project_id: ProjectId,
pub connection_id: i32,
pub connection_server_id: ServerId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,54 @@
use crate::db::{ChannelId, RoomId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Default, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "rooms")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomId,
pub live_kit_room: String,
pub channel_id: Option<ChannelId>,
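// The "enviroment" spelling below presumably mirrors the existing
// database column name, so it is left as-is.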
pub enviroment: Option<String>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
Project,
#[sea_orm(has_many = "super::follower::Entity")]
Follower,
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl Related<super::follower::Entity> for Entity {
fn to() -> RelationDef {
Relation::Follower.def()
}
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,61 @@
use crate::db::{ProjectId, RoomId, RoomParticipantId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "room_participants")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomParticipantId,
pub room_id: RoomId,
pub user_id: UserId,
pub answering_connection_id: Option<i32>,
pub answering_connection_server_id: Option<ServerId>,
pub answering_connection_lost: bool,
pub location_kind: Option<i32>,
pub location_project_id: Option<ProjectId>,
pub initial_project_id: Option<ProjectId>,
pub calling_user_id: UserId,
pub calling_connection_id: i32,
pub calling_connection_server_id: Option<ServerId>,
pub participant_index: Option<i32>,
}
impl Model {
pub fn answering_connection(&self) -> Option<ConnectionId> {
Some(ConnectionId {
owner_id: self.answering_connection_server_id?.0 as u32,
id: self.answering_connection_id? as u32,
})
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
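
Both halves of the answering connection are nullable, and the `?` operators inside `answering_connection` make the accessor return `None` unless both are recorded. A sketch of how that composes with iterator code; the helper, its `db` handle, and the assumption that `RoomId` converts into a query `Value` are not part of this diff:

use sea_orm::{ColumnTrait, DatabaseConnection, DbErr, EntityTrait, QueryFilter};

// Hypothetical helper: the connection ids of participants in a room who have
// actually answered. Participants whose connection is only partially recorded
// yield `None` from `answering_connection` and drop out via `filter_map`.
async fn answering_connection_ids(
    db: &DatabaseConnection,
    room_id: RoomId,
) -> Result<Vec<ConnectionId>, DbErr> {
    let participants = Entity::find()
        .filter(Column::RoomId.eq(room_id))
        .all(db)
        .await?;
    Ok(participants
        .iter()
        .filter_map(Model::answering_connection)
        .collect())
}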

View File

@ -0,0 +1,15 @@
use crate::db::ServerId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ServerId,
pub environment: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,28 @@
use crate::db::{SignupId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "signups")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: SignupId,
pub email_address: String,
pub email_confirmation_code: String,
pub email_confirmation_sent: bool,
pub created_at: DateTime,
pub device_id: Option<String>,
pub user_id: Option<UserId>,
pub inviting_user_id: Option<UserId>,
pub platform_mac: bool,
pub platform_linux: bool,
pub platform_windows: bool,
pub platform_unknown: bool,
pub editor_features: Option<Vec<String>>,
pub programming_languages: Option<Vec<String>>,
pub added_to_mailing_list: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -0,0 +1,80 @@
use crate::db::UserId;
use sea_orm::entity::prelude::*;
use serde::Serialize;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel, Serialize)]
#[sea_orm(table_name = "users")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: UserId,
pub github_login: String,
pub github_user_id: Option<i32>,
pub email_address: Option<String>,
pub admin: bool,
pub invite_code: Option<String>,
pub invite_count: i32,
pub inviter_id: Option<UserId>,
pub connected_once: bool,
pub metrics_id: Uuid,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::access_token::Entity")]
AccessToken,
#[sea_orm(has_one = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
HostedProjects,
#[sea_orm(has_many = "super::channel_member::Entity")]
ChannelMemberships,
#[sea_orm(has_many = "super::user_feature::Entity")]
UserFeatures,
}
impl Related<super::access_token::Entity> for Entity {
fn to() -> RelationDef {
Relation::AccessToken.def()
}
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostedProjects.def()
}
}
impl Related<super::channel_member::Entity> for Entity {
fn to() -> RelationDef {
Relation::ChannelMemberships.def()
}
}
impl Related<super::user_feature::Entity> for Entity {
fn to() -> RelationDef {
Relation::UserFeatures.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
pub struct UserFlags;
impl Linked for UserFlags {
type FromEntity = Entity;
type ToEntity = super::feature_flag::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
super::user_feature::Relation::User.def().rev(),
super::user_feature::Relation::Flag.def(),
]
}
}
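
`UserFlags` chains two relation steps through the `user_features` join table: the reversed `User` relation walks from a user to its join rows, and the `Flag` relation walks on to the flags themselves. A hedged usage sketch (assumes an already-loaded `user: Model` and a `db` handle):

use sea_orm::{DatabaseConnection, DbErr, ModelTrait};

// Hypothetical helper: all feature flags enabled for one user, fetched by
// following the two-step link defined above.
async fn user_flags(
    db: &DatabaseConnection,
    user: &Model,
) -> Result<Vec<super::feature_flag::Model>, DbErr> {
    user.find_linked(UserFlags).all(db).await
}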

View File

@ -0,0 +1,42 @@
use sea_orm::entity::prelude::*;
use crate::db::{FlagId, UserId};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "user_features")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
#[sea_orm(primary_key)]
pub feature_id: FlagId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::feature_flag::Entity",
from = "Column::FeatureId",
to = "super::feature_flag::Column::Id"
)]
Flag,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::feature_flag::Entity> for Entity {
fn to() -> RelationDef {
Relation::Flag.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
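
Because both columns are marked `primary_key`, sea-orm treats `(user_id, feature_id)` as a composite key, so point lookups take a tuple. A minimal sketch (the helper name, `db` handle, and id values are assumptions):

use sea_orm::{DatabaseConnection, DbErr, EntityTrait};

// Hypothetical lookup: is this flag enabled for this user? With a composite
// primary key, `find_by_id` accepts the key as a tuple.
async fn has_flag(
    db: &DatabaseConnection,
    user_id: UserId,
    flag_id: FlagId,
) -> Result<bool, DbErr> {
    Ok(Entity::find_by_id((user_id, flag_id))
        .one(db)
        .await?
        .is_some())
}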

View File

@ -0,0 +1,36 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktrees")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i64,
#[sea_orm(primary_key)]
pub project_id: ProjectId,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
/// The last scan for which we've observed entries. It may be in progress.
pub scan_id: i64,
/// The last scan that fully completed.
pub completed_scan_id: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
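
The two scan counters let the server distinguish "entries observed" from "scan finished": a worktree's contents are only complete once the scan that produced them has fully completed. A hypothetical convenience method illustrating the invariant (a sketch only, not part of this diff):

// Sketch: the latest observed scan is trustworthy exactly when it is also
// the last fully completed one.
impl Model {
    pub fn is_scan_complete(&self) -> bool {
        self.scan_id == self.completed_scan_id
    }
}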

Some files were not shown because too many files have changed in this diff.