Remove 2 suffix for collab, rope, settings, menu

Co-authored-by: Mikayla <mikayla@zed.dev>
Max Brunsfeld 2024-01-03 12:28:45 -08:00
parent 177e3028a9
commit 0cf65223ce
234 changed files with 3765 additions and 42229 deletions

Cargo.lock (generated)

@ -13,7 +13,7 @@ dependencies = [
"gpui2",
"language",
"project",
"settings2",
"settings",
"smallvec",
"theme2",
"ui2",
@ -317,7 +317,7 @@ dependencies = [
"isahc",
"language",
"log",
"menu2",
"menu",
"multi_buffer",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
@ -329,7 +329,7 @@ dependencies = [
"semantic_index",
"serde",
"serde_json",
"settings2",
"settings",
"smol",
"theme2",
"tiktoken-rs",
@ -658,19 +658,6 @@ dependencies = [
[[package]]
name = "audio"
version = "0.1.0"
dependencies = [
"anyhow",
"collections",
"gpui",
"log",
"parking_lot 0.11.2",
"rodio",
"util",
]
[[package]]
name = "audio2"
version = "0.1.0"
dependencies = [
"anyhow",
"collections",
@ -693,12 +680,12 @@ dependencies = [
"isahc",
"lazy_static",
"log",
"menu2",
"menu",
"project",
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smol",
"tempdir",
"theme2",
@ -1029,7 +1016,7 @@ dependencies = [
"outline",
"project",
"search",
"settings2",
"settings",
"theme2",
"ui2",
"workspace",
@ -1119,7 +1106,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-broadcast",
"audio2",
"audio",
"client",
"collections",
"fs",
@ -1136,7 +1123,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"util",
]
@ -1225,7 +1212,7 @@ dependencies = [
"schemars",
"serde",
"serde_derive",
"settings2",
"settings",
"smallvec",
"smol",
"sum_tree",
@ -1398,7 +1385,7 @@ dependencies = [
"schemars",
"serde",
"serde_derive",
"settings2",
"settings",
"smol",
"sum_tree",
"sysinfo",
@ -1459,79 +1446,6 @@ dependencies = [
[[package]]
name = "collab"
version = "0.32.0"
dependencies = [
"anyhow",
"async-trait",
"async-tungstenite",
"audio",
"axum",
"axum-extra",
"base64 0.13.1",
"call",
"channel",
"clap 3.2.25",
"client",
"clock",
"collab_ui",
"collections",
"ctor",
"dashmap",
"editor",
"env_logger",
"envy",
"fs",
"futures 0.3.28",
"git",
"gpui",
"hyper",
"indoc",
"language",
"lazy_static",
"lipsum",
"live_kit_client",
"live_kit_server",
"log",
"lsp",
"nanoid",
"node_runtime",
"notifications",
"parking_lot 0.11.2",
"pretty_assertions",
"project",
"prometheus",
"prost 0.8.0",
"rand 0.8.5",
"reqwest",
"rpc",
"scrypt",
"sea-orm",
"serde",
"serde_derive",
"serde_json",
"settings",
"sha-1 0.9.8",
"smallvec",
"sqlx",
"text",
"theme",
"time",
"tokio",
"tokio-tungstenite",
"toml 0.5.11",
"tonic",
"tower",
"tracing",
"tracing-log",
"tracing-subscriber",
"unindent",
"util",
"uuid 1.4.1",
"workspace",
]
[[package]]
name = "collab2"
version = "0.28.0"
dependencies = [
"anyhow",
@ -1582,7 +1496,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"sha-1 0.9.8",
"smallvec",
"sqlx",
@ -1624,7 +1538,7 @@ dependencies = [
"language",
"lazy_static",
"log",
"menu2",
"menu",
"notifications",
"picker",
"postage",
@ -1636,7 +1550,7 @@ dependencies = [
"schemars",
"serde",
"serde_derive",
"settings2",
"settings",
"smallvec",
"theme2",
"theme_selector",
@ -1691,12 +1605,12 @@ dependencies = [
"go_to_line",
"gpui2",
"language",
"menu2",
"menu",
"picker",
"project",
"serde",
"serde_json",
"settings2",
"settings",
"theme2",
"ui2",
"util",
@ -1797,7 +1711,7 @@ dependencies = [
"rpc",
"serde",
"serde_derive",
"settings2",
"settings",
"smol",
"theme2",
"ui2",
@ -1815,7 +1729,7 @@ dependencies = [
"futures 0.3.28",
"gpui2",
"language",
"settings2",
"settings",
"smol",
"theme2",
"util",
@ -2325,7 +2239,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"theme2",
"ui2",
@ -2502,7 +2416,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"smol",
"snippet",
@ -2711,14 +2625,14 @@ dependencies = [
"language",
"lazy_static",
"log",
"menu2",
"menu",
"postage",
"project",
"regex",
"search",
"serde",
"serde_derive",
"settings2",
"settings",
"smallvec",
"smol",
"sysinfo",
@ -2741,13 +2655,13 @@ dependencies = [
"fuzzy",
"gpui2",
"language",
"menu2",
"menu",
"picker",
"postage",
"project",
"serde",
"serde_json",
"settings2",
"settings",
"text2",
"theme2",
"ui2",
@ -2914,7 +2828,7 @@ dependencies = [
"log",
"parking_lot 0.11.2",
"regex",
"rope2",
"rope",
"serde",
"serde_derive",
"serde_json",
@ -3236,10 +3150,10 @@ version = "0.1.0"
dependencies = [
"editor",
"gpui2",
"menu2",
"menu",
"postage",
"serde",
"settings2",
"settings",
"text2",
"theme2",
"ui2",
@ -3937,7 +3851,7 @@ dependencies = [
"log",
"schemars",
"serde",
"settings2",
"settings",
"shellexpand",
"util",
"workspace",
@ -4041,7 +3955,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"similar",
"smallvec",
"smol",
@ -4075,7 +3989,7 @@ dependencies = [
"language",
"picker",
"project",
"settings2",
"settings",
"theme2",
"ui2",
"util",
@ -4097,7 +4011,7 @@ dependencies = [
"lsp",
"project",
"serde",
"settings2",
"settings",
"theme2",
"tree-sitter",
"ui2",
@ -4517,13 +4431,6 @@ dependencies = [
[[package]]
name = "menu"
version = "0.1.0"
dependencies = [
"gpui",
]
[[package]]
name = "menu2"
version = "0.1.0"
dependencies = [
"gpui2",
"serde",
@ -4712,7 +4619,7 @@ dependencies = [
"schemars",
"serde",
"serde_derive",
"settings2",
"settings",
"smallvec",
"smol",
"snippet",
@ -4889,7 +4796,7 @@ dependencies = [
"feature_flags",
"gpui2",
"rpc",
"settings2",
"settings",
"sum_tree",
"text2",
"time",
@ -5293,7 +5200,7 @@ dependencies = [
"ordered-float 2.10.0",
"picker",
"postage",
"settings2",
"settings",
"smol",
"text2",
"theme2",
@ -5513,10 +5420,10 @@ dependencies = [
"editor",
"env_logger",
"gpui2",
"menu2",
"menu",
"parking_lot 0.11.2",
"serde_json",
"settings2",
"settings",
"theme2",
"ui2",
"util",
@ -5832,13 +5739,13 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"sha2 0.10.7",
"similar",
"smol",
"sum_tree",
"tempdir",
"terminal2",
"terminal",
"text2",
"thiserror",
"toml 0.5.11",
@ -5858,7 +5765,7 @@ dependencies = [
"futures 0.3.28",
"gpui2",
"language",
"menu2",
"menu",
"postage",
"pretty_assertions",
"project",
@ -5867,7 +5774,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"theme2",
"ui2",
@ -5891,7 +5798,7 @@ dependencies = [
"picker",
"postage",
"project",
"settings2",
"settings",
"smol",
"text2",
"theme2",
@ -6240,7 +6147,7 @@ dependencies = [
"ordered-float 2.10.0",
"picker",
"postage",
"settings2",
"settings",
"smol",
"text2",
"theme2",
@ -6520,20 +6427,6 @@ dependencies = [
[[package]]
name = "rope"
version = "0.1.0"
dependencies = [
"arrayvec 0.7.4",
"bromberg_sl2",
"gpui",
"log",
"rand 0.8.5",
"smallvec",
"sum_tree",
"util",
]
[[package]]
name = "rope2"
version = "0.1.0"
dependencies = [
"arrayvec 0.7.4",
"bromberg_sl2",
@ -7030,14 +6923,14 @@ dependencies = [
"gpui2",
"language",
"log",
"menu2",
"menu",
"postage",
"project",
"semantic_index",
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"smol",
"theme2",
@ -7101,7 +6994,7 @@ dependencies = [
"schemars",
"serde",
"serde_json",
"settings2",
"settings",
"sha1",
"smol",
"tempdir",
@ -7244,35 +7137,6 @@ dependencies = [
[[package]]
name = "settings"
version = "0.1.0"
dependencies = [
"anyhow",
"collections",
"feature_flags",
"fs",
"futures 0.3.28",
"gpui",
"indoc",
"lazy_static",
"postage",
"pretty_assertions",
"rust-embed",
"schemars",
"serde",
"serde_derive",
"serde_json",
"serde_json_lenient",
"smallvec",
"sqlez",
"toml 0.5.11",
"tree-sitter",
"tree-sitter-json 0.19.0",
"unindent",
"util",
]
[[package]]
name = "settings2"
version = "0.1.0"
dependencies = [
"anyhow",
"collections",
@ -7904,11 +7768,11 @@ dependencies = [
"itertools 0.11.0",
"language",
"log",
"menu2",
"menu",
"picker",
"rust-embed",
"serde",
"settings2",
"settings",
"simplelog",
"smallvec",
"story",
@ -8196,35 +8060,6 @@ dependencies = [
[[package]]
name = "terminal"
version = "0.1.0"
dependencies = [
"alacritty_terminal",
"anyhow",
"db",
"dirs 4.0.0",
"futures 0.3.28",
"gpui",
"itertools 0.10.5",
"lazy_static",
"libc",
"mio-extras",
"ordered-float 2.10.0",
"procinfo",
"rand 0.8.5",
"schemars",
"serde",
"serde_derive",
"settings",
"shellexpand",
"smallvec",
"smol",
"theme",
"thiserror",
"util",
]
[[package]]
name = "terminal2"
version = "0.1.0"
dependencies = [
"alacritty_terminal",
"anyhow",
@ -8242,7 +8077,7 @@ dependencies = [
"schemars",
"serde",
"serde_derive",
"settings2",
"settings",
"shellexpand",
"smallvec",
"smol",
@ -8273,11 +8108,11 @@ dependencies = [
"rand 0.8.5",
"serde",
"serde_derive",
"settings2",
"settings",
"shellexpand",
"smallvec",
"smol",
"terminal2",
"terminal",
"theme2",
"thiserror",
"ui2",
@ -8325,7 +8160,7 @@ dependencies = [
"postage",
"rand 0.8.5",
"regex",
"rope2",
"rope",
"smallvec",
"sum_tree",
"util",
@ -8370,7 +8205,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"story",
"toml 0.5.11",
"util",
@ -8413,7 +8248,7 @@ dependencies = [
"parking_lot 0.11.2",
"picker",
"postage",
"settings2",
"settings",
"smol",
"theme2",
"ui2",
@ -9227,10 +9062,10 @@ dependencies = [
"chrono",
"gpui2",
"itertools 0.11.0",
"menu2",
"menu",
"rand 0.8.5",
"serde",
"settings2",
"settings",
"smallvec",
"story",
"strum",
@ -9516,7 +9351,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"theme2",
"tokio",
"ui2",
@ -9929,7 +9764,7 @@ dependencies = [
"project",
"schemars",
"serde",
"settings2",
"settings",
"theme2",
"theme_selector",
"ui2",
@ -10206,9 +10041,9 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"smallvec",
"terminal2",
"terminal",
"theme2",
"ui2",
"util",
@ -10305,7 +10140,7 @@ dependencies = [
"async-recursion 0.3.2",
"async-tar",
"async-trait",
"audio2",
"audio",
"auto_update",
"backtrace",
"breadcrumbs",
@ -10345,7 +10180,7 @@ dependencies = [
"libc",
"log",
"lsp",
"menu2",
"menu",
"node_runtime",
"notifications",
"num_cpus",
@ -10359,7 +10194,7 @@ dependencies = [
"rand 0.8.5",
"recent_projects",
"regex",
"rope2",
"rope",
"rpc",
"rsa 0.4.0",
"rust-embed",
@ -10369,7 +10204,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"settings2",
"settings",
"shellexpand",
"simplelog",
"smallvec",


@ -4,7 +4,6 @@ members = [
"crates/ai",
"crates/assistant",
"crates/audio",
"crates/audio2",
"crates/auto_update",
"crates/breadcrumbs",
"crates/call",
@ -13,7 +12,6 @@ members = [
"crates/client",
"crates/clock",
"crates/collab",
"crates/collab2",
"crates/collab_ui",
"crates/collections",
"crates/command_palette",
@ -50,7 +48,6 @@ members = [
"crates/lsp",
"crates/media",
"crates/menu",
"crates/menu2",
"crates/multi_buffer",
"crates/node_runtime",
"crates/notifications",
@ -69,7 +66,6 @@ members = [
"crates/search",
"crates/semantic_index",
"crates/settings",
"crates/settings2",
"crates/snippet",
"crates/sqlez",
"crates/sqlez_macros",
@ -77,7 +73,6 @@ members = [
"crates/storybook2",
"crates/sum_tree",
"crates/terminal",
"crates/terminal2",
"crates/terminal_view",
"crates/text",
"crates/theme",


@ -14,7 +14,7 @@ editor = { path = "../editor" }
language = { path = "../language" }
gpui = { path = "../gpui2", package = "gpui2" }
project = { path = "../project" }
settings = { path = "../settings2", package = "settings2" }
settings = { path = "../settings" }
ui = { path = "../ui2", package = "ui2" }
util = { path = "../util" }
theme = { path = "../theme2", package = "theme2" }


@ -16,12 +16,12 @@ editor = { path = "../editor" }
fs = { path = "../fs" }
gpui = { package = "gpui2", path = "../gpui2" }
language = { path = "../language" }
menu = { package = "menu2", path = "../menu2" }
menu = { path = "../menu" }
multi_buffer = { path = "../multi_buffer" }
project = { path = "../project" }
search = { path = "../search" }
semantic_index = { path = "../semantic_index" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
theme = { package = "theme2", path = "../theme2" }
ui = { package = "ui2", path = "../ui2" }
util = { path = "../util" }


@ -9,15 +9,14 @@ path = "src/audio.rs"
doctest = false
[dependencies]
gpui = { path = "../gpui" }
gpui = { package = "gpui2", path = "../gpui2" }
collections = { path = "../collections" }
util = { path = "../util" }
rodio ={version = "0.17.1", default-features=false, features = ["wav"]}
log.workspace = true
futures.workspace = true
anyhow.workspace = true
parking_lot.workspace = true
[dev-dependencies]


@ -60,7 +60,7 @@ impl Audio {
return;
}
cx.update_global::<Self, _, _>(|this, cx| {
cx.update_global::<Self, _>(|this, cx| {
let output_handle = this.ensure_output_exists()?;
let source = SoundRegistry::global(cx).get(sound.file()).log_err()?;
output_handle.play_raw(source).log_err()?;
@ -73,7 +73,7 @@ impl Audio {
return;
}
cx.update_global::<Self, _, _>(|this, _| {
cx.update_global::<Self, _>(|this, _| {
this._output_stream.take();
this.output_handle.take();
});
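(For reference, the two hunks above migrate to gpui2's two-type-parameter update_global, whose parameters are the global's type and the closure's return type. A minimal standalone sketch of that call shape, using a hypothetical Counter global rather than code from this commit:)

use gpui::AppContext;

// Hypothetical global used only to illustrate the call shape.
struct Counter(usize);

fn bump(cx: &mut AppContext) {
    // Two type parameters: the global's type and the closure's return
    // type (usually inferred, hence `_`). Panics if no Counter global
    // was previously registered with cx.set_global(Counter(0)).
    cx.update_global::<Counter, _>(|counter, _cx| {
        counter.0 += 1;
    });
}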


@ -1,24 +0,0 @@
[package]
name = "audio2"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/audio2.rs"
doctest = false
[dependencies]
gpui = { package = "gpui2", path = "../gpui2" }
collections = { path = "../collections" }
util = { path = "../util" }
rodio ={version = "0.17.1", default-features=false, features = ["wav"]}
log.workspace = true
futures.workspace = true
anyhow.workspace = true
parking_lot.workspace = true
[dev-dependencies]


@ -1,23 +0,0 @@
[package]
name = "audio"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/audio.rs"
doctest = false
[dependencies]
gpui = { path = "../gpui" }
collections = { path = "../collections" }
util = { path = "../util" }
rodio ={version = "0.17.1", default-features=false, features = ["wav"]}
log.workspace = true
anyhow.workspace = true
parking_lot.workspace = true
[dev-dependencies]


@ -1,44 +0,0 @@
use std::{io::Cursor, sync::Arc};
use anyhow::Result;
use collections::HashMap;
use gpui::{AppContext, AssetSource};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, Source,
};
type Sound = Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>;
pub struct SoundRegistry {
cache: Arc<parking_lot::Mutex<HashMap<String, Sound>>>,
assets: Box<dyn AssetSource>,
}
impl SoundRegistry {
pub fn new(source: impl AssetSource) -> Arc<Self> {
Arc::new(Self {
cache: Default::default(),
assets: Box::new(source),
})
}
pub fn global(cx: &AppContext) -> Arc<Self> {
cx.global::<Arc<Self>>().clone()
}
pub fn get(&self, name: &str) -> Result<impl Source<Item = f32>> {
if let Some(wav) = self.cache.lock().get(name) {
return Ok(wav.clone());
}
let path = format!("sounds/{}.wav", name);
let bytes = self.assets.load(&path)?.into_owned();
let cursor = Cursor::new(bytes);
let source = Decoder::new(cursor)?.convert_samples::<f32>().buffered();
self.cache.lock().insert(name.to_string(), source.clone());
Ok(source)
}
}


@ -1,81 +0,0 @@
use assets::SoundRegistry;
use gpui::{AppContext, AssetSource};
use rodio::{OutputStream, OutputStreamHandle};
use util::ResultExt;
mod assets;
pub fn init(source: impl AssetSource, cx: &mut AppContext) {
cx.set_global(SoundRegistry::new(source));
cx.set_global(Audio::new());
}
pub enum Sound {
Joined,
Leave,
Mute,
Unmute,
StartScreenshare,
StopScreenshare,
}
impl Sound {
fn file(&self) -> &'static str {
match self {
Self::Joined => "joined_call",
Self::Leave => "leave_call",
Self::Mute => "mute",
Self::Unmute => "unmute",
Self::StartScreenshare => "start_screenshare",
Self::StopScreenshare => "stop_screenshare",
}
}
}
pub struct Audio {
_output_stream: Option<OutputStream>,
output_handle: Option<OutputStreamHandle>,
}
impl Audio {
pub fn new() -> Self {
Self {
_output_stream: None,
output_handle: None,
}
}
fn ensure_output_exists(&mut self) -> Option<&OutputStreamHandle> {
if self.output_handle.is_none() {
let (_output_stream, output_handle) = OutputStream::try_default().log_err().unzip();
self.output_handle = output_handle;
self._output_stream = _output_stream;
}
self.output_handle.as_ref()
}
pub fn play_sound(sound: Sound, cx: &mut AppContext) {
if !cx.has_global::<Self>() {
return;
}
cx.update_global::<Self, _, _>(|this, cx| {
let output_handle = this.ensure_output_exists()?;
let source = SoundRegistry::global(cx).get(sound.file()).log_err()?;
output_handle.play_raw(source).log_err()?;
Some(())
});
}
pub fn end_call(cx: &mut AppContext) {
if !cx.has_global::<Self>() {
return;
}
cx.update_global::<Self, _, _>(|this, _| {
this._output_stream.take();
this.output_handle.take();
});
}
}


@ -1,44 +0,0 @@
use std::{io::Cursor, sync::Arc};
use anyhow::Result;
use collections::HashMap;
use gpui::{AppContext, AssetSource};
use rodio::{
source::{Buffered, SamplesConverter},
Decoder, Source,
};
type Sound = Buffered<SamplesConverter<Decoder<Cursor<Vec<u8>>>, f32>>;
pub struct SoundRegistry {
cache: Arc<parking_lot::Mutex<HashMap<String, Sound>>>,
assets: Box<dyn AssetSource>,
}
impl SoundRegistry {
pub fn new(source: impl AssetSource) -> Arc<Self> {
Arc::new(Self {
cache: Default::default(),
assets: Box::new(source),
})
}
pub fn global(cx: &AppContext) -> Arc<Self> {
cx.global::<Arc<Self>>().clone()
}
pub fn get(&self, name: &str) -> Result<impl Source<Item = f32>> {
if let Some(wav) = self.cache.lock().get(name) {
return Ok(wav.clone());
}
let path = format!("sounds/{}.wav", name);
let bytes = self.assets.load(&path)?.into_owned();
let cursor = Cursor::new(bytes);
let source = Decoder::new(cursor)?.convert_samples::<f32>().buffered();
self.cache.lock().insert(name.to_string(), source.clone());
Ok(source)
}
}


@ -1,81 +0,0 @@
use assets::SoundRegistry;
use gpui::{AppContext, AssetSource};
use rodio::{OutputStream, OutputStreamHandle};
use util::ResultExt;
mod assets;
pub fn init(source: impl AssetSource, cx: &mut AppContext) {
cx.set_global(SoundRegistry::new(source));
cx.set_global(Audio::new());
}
pub enum Sound {
Joined,
Leave,
Mute,
Unmute,
StartScreenshare,
StopScreenshare,
}
impl Sound {
fn file(&self) -> &'static str {
match self {
Self::Joined => "joined_call",
Self::Leave => "leave_call",
Self::Mute => "mute",
Self::Unmute => "unmute",
Self::StartScreenshare => "start_screenshare",
Self::StopScreenshare => "stop_screenshare",
}
}
}
pub struct Audio {
_output_stream: Option<OutputStream>,
output_handle: Option<OutputStreamHandle>,
}
impl Audio {
pub fn new() -> Self {
Self {
_output_stream: None,
output_handle: None,
}
}
fn ensure_output_exists(&mut self) -> Option<&OutputStreamHandle> {
if self.output_handle.is_none() {
let (_output_stream, output_handle) = OutputStream::try_default().log_err().unzip();
self.output_handle = output_handle;
self._output_stream = _output_stream;
}
self.output_handle.as_ref()
}
pub fn play_sound(sound: Sound, cx: &mut AppContext) {
if !cx.has_global::<Self>() {
return;
}
cx.update_global::<Self, _>(|this, cx| {
let output_handle = this.ensure_output_exists()?;
let source = SoundRegistry::global(cx).get(sound.file()).log_err()?;
output_handle.play_raw(source).log_err()?;
Some(())
});
}
pub fn end_call(cx: &mut AppContext) {
if !cx.has_global::<Self>() {
return;
}
cx.update_global::<Self, _>(|this, _| {
this._output_stream.take();
this.output_handle.take();
});
}
}


@ -12,9 +12,9 @@ doctest = false
db = { path = "../db" }
client = { path = "../client" }
gpui = { package = "gpui2", path = "../gpui2" }
menu = { package = "menu2", path = "../menu2" }
menu = { path = "../menu" }
project = { path = "../project" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
theme = { package = "theme2", path = "../theme2" }
workspace = { path = "../workspace" }
util = { path = "../util" }


@ -16,7 +16,7 @@ ui = { package = "ui2", path = "../ui2" }
language = { path = "../language" }
project = { path = "../project" }
search = { path = "../search" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
theme = { package = "theme2", path = "../theme2" }
workspace = { path = "../workspace" }
outline = { path = "../outline" }


@ -19,7 +19,7 @@ test-support = [
]
[dependencies]
audio = { package = "audio2", path = "../audio2" }
audio = { path = "../audio" }
client = { path = "../client" }
collections = { path = "../collections" }
gpui = { package = "gpui2", path = "../gpui2" }
@ -29,7 +29,7 @@ fs = { path = "../fs" }
language = { path = "../language" }
media = { path = "../media" }
project = { path = "../project" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
util = { path = "../util" }
anyhow.workspace = true


@ -20,7 +20,7 @@ util = { path = "../util" }
rpc = { path = "../rpc" }
text = { package = "text2", path = "../text2" }
language = { path = "../language" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
feature_flags = { path = "../feature_flags" }
sum_tree = { path = "../sum_tree" }
clock = { path = "../clock" }
@ -50,5 +50,5 @@ collections = { path = "../collections", features = ["test-support"] }
gpui = { package = "gpui2", path = "../gpui2", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
client = { path = "../client", features = ["test-support"] }
settings = { package = "settings2", path = "../settings2", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
util = { path = "../util", features = ["test-support"] }


@ -19,7 +19,7 @@ gpui = { package = "gpui2", path = "../gpui2" }
util = { path = "../util" }
rpc = { path = "../rpc" }
text = { package = "text2", path = "../text2" }
settings = { package = "settings2", path = "../settings2" }
settings = { path = "../settings" }
feature_flags = { path = "../feature_flags" }
sum_tree = { path = "../sum_tree" }
@ -49,5 +49,5 @@ url = "2.2"
collections = { path = "../collections", features = ["test-support"] }
gpui = { package = "gpui2", path = "../gpui2", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { package = "settings2", path = "../settings2", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
util = { path = "../util", features = ["test-support"] }


@ -3,7 +3,7 @@ authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab"
edition = "2021"
name = "collab"
version = "0.32.0"
version = "0.28.0"
publish = false
[[bin]]
@ -17,7 +17,7 @@ required-features = ["seed-support"]
clock = { path = "../clock" }
collections = { path = "../collections" }
live_kit_server = { path = "../live_kit_server" }
text = { path = "../text" }
text = { package = "text2", path = "../text2" }
rpc = { path = "../rpc" }
util = { path = "../util" }
@ -62,7 +62,7 @@ uuid.workspace = true
[dev-dependencies]
audio = { path = "../audio" }
collections = { path = "../collections", features = ["test-support"] }
gpui = { path = "../gpui", features = ["test-support"] }
gpui = { package = "gpui2", path = "../gpui2", features = ["test-support"] }
call = { path = "../call", features = ["test-support"] }
client = { path = "../client", features = ["test-support"] }
channel = { path = "../channel" }
@ -70,15 +70,17 @@ editor = { path = "../editor", features = ["test-support"] }
language = { path = "../language", features = ["test-support"] }
fs = { path = "../fs", features = ["test-support"] }
git = { path = "../git", features = ["test-support"] }
live_kit_client = { path = "../live_kit_client", features = ["test-support"] }
live_kit_client = { package = "live_kit_client2", path = "../live_kit_client2", features = ["test-support"] }
lsp = { path = "../lsp", features = ["test-support"] }
node_runtime = { path = "../node_runtime" }
notifications = { path = "../notifications", features = ["test-support"] }
project = { path = "../project", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
theme = { path = "../theme" }
theme = { package = "theme2", path = "../theme2" }
workspace = { path = "../workspace", features = ["test-support"] }
collab_ui = { path = "../collab_ui", features = ["test-support"] }
async-trait.workspace = true


@ -1 +0,0 @@
../collab2/k8s


@ -5,7 +5,7 @@ mod feature_flag_tests;
mod message_tests;
use super::*;
use gpui::executor::Background;
use gpui::BackgroundExecutor;
use parking_lot::Mutex;
use sea_orm::ConnectionTrait;
use sqlx::migrate::MigrateDatabase;
@ -22,7 +22,7 @@ pub struct TestDb {
}
impl TestDb {
pub fn sqlite(background: Arc<Background>) -> Self {
pub fn sqlite(background: BackgroundExecutor) -> Self {
let url = format!("sqlite::memory:");
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_io()
@ -59,7 +59,7 @@ impl TestDb {
}
}
pub fn postgres(background: Arc<Background>) -> Self {
pub fn postgres(background: BackgroundExecutor) -> Self {
static LOCK: Mutex<()> = Mutex::new(());
let _guard = LOCK.lock();
@ -108,17 +108,14 @@ impl TestDb {
macro_rules! test_both_dbs {
($test_name:ident, $postgres_test_name:ident, $sqlite_test_name:ident) => {
#[gpui::test]
async fn $postgres_test_name() {
let test_db = crate::db::TestDb::postgres(
gpui::executor::Deterministic::new(0).build_background(),
);
async fn $postgres_test_name(cx: &mut gpui::TestAppContext) {
let test_db = crate::db::TestDb::postgres(cx.executor().clone());
$test_name(test_db.db()).await;
}
#[gpui::test]
async fn $sqlite_test_name() {
let test_db =
crate::db::TestDb::sqlite(gpui::executor::Deterministic::new(0).build_background());
async fn $sqlite_test_name(cx: &mut gpui::TestAppContext) {
let test_db = crate::db::TestDb::sqlite(cx.executor().clone());
$test_name(test_db.db()).await;
}
};
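(Outside the macro, the injected-executor pattern above amounts to the following sketch; the test name and body are hypothetical:)

#[gpui::test]
async fn test_example(cx: &mut gpui::TestAppContext) {
    // The deterministic executor now comes from the injected test context
    // instead of being built by hand via Deterministic::new(0).
    let test_db = crate::db::TestDb::sqlite(cx.executor().clone());
    let db = test_db.db();
    // ... exercise `db` here ...
}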

View File

@ -420,8 +420,6 @@ async fn test_db_channel_moving_bugs(db: &Arc<Database>) {
.await
.unwrap();
// Dag is: zed - projects - livestreaming
// Move to same parent should be a no-op
assert!(db
.move_channel(projects_id, Some(zed_id), user_id)


@ -1,6 +1,6 @@
use super::*;
use crate::test_both_dbs;
use gpui::executor::{Background, Deterministic};
use gpui::TestAppContext;
use pretty_assertions::{assert_eq, assert_ne};
use std::sync::Arc;
use tests::TestDb;
@ -509,8 +509,8 @@ fn test_fuzzy_like_string() {
}
#[gpui::test]
async fn test_fuzzy_search_users() {
let test_db = TestDb::postgres(build_background_executor());
async fn test_fuzzy_search_users(cx: &mut TestAppContext) {
let test_db = TestDb::postgres(cx.executor());
let db = test_db.db();
for (i, github_login) in [
"California",
@ -631,7 +631,3 @@ async fn test_non_matching_release_channels(db: &Arc<Database>) {
assert!(result.is_ok())
}
fn build_background_executor() -> Arc<Background> {
Deterministic::new(0).build_background()
}


@ -1,10 +1,13 @@
use std::{future::Future, time::Duration};
#[cfg(test)]
use gpui::BackgroundExecutor;
#[derive(Clone)]
pub enum Executor {
Production,
#[cfg(test)]
Deterministic(std::sync::Arc<gpui::executor::Background>),
Deterministic(BackgroundExecutor),
}
impl Executor {
@ -33,12 +36,4 @@ impl Executor {
}
}
}
pub fn record_backtrace(&self) {
match self {
Executor::Production => {}
#[cfg(test)]
Executor::Deterministic(background) => background.record_backtrace(),
}
}
}
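(The enum's remaining methods dispatch per variant. As a sketch only, assuming gpui's BackgroundExecutor::timer and a tokio runtime in production, a sleep on this enum could look like this; it is not necessarily the body elided above:)

use std::time::Duration;

impl Executor {
    pub async fn sleep(&self, duration: Duration) {
        match self {
            // Production: defer to the tokio runtime's timer.
            Executor::Production => tokio::time::sleep(duration).await,
            // Tests: use gpui's deterministic timer, so advance_clock and
            // run_until_parked control the passage of time.
            #[cfg(test)]
            Executor::Deterministic(background) => background.timer(duration).await,
        }
    }
}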

View File

@ -110,7 +110,7 @@ struct Session {
peer: Arc<Peer>,
connection_pool: Arc<parking_lot::Mutex<ConnectionPool>>,
live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
executor: Executor,
_executor: Executor,
}
impl Session {
@ -612,7 +612,7 @@ impl Server {
peer: this.peer.clone(),
connection_pool: this.connection_pool.clone(),
live_kit_client: this.app_state.live_kit_client.clone(),
executor: executor.clone(),
_executor: executor.clone()
};
update_user_contacts(user_id, &session).await?;
@ -1723,7 +1723,6 @@ async fn update_language_server(
request: proto::UpdateLanguageServer,
session: Session,
) -> Result<()> {
session.executor.record_backtrace();
let project_id = ProjectId::from_proto(request.project_id);
let project_connection_ids = session
.db()
@ -1750,7 +1749,6 @@ async fn forward_project_request<T>(
where
T: EntityMessage + RequestMessage,
{
session.executor.record_backtrace();
let project_id = ProjectId::from_proto(request.remote_entity_id());
let host_connection_id = {
let collaborators = session
@ -1778,7 +1776,6 @@ async fn create_buffer_for_peer(
request: proto::CreateBufferForPeer,
session: Session,
) -> Result<()> {
session.executor.record_backtrace();
let peer_id = request.peer_id.ok_or_else(|| anyhow!("invalid peer id"))?;
session
.peer
@ -1791,7 +1788,6 @@ async fn update_buffer(
response: Response<proto::UpdateBuffer>,
session: Session,
) -> Result<()> {
session.executor.record_backtrace();
let project_id = ProjectId::from_proto(request.project_id);
let mut guest_connection_ids;
let mut host_connection_id = None;
@ -1812,7 +1808,6 @@ async fn update_buffer(
}
let host_connection_id = host_connection_id.ok_or_else(|| anyhow!("host not found"))?;
session.executor.record_backtrace();
broadcast(
Some(session.connection_id),
guest_connection_ids,


@ -1,9 +1,10 @@
use call::Room;
use gpui::{ModelHandle, TestAppContext};
use gpui::{Model, TestAppContext};
mod channel_buffer_tests;
mod channel_message_tests;
mod channel_tests;
mod editor_tests;
mod following_tests;
mod integration_tests;
mod notification_tests;
@ -23,7 +24,7 @@ struct RoomParticipants {
pending: Vec<String>,
}
fn room_participants(room: &ModelHandle<Room>, cx: &mut TestAppContext) -> RoomParticipants {
fn room_participants(room: &Model<Room>, cx: &mut TestAppContext) -> RoomParticipants {
room.read_with(cx, |room, _| {
let mut remote = room
.remote_participants()
@ -41,6 +42,6 @@ fn room_participants(room: &ModelHandle<Room>, cx: &mut TestAppContext) -> RoomP
})
}
fn channel_id(room: &ModelHandle<Room>, cx: &mut TestAppContext) -> Option<u64> {
fn channel_id(room: &Model<Room>, cx: &mut TestAppContext) -> Option<u64> {
cx.read(|cx| room.read(cx).channel_id())
}


@ -4,25 +4,23 @@ use crate::{
};
use call::ActiveCall;
use channel::ACKNOWLEDGE_DEBOUNCE_INTERVAL;
use client::ParticipantIndex;
use client::{Collaborator, UserId};
use client::{Collaborator, ParticipantIndex, UserId};
use collab_ui::channel_view::ChannelView;
use collections::HashMap;
use editor::{Anchor, Editor, ToOffset};
use futures::future;
use gpui::{executor::Deterministic, ModelHandle, TestAppContext, ViewContext};
use gpui::{BackgroundExecutor, Model, TestAppContext, ViewContext};
use rpc::{proto::PeerId, RECEIVE_TIMEOUT};
use serde_json::json;
use std::{ops::Range, sync::Arc};
use std::ops::Range;
#[gpui::test]
async fn test_core_channel_buffers(
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -50,7 +48,7 @@ async fn test_core_channel_buffers(
});
buffer_a.update(cx_a, |buffer, cx| buffer.undo(cx));
assert_eq!(buffer_text(&buffer_a, cx_a), "hello, cruel world");
deterministic.run_until_parked();
executor.run_until_parked();
// Client B joins the channel buffer
let channel_buffer_b = client_b
@ -77,13 +75,13 @@ async fn test_core_channel_buffers(
});
// Both A and B see the new edit
deterministic.run_until_parked();
executor.run_until_parked();
assert_eq!(buffer_text(&buffer_a, cx_a), "hello, beautiful world");
assert_eq!(buffer_text(&buffer_b, cx_b), "hello, beautiful world");
// Client A closes the channel buffer.
cx_a.update(|_| drop(channel_buffer_a));
deterministic.run_until_parked();
executor.run_until_parked();
// Client B sees that client A is gone from the channel buffer.
channel_buffer_b.read_with(cx_b, |buffer, _| {
@ -96,7 +94,7 @@ async fn test_core_channel_buffers(
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
deterministic.run_until_parked();
executor.run_until_parked();
// Sanity test, make sure we saw A rejoining
channel_buffer_b.read_with(cx_b, |buffer, _| {
@ -109,7 +107,7 @@ async fn test_core_channel_buffers(
// Client A loses connection.
server.forbid_connections();
server.disconnect_client(client_a.peer_id().unwrap());
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
executor.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
// Client B observes A disconnect
channel_buffer_b.read_with(cx_b, |buffer, _| {
@ -123,13 +121,12 @@ async fn test_core_channel_buffers(
#[gpui::test]
async fn test_channel_notes_participant_indices(
deterministic: Arc<Deterministic>,
mut cx_a: &mut TestAppContext,
mut cx_b: &mut TestAppContext,
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
@ -157,9 +154,10 @@ async fn test_channel_notes_participant_indices(
let (project_a, worktree_id_a) = client_a.build_local_project("/root", cx_a).await;
let project_b = client_b.build_empty_local_project(cx_b);
let project_c = client_c.build_empty_local_project(cx_c);
let workspace_a = client_a.build_workspace(&project_a, cx_a).root(cx_a);
let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
let workspace_c = client_c.build_workspace(&project_c, cx_c).root(cx_c);
let (workspace_a, mut cx_a) = client_a.build_workspace(&project_a, cx_a);
let (workspace_b, mut cx_b) = client_b.build_workspace(&project_b, cx_b);
let (workspace_c, cx_c) = client_c.build_workspace(&project_c, cx_c);
// Clients A, B, and C open the channel notes
let channel_view_a = cx_a
@ -184,7 +182,7 @@ async fn test_channel_notes_participant_indices(
});
});
});
deterministic.run_until_parked();
executor.run_until_parked();
channel_view_b.update(cx_b, |notes, cx| {
notes.editor.update(cx, |editor, cx| {
editor.move_down(&Default::default(), cx);
@ -194,7 +192,7 @@ async fn test_channel_notes_participant_indices(
});
});
});
deterministic.run_until_parked();
executor.run_until_parked();
channel_view_c.update(cx_c, |notes, cx| {
notes.editor.update(cx, |editor, cx| {
editor.move_down(&Default::default(), cx);
@ -207,7 +205,7 @@ async fn test_channel_notes_participant_indices(
// Client A sees clients B and C without assigned colors, because they aren't
// in a call together.
deterministic.run_until_parked();
executor.run_until_parked();
channel_view_a.update(cx_a, |notes, cx| {
notes.editor.update(cx, |editor, cx| {
assert_remote_selections(editor, &[(None, 1..2), (None, 2..3)], cx);
@ -223,7 +221,7 @@ async fn test_channel_notes_participant_indices(
// Clients A and B see each other with two different assigned colors. Client C
// still doesn't have a color.
deterministic.run_until_parked();
executor.run_until_parked();
channel_view_a.update(cx_a, |notes, cx| {
notes.editor.update(cx, |editor, cx| {
assert_remote_selections(
@ -249,7 +247,7 @@ async fn test_channel_notes_participant_indices(
.await
.unwrap();
let project_b = client_b.build_remote_project(project_id, cx_b).await;
let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
// Clients A and B open the same file.
let editor_a = workspace_a
@ -279,7 +277,7 @@ async fn test_channel_notes_participant_indices(
selections.select_ranges(vec![2..3]);
});
});
deterministic.run_until_parked();
executor.run_until_parked();
// Clients A and B see each other with the same colors as in the channel notes.
editor_a.update(cx_a, |editor, cx| {
@ -314,11 +312,10 @@ fn assert_remote_selections(
#[gpui::test]
async fn test_multiple_handles_to_channel_buffer(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let channel_id = server
@ -340,7 +337,7 @@ async fn test_multiple_handles_to_channel_buffer(
future::try_join3(channel_buffer_1, channel_buffer_2, channel_buffer_3)
.await
.unwrap();
let channel_buffer_model_id = channel_buffer.id();
let channel_buffer_model_id = channel_buffer.entity_id();
assert_eq!(channel_buffer, channel_buffer_2);
assert_eq!(channel_buffer, channel_buffer_3);
@ -364,7 +361,7 @@ async fn test_multiple_handles_to_channel_buffer(
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
assert_ne!(channel_buffer.id(), channel_buffer_model_id);
assert_ne!(channel_buffer.entity_id(), channel_buffer_model_id);
channel_buffer.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, _| {
assert_eq!(buffer.text(), "hello");
@ -374,12 +371,11 @@ async fn test_multiple_handles_to_channel_buffer(
#[gpui::test]
async fn test_channel_buffer_disconnect(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -397,6 +393,7 @@ async fn test_channel_buffer_disconnect(
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
let channel_buffer_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_buffer(channel_id, cx))
@ -437,12 +434,11 @@ async fn test_channel_buffer_disconnect(
#[gpui::test]
async fn test_rejoin_channel_buffer(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -518,13 +514,12 @@ async fn test_rejoin_channel_buffer(
#[gpui::test]
async fn test_channel_buffers_and_server_restarts(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
@ -606,13 +601,12 @@ async fn test_channel_buffers_and_server_restarts(
#[gpui::test(iterations = 10)]
async fn test_following_to_channel_notes_without_a_shared_project(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
mut cx_a: &mut TestAppContext,
mut cx_b: &mut TestAppContext,
mut cx_c: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -664,9 +658,9 @@ async fn test_following_to_channel_notes_without_a_shared_project(
let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
let (project_b, _) = client_b.build_local_project("/b", cx_b).await;
let (project_c, _) = client_b.build_local_project("/c", cx_c).await;
let workspace_a = client_a.build_workspace(&project_a, cx_a).root(cx_a);
let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
let _workspace_c = client_c.build_workspace(&project_c, cx_c).root(cx_c);
let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a);
let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
let (_workspace_c, _cx_c) = client_c.build_workspace(&project_c, cx_c);
active_call_a
.update(cx_a, |call, cx| call.set_location(Some(&project_a), cx))
@ -691,7 +685,9 @@ async fn test_following_to_channel_notes_without_a_shared_project(
// Client B follows client A.
workspace_b
.update(cx_b, |workspace, cx| {
workspace.follow(client_a.peer_id().unwrap(), cx).unwrap()
workspace
.start_following(client_a.peer_id().unwrap(), cx)
.unwrap()
})
.await
.unwrap();
@ -699,7 +695,7 @@ async fn test_following_to_channel_notes_without_a_shared_project(
// Client B is taken to the notes for channel 1, with the same
// text selected as client A.
deterministic.run_until_parked();
let channel_view_1_b = workspace_b.read_with(cx_b, |workspace, cx| {
let channel_view_1_b = workspace_b.update(cx_b, |workspace, cx| {
assert_eq!(
workspace.leader_for_pane(workspace.active_pane()),
Some(client_a.peer_id().unwrap())
@ -710,7 +706,7 @@ async fn test_following_to_channel_notes_without_a_shared_project(
.downcast::<ChannelView>()
.expect("active item is not a channel view")
});
channel_view_1_b.read_with(cx_b, |notes, cx| {
channel_view_1_b.update(cx_b, |notes, cx| {
assert_eq!(notes.channel(cx).unwrap().name, "channel-1");
let editor = notes.editor.read(cx);
assert_eq!(editor.text(cx), "Hello from A.");
@ -718,17 +714,22 @@ async fn test_following_to_channel_notes_without_a_shared_project(
});
// Client A opens the notes for channel 2.
eprintln!("opening -------------------->");
let channel_view_2_a = cx_a
.update(|cx| ChannelView::open(channel_2_id, workspace_a.clone(), cx))
.await
.unwrap();
channel_view_2_a.read_with(cx_a, |notes, cx| {
channel_view_2_a.update(cx_a, |notes, cx| {
assert_eq!(notes.channel(cx).unwrap().name, "channel-2");
});
// Client B is taken to the notes for channel 2.
deterministic.run_until_parked();
let channel_view_2_b = workspace_b.read_with(cx_b, |workspace, cx| {
eprintln!("opening <--------------------");
let channel_view_2_b = workspace_b.update(cx_b, |workspace, cx| {
assert_eq!(
workspace.leader_for_pane(workspace.active_pane()),
Some(client_a.peer_id().unwrap())
@ -739,19 +740,18 @@ async fn test_following_to_channel_notes_without_a_shared_project(
.downcast::<ChannelView>()
.expect("active item is not a channel view")
});
channel_view_2_b.read_with(cx_b, |notes, cx| {
channel_view_2_b.update(cx_b, |notes, cx| {
assert_eq!(notes.channel(cx).unwrap().name, "channel-2");
});
}
#[gpui::test]
async fn test_channel_buffer_changes(
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -778,7 +778,7 @@ async fn test_channel_buffer_changes(
});
deterministic.run_until_parked();
let has_buffer_changed = cx_b.read(|cx| {
let has_buffer_changed = cx_b.update(|cx| {
client_b
.channel_store()
.read(cx)
@ -789,14 +789,14 @@ async fn test_channel_buffer_changes(
// Opening the buffer should clear the changed flag.
let project_b = client_b.build_empty_local_project(cx_b);
let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
let channel_view_b = cx_b
.update(|cx| ChannelView::open(channel_id, workspace_b.clone(), cx))
.await
.unwrap();
deterministic.run_until_parked();
let has_buffer_changed = cx_b.read(|cx| {
let has_buffer_changed = cx_b.update(|cx| {
client_b
.channel_store()
.read(cx)
@ -826,7 +826,8 @@ async fn test_channel_buffer_changes(
// Test that the server is tracking things correctly, and we retain our 'not changed'
// state across a disconnect
server.simulate_long_connection_interruption(client_b.peer_id().unwrap(), &deterministic);
server
.simulate_long_connection_interruption(client_b.peer_id().unwrap(), deterministic.clone());
let has_buffer_changed = cx_b.read(|cx| {
client_b
.channel_store()
@ -877,6 +878,6 @@ fn assert_collaborators(collaborators: &HashMap<PeerId, Collaborator>, ids: &[Op
);
}
fn buffer_text(channel_buffer: &ModelHandle<language::Buffer>, cx: &mut TestAppContext) -> String {
fn buffer_text(channel_buffer: &Model<language::Buffer>, cx: &mut TestAppContext) -> String {
channel_buffer.read_with(cx, |buffer, _| buffer.text())
}


@ -1,20 +1,16 @@
use crate::{rpc::RECONNECT_TIMEOUT, tests::TestServer};
use channel::{ChannelChat, ChannelMessageId, MessageParams};
use collab_ui::chat_panel::ChatPanel;
use gpui::{executor::Deterministic, BorrowAppContext, ModelHandle, TestAppContext};
use gpui::{BackgroundExecutor, Model, TestAppContext};
use rpc::Notification;
use std::sync::Arc;
use workspace::dock::Panel;
#[gpui::test]
async fn test_basic_channel_messages(
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
mut cx_a: &mut TestAppContext,
mut cx_b: &mut TestAppContext,
mut cx_c: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
@ -57,13 +53,13 @@ async fn test_basic_channel_messages(
.await
.unwrap();
deterministic.run_until_parked();
executor.run_until_parked();
channel_chat_b
.update(cx_b, |c, cx| c.send_message("three".into(), cx).unwrap())
.await
.unwrap();
deterministic.run_until_parked();
executor.run_until_parked();
let channel_chat_c = client_c
.channel_store()
@ -117,12 +113,11 @@ async fn test_basic_channel_messages(
#[gpui::test]
async fn test_rejoin_channel_chat(
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -178,7 +173,7 @@ async fn test_rejoin_channel_chat(
// Client A reconnects.
server.allow_connections();
deterministic.advance_clock(RECONNECT_TIMEOUT);
executor.advance_clock(RECONNECT_TIMEOUT);
// Client A fetches the messages that were sent while they were disconnected
// and resends their own messages which failed to send.
@ -189,13 +184,12 @@ async fn test_rejoin_channel_chat(
#[gpui::test]
async fn test_remove_channel_message(
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
@ -235,7 +229,7 @@ async fn test_remove_channel_message(
.unwrap();
// Clients A and B see all of the messages.
deterministic.run_until_parked();
executor.run_until_parked();
let expected_messages = &["one", "two", "three"];
assert_messages(&channel_chat_a, expected_messages, cx_a);
assert_messages(&channel_chat_b, expected_messages, cx_b);
@ -252,7 +246,7 @@ async fn test_remove_channel_message(
.unwrap();
// Client B sees that the message is gone.
deterministic.run_until_parked();
executor.run_until_parked();
let expected_messages = &["one", "three"];
assert_messages(&channel_chat_a, expected_messages, cx_a);
assert_messages(&channel_chat_b, expected_messages, cx_b);
@ -267,146 +261,148 @@ async fn test_remove_channel_message(
}
#[track_caller]
fn assert_messages(chat: &ModelHandle<ChannelChat>, messages: &[&str], cx: &mut TestAppContext) {
fn assert_messages(chat: &Model<ChannelChat>, messages: &[&str], cx: &mut TestAppContext) {
// todo!(don't directly borrow here)
assert_eq!(
chat.read_with(cx, |chat, _| chat
.messages()
.iter()
.map(|m| m.body.clone())
.collect::<Vec<_>>(),),
chat.read_with(cx, |chat, _| {
chat.messages()
.iter()
.map(|m| m.body.clone())
.collect::<Vec<_>>()
}),
messages
);
}
#[gpui::test]
async fn test_channel_message_changes(
deterministic: Arc<Deterministic>,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
//todo!(collab_ui)
// #[gpui::test]
// async fn test_channel_message_changes(
// executor: BackgroundExecutor,
// cx_a: &mut TestAppContext,
// cx_b: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(&executor).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b)],
)
.await;
// let channel_id = server
// .make_channel(
// "the-channel",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b)],
// )
// .await;
// Client A sends a message, client B should see that there is a new message.
let channel_chat_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
// // Client A sends a message, client B should see that there is a new message.
// let channel_chat_a = client_a
// .channel_store()
// .update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
// .await
// .unwrap();
channel_chat_a
.update(cx_a, |c, cx| c.send_message("one".into(), cx).unwrap())
.await
.unwrap();
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("one".into(), cx).unwrap())
// .await
// .unwrap();
deterministic.run_until_parked();
// executor.run_until_parked();
let b_has_messages = cx_b.read_with(|cx| {
client_b
.channel_store()
.read(cx)
.has_new_messages(channel_id)
.unwrap()
});
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
assert!(b_has_messages);
// assert!(b_has_messages);
// Opening the chat should clear the changed flag.
cx_b.update(|cx| {
collab_ui::init(&client_b.app_state, cx);
});
let project_b = client_b.build_empty_local_project(cx_b);
let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
let chat_panel_b = workspace_b.update(cx_b, |workspace, cx| ChatPanel::new(workspace, cx));
chat_panel_b
.update(cx_b, |chat_panel, cx| {
chat_panel.set_active(true, cx);
chat_panel.select_channel(channel_id, None, cx)
})
.await
.unwrap();
// // Opening the chat should clear the changed flag.
// cx_b.update(|cx| {
// collab_ui::init(&client_b.app_state, cx);
// });
// let project_b = client_b.build_empty_local_project(cx_b);
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// let chat_panel_b = workspace_b.update(cx_b, |workspace, cx| ChatPanel::new(workspace, cx));
// chat_panel_b
// .update(cx_b, |chat_panel, cx| {
// chat_panel.set_active(true, cx);
// chat_panel.select_channel(channel_id, None, cx)
// })
// .await
// .unwrap();
deterministic.run_until_parked();
// executor.run_until_parked();
let b_has_messages = cx_b.read_with(|cx| {
client_b
.channel_store()
.read(cx)
.has_new_messages(channel_id)
.unwrap()
});
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
assert!(!b_has_messages);
// assert!(!b_has_messages);
// Sending a message while the chat is open should not change the flag.
channel_chat_a
.update(cx_a, |c, cx| c.send_message("two".into(), cx).unwrap())
.await
.unwrap();
// // Sending a message while the chat is open should not change the flag.
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("two".into(), cx).unwrap())
// .await
// .unwrap();
deterministic.run_until_parked();
// executor.run_until_parked();
let b_has_messages = cx_b.read_with(|cx| {
client_b
.channel_store()
.read(cx)
.has_new_messages(channel_id)
.unwrap()
});
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
assert!(!b_has_messages);
// assert!(!b_has_messages);
// Sending a message while the chat is closed should change the flag.
chat_panel_b.update(cx_b, |chat_panel, cx| {
chat_panel.set_active(false, cx);
});
// // Sending a message while the chat is closed should change the flag.
// chat_panel_b.update(cx_b, |chat_panel, cx| {
// chat_panel.set_active(false, cx);
// });
// Sending a message while the chat is open should not change the flag.
channel_chat_a
.update(cx_a, |c, cx| c.send_message("three".into(), cx).unwrap())
.await
.unwrap();
// // Sending a message while the chat is open should not change the flag.
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("three".into(), cx).unwrap())
// .await
// .unwrap();
deterministic.run_until_parked();
// executor.run_until_parked();
let b_has_messages = cx_b.read_with(|cx| {
client_b
.channel_store()
.read(cx)
.has_new_messages(channel_id)
.unwrap()
});
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
assert!(b_has_messages);
// assert!(b_has_messages);
// Closing the chat should re-enable change tracking
cx_b.update(|_| drop(chat_panel_b));
// // Closing the chat should re-enable change tracking
// cx_b.update(|_| drop(chat_panel_b));
channel_chat_a
.update(cx_a, |c, cx| c.send_message("four".into(), cx).unwrap())
.await
.unwrap();
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("four".into(), cx).unwrap())
// .await
// .unwrap();
deterministic.run_until_parked();
// executor.run_until_parked();
let b_has_messages = cx_b.read_with(|cx| {
client_b
.channel_store()
.read(cx)
.has_new_messages(channel_id)
.unwrap()
});
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
assert!(b_has_messages);
}
// assert!(b_has_messages);
// }

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,18 +1,19 @@
use crate::tests::TestServer;
use gpui::{executor::Deterministic, TestAppContext};
use std::sync::Arc;
use gpui::{BackgroundExecutor, TestAppContext};
use notifications::NotificationEvent;
use parking_lot::Mutex;
use rpc::{proto, Notification};
use std::sync::Arc;
use crate::tests::TestServer;
#[gpui::test]
async fn test_notifications(
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
@ -42,7 +43,7 @@ async fn test_notifications(
// Client B receives a contact request notification and responds to the
// request, accepting it.
deterministic.run_until_parked();
executor.run_until_parked();
client_b.notification_store().update(cx_b, |store, cx| {
assert_eq!(store.notification_count(), 1);
assert_eq!(store.unread_notification_count(), 1);
@ -72,7 +73,7 @@ async fn test_notifications(
});
// Client B sees the notification is now read, and that they responded.
deterministic.run_until_parked();
executor.run_until_parked();
client_b.notification_store().read_with(cx_b, |store, _| {
assert_eq!(store.notification_count(), 1);
assert_eq!(store.unread_notification_count(), 0);
@ -127,7 +128,7 @@ async fn test_notifications(
// Client B receives a channel invitation notification and responds to the
// invitation, accepting it.
deterministic.run_until_parked();
executor.run_until_parked();
client_b.notification_store().update(cx_b, |store, cx| {
assert_eq!(store.notification_count(), 2);
assert_eq!(store.unread_notification_count(), 1);
@ -147,7 +148,7 @@ async fn test_notifications(
});
// Client B sees the notification is now read, and that they responded.
deterministic.run_until_parked();
executor.run_until_parked();
client_b.notification_store().read_with(cx_b, |store, _| {
assert_eq!(store.notification_count(), 2);
assert_eq!(store.unread_notification_count(), 0);
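
The signature rewrite above repeats across the suite: tests now receive a `BackgroundExecutor` directly instead of an `Arc<Deterministic>`, `TestServer::start` takes the executor by value, and the explicit `forbid_parking` call disappears. A minimal skeleton in the new style (names are illustrative, not from this commit):

#[gpui::test]
async fn test_example(executor: BackgroundExecutor, cx_a: &mut TestAppContext) {
    // Start the in-process collab server on the test executor.
    let mut server = TestServer::start(executor.clone()).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    // ... exercise the server ...
    executor.run_until_parked();
}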

View File

@ -3,10 +3,14 @@ use crate::db::ChannelRole;
use super::{run_randomized_test, RandomizedTest, TestClient, TestError, TestServer, UserTestPlan};
use anyhow::Result;
use async_trait::async_trait;
use gpui::{executor::Deterministic, TestAppContext};
use gpui::{BackgroundExecutor, SharedString, TestAppContext};
use rand::prelude::*;
use serde_derive::{Deserialize, Serialize};
use std::{ops::Range, rc::Rc, sync::Arc};
use std::{
ops::{Deref, DerefMut, Range},
rc::Rc,
sync::Arc,
};
use text::Bias;
#[gpui::test(
@ -15,10 +19,10 @@ use text::Bias;
)]
async fn test_random_channel_buffers(
cx: &mut TestAppContext,
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
rng: StdRng,
) {
run_randomized_test::<RandomChannelBufferTest>(cx, deterministic, rng).await;
run_randomized_test::<RandomChannelBufferTest>(cx, executor, rng).await;
}
struct RandomChannelBufferTest;
@ -26,13 +30,13 @@ struct RandomChannelBufferTest;
#[derive(Clone, Serialize, Deserialize)]
enum ChannelBufferOperation {
JoinChannelNotes {
channel_name: String,
channel_name: SharedString,
},
LeaveChannelNotes {
channel_name: String,
channel_name: SharedString,
},
EditChannelNotes {
channel_name: String,
channel_name: SharedString,
edits: Vec<(Range<usize>, Arc<str>)>,
},
Noop,
@ -69,11 +73,11 @@ impl RandomizedTest for RandomChannelBufferTest {
cx: &TestAppContext,
) -> ChannelBufferOperation {
let channel_store = client.channel_store().clone();
let channel_buffers = client.channel_buffers();
let mut channel_buffers = client.channel_buffers();
// When signed out, we can't do anything unless a channel buffer is
// already open.
if channel_buffers.is_empty()
if channel_buffers.deref_mut().is_empty()
&& channel_store.read_with(cx, |store, _| store.channel_count() == 0)
{
return ChannelBufferOperation::Noop;
@ -97,7 +101,7 @@ impl RandomizedTest for RandomChannelBufferTest {
}
30..=40 => {
if let Some(buffer) = channel_buffers.iter().choose(rng) {
if let Some(buffer) = channel_buffers.deref().iter().choose(rng) {
let channel_name =
buffer.read_with(cx, |b, cx| b.channel(cx).unwrap().name.clone());
break ChannelBufferOperation::LeaveChannelNotes { channel_name };
@ -105,7 +109,7 @@ impl RandomizedTest for RandomChannelBufferTest {
}
_ => {
if let Some(buffer) = channel_buffers.iter().choose(rng) {
if let Some(buffer) = channel_buffers.deref().iter().choose(rng) {
break buffer.read_with(cx, |b, cx| {
let channel_name = b.channel(cx).unwrap().name.clone();
let edits = b
@ -147,13 +151,13 @@ impl RandomizedTest for RandomChannelBufferTest {
"{}: opening notes for channel {channel_name}",
client.username
);
client.channel_buffers().insert(buffer.await?);
client.channel_buffers().deref_mut().insert(buffer.await?);
}
ChannelBufferOperation::LeaveChannelNotes { channel_name } => {
let buffer = cx.update(|cx| {
let mut left_buffer = Err(TestError::Inapplicable);
client.channel_buffers().retain(|buffer| {
client.channel_buffers().deref_mut().retain(|buffer| {
if buffer.read(cx).channel(cx).unwrap().name == channel_name {
left_buffer = Ok(buffer.clone());
false
@ -179,6 +183,7 @@ impl RandomizedTest for RandomChannelBufferTest {
.read(|cx| {
client
.channel_buffers()
.deref()
.iter()
.find(|buffer| {
buffer.read(cx).channel(cx).unwrap().name == channel_name
@ -215,13 +220,6 @@ impl RandomizedTest for RandomChannelBufferTest {
Ok(())
}
async fn on_client_added(client: &Rc<TestClient>, cx: &mut TestAppContext) {
let channel_store = client.channel_store();
while channel_store.read_with(cx, |store, _| store.channel_count() == 0) {
channel_store.next_notification(cx).await;
}
}
async fn on_quiesce(server: &mut TestServer, clients: &mut [(Rc<TestClient>, TestAppContext)]) {
let channels = server.app_state.db.all_channels().await.unwrap();
@ -229,6 +227,7 @@ impl RandomizedTest for RandomChannelBufferTest {
client_cx.update(|cx| {
client
.channel_buffers()
.deref_mut()
.retain(|b| b.read(cx).is_connected());
});
}
@ -252,6 +251,7 @@ impl RandomizedTest for RandomChannelBufferTest {
client_cx.read(|cx| {
if let Some(channel_buffer) = client
.channel_buffers()
.deref()
.iter()
.find(|b| b.read(cx).channel_id == channel_id.to_proto())
{
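
The repeated `.deref()` / `.deref_mut()` calls follow from `TestClient::channel_buffers` now returning a `RefMut` guard around the set rather than the set itself. The call-site pattern, sketched with the `client`, `rng`, and `cx` bindings assumed in scope:

use std::ops::{Deref, DerefMut};

// Reading through the guard:
if let Some(buffer) = client.channel_buffers().deref().iter().choose(rng) {
    // ... inspect the buffer ...
}

// Mutating through the guard:
client
    .channel_buffers()
    .deref_mut()
    .retain(|buffer| buffer.read(cx).is_connected());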

View File

@ -1,5 +1,5 @@
use super::{run_randomized_test, RandomizedTest, TestClient, TestError, TestServer, UserTestPlan};
use crate::db::UserId;
use super::{RandomizedTest, TestClient, TestError, TestServer, UserTestPlan};
use crate::{db::UserId, tests::run_randomized_test};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use call::ActiveCall;
@ -7,7 +7,7 @@ use collections::{BTreeMap, HashMap};
use editor::Bias;
use fs::{repository::GitFileStatus, FakeFs, Fs as _};
use futures::StreamExt;
use gpui::{executor::Deterministic, ModelHandle, TestAppContext};
use gpui::{BackgroundExecutor, Model, TestAppContext};
use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
use lsp::FakeLanguageServer;
use pretty_assertions::assert_eq;
@ -18,7 +18,7 @@ use rand::{
};
use serde::{Deserialize, Serialize};
use std::{
ops::Range,
ops::{Deref, Range},
path::{Path, PathBuf},
rc::Rc,
sync::Arc,
@ -31,10 +31,10 @@ use util::ResultExt;
)]
async fn test_random_project_collaboration(
cx: &mut TestAppContext,
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
rng: StdRng,
) {
run_randomized_test::<ProjectCollaborationTest>(cx, deterministic, rng).await;
run_randomized_test::<ProjectCollaborationTest>(cx, executor, rng).await;
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -295,7 +295,7 @@ impl RandomizedTest for ProjectCollaborationTest {
let is_local = project.read_with(cx, |project, _| project.is_local());
let worktree = project.read_with(cx, |project, cx| {
project
.worktrees(cx)
.worktrees()
.filter(|worktree| {
let worktree = worktree.read(cx);
worktree.is_visible()
@ -417,7 +417,7 @@ impl RandomizedTest for ProjectCollaborationTest {
81.. => {
let worktree = project.read_with(cx, |project, cx| {
project
.worktrees(cx)
.worktrees()
.filter(|worktree| {
let worktree = worktree.read(cx);
worktree.is_visible()
@ -624,7 +624,7 @@ impl RandomizedTest for ProjectCollaborationTest {
room.join_project(
project_id,
client.language_registry().clone(),
FakeFs::new(cx.background().clone()),
FakeFs::new(cx.background_executor().clone()),
cx,
)
}))
@ -782,6 +782,7 @@ impl RandomizedTest for ProjectCollaborationTest {
.map_err(|err| anyhow!("save request failed: {:?}", err))?;
assert!(buffer
.read_with(&cx, |buffer, _| { buffer.saved_version().to_owned() })
.expect("App should not be dropped")
.observed_all(&requested_version));
anyhow::Ok(())
});
@ -817,30 +818,30 @@ impl RandomizedTest for ProjectCollaborationTest {
use futures::{FutureExt as _, TryFutureExt as _};
let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left));
let request = cx.foreground().spawn(project.update(cx, |project, cx| {
match kind {
LspRequestKind::Rename => project
.prepare_rename(buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Completion => project
.completions(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::CodeAction => project
.code_actions(&buffer, offset..offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Definition => project
.definition(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Highlights => project
.document_highlights(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
}
}));
let process_lsp_request = project.update(cx, |project, cx| match kind {
LspRequestKind::Rename => project
.prepare_rename(buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Completion => project
.completions(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::CodeAction => project
.code_actions(&buffer, offset..offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Definition => project
.definition(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
LspRequestKind::Highlights => project
.document_highlights(&buffer, offset, cx)
.map_ok(|_| ())
.boxed(),
});
let request = cx.foreground_executor().spawn(process_lsp_request);
if detach {
request.detach();
} else {
@ -874,7 +875,7 @@ impl RandomizedTest for ProjectCollaborationTest {
)
});
drop(project);
let search = cx.background().spawn(async move {
let search = cx.executor().spawn(async move {
let mut results = HashMap::default();
while let Some((buffer, ranges)) = search.next().await {
results.entry(buffer).or_insert(ranges);
@ -1075,12 +1076,12 @@ impl RandomizedTest for ProjectCollaborationTest {
fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
let fs = fs.clone();
move |_, cx| {
let background = cx.background();
let background = cx.background_executor();
let mut rng = background.rng();
let count = rng.gen_range::<usize, _>(1..3);
let files = fs.as_fake().files();
let files = (0..count)
.map(|_| files.choose(&mut *rng).unwrap().clone())
.map(|_| files.choose(&mut rng).unwrap().clone())
.collect::<Vec<_>>();
async move {
log::info!("LSP: Returning definitions in files {:?}", &files);
@ -1100,7 +1101,7 @@ impl RandomizedTest for ProjectCollaborationTest {
fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>(
move |_, cx| {
let mut highlights = Vec::new();
let background = cx.background();
let background = cx.background_executor();
let mut rng = background.rng();
let highlight_count = rng.gen_range(1..=5);
@ -1153,7 +1154,7 @@ impl RandomizedTest for ProjectCollaborationTest {
let host_worktree_snapshots =
host_project.read_with(host_cx, |host_project, cx| {
host_project
.worktrees(cx)
.worktrees()
.map(|worktree| {
let worktree = worktree.read(cx);
(worktree.id(), worktree.snapshot())
@ -1161,7 +1162,7 @@ impl RandomizedTest for ProjectCollaborationTest {
.collect::<BTreeMap<_, _>>()
});
let guest_worktree_snapshots = guest_project
.worktrees(cx)
.worktrees()
.map(|worktree| {
let worktree = worktree.read(cx);
(worktree.id(), worktree.snapshot())
@ -1218,7 +1219,7 @@ impl RandomizedTest for ProjectCollaborationTest {
}
}
for buffer in guest_project.opened_buffers(cx) {
for buffer in guest_project.opened_buffers() {
let buffer = buffer.read(cx);
assert_eq!(
buffer.deferred_ops_len(),
@ -1268,8 +1269,8 @@ impl RandomizedTest for ProjectCollaborationTest {
for guest_buffer in guest_buffers {
let buffer_id =
guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
let host_buffer = host_project.read_with(host_cx, |project, cx| {
project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
let host_buffer = host_project.read_with(host_cx, |project, _| {
project.buffer_for_id(buffer_id).unwrap_or_else(|| {
panic!(
"host does not have buffer for guest:{}, peer:{:?}, id:{}",
client.username,
@ -1457,10 +1458,10 @@ fn generate_git_operation(rng: &mut StdRng, client: &TestClient) -> GitOperation
fn buffer_for_full_path(
client: &TestClient,
project: &ModelHandle<Project>,
project: &Model<Project>,
full_path: &PathBuf,
cx: &TestAppContext,
) -> Option<ModelHandle<language::Buffer>> {
) -> Option<Model<language::Buffer>> {
client
.buffers_for_project(project)
.iter()
@ -1476,18 +1477,18 @@ fn project_for_root_name(
client: &TestClient,
root_name: &str,
cx: &TestAppContext,
) -> Option<ModelHandle<Project>> {
if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) {
) -> Option<Model<Project>> {
if let Some(ix) = project_ix_for_root_name(&*client.local_projects().deref(), root_name, cx) {
return Some(client.local_projects()[ix].clone());
}
if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) {
if let Some(ix) = project_ix_for_root_name(&*client.remote_projects().deref(), root_name, cx) {
return Some(client.remote_projects()[ix].clone());
}
None
}
fn project_ix_for_root_name(
projects: &[ModelHandle<Project>],
projects: &[Model<Project>],
root_name: &str,
cx: &TestAppContext,
) -> Option<usize> {
@ -1499,7 +1500,7 @@ fn project_ix_for_root_name(
})
}
fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) -> String {
fn root_name_for_project(project: &Model<Project>, cx: &TestAppContext) -> String {
project.read_with(cx, |project, cx| {
project
.visible_worktrees(cx)
@ -1512,7 +1513,7 @@ fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) ->
}
fn project_path_for_full_path(
project: &ModelHandle<Project>,
project: &Model<Project>,
full_path: &Path,
cx: &TestAppContext,
) -> Option<ProjectPath> {
@ -1520,7 +1521,7 @@ fn project_path_for_full_path(
let root_name = components.next().unwrap().as_os_str().to_str().unwrap();
let path = components.as_path().into();
let worktree_id = project.read_with(cx, |project, cx| {
project.worktrees(cx).find_map(|worktree| {
project.worktrees().find_map(|worktree| {
let worktree = worktree.read(cx);
if worktree.root_name() == root_name {
Some(worktree.id())
@ -1533,7 +1534,7 @@ fn project_path_for_full_path(
}
async fn ensure_project_shared(
project: &ModelHandle<Project>,
project: &Model<Project>,
client: &TestClient,
cx: &mut TestAppContext,
) {
@ -1566,9 +1567,10 @@ async fn ensure_project_shared(
}
}
fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<Model<Project>> {
client
.local_projects()
.deref()
.iter()
.chain(client.remote_projects().iter())
.choose(rng)
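
One pattern worth noting in this diff: LSP requests are now built inside `project.update` and only then spawned, replacing the old `cx.foreground().spawn(project.update(...))` nesting. Condensed to a single request kind, as a sketch assuming the `project`, `buffer`, `offset`, and `cx` bindings from the test:

use futures::{FutureExt as _, TryFutureExt as _};

// Build the boxed future first...
let process_lsp_request = project.update(cx, |project, cx| {
    project
        .prepare_rename(buffer, offset, cx)
        .map_ok(|_| ())
        .boxed()
});
// ...then spawn it on the renamed foreground executor.
let request = cx.foreground_executor().spawn(process_lsp_request);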

View File

@ -5,7 +5,7 @@ use crate::{
};
use async_trait::async_trait;
use futures::StreamExt;
use gpui::{executor::Deterministic, Task, TestAppContext};
use gpui::{BackgroundExecutor, Task, TestAppContext};
use parking_lot::Mutex;
use rand::prelude::*;
use rpc::RECEIVE_TIMEOUT;
@ -115,18 +115,17 @@ pub trait RandomizedTest: 'static + Sized {
async fn initialize(server: &mut TestServer, users: &[UserTestPlan]);
async fn on_client_added(client: &Rc<TestClient>, cx: &mut TestAppContext);
async fn on_client_added(_client: &Rc<TestClient>, _cx: &mut TestAppContext) {}
async fn on_quiesce(server: &mut TestServer, client: &mut [(Rc<TestClient>, TestAppContext)]);
}
pub async fn run_randomized_test<T: RandomizedTest>(
cx: &mut TestAppContext,
deterministic: Arc<Deterministic>,
executor: BackgroundExecutor,
rng: StdRng,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let mut server = TestServer::start(executor.clone()).await;
let plan = TestPlan::<T>::new(&mut server, rng).await;
LAST_PLAN.lock().replace({
@ -144,7 +143,7 @@ pub async fn run_randomized_test<T: RandomizedTest>(
applied.store(true, SeqCst);
let did_apply = TestPlan::apply_server_operation(
plan.clone(),
deterministic.clone(),
executor.clone(),
&mut server,
&mut clients,
&mut client_tasks,
@ -159,14 +158,14 @@ pub async fn run_randomized_test<T: RandomizedTest>(
}
drop(operation_channels);
deterministic.start_waiting();
executor.start_waiting();
futures::future::join_all(client_tasks).await;
deterministic.finish_waiting();
executor.finish_waiting();
deterministic.run_until_parked();
executor.run_until_parked();
T::on_quiesce(&mut server, &mut clients).await;
for (client, mut cx) in clients {
for (client, cx) in clients {
cx.update(|cx| {
let store = cx.remove_global::<SettingsStore>();
cx.clear_globals();
@ -174,7 +173,7 @@ pub async fn run_randomized_test<T: RandomizedTest>(
drop(client);
});
}
deterministic.run_until_parked();
executor.run_until_parked();
if let Some(path) = &*PLAN_SAVE_PATH {
eprintln!("saved test plan to path {:?}", path);
@ -450,7 +449,7 @@ impl<T: RandomizedTest> TestPlan<T> {
async fn apply_server_operation(
plan: Arc<Mutex<Self>>,
deterministic: Arc<Deterministic>,
deterministic: BackgroundExecutor,
server: &mut TestServer,
clients: &mut Vec<(Rc<TestClient>, TestAppContext)>,
client_tasks: &mut Vec<Task<()>>,
@ -471,28 +470,18 @@ impl<T: RandomizedTest> TestPlan<T> {
username = user.username.clone();
};
log::info!("adding new connection for {}", username);
let next_entity_id = (user_id.0 * 10_000) as usize;
let mut client_cx = TestAppContext::new(
cx.foreground_platform(),
cx.platform(),
deterministic.build_foreground(user_id.0 as usize),
deterministic.build_background(),
cx.font_cache(),
cx.leak_detector(),
next_entity_id,
cx.function_name.clone(),
);
let mut client_cx = cx.new_app();
let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded();
let client = Rc::new(server.create_client(&mut client_cx, &username).await);
operation_channels.push(operation_tx);
clients.push((client.clone(), client_cx.clone()));
client_tasks.push(client_cx.foreground().spawn(Self::simulate_client(
plan.clone(),
client,
operation_rx,
client_cx,
)));
let foreground_executor = client_cx.foreground_executor().clone();
let simulate_client =
Self::simulate_client(plan.clone(), client, operation_rx, client_cx);
client_tasks.push(foreground_executor.spawn(simulate_client));
log::info!("added connection for {}", username);
}
@ -514,7 +503,7 @@ impl<T: RandomizedTest> TestPlan<T> {
.collect::<Vec<_>>();
assert_eq!(user_connection_ids.len(), 1);
let removed_peer_id = user_connection_ids[0].into();
let (client, mut client_cx) = clients.remove(client_ix);
let (client, client_cx) = clients.remove(client_ix);
let client_task = client_tasks.remove(client_ix);
operation_channels.remove(client_ix);
server.forbid_connections();
@ -647,7 +636,7 @@ impl<T: RandomizedTest> TestPlan<T> {
log::error!("{} error: {}", client.username, error);
}
}
cx.background().simulate_random_delay().await;
cx.executor().simulate_random_delay().await;
}
log::info!("{}: done", client.username);
}
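
`on_client_added` also gains a default no-op body, so tests override it only when they need per-client setup. The harness entry point after the migration, reduced to its spine (a sketch; the elided steps are unchanged from the diff above):

pub async fn run_randomized_test<T: RandomizedTest>(
    cx: &mut TestAppContext,
    executor: BackgroundExecutor,
    rng: StdRng,
) {
    let mut server = TestServer::start(executor.clone()).await;
    let mut client_tasks: Vec<Task<()>> = Vec::new();
    // ... build the plan, apply operations, push one task per simulated client ...
    executor.start_waiting();
    futures::future::join_all(client_tasks).await;
    executor.finish_waiting();
    executor.run_until_parked();
}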

View File

@ -13,9 +13,10 @@ use client::{
use collections::{HashMap, HashSet};
use fs::FakeFs;
use futures::{channel::oneshot, StreamExt as _};
use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext, WindowHandle};
use gpui::{BackgroundExecutor, Context, Model, TestAppContext, View, VisualTestContext};
use language::LanguageRegistry;
use node_runtime::FakeNodeRuntime;
use notifications::NotificationStore;
use parking_lot::Mutex;
use project::{Project, WorktreeId};
@ -46,17 +47,17 @@ pub struct TestServer {
pub struct TestClient {
pub username: String,
pub app_state: Arc<workspace::AppState>,
channel_store: ModelHandle<ChannelStore>,
notification_store: ModelHandle<NotificationStore>,
channel_store: Model<ChannelStore>,
notification_store: Model<NotificationStore>,
state: RefCell<TestClientState>,
}
#[derive(Default)]
struct TestClientState {
local_projects: Vec<ModelHandle<Project>>,
remote_projects: Vec<ModelHandle<Project>>,
buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
channel_buffers: HashSet<ModelHandle<ChannelBuffer>>,
local_projects: Vec<Model<Project>>,
remote_projects: Vec<Model<Project>>,
buffers: HashMap<Model<Project>, HashSet<Model<language::Buffer>>>,
channel_buffers: HashSet<Model<ChannelBuffer>>,
}
pub struct ContactsSummary {
@ -66,22 +67,22 @@ pub struct ContactsSummary {
}
impl TestServer {
pub async fn start(deterministic: &Arc<Deterministic>) -> Self {
pub async fn start(deterministic: BackgroundExecutor) -> Self {
static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
let use_postgres = env::var("USE_POSTGRES").ok();
let use_postgres = use_postgres.as_deref();
let test_db = if use_postgres == Some("true") || use_postgres == Some("1") {
TestDb::postgres(deterministic.build_background())
TestDb::postgres(deterministic.clone())
} else {
TestDb::sqlite(deterministic.build_background())
TestDb::sqlite(deterministic.clone())
};
let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
let live_kit_server = live_kit_client::TestServer::create(
format!("http://livekit.{}.test", live_kit_server_id),
format!("devkey-{}", live_kit_server_id),
format!("secret-{}", live_kit_server_id),
deterministic.build_background(),
deterministic.clone(),
)
.unwrap();
let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
@ -93,7 +94,7 @@ impl TestServer {
let server = Server::new(
epoch,
app_state.clone(),
Executor::Deterministic(deterministic.build_background()),
Executor::Deterministic(deterministic.clone()),
);
server.start().await.unwrap();
// Advance clock to ensure the server's cleanup task is finished.
@ -124,8 +125,8 @@ impl TestServer {
if cx.has_global::<SettingsStore>() {
panic!("Same cx used to create two test clients")
}
cx.set_global(SettingsStore::test(cx));
let settings = SettingsStore::test(cx);
cx.set_global(settings);
});
let http = FakeHttpClient::with_404_response();
@ -148,7 +149,7 @@ impl TestServer {
.user_id
};
let client_name = name.to_string();
let mut client = cx.read(|cx| Client::new(http.clone(), cx));
let mut client = cx.update(|cx| Client::new(http.clone(), cx));
let server = self.server.clone();
let db = self.app_state.db.clone();
let connection_killers = self.connection_killers.clone();
@ -182,20 +183,20 @@ impl TestServer {
)))
} else {
let (client_conn, server_conn, killed) =
Connection::in_memory(cx.background());
Connection::in_memory(cx.background_executor().clone());
let (connection_id_tx, connection_id_rx) = oneshot::channel();
let user = db
.get_user_by_id(user_id)
.await
.expect("retrieving user failed")
.unwrap();
cx.background()
cx.background_executor()
.spawn(server.handle_connection(
server_conn,
client_name,
user,
Some(connection_id_tx),
Executor::Deterministic(cx.background()),
Executor::Deterministic(cx.background_executor().clone()),
))
.detach();
let connection_id = connection_id_rx.await.unwrap();
@ -207,11 +208,11 @@ impl TestServer {
})
});
let fs = FakeFs::new(cx.background());
let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
let workspace_store = cx.add_model(|cx| WorkspaceStore::new(client.clone(), cx));
let fs = FakeFs::new(cx.executor());
let user_store = cx.new_model(|cx| UserStore::new(client.clone(), cx));
let workspace_store = cx.new_model(|cx| WorkspaceStore::new(client.clone(), cx));
let mut language_registry = LanguageRegistry::test();
language_registry.set_executor(cx.background());
language_registry.set_executor(cx.executor());
let app_state = Arc::new(workspace::AppState {
client: client.clone(),
user_store: user_store.clone(),
@ -219,13 +220,11 @@ impl TestServer {
languages: Arc::new(language_registry),
fs: fs.clone(),
build_window_options: |_, _, _| Default::default(),
initialize_workspace: |_, _, _, _| Task::ready(Ok(())),
background_actions: || &[],
node_runtime: FakeNodeRuntime::new(),
});
cx.update(|cx| {
theme::init((), cx);
theme::init(theme::LoadThemes::JustBase, cx);
Project::init(&client, cx);
client::init(&client, cx);
language::init(cx);
@ -264,7 +263,7 @@ impl TestServer {
pub fn simulate_long_connection_interruption(
&self,
peer_id: PeerId,
deterministic: &Arc<Deterministic>,
deterministic: BackgroundExecutor,
) {
self.forbid_connections();
self.disconnect_client(peer_id);
@ -295,7 +294,7 @@ impl TestServer {
})
.await
.unwrap();
cx_a.foreground().run_until_parked();
cx_a.executor().run_until_parked();
client_b
.app_state
.user_store
@ -338,7 +337,7 @@ impl TestServer {
.await
.unwrap();
admin_cx.foreground().run_until_parked();
admin_cx.executor().run_until_parked();
member_cx
.read(ChannelStore::global)
@ -399,7 +398,7 @@ impl TestServer {
.await
.unwrap();
cx_b.foreground().run_until_parked();
cx_b.executor().run_until_parked();
let active_call_b = cx_b.read(ActiveCall::global);
active_call_b
.update(*cx_b, |call, cx| call.accept_incoming(cx))
@ -448,15 +447,15 @@ impl TestClient {
self.app_state.fs.as_fake()
}
pub fn channel_store(&self) -> &ModelHandle<ChannelStore> {
pub fn channel_store(&self) -> &Model<ChannelStore> {
&self.channel_store
}
pub fn notification_store(&self) -> &ModelHandle<NotificationStore> {
pub fn notification_store(&self) -> &Model<NotificationStore> {
&self.notification_store
}
pub fn user_store(&self) -> &ModelHandle<UserStore> {
pub fn user_store(&self) -> &Model<UserStore> {
&self.app_state.user_store
}
@ -491,30 +490,26 @@ impl TestClient {
.await;
}
pub fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
pub fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<Model<Project>>> + 'a {
Ref::map(self.state.borrow(), |state| &state.local_projects)
}
pub fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
pub fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<Model<Project>>> + 'a {
Ref::map(self.state.borrow(), |state| &state.remote_projects)
}
pub fn local_projects_mut<'a>(
&'a self,
) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
pub fn local_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<Model<Project>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects)
}
pub fn remote_projects_mut<'a>(
&'a self,
) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
pub fn remote_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<Model<Project>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects)
}
pub fn buffers_for_project<'a>(
&'a self,
project: &ModelHandle<Project>,
) -> impl DerefMut<Target = HashSet<ModelHandle<language::Buffer>>> + 'a {
project: &Model<Project>,
) -> impl DerefMut<Target = HashSet<Model<language::Buffer>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| {
state.buffers.entry(project.clone()).or_default()
})
@ -522,14 +517,14 @@ impl TestClient {
pub fn buffers<'a>(
&'a self,
) -> impl DerefMut<Target = HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>> + 'a
) -> impl DerefMut<Target = HashMap<Model<Project>, HashSet<Model<language::Buffer>>>> + 'a
{
RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers)
}
pub fn channel_buffers<'a>(
&'a self,
) -> impl DerefMut<Target = HashSet<ModelHandle<ChannelBuffer>>> + 'a {
) -> impl DerefMut<Target = HashSet<Model<ChannelBuffer>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.channel_buffers)
}
@ -559,7 +554,7 @@ impl TestClient {
&self,
root_path: impl AsRef<Path>,
cx: &mut TestAppContext,
) -> (ModelHandle<Project>, WorktreeId) {
) -> (Model<Project>, WorktreeId) {
let project = self.build_empty_local_project(cx);
let (worktree, _) = project
.update(cx, |p, cx| {
@ -573,7 +568,7 @@ impl TestClient {
(project, worktree.read_with(cx, |tree, _| tree.id()))
}
pub fn build_empty_local_project(&self, cx: &mut TestAppContext) -> ModelHandle<Project> {
pub fn build_empty_local_project(&self, cx: &mut TestAppContext) -> Model<Project> {
cx.update(|cx| {
Project::local(
self.client().clone(),
@ -590,7 +585,7 @@ impl TestClient {
&self,
host_project_id: u64,
guest_cx: &mut TestAppContext,
) -> ModelHandle<Project> {
) -> Model<Project> {
let active_call = guest_cx.read(ActiveCall::global);
let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
room.update(guest_cx, |room, cx| {
@ -605,12 +600,12 @@ impl TestClient {
.unwrap()
}
pub fn build_workspace(
&self,
project: &ModelHandle<Project>,
cx: &mut TestAppContext,
) -> WindowHandle<Workspace> {
cx.add_window(|cx| Workspace::new(0, project.clone(), self.app_state.clone(), cx))
pub fn build_workspace<'a>(
&'a self,
project: &Model<Project>,
cx: &'a mut TestAppContext,
) -> (View<Workspace>, &'a mut VisualTestContext) {
cx.add_window_view(|cx| Workspace::new(0, project.clone(), self.app_state.clone(), cx))
}
}
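
`build_workspace` shows the window-API side of the port: instead of `cx.add_window` returning a `WindowHandle<Workspace>`, tests now get a `View<Workspace>` plus a `VisualTestContext` to drive it. A call site would look roughly like this (sketch; `client`, `project`, and `cx_a` are assumed bindings from a test):

let (workspace, cx) = client.build_workspace(&project, cx_a);
workspace.update(cx, |workspace, cx| {
    // interact with the workspace through the visual test context
});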

View File

@ -1,12 +0,0 @@
DATABASE_URL = "postgres://postgres@localhost/zed"
DATABASE_MAX_CONNECTIONS = 5
HTTP_PORT = 8080
API_TOKEN = "secret"
INVITE_LINK_PREFIX = "http://localhost:3000/invites/"
ZED_ENVIRONMENT = "development"
LIVE_KIT_SERVER = "http://localhost:7880"
LIVE_KIT_KEY = "devkey"
LIVE_KIT_SECRET = "secret"
# RUST_LOG=info
# LOG_JSON=true

View File

@ -1,99 +0,0 @@
[package]
authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab2"
edition = "2021"
name = "collab2"
version = "0.28.0"
publish = false
[[bin]]
name = "collab2"
[[bin]]
name = "seed2"
required-features = ["seed-support"]
[dependencies]
clock = { path = "../clock" }
collections = { path = "../collections" }
live_kit_server = { path = "../live_kit_server" }
text = { package = "text2", path = "../text2" }
rpc = { path = "../rpc" }
util = { path = "../util" }
anyhow.workspace = true
async-tungstenite = "0.16"
axum = { version = "0.5", features = ["json", "headers", "ws"] }
axum-extra = { version = "0.3", features = ["erased-json"] }
base64 = "0.13"
clap = { version = "3.1", features = ["derive"], optional = true }
dashmap = "5.4"
envy = "0.4.2"
futures.workspace = true
hyper = "0.14"
lazy_static.workspace = true
lipsum = { version = "0.8", optional = true }
log.workspace = true
nanoid = "0.4"
parking_lot.workspace = true
prometheus = "0.13"
prost.workspace = true
rand.workspace = true
reqwest = { version = "0.11", features = ["json"], optional = true }
scrypt = "0.7"
smallvec.workspace = true
sea-orm = { version = "0.12.x", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls", "with-uuid"] }
serde.workspace = true
serde_derive.workspace = true
serde_json.workspace = true
sha-1 = "0.9"
sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid", "any"] }
time.workspace = true
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"
tonic = "0.6"
tower = "0.4"
toml.workspace = true
tracing = "0.1.34"
tracing-log = "0.1.3"
tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] }
uuid.workspace = true
[dev-dependencies]
audio = { path = "../audio" }
collections = { path = "../collections", features = ["test-support"] }
gpui = { package = "gpui2", path = "../gpui2", features = ["test-support"] }
call = { path = "../call", features = ["test-support"] }
client = { path = "../client", features = ["test-support"] }
channel = { path = "../channel" }
editor = { path = "../editor", features = ["test-support"] }
language = { path = "../language", features = ["test-support"] }
fs = { path = "../fs", features = ["test-support"] }
git = { path = "../git", features = ["test-support"] }
live_kit_client = { package = "live_kit_client2", path = "../live_kit_client2", features = ["test-support"] }
lsp = { path = "../lsp", features = ["test-support"] }
node_runtime = { path = "../node_runtime" }
notifications = { path = "../notifications", features = ["test-support"] }
project = { path = "../project", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { package = "settings2", path = "../settings2", features = ["test-support"] }
theme = { package = "theme2", path = "../theme2" }
workspace = { path = "../workspace", features = ["test-support"] }
collab_ui = { path = "../collab_ui", features = ["test-support"] }
async-trait.workspace = true
pretty_assertions.workspace = true
ctor.workspace = true
env_logger.workspace = true
indoc.workspace = true
util = { path = "../util" }
lazy_static.workspace = true
sea-orm = { version = "0.12.x", features = ["sqlx-sqlite"] }
serde_json.workspace = true
sqlx = { version = "0.7", features = ["sqlite"] }
unindent.workspace = true
[features]
seed-support = ["clap", "lipsum", "reqwest"]

View File

@ -1,5 +0,0 @@
# Zed Server
This crate is what we run at https://collab.zed.dev.
It contains our back-end logic for collaboration, to which we connect from the Zed client via a websocket after authenticating via https://zed.dev, which is a separate repo running on Vercel.

View File

@ -1,4 +0,0 @@
db-uri = "postgres://postgres@localhost/zed"
server-port = 8081
jwt-secret = "the-postgrest-jwt-secret-for-authorization"
log-level = "info"

View File

@ -1,12 +0,0 @@
[Interface]
PrivateKey = B5Fp/yVfP0QYlb+YJv9ea+EMI1mWODPD3akh91cVjvc=
Address = fdaa:0:2ce3:a7b:bea:0:a:2/120
DNS = fdaa:0:2ce3::3
[Peer]
PublicKey = RKAYPljEJiuaELNDdQIEJmQienT9+LRISfIHwH45HAw=
AllowedIPs = fdaa:0:2ce3::/48
Endpoint = ord1.gateway.6pn.dev:51820
PersistentKeepalive = 15

View File

@ -1,344 +0,0 @@
CREATE TABLE "users" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"github_login" VARCHAR,
"admin" BOOLEAN,
"email_address" VARCHAR(255) DEFAULT NULL,
"invite_code" VARCHAR(64),
"invite_count" INTEGER NOT NULL DEFAULT 0,
"inviter_id" INTEGER REFERENCES users (id),
"connected_once" BOOLEAN NOT NULL DEFAULT false,
"created_at" TIMESTAMP NOT NULL DEFAULT now,
"metrics_id" TEXT,
"github_user_id" INTEGER
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");
CREATE TABLE "access_tokens" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");
CREATE TABLE "contacts" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id_a" INTEGER REFERENCES users (id) NOT NULL,
"user_id_b" INTEGER REFERENCES users (id) NOT NULL,
"a_to_b" BOOLEAN NOT NULL,
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");
CREATE TABLE "rooms" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"live_kit_room" VARCHAR NOT NULL,
"enviroment" VARCHAR,
"channel_id" INTEGER REFERENCES channels (id) ON DELETE CASCADE
);
CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");
CREATE TABLE "projects" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER REFERENCES rooms (id) ON DELETE CASCADE NOT NULL,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"host_connection_id" INTEGER,
"host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"unregistered" BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INTEGER NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INTEGER NOT NULL,
"is_complete" BOOL NOT NULL DEFAULT FALSE,
"completed_scan_id" INTEGER NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"scan_id" INTEGER NOT NULL,
"id" INTEGER NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INTEGER NOT NULL,
"mtime_seconds" INTEGER NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_external" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
"is_deleted" BOOL NOT NULL,
"git_status" INTEGER,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_repositories" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"work_directory_id" INTEGER NOT NULL,
"branch" VARCHAR,
"scan_id" INTEGER NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"path" VARCHAR NOT NULL,
"content" TEXT,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_settings_files_on_project_id" ON "worktree_settings_files" ("project_id");
CREATE INDEX "index_worktree_settings_files_on_project_id_and_worktree_id" ON "worktree_settings_files" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INTEGER NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"id" INTEGER NOT NULL,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
CREATE TABLE "room_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"answering_connection_lost" BOOLEAN NOT NULL,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE SET NULL,
"participant_index" INTEGER
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");
CREATE TABLE "servers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"environment" VARCHAR NOT NULL
);
CREATE TABLE "followers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER NOT NULL REFERENCES rooms (id) ON DELETE CASCADE,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"leader_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"leader_connection_id" INTEGER NOT NULL,
"follower_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"follower_connection_id" INTEGER NOT NULL
);
CREATE UNIQUE INDEX
"index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id"
ON "followers" ("project_id", "leader_connection_server_id", "leader_connection_id", "follower_connection_server_id", "follower_connection_id");
CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");
CREATE TABLE "channels" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"visibility" VARCHAR NOT NULL,
"parent_path" TEXT
);
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");
CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER NOT NULL REFERENCES users (id),
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_chat_participants_on_channel_id" ON "channel_chat_participants" ("channel_id");
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"sender_id" INTEGER NOT NULL REFERENCES users (id),
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP,
"nonce" BLOB NOT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
CREATE TABLE "channel_members" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"admin" BOOLEAN NOT NULL DEFAULT false,
"role" VARCHAR,
"accepted" BOOLEAN NOT NULL DEFAULT false,
"updated_at" TIMESTAMP NOT NULL DEFAULT now
);
CREATE UNIQUE INDEX "index_channel_members_on_channel_id_and_user_id" ON "channel_members" ("channel_id", "user_id");
CREATE TABLE "buffers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX "index_buffers_on_channel_id" ON "buffers" ("channel_id");
CREATE TABLE "buffer_operations" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY(buffer_id, epoch, lamport_timestamp, replica_id)
);
CREATE TABLE "buffer_snapshots" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"text" TEXT NOT NULL,
"operation_serialization_version" INTEGER NOT NULL,
PRIMARY KEY(buffer_id, epoch)
);
CREATE TABLE "channel_buffer_collaborators" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"connection_lost" BOOLEAN NOT NULL DEFAULT false,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"replica_id" INTEGER NOT NULL
);
CREATE INDEX "index_channel_buffer_collaborators_on_channel_id" ON "channel_buffer_collaborators" ("channel_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" ("channel_id", "connection_id", "connection_server_id");
CREATE TABLE "feature_flags" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"flag" TEXT NOT NULL UNIQUE
);
CREATE INDEX "index_feature_flags" ON "feature_flags" ("id");
CREATE TABLE "user_features" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"feature_id" INTEGER NOT NULL REFERENCES feature_flags (id) ON DELETE CASCADE,
PRIMARY KEY (user_id, feature_id)
);
CREATE UNIQUE INDEX "index_user_features_user_id_and_feature_id" ON "user_features" ("user_id", "feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");
CREATE TABLE "observed_buffer_edits" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, buffer_id)
);
CREATE UNIQUE INDEX "index_observed_buffers_user_and_buffer_id" ON "observed_buffer_edits" ("user_id", "buffer_id");
CREATE TABLE IF NOT EXISTS "observed_channel_messages" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"channel_message_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, channel_id)
);
CREATE UNIQUE INDEX "index_observed_channel_messages_user_and_channel_id" ON "observed_channel_messages" ("user_id", "channel_id");
CREATE TABLE "notification_kinds" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE "notifications" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"created_at" TIMESTAMP NOT NULL default CURRENT_TIMESTAMP,
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

View File

@ -1,20 +0,0 @@
CREATE TABLE IF NOT EXISTS "sessions" (
"id" VARCHAR NOT NULL PRIMARY KEY,
"expires" TIMESTAMP WITH TIME ZONE NULL,
"session" TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS "users" (
"id" SERIAL PRIMARY KEY,
"github_login" VARCHAR,
"admin" BOOLEAN
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE TABLE IF NOT EXISTS "signups" (
"id" SERIAL PRIMARY KEY,
"github_login" VARCHAR,
"email_address" VARCHAR,
"about" TEXT
);

View File

@ -1,7 +0,0 @@
CREATE TABLE IF NOT EXISTS "access_tokens" (
"id" SERIAL PRIMARY KEY,
"user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");

View File

@ -1,46 +0,0 @@
CREATE TABLE IF NOT EXISTS "orgs" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL,
"slug" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_orgs_slug" ON "orgs" ("slug");
CREATE TABLE IF NOT EXISTS "org_memberships" (
"id" SERIAL PRIMARY KEY,
"org_id" INTEGER REFERENCES orgs (id) NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"admin" BOOLEAN NOT NULL
);
CREATE INDEX "index_org_memberships_user_id" ON "org_memberships" ("user_id");
CREATE UNIQUE INDEX "index_org_memberships_org_id_and_user_id" ON "org_memberships" ("org_id", "user_id");
CREATE TABLE IF NOT EXISTS "channels" (
"id" SERIAL PRIMARY KEY,
"owner_id" INTEGER NOT NULL,
"owner_is_user" BOOLEAN NOT NULL,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_channels_owner_and_name" ON "channels" ("owner_is_user", "owner_id", "name");
CREATE TABLE IF NOT EXISTS "channel_memberships" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER REFERENCES channels (id) NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"admin" BOOLEAN NOT NULL
);
CREATE INDEX "index_channel_memberships_user_id" ON "channel_memberships" ("user_id");
CREATE UNIQUE INDEX "index_channel_memberships_channel_id_and_user_id" ON "channel_memberships" ("channel_id", "user_id");
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER REFERENCES channels (id) NOT NULL,
"sender_id" INTEGER REFERENCES users (id) NOT NULL,
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP
);
CREATE INDEX "index_channel_messages_channel_id" ON "channel_messages" ("channel_id");

View File

@ -1,4 +0,0 @@
ALTER TABLE "channel_messages"
ADD "nonce" UUID NOT NULL DEFAULT gen_random_uuid();
CREATE UNIQUE INDEX "index_channel_messages_nonce" ON "channel_messages" ("nonce");

View File

@ -1,4 +0,0 @@
ALTER TABLE "signups"
ADD "wants_releases" BOOLEAN,
ADD "wants_updates" BOOLEAN,
ADD "wants_community" BOOLEAN;

View File

@ -1 +0,0 @@
DROP TABLE IF EXISTS "signups";

View File

@ -1,2 +0,0 @@
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE INDEX trigram_index_users_on_github_login ON users USING GIN(github_login gin_trgm_ops);

View File

@ -1,11 +0,0 @@
CREATE TABLE IF NOT EXISTS "contacts" (
"id" SERIAL PRIMARY KEY,
"user_id_a" INTEGER REFERENCES users (id) NOT NULL,
"user_id_b" INTEGER REFERENCES users (id) NOT NULL,
"a_to_b" BOOLEAN NOT NULL,
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");

View File

@ -1,9 +0,0 @@
ALTER TABLE users
ADD email_address VARCHAR(255) DEFAULT NULL,
ADD invite_code VARCHAR(64),
ADD invite_count INTEGER NOT NULL DEFAULT 0,
ADD inviter_id INTEGER REFERENCES users (id),
ADD connected_once BOOLEAN NOT NULL DEFAULT false,
ADD created_at TIMESTAMP NOT NULL DEFAULT NOW();
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");

View File

@ -1,6 +0,0 @@
ALTER TABLE contacts DROP CONSTRAINT contacts_user_id_a_fkey;
ALTER TABLE contacts DROP CONSTRAINT contacts_user_id_b_fkey;
ALTER TABLE contacts ADD CONSTRAINT contacts_user_id_a_fkey FOREIGN KEY (user_id_a) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE contacts ADD CONSTRAINT contacts_user_id_b_fkey FOREIGN KEY (user_id_b) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE users DROP CONSTRAINT users_inviter_id_fkey;
ALTER TABLE users ADD CONSTRAINT users_inviter_id_fkey FOREIGN KEY (inviter_id) REFERENCES users(id) ON DELETE SET NULL;

View File

@ -1,24 +0,0 @@
CREATE TABLE IF NOT EXISTS "projects" (
"id" SERIAL PRIMARY KEY,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"unregistered" BOOLEAN NOT NULL DEFAULT false
);
CREATE TABLE IF NOT EXISTS "worktree_extensions" (
"id" SERIAL PRIMARY KEY,
"project_id" INTEGER REFERENCES projects (id) NOT NULL,
"worktree_id" INTEGER NOT NULL,
"extension" VARCHAR(255),
"count" INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS "project_activity_periods" (
"id" SERIAL PRIMARY KEY,
"duration_millis" INTEGER NOT NULL,
"ended_at" TIMESTAMP NOT NULL,
"user_id" INTEGER REFERENCES users (id) NOT NULL,
"project_id" INTEGER REFERENCES projects (id) NOT NULL
);
CREATE INDEX "index_project_activity_periods_on_ended_at" ON "project_activity_periods" ("ended_at");
CREATE UNIQUE INDEX "index_worktree_extensions_on_project_id_and_worktree_id_and_extension" ON "worktree_extensions" ("project_id", "worktree_id", "extension");

View File

@ -1,27 +0,0 @@
CREATE TABLE IF NOT EXISTS "signups" (
"id" SERIAL PRIMARY KEY,
"email_address" VARCHAR NOT NULL,
"email_confirmation_code" VARCHAR(64) NOT NULL,
"email_confirmation_sent" BOOLEAN NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"device_id" VARCHAR,
"user_id" INTEGER REFERENCES users (id) ON DELETE CASCADE,
"inviting_user_id" INTEGER REFERENCES users (id) ON DELETE SET NULL,
"platform_mac" BOOLEAN NOT NULL,
"platform_linux" BOOLEAN NOT NULL,
"platform_windows" BOOLEAN NOT NULL,
"platform_unknown" BOOLEAN NOT NULL,
"editor_features" VARCHAR[],
"programming_languages" VARCHAR[]
);
CREATE UNIQUE INDEX "index_signups_on_email_address" ON "signups" ("email_address");
CREATE INDEX "index_signups_on_email_confirmation_sent" ON "signups" ("email_confirmation_sent");
ALTER TABLE "users"
ADD "github_user_id" INTEGER;
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");

View File

@ -1,2 +0,0 @@
ALTER TABLE "users"
ADD "metrics_id" uuid NOT NULL DEFAULT gen_random_uuid();

View File

@ -1,90 +0,0 @@
CREATE TABLE IF NOT EXISTS "rooms" (
"id" SERIAL PRIMARY KEY,
"live_kit_room" VARCHAR NOT NULL
);
ALTER TABLE "projects"
ADD "room_id" INTEGER REFERENCES rooms (id),
ADD "host_connection_id" INTEGER,
ADD "host_connection_epoch" UUID;
CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INT8 NOT NULL,
"is_complete" BOOL NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"id" INT8 NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INT8 NOT NULL,
"mtime_seconds" INT8 NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INT8 NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" SERIAL PRIMARY KEY,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_epoch" UUID NOT NULL,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch");
CREATE TABLE "room_participants" (
"id" SERIAL PRIMARY KEY,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_epoch" UUID,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_epoch" UUID NOT NULL
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch");
CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch");

@@ -1,2 +0,0 @@
ALTER TABLE "signups"
ADD "added_to_mailing_list" BOOLEAN NOT NULL DEFAULT FALSE;

@@ -1,7 +0,0 @@
ALTER TABLE "room_participants"
ADD "answering_connection_lost" BOOLEAN NOT NULL DEFAULT FALSE;
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch");

@@ -1 +0,0 @@
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");

@@ -1,30 +0,0 @@
CREATE TABLE servers (
id SERIAL PRIMARY KEY,
environment VARCHAR NOT NULL
);
DROP TABLE worktree_extensions;
DROP TABLE project_activity_periods;
DELETE from projects;
ALTER TABLE projects
DROP COLUMN host_connection_epoch,
ADD COLUMN host_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
DELETE FROM project_collaborators;
ALTER TABLE project_collaborators
DROP COLUMN connection_epoch,
ADD COLUMN connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
DELETE FROM room_participants;
ALTER TABLE room_participants
DROP COLUMN answering_connection_epoch,
DROP COLUMN calling_connection_epoch,
ADD COLUMN answering_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE,
ADD COLUMN calling_connection_server_id INTEGER REFERENCES servers (id) ON DELETE SET NULL;
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");
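Reading the migration above as a whole: per-connection epoch UUIDs are dropped in favor of foreign keys into the new servers table, presumably so that rows belonging to a dead server can be cleaned up in bulk through the ON DELETE CASCADE / SET NULL clauses rather than by matching epoch values row by row.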

@@ -1,3 +0,0 @@
ALTER TABLE "worktree_entries"
ADD COLUMN "scan_id" INT8,
ADD COLUMN "is_deleted" BOOL;

@@ -1,3 +0,0 @@
ALTER TABLE worktrees
ALTER COLUMN is_complete SET DEFAULT FALSE,
ADD COLUMN completed_scan_id INT8;

@@ -1,15 +0,0 @@
CREATE TABLE IF NOT EXISTS "followers" (
"id" SERIAL PRIMARY KEY,
"room_id" INTEGER NOT NULL REFERENCES rooms (id) ON DELETE CASCADE,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"leader_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"leader_connection_id" INTEGER NOT NULL,
"follower_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"follower_connection_id" INTEGER NOT NULL
);
CREATE UNIQUE INDEX
"index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id"
ON "followers" ("project_id", "leader_connection_server_id", "leader_connection_id", "follower_connection_server_id", "follower_connection_id");
CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");

@@ -1,13 +0,0 @@
CREATE TABLE "worktree_repositories" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"scan_id" INT8 NOT NULL,
"branch" VARCHAR,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");

@@ -1,15 +0,0 @@
CREATE TABLE "worktree_repository_statuses" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"repo_path" VARCHAR NOT NULL,
"status" INT8 NOT NULL,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_wt_repos_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id_and_wd_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");

@@ -1,10 +0,0 @@
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"path" VARCHAR NOT NULL,
"content" TEXT NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_settings_files_on_project_id" ON "worktree_settings_files" ("project_id");
CREATE INDEX "index_settings_files_on_project_id_and_wt_id" ON "worktree_settings_files" ("project_id", "worktree_id");

@@ -1,2 +0,0 @@
ALTER TABLE "worktree_entries"
ADD "git_status" INT8;

@@ -1,2 +0,0 @@
ALTER TABLE "worktree_entries"
ADD "is_external" BOOL NOT NULL DEFAULT FALSE;

@@ -1,30 +0,0 @@
DROP TABLE "channel_messages";
DROP TABLE "channel_memberships";
DROP TABLE "org_memberships";
DROP TABLE "orgs";
DROP TABLE "channels";
CREATE TABLE "channels" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT now()
);
CREATE TABLE "channel_paths" (
"id_path" VARCHAR NOT NULL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_paths_on_channel_id" ON "channel_paths" ("channel_id");
CREATE TABLE "channel_members" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"admin" BOOLEAN NOT NULL DEFAULT false,
"accepted" BOOLEAN NOT NULL DEFAULT false,
"updated_at" TIMESTAMP NOT NULL DEFAULT now()
);
CREATE UNIQUE INDEX "index_channel_members_on_channel_id_and_user_id" ON "channel_members" ("channel_id", "user_id");
ALTER TABLE rooms ADD COLUMN "channel_id" INTEGER REFERENCES channels (id) ON DELETE CASCADE;

@@ -1,40 +0,0 @@
CREATE TABLE "buffers" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX "index_buffers_on_channel_id" ON "buffers" ("channel_id");
CREATE TABLE "buffer_operations" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"value" BYTEA NOT NULL,
PRIMARY KEY(buffer_id, epoch, lamport_timestamp, replica_id)
);
CREATE TABLE "buffer_snapshots" (
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"text" TEXT NOT NULL,
"operation_serialization_version" INTEGER NOT NULL,
PRIMARY KEY(buffer_id, epoch)
);
CREATE TABLE "channel_buffer_collaborators" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"connection_lost" BOOLEAN NOT NULL DEFAULT FALSE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"replica_id" INTEGER NOT NULL
);
CREATE INDEX "index_channel_buffer_collaborators_on_channel_id" ON "channel_buffer_collaborators" ("channel_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" ("channel_id", "connection_id", "connection_server_id");

@@ -1,16 +0,0 @@
CREATE TABLE "feature_flags" (
"id" SERIAL PRIMARY KEY,
"flag" VARCHAR(255) NOT NULL UNIQUE
);
CREATE UNIQUE INDEX "index_feature_flags" ON "feature_flags" ("id");
CREATE TABLE "user_features" (
"user_id" INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
"feature_id" INTEGER NOT NULL REFERENCES feature_flags(id) ON DELETE CASCADE,
PRIMARY KEY (user_id, feature_id)
);
CREATE UNIQUE INDEX "index_user_features_user_id_and_feature_id" ON "user_features" ("user_id", "feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");

@@ -1,19 +0,0 @@
CREATE TABLE IF NOT EXISTS "channel_messages" (
"id" SERIAL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"sender_id" INTEGER NOT NULL REFERENCES users (id),
"body" TEXT NOT NULL,
"sent_at" TIMESTAMP,
"nonce" UUID NOT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_nonce" ON "channel_messages" ("nonce");
CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"id" SERIAL PRIMARY KEY,
"user_id" INTEGER NOT NULL REFERENCES users (id),
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_chat_participants_on_channel_id" ON "channel_chat_participants" ("channel_id");

@@ -1,19 +0,0 @@
CREATE TABLE IF NOT EXISTS "observed_buffer_edits" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"buffer_id" INTEGER NOT NULL REFERENCES buffers (id) ON DELETE CASCADE,
"epoch" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, buffer_id)
);
CREATE UNIQUE INDEX "index_observed_buffer_user_and_buffer_id" ON "observed_buffer_edits" ("user_id", "buffer_id");
CREATE TABLE IF NOT EXISTS "observed_channel_messages" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"channel_message_id" INTEGER NOT NULL,
PRIMARY KEY (user_id, channel_id)
);
CREATE UNIQUE INDEX "index_observed_channel_messages_user_and_channel_id" ON "observed_channel_messages" ("user_id", "channel_id");

@@ -1 +0,0 @@
ALTER TABLE room_participants ADD COLUMN participant_index INTEGER;

@@ -1,22 +0,0 @@
CREATE TABLE "notification_kinds" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE notifications (
"id" SERIAL PRIMARY KEY,
"created_at" TIMESTAMP NOT NULL DEFAULT now(),
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

@@ -1 +0,0 @@
ALTER TABLE rooms ADD COLUMN enviroment TEXT;

@@ -1 +0,0 @@
CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");

@@ -1,4 +0,0 @@
ALTER TABLE channel_members ADD COLUMN role TEXT;
UPDATE channel_members SET role = CASE WHEN admin THEN 'admin' ELSE 'member' END;
ALTER TABLE channels ADD COLUMN visibility TEXT NOT NULL DEFAULT 'members';

@@ -1,8 +0,0 @@
-- Add migration script here
ALTER TABLE projects
DROP CONSTRAINT projects_room_id_fkey,
ADD CONSTRAINT projects_room_id_fkey
FOREIGN KEY (room_id)
REFERENCES rooms (id)
ON DELETE CASCADE;

@@ -1,11 +0,0 @@
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
-- We use 'on conflict update' with this index, so it should be per-user.
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
DROP INDEX "index_channel_messages_on_nonce";
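To make the comment above concrete: scoping the unique index to (sender_id, nonce) lets each sender retry a send with the same nonce idempotently, without colliding with other users' nonces. A minimal sketch of an insert that leans on this index, modeled on the create_channel_message query further down in this diff; the variables are assumed to be in scope inside a database transaction:
// Sketch only: mirrors channel_message::Entity::insert in queries/messages.rs.
channel_message::Entity::insert(channel_message::ActiveModel {
    channel_id: ActiveValue::Set(channel_id),
    sender_id: ActiveValue::Set(user_id),
    body: ActiveValue::Set(body.to_string()),
    sent_at: ActiveValue::Set(timestamp),
    nonce: ActiveValue::Set(Uuid::from_u128(nonce)),
    id: ActiveValue::NotSet,
})
.on_conflict(
    // The conflict target matches the per-user unique index created above.
    OnConflict::columns([
        channel_message::Column::SenderId,
        channel_message::Column::Nonce,
    ])
    .do_nothing()
    .to_owned(),
)
.do_nothing()
.exec(&*tx)
.await?;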

@@ -1,12 +0,0 @@
ALTER TABLE channels ADD COLUMN parent_path TEXT;
UPDATE channels
SET parent_path = substr(
channel_paths.id_path,
2,
length(channel_paths.id_path) - length('/' || channel_paths.channel_id::text || '/')
)
FROM channel_paths
WHERE channel_paths.channel_id = channels.id;
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");
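A worked example of the backfill above: for a row with channel_paths.id_path = '/1/2/3/' and channel_id = 3, length(id_path) is 7 and length('/3/') is 3, so substr(id_path, 2, 4) yields '1/2/', i.e. the ancestor ids with the leading slash and the channel's own trailing segment stripped.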

@@ -1,186 +0,0 @@
use crate::{
auth,
db::{User, UserId},
rpc, AppState, Error, Result,
};
use anyhow::anyhow;
use axum::{
body::Body,
extract::{Path, Query},
http::{self, Request, StatusCode},
middleware::{self, Next},
response::IntoResponse,
routing::{get, post},
Extension, Json, Router,
};
use axum_extra::response::ErasedJson;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tower::ServiceBuilder;
use tracing::instrument;
pub fn routes(rpc_server: Arc<rpc::Server>, state: Arc<AppState>) -> Router<Body> {
Router::new()
.route("/user", get(get_authenticated_user))
.route("/users/:id/access_tokens", post(create_access_token))
.route("/panic", post(trace_panic))
.route("/rpc_server_snapshot", get(get_rpc_server_snapshot))
.layer(
ServiceBuilder::new()
.layer(Extension(state))
.layer(Extension(rpc_server))
.layer(middleware::from_fn(validate_api_token)),
)
}
pub async fn validate_api_token<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
let token = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing authorization header".to_string(),
)
})?
.strip_prefix("token ")
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"invalid authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
if token != state.config.api_token {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid authorization token".to_string(),
))?
}
Ok::<_, Error>(next.run(req).await)
}
#[derive(Debug, Deserialize)]
struct AuthenticatedUserParams {
github_user_id: Option<i32>,
github_login: String,
github_email: Option<String>,
}
#[derive(Debug, Serialize)]
struct AuthenticatedUserResponse {
user: User,
metrics_id: String,
}
async fn get_authenticated_user(
Query(params): Query<AuthenticatedUserParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<AuthenticatedUserResponse>> {
let user = app
.db
.get_or_create_user_by_github_account(
&params.github_login,
params.github_user_id,
params.github_email.as_deref(),
)
.await?
.ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "user not found".into()))?;
let metrics_id = app.db.get_user_metrics_id(user.id).await?;
return Ok(Json(AuthenticatedUserResponse { user, metrics_id }));
}
#[derive(Deserialize, Debug)]
struct CreateUserParams {
github_user_id: i32,
github_login: String,
email_address: String,
email_confirmation_code: Option<String>,
#[serde(default)]
admin: bool,
#[serde(default)]
invite_count: i32,
}
#[derive(Serialize, Debug)]
struct CreateUserResponse {
user: User,
signup_device_id: Option<String>,
metrics_id: String,
}
#[derive(Debug, Deserialize)]
struct Panic {
version: String,
release_channel: String,
backtrace_hash: String,
text: String,
}
#[instrument(skip(panic))]
async fn trace_panic(panic: Json<Panic>) -> Result<()> {
tracing::error!(version = %panic.version, release_channel = %panic.release_channel, backtrace_hash = %panic.backtrace_hash, text = %panic.text, "panic report");
Ok(())
}
async fn get_rpc_server_snapshot(
Extension(rpc_server): Extension<Arc<rpc::Server>>,
) -> Result<ErasedJson> {
Ok(ErasedJson::pretty(rpc_server.snapshot().await))
}
#[derive(Deserialize)]
struct CreateAccessTokenQueryParams {
public_key: String,
impersonate: Option<String>,
}
#[derive(Serialize)]
struct CreateAccessTokenResponse {
user_id: UserId,
encrypted_access_token: String,
}
async fn create_access_token(
Path(user_id): Path<UserId>,
Query(params): Query<CreateAccessTokenQueryParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<CreateAccessTokenResponse>> {
let user = app
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user not found"))?;
let mut user_id = user.id;
if let Some(impersonate) = params.impersonate {
if user.admin {
if let Some(impersonated_user) = app.db.get_user_by_github_login(&impersonate).await? {
user_id = impersonated_user.id;
} else {
return Err(Error::Http(
StatusCode::UNPROCESSABLE_ENTITY,
format!("user {impersonate} does not exist"),
));
}
} else {
return Err(Error::Http(
StatusCode::UNAUTHORIZED,
"you do not have permission to impersonate other users".to_string(),
));
}
}
let access_token = auth::create_access_token(app.db.as_ref(), user_id).await?;
let encrypted_access_token =
auth::encrypt_access_token(&access_token, params.public_key.clone())?;
Ok(Json(CreateAccessTokenResponse {
user_id,
encrypted_access_token,
}))
}
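For a sense of the calling convention these routes expect: validate_api_token requires an Authorization header of the form "token <api_token>", and get_authenticated_user takes its parameters as a query string. A hedged sketch of a client call using reqwest (already a dependency elsewhere in this diff); the base URL, token, and login are placeholders, not values from the source:
// Sketch only: placeholder URL, token, and login.
async fn fetch_authenticated_user(
    client: &reqwest::Client,
) -> anyhow::Result<serde_json::Value> {
    let response = client
        .get("http://localhost:8080/user")
        .header("Authorization", "token <collab-api-token>")
        .query(&[("github_login", "some-login")])
        .send()
        .await?;
    Ok(response.json().await?)
}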

@@ -1,151 +0,0 @@
use crate::{
db::{self, AccessTokenId, Database, UserId},
AppState, Error, Result,
};
use anyhow::{anyhow, Context};
use axum::{
http::{self, Request, StatusCode},
middleware::Next,
response::IntoResponse,
};
use lazy_static::lazy_static;
use prometheus::{exponential_buckets, register_histogram, Histogram};
use rand::thread_rng;
use scrypt::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Scrypt,
};
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
lazy_static! {
static ref METRIC_ACCESS_TOKEN_HASHING_TIME: Histogram = register_histogram!(
"access_token_hashing_time",
"time spent hashing access tokens",
exponential_buckets(10.0, 2.0, 10).unwrap(),
)
.unwrap();
}
pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
let mut auth_header = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::UNAUTHORIZED,
"missing authorization header".to_string(),
)
})?
.split_whitespace();
let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing user id in authorization header".to_string(),
)
})?);
let access_token = auth_header.next().ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing access token in authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
let credentials_valid = if let Some(admin_token) = access_token.strip_prefix("ADMIN_TOKEN:") {
state.config.api_token == admin_token
} else {
verify_access_token(&access_token, user_id, &state.db)
.await
.unwrap_or(false)
};
if credentials_valid {
let user = state
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user {} not found", user_id))?;
req.extensions_mut().insert(user);
Ok::<_, Error>(next.run(req).await)
} else {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid credentials".to_string(),
))
}
}
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;
#[derive(Serialize, Deserialize)]
struct AccessTokenJson {
version: usize,
id: AccessTokenId,
token: String,
}
pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
const VERSION: usize = 1;
let access_token = rpc::auth::random_token();
let access_token_hash =
hash_access_token(&access_token).context("failed to hash access token")?;
let id = db
.create_access_token(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE)
.await?;
Ok(serde_json::to_string(&AccessTokenJson {
version: VERSION,
id,
token: access_token,
})?)
}
fn hash_access_token(token: &str) -> Result<String> {
// Avoid slow hashing in debug mode.
let params = if cfg!(debug_assertions) {
scrypt::Params::new(1, 1, 1).unwrap()
} else {
scrypt::Params::new(14, 8, 1).unwrap()
};
Ok(Scrypt
.hash_password(
token.as_bytes(),
None,
params,
&SaltString::generate(thread_rng()),
)
.map_err(anyhow::Error::new)?
.to_string())
}
pub fn encrypt_access_token(access_token: &str, public_key: String) -> Result<String> {
let native_app_public_key =
rpc::auth::PublicKey::try_from(public_key).context("failed to parse app public key")?;
let encrypted_access_token = native_app_public_key
.encrypt_string(access_token)
.context("failed to encrypt access token with public key")?;
Ok(encrypted_access_token)
}
pub async fn verify_access_token(token: &str, user_id: UserId, db: &Arc<Database>) -> Result<bool> {
let token: AccessTokenJson = serde_json::from_str(&token)?;
let db_token = db.get_access_token(token.id).await?;
if db_token.user_id != user_id {
return Err(anyhow!("no such access token"))?;
}
let db_hash = PasswordHash::new(&db_token.hash).map_err(anyhow::Error::new)?;
let t0 = Instant::now();
let is_valid = Scrypt
.verify_password(token.token.as_bytes(), &db_hash)
.is_ok();
let duration = t0.elapsed();
log::info!("hashed access token in {:?}", duration);
METRIC_ACCESS_TOKEN_HASHING_TIME.observe(duration.as_millis() as f64);
Ok(is_valid)
}
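Tying the helpers above together: create_access_token stores a hash and hands back a JSON-encoded AccessTokenJson, encrypt_access_token seals it with the client's public key, and validate_header later expects the client to present the decrypted value alongside its user id. A hedged sketch of the issuance path, mirroring the create_access_token route in api.rs above:
// Sketch only: issue a token, then encrypt it for transport to the client.
async fn issue_token_sketch(
    db: &Database,
    user_id: UserId,
    public_key: String,
) -> Result<String> {
    let access_token = create_access_token(db, user_id).await?;
    // The client decrypts the returned value and then authenticates
    // each request with:
    //     Authorization: <user_id> <decrypted_access_token>
    encrypt_access_token(&access_token, public_key)
}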

@@ -1,20 +0,0 @@
use anyhow::anyhow;
use std::fs;
fn main() -> anyhow::Result<()> {
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
)?;
for (key, value) in env {
let value = match value {
toml::Value::String(value) => value,
toml::Value::Integer(value) => value.to_string(),
toml::Value::Float(value) => value.to_string(),
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
};
println!("export {}=\"{}\"", key, value);
}
Ok(())
}
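Given that the script prints `export KEY="value"` lines, it is presumably meant to be eval-ed by a shell so the .env.toml values become environment variables before launching the server, though the exact invocation is not shown here.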

@@ -1,107 +0,0 @@
use collab2::{db, executor::Executor};
use db::{ConnectOptions, Database};
use serde::{de::DeserializeOwned, Deserialize};
use std::fmt::Write;
#[derive(Debug, Deserialize)]
struct GitHubUser {
id: i32,
login: String,
email: Option<String>,
}
#[tokio::main]
async fn main() {
let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var");
let db = Database::new(ConnectOptions::new(database_url), Executor::Production)
.await
.expect("failed to connect to postgres database");
let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var");
let client = reqwest::Client::new();
let mut current_user =
fetch_github::<GitHubUser>(&client, &github_token, "https://api.github.com/user").await;
current_user
.email
.get_or_insert_with(|| "placeholder@example.com".to_string());
let staff_users = fetch_github::<Vec<GitHubUser>>(
&client,
&github_token,
"https://api.github.com/orgs/zed-industries/teams/staff/members",
)
.await;
let mut zed_users = Vec::new();
zed_users.push((current_user, true));
zed_users.extend(staff_users.into_iter().map(|user| (user, true)));
let user_count = db
.get_all_users(0, 200)
.await
.expect("failed to load users from db")
.len();
if user_count < 100 {
let mut last_user_id = None;
for _ in 0..10 {
let mut uri = "https://api.github.com/users?per_page=100".to_string();
if let Some(last_user_id) = last_user_id {
write!(&mut uri, "&since={}", last_user_id).unwrap();
}
let users = fetch_github::<Vec<GitHubUser>>(&client, &github_token, &uri).await;
if let Some(last_user) = users.last() {
last_user_id = Some(last_user.id);
zed_users.extend(users.into_iter().map(|user| (user, false)));
} else {
break;
}
}
}
for (github_user, admin) in zed_users {
if db
.get_user_by_github_login(&github_user.login)
.await
.expect("failed to fetch user")
.is_none()
{
if admin {
db.create_user(
&format!("{}@zed.dev", github_user.login),
admin,
db::NewUserParams {
github_login: github_user.login,
github_user_id: github_user.id,
},
)
.await
.expect("failed to insert user");
} else {
db.get_or_create_user_by_github_account(
&github_user.login,
Some(github_user.id),
github_user.email.as_deref(),
)
.await
.expect("failed to insert user");
}
}
}
}
async fn fetch_github<T: DeserializeOwned>(
client: &reqwest::Client,
access_token: &str,
url: &str,
) -> T {
let response = client
.get(url)
.bearer_auth(&access_token)
.header("user-agent", "zed")
.send()
.await
.expect(&format!("failed to fetch '{}'", url));
response
.json()
.await
.expect(&format!("failed to deserialize github user from '{}'", url))
}

@@ -1,672 +0,0 @@
#[cfg(test)]
pub mod tests;
#[cfg(test)]
pub use tests::TestDb;
mod ids;
mod queries;
mod tables;
use crate::{executor::Executor, Error, Result};
use anyhow::anyhow;
use collections::{BTreeMap, HashMap, HashSet};
use dashmap::DashMap;
use futures::StreamExt;
use rand::{prelude::StdRng, Rng, SeedableRng};
use rpc::{
proto::{self},
ConnectionId,
};
use sea_orm::{
entity::prelude::*,
sea_query::{Alias, Expr, OnConflict},
ActiveValue, Condition, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbErr,
FromQueryResult, IntoActiveModel, IsolationLevel, JoinType, QueryOrder, QuerySelect, Statement,
TransactionTrait,
};
use serde::{Deserialize, Serialize};
use sqlx::{
migrate::{Migrate, Migration, MigrationSource},
Connection,
};
use std::{
fmt::Write as _,
future::Future,
marker::PhantomData,
ops::{Deref, DerefMut},
path::Path,
rc::Rc,
sync::Arc,
time::Duration,
};
use tables::*;
use tokio::sync::{Mutex, OwnedMutexGuard};
pub use ids::*;
pub use sea_orm::ConnectOptions;
pub use tables::user::Model as User;
pub struct Database {
options: ConnectOptions,
pool: DatabaseConnection,
rooms: DashMap<RoomId, Arc<Mutex<()>>>,
rng: Mutex<StdRng>,
executor: Executor,
notification_kinds_by_id: HashMap<NotificationKindId, &'static str>,
notification_kinds_by_name: HashMap<String, NotificationKindId>,
#[cfg(test)]
runtime: Option<tokio::runtime::Runtime>,
}
// The `Database` type has so many methods that its impl blocks are split into
// separate files in the `queries` folder.
impl Database {
pub async fn new(options: ConnectOptions, executor: Executor) -> Result<Self> {
sqlx::any::install_default_drivers();
Ok(Self {
options: options.clone(),
pool: sea_orm::Database::connect(options).await?,
rooms: DashMap::with_capacity(16384),
rng: Mutex::new(StdRng::seed_from_u64(0)),
notification_kinds_by_id: HashMap::default(),
notification_kinds_by_name: HashMap::default(),
executor,
#[cfg(test)]
runtime: None,
})
}
#[cfg(test)]
pub fn reset(&self) {
self.rooms.clear();
}
pub async fn migrate(
&self,
migrations_path: &Path,
ignore_checksum_mismatch: bool,
) -> anyhow::Result<Vec<(Migration, Duration)>> {
let migrations = MigrationSource::resolve(migrations_path)
.await
.map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?;
connection.ensure_migrations_table().await?;
let applied_migrations: HashMap<_, _> = connection
.list_applied_migrations()
.await?
.into_iter()
.map(|m| (m.version, m))
.collect();
let mut new_migrations = Vec::new();
for migration in migrations {
match applied_migrations.get(&migration.version) {
Some(applied_migration) => {
if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch
{
Err(anyhow!(
"checksum mismatch for applied migration {}",
migration.description
))?;
}
}
None => {
let elapsed = connection.apply(&migration).await?;
new_migrations.push((migration, elapsed));
}
}
}
Ok(new_migrations)
}
pub async fn initialize_static_data(&mut self) -> Result<()> {
self.initialize_notification_kinds().await?;
Ok(())
}
pub async fn transaction<F, Fut, T>(&self, f: F) -> Result<T>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(result) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(result),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn optional_room_transaction<F, Fut, T>(&self, f: F) -> Result<Option<RoomGuard<T>>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<Option<(RoomId, T)>>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(Some((room_id, data))) => {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(Some(RoomGuard {
data,
_guard,
_not_send: PhantomData,
}));
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
}
Ok(None) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(None),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn room_transaction<F, Fut, T>(&self, room_id: RoomId, f: F) -> Result<RoomGuard<T>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(data) => match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(RoomGuard {
data,
_guard,
_not_send: PhantomData,
});
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn with_transaction<F, Fut, T>(&self, f: &F) -> Result<(DatabaseTransaction, Result<T>)>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let tx = self
.pool
.begin_with_config(Some(IsolationLevel::Serializable), None)
.await?;
let mut tx = Arc::new(Some(tx));
let result = f(TransactionHandle(tx.clone())).await;
let Some(tx) = Arc::get_mut(&mut tx).and_then(|tx| tx.take()) else {
return Err(anyhow!(
"couldn't complete transaction because it's still in use"
))?;
};
Ok((tx, result))
}
async fn run<F, T>(&self, future: F) -> Result<T>
where
F: Future<Output = Result<T>>,
{
#[cfg(test)]
{
if let Executor::Deterministic(executor) = &self.executor {
executor.simulate_random_delay().await;
}
self.runtime.as_ref().unwrap().block_on(future)
}
#[cfg(not(test))]
{
future.await
}
}
async fn retry_on_serialization_error(&self, error: &Error, prev_attempt_count: u32) -> bool {
// If the error is due to a failure to serialize concurrent transactions, then retry
// this transaction after a delay. With each subsequent retry, double the delay duration.
// Also vary the delay randomly in order to ensure different database connections retry
// at different times.
if is_serialization_error(error) {
let base_delay = 4_u64 << prev_attempt_count.min(16);
let randomized_delay = base_delay as f32 * self.rng.lock().await.gen_range(0.5..=2.0);
log::info!(
"retrying transaction after serialization error. delay: {} ms.",
randomized_delay
);
self.executor
.sleep(Duration::from_millis(randomized_delay as u64))
.await;
true
} else {
false
}
}
}
fn is_serialization_error(error: &Error) -> bool {
const SERIALIZATION_FAILURE_CODE: &'static str = "40001";
match error {
Error::Database(
DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
| DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
) if error
.as_database_error()
.and_then(|error| error.code())
.as_deref()
== Some(SERIALIZATION_FAILURE_CODE) =>
{
true
}
_ => false,
}
}
pub struct TransactionHandle(Arc<Option<DatabaseTransaction>>);
impl Deref for TransactionHandle {
type Target = DatabaseTransaction;
fn deref(&self) -> &Self::Target {
self.0.as_ref().as_ref().unwrap()
}
}
pub struct RoomGuard<T> {
data: T,
_guard: OwnedMutexGuard<()>,
_not_send: PhantomData<Rc<()>>,
}
impl<T> Deref for RoomGuard<T> {
type Target = T;
fn deref(&self) -> &T {
&self.data
}
}
impl<T> DerefMut for RoomGuard<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.data
}
}
impl<T> RoomGuard<T> {
pub fn into_inner(self) -> T {
self.data
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Contact {
Accepted { user_id: UserId, busy: bool },
Outgoing { user_id: UserId },
Incoming { user_id: UserId },
}
impl Contact {
pub fn user_id(&self) -> UserId {
match self {
Contact::Accepted { user_id, .. } => *user_id,
Contact::Outgoing { user_id } => *user_id,
Contact::Incoming { user_id, .. } => *user_id,
}
}
}
pub type NotificationBatch = Vec<(UserId, proto::Notification)>;
pub struct CreatedChannelMessage {
pub message_id: MessageId,
pub participant_connection_ids: Vec<ConnectionId>,
pub channel_members: Vec<UserId>,
pub notifications: NotificationBatch,
}
#[derive(Clone, Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)]
pub struct Invite {
pub email_address: String,
pub email_confirmation_code: String,
}
#[derive(Clone, Debug, Deserialize)]
pub struct NewSignup {
pub email_address: String,
pub platform_mac: bool,
pub platform_windows: bool,
pub platform_linux: bool,
pub editor_features: Vec<String>,
pub programming_languages: Vec<String>,
pub device_id: Option<String>,
pub added_to_mailing_list: bool,
pub created_at: Option<DateTime>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)]
pub struct WaitlistSummary {
pub count: i64,
pub linux_count: i64,
pub mac_count: i64,
pub windows_count: i64,
pub unknown_count: i64,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct NewUserParams {
pub github_login: String,
pub github_user_id: i32,
}
#[derive(Debug)]
pub struct NewUserResult {
pub user_id: UserId,
pub metrics_id: String,
pub inviting_user_id: Option<UserId>,
pub signup_device_id: Option<String>,
}
#[derive(Debug)]
pub struct MoveChannelResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub moved_channels: HashSet<ChannelId>,
}
#[derive(Debug)]
pub struct RenameChannelResult {
pub channel: Channel,
pub participants_to_update: HashMap<UserId, Channel>,
}
#[derive(Debug)]
pub struct CreateChannelResult {
pub channel: Channel,
pub participants_to_update: Vec<(UserId, ChannelsForUser)>,
}
#[derive(Debug)]
pub struct SetChannelVisibilityResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub channels_to_remove: Vec<ChannelId>,
}
#[derive(Debug)]
pub struct MembershipUpdated {
pub channel_id: ChannelId,
pub new_channels: ChannelsForUser,
pub removed_channels: Vec<ChannelId>,
}
#[derive(Debug)]
pub enum SetMemberRoleResult {
InviteUpdated(Channel),
MembershipUpdated(MembershipUpdated),
}
#[derive(Debug)]
pub struct InviteMemberResult {
pub channel: Channel,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RespondToChannelInvite {
pub membership_update: Option<MembershipUpdated>,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RemoveChannelMemberResult {
pub membership_update: MembershipUpdated,
pub notification_id: Option<NotificationId>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Channel {
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub role: ChannelRole,
pub parent_path: Vec<ChannelId>,
}
impl Channel {
fn from_model(value: channel::Model, role: ChannelRole) -> Self {
Channel {
id: value.id,
visibility: value.visibility,
name: value.clone().name,
role,
parent_path: value.ancestors().collect(),
}
}
pub fn to_proto(&self) -> proto::Channel {
proto::Channel {
id: self.id.to_proto(),
name: self.name.clone(),
visibility: self.visibility.into(),
role: self.role.into(),
parent_path: self.parent_path.iter().map(|c| c.to_proto()).collect(),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ChannelMember {
pub role: ChannelRole,
pub user_id: UserId,
pub kind: proto::channel_member::Kind,
}
impl ChannelMember {
pub fn to_proto(&self) -> proto::ChannelMember {
proto::ChannelMember {
role: self.role.into(),
user_id: self.user_id.to_proto(),
kind: self.kind.into(),
}
}
}
#[derive(Debug, PartialEq)]
pub struct ChannelsForUser {
pub channels: Vec<Channel>,
pub channel_participants: HashMap<ChannelId, Vec<UserId>>,
pub unseen_buffer_changes: Vec<proto::UnseenChannelBufferChange>,
pub channel_messages: Vec<proto::UnseenChannelMessage>,
}
#[derive(Debug)]
pub struct RejoinedChannelBuffer {
pub buffer: proto::RejoinedChannelBuffer,
pub old_connection_id: ConnectionId,
}
#[derive(Clone)]
pub struct JoinRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct RejoinedRoom {
pub room: proto::Room,
pub rejoined_projects: Vec<RejoinedProject>,
pub reshared_projects: Vec<ResharedProject>,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct ResharedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<proto::WorktreeMetadata>,
}
pub struct RejoinedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<RejoinedWorktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
#[derive(Debug)]
pub struct RejoinedWorktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub updated_entries: Vec<proto::Entry>,
pub removed_entries: Vec<u64>,
pub updated_repositories: Vec<proto::RepositoryEntry>,
pub removed_repositories: Vec<u64>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
pub struct LeftRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub left_projects: HashMap<ProjectId, LeftProject>,
pub canceled_calls_to_user_ids: Vec<UserId>,
pub deleted: bool,
}
pub struct RefreshedRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub stale_participant_user_ids: Vec<UserId>,
pub canceled_calls_to_user_ids: Vec<UserId>,
}
pub struct RefreshedChannelBuffer {
pub connection_ids: Vec<ConnectionId>,
pub collaborators: Vec<proto::Collaborator>,
}
pub struct Project {
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
pub struct ProjectCollaborator {
pub connection_id: ConnectionId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl ProjectCollaborator {
pub fn to_proto(&self) -> proto::Collaborator {
proto::Collaborator {
peer_id: Some(self.connection_id.into()),
replica_id: self.replica_id.0 as u32,
user_id: self.user_id.to_proto(),
}
}
}
#[derive(Debug)]
pub struct LeftProject {
pub id: ProjectId,
pub host_user_id: UserId,
pub host_connection_id: ConnectionId,
pub connection_ids: Vec<ConnectionId>,
}
pub struct Worktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub entries: Vec<proto::Entry>,
pub repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
#[derive(Debug)]
pub struct WorktreeSettingsFile {
pub path: String,
pub content: String,
}
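The transaction plumbing above is the substrate for every file in the queries folder: each public method wraps its body in self.transaction (or one of the room_transaction variants) and is retried automatically when Postgres reports a serialization failure. A minimal hedged sketch of that calling convention, modeled on get_access_token later in this diff; the _sketch name is hypothetical and assumes user::Entity is exported by the tables module:
impl Database {
    // Sketch only: the shape every query helper follows.
    pub async fn get_user_by_id_sketch(&self, id: UserId) -> Result<user::Model> {
        self.transaction(|tx| async move {
            Ok(user::Entity::find_by_id(id)
                .one(&*tx)
                .await?
                .ok_or_else(|| anyhow!("no such user"))?)
        })
        .await
    }
}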

@@ -1,199 +0,0 @@
use crate::Result;
use rpc::proto;
use sea_orm::{entity::prelude::*, DbErr};
use serde::{Deserialize, Serialize};
macro_rules! id_type {
($name:ident) => {
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
DeriveValueType,
)]
#[serde(transparent)]
pub struct $name(pub i32);
impl $name {
#[allow(unused)]
pub const MAX: Self = Self(i32::MAX);
#[allow(unused)]
pub fn from_proto(value: u64) -> Self {
Self(value as i32)
}
#[allow(unused)]
pub fn to_proto(self) -> u64 {
self.0 as u64
}
}
impl std::fmt::Display for $name {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl sea_orm::TryFromU64 for $name {
fn try_from_u64(n: u64) -> Result<Self, DbErr> {
Ok(Self(n.try_into().map_err(|_| {
DbErr::ConvertFromU64(concat!(
"error converting ",
stringify!($name),
" to u64"
))
})?))
}
}
impl sea_orm::sea_query::Nullable for $name {
fn null() -> Value {
Value::Int(None)
}
}
};
}
id_type!(BufferId);
id_type!(AccessTokenId);
id_type!(ChannelChatParticipantId);
id_type!(ChannelId);
id_type!(ChannelMemberId);
id_type!(MessageId);
id_type!(ContactId);
id_type!(FollowerId);
id_type!(RoomId);
id_type!(RoomParticipantId);
id_type!(ProjectId);
id_type!(ProjectCollaboratorId);
id_type!(ReplicaId);
id_type!(ServerId);
id_type!(SignupId);
id_type!(UserId);
id_type!(ChannelBufferCollaboratorId);
id_type!(FlagId);
id_type!(NotificationId);
id_type!(NotificationKindId);
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelRole {
#[sea_orm(string_value = "admin")]
Admin,
#[sea_orm(string_value = "member")]
#[default]
Member,
#[sea_orm(string_value = "guest")]
Guest,
#[sea_orm(string_value = "banned")]
Banned,
}
impl ChannelRole {
pub fn should_override(&self, other: Self) -> bool {
use ChannelRole::*;
match self {
Admin => matches!(other, Member | Banned | Guest),
Member => matches!(other, Banned | Guest),
Banned => matches!(other, Guest),
Guest => false,
}
}
pub fn max(&self, other: Self) -> Self {
if self.should_override(other) {
*self
} else {
other
}
}
pub fn can_see_all_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Admin | Member => true,
Guest | Banned => false,
}
}
pub fn can_only_see_public_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Guest => true,
Admin | Member | Banned => false,
}
}
}
impl From<proto::ChannelRole> for ChannelRole {
fn from(value: proto::ChannelRole) -> Self {
match value {
proto::ChannelRole::Admin => ChannelRole::Admin,
proto::ChannelRole::Member => ChannelRole::Member,
proto::ChannelRole::Guest => ChannelRole::Guest,
proto::ChannelRole::Banned => ChannelRole::Banned,
}
}
}
impl Into<proto::ChannelRole> for ChannelRole {
fn into(self) -> proto::ChannelRole {
match self {
ChannelRole::Admin => proto::ChannelRole::Admin,
ChannelRole::Member => proto::ChannelRole::Member,
ChannelRole::Guest => proto::ChannelRole::Guest,
ChannelRole::Banned => proto::ChannelRole::Banned,
}
}
}
impl Into<i32> for ChannelRole {
fn into(self) -> i32 {
let proto: proto::ChannelRole = self.into();
proto.into()
}
}
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelVisibility {
#[sea_orm(string_value = "public")]
Public,
#[sea_orm(string_value = "members")]
#[default]
Members,
}
impl From<proto::ChannelVisibility> for ChannelVisibility {
fn from(value: proto::ChannelVisibility) -> Self {
match value {
proto::ChannelVisibility::Public => ChannelVisibility::Public,
proto::ChannelVisibility::Members => ChannelVisibility::Members,
}
}
}
impl Into<proto::ChannelVisibility> for ChannelVisibility {
fn into(self) -> proto::ChannelVisibility {
match self {
ChannelVisibility::Public => proto::ChannelVisibility::Public,
ChannelVisibility::Members => proto::ChannelVisibility::Members,
}
}
}
impl Into<i32> for ChannelVisibility {
fn into(self) -> i32 {
let proto: proto::ChannelVisibility = self.into();
proto.into()
}
}
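A quick illustration of the ordering that should_override and max define over roles, written as a hypothetical test rather than one present in the crate:
#[test]
fn channel_role_ordering_sketch() {
    use ChannelRole::*;
    // Admin outranks every other role, from either side of `max`.
    assert_eq!(Admin.max(Member), Admin);
    assert_eq!(Member.max(Admin), Admin);
    // Banned overrides Guest, but loses to Member and Admin.
    assert_eq!(Banned.max(Guest), Banned);
    assert_eq!(Member.max(Banned), Member);
}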

@@ -1,12 +0,0 @@
use super::*;
pub mod access_tokens;
pub mod buffers;
pub mod channels;
pub mod contacts;
pub mod messages;
pub mod notifications;
pub mod projects;
pub mod rooms;
pub mod servers;
pub mod users;

@@ -1,54 +0,0 @@
use super::*;
use sea_orm::sea_query::Query;
impl Database {
pub async fn create_access_token(
&self,
user_id: UserId,
access_token_hash: &str,
max_access_token_count: usize,
) -> Result<AccessTokenId> {
self.transaction(|tx| async {
let tx = tx;
let token = access_token::ActiveModel {
user_id: ActiveValue::set(user_id),
hash: ActiveValue::set(access_token_hash.into()),
..Default::default()
}
.insert(&*tx)
.await?;
access_token::Entity::delete_many()
.filter(
access_token::Column::Id.in_subquery(
Query::select()
.column(access_token::Column::Id)
.from(access_token::Entity)
.and_where(access_token::Column::UserId.eq(user_id))
.order_by(access_token::Column::Id, sea_orm::Order::Desc)
.limit(10000)
.offset(max_access_token_count as u64)
.to_owned(),
),
)
.exec(&*tx)
.await?;
Ok(token.id)
})
.await
}
pub async fn get_access_token(
&self,
access_token_id: AccessTokenId,
) -> Result<access_token::Model> {
self.transaction(|tx| async move {
Ok(access_token::Entity::find_by_id(access_token_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such access token"))?)
})
.await
}
}
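The delete_many above doubles as token pruning: the subquery orders a user's tokens newest-first, offset skips the max_access_token_count most recent rows, and everything older (up to the 10000-row cap per pass) is deleted, so with MAX_ACCESS_TOKENS_TO_STORE = 8 in auth.rs a user keeps at most eight live tokens.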

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,353 +0,0 @@
use super::*;
impl Database {
pub async fn get_contacts(&self, user_id: UserId) -> Result<Vec<Contact>> {
#[derive(Debug, FromQueryResult)]
struct ContactWithUserBusyStatuses {
user_id_a: UserId,
user_id_b: UserId,
a_to_b: bool,
accepted: bool,
user_a_busy: bool,
user_b_busy: bool,
}
self.transaction(|tx| async move {
let user_a_participant = Alias::new("user_a_participant");
let user_b_participant = Alias::new("user_b_participant");
let mut db_contacts = contact::Entity::find()
.column_as(
Expr::col((user_a_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_a_busy",
)
.column_as(
Expr::col((user_b_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_b_busy",
)
.filter(
contact::Column::UserIdA
.eq(user_id)
.or(contact::Column::UserIdB.eq(user_id)),
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserARoomParticipant.def(),
user_a_participant,
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserBRoomParticipant.def(),
user_b_participant,
)
.into_model::<ContactWithUserBusyStatuses>()
.stream(&*tx)
.await?;
let mut contacts = Vec::new();
while let Some(db_contact) = db_contacts.next().await {
let db_contact = db_contact?;
if db_contact.user_id_a == user_id {
if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_b,
busy: db_contact.user_b_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_b,
})
} else {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_b,
});
}
} else if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_a,
busy: db_contact.user_a_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_a,
});
} else {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_a,
});
}
}
contacts.sort_unstable_by_key(|contact| contact.user_id());
Ok(contacts)
})
.await
}
pub async fn is_user_busy(&self, user_id: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let participant = room_participant::Entity::find()
.filter(room_participant::Column::UserId.eq(user_id))
.one(&*tx)
.await?;
Ok(participant.is_some())
})
.await
}
pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let (id_a, id_b) = if user_id_1 < user_id_2 {
(user_id_1, user_id_2)
} else {
(user_id_2, user_id_1)
};
Ok(contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::Accepted.eq(true)),
)
.one(&*tx)
.await?
.is_some())
})
.await
}
pub async fn send_contact_request(
&self,
sender_id: UserId,
receiver_id: UserId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if sender_id < receiver_id {
(sender_id, receiver_id, true)
} else {
(receiver_id, sender_id, false)
};
let rows_affected = contact::Entity::insert(contact::ActiveModel {
user_id_a: ActiveValue::set(id_a),
user_id_b: ActiveValue::set(id_b),
a_to_b: ActiveValue::set(a_to_b),
accepted: ActiveValue::set(false),
should_notify: ActiveValue::set(true),
..Default::default()
})
.on_conflict(
OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB])
.values([
(contact::Column::Accepted, true.into()),
(contact::Column::ShouldNotify, false.into()),
])
.action_and_where(
contact::Column::Accepted.eq(false).and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::UserIdA.eq(id_b))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::UserIdA.eq(id_a))),
),
)
.to_owned(),
)
.exec_without_returning(&*tx)
.await?;
if rows_affected == 0 {
Err(anyhow!("contact already requested"))?;
}
Ok(self
.create_notification(
receiver_id,
rpc::Notification::ContactRequest {
sender_id: sender_id.to_proto(),
},
true,
&*tx,
)
.await?
.into_iter()
.collect())
})
.await
}
/// Returns a bool indicating whether the removed contact had originally been accepted.
///
/// Deletes the contact identified by the requester and responder ids, and then returns
/// whether the deleted contact had originally been accepted or was still a pending contact request.
///
/// # Arguments
///
/// * `requester_id` - The user that initiates this request
/// * `responder_id` - The user that will be removed
pub async fn remove_contact(
&self,
requester_id: UserId,
responder_id: UserId,
) -> Result<(bool, Option<NotificationId>)> {
self.transaction(|tx| async move {
let (id_a, id_b) = if responder_id < requester_id {
(responder_id, requester_id)
} else {
(requester_id, responder_id)
};
let contact = contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b)),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such contact"))?;
contact::Entity::delete_by_id(contact.id).exec(&*tx).await?;
let mut deleted_notification_id = None;
if !contact.accepted {
deleted_notification_id = self
.remove_notification(
responder_id,
rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
&*tx,
)
.await?;
}
Ok((contact.accepted, deleted_notification_id))
})
.await
}
pub async fn dismiss_contact_notification(
&self,
user_id: UserId,
contact_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if user_id < contact_user_id {
(user_id, contact_user_id, true)
} else {
(contact_user_id, user_id, false)
};
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
should_notify: ActiveValue::set(false),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::Accepted.eq(true))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::Accepted.eq(false))),
),
)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such contact request"))?
} else {
Ok(())
}
})
.await
}
pub async fn respond_to_contact_request(
&self,
responder_id: UserId,
requester_id: UserId,
accept: bool,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if responder_id < requester_id {
(responder_id, requester_id, false)
} else {
(requester_id, responder_id, true)
};
let rows_affected = if accept {
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
accepted: ActiveValue::set(true),
should_notify: ActiveValue::set(true),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b)),
)
.exec(&*tx)
.await?;
result.rows_affected
} else {
let result = contact::Entity::delete_many()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b))
.and(contact::Column::Accepted.eq(false)),
)
.exec(&*tx)
.await?;
result.rows_affected
};
if rows_affected == 0 {
Err(anyhow!("no such contact request"))?
}
let mut notifications = Vec::new();
notifications.extend(
self.mark_notification_as_read_with_response(
responder_id,
&rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
accept,
&*tx,
)
.await?,
);
if accept {
notifications.extend(
self.create_notification(
requester_id,
rpc::Notification::ContactRequestAccepted {
responder_id: responder_id.to_proto(),
},
true,
&*tx,
)
.await?,
);
}
Ok(notifications)
})
.await
}
}
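A worked example of the canonical ordering used throughout this file: if user 5 sends a contact request to user 3, send_contact_request stores the row as (user_id_a = 3, user_id_b = 5, a_to_b = false), since the smaller id always goes in user_id_a and a_to_b records which direction the request ran.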

@@ -1,505 +0,0 @@
use super::*;
use rpc::Notification;
use sea_orm::TryInsertResult;
use time::OffsetDateTime;
impl Database {
pub async fn join_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
channel_chat_participant::ActiveModel {
id: ActiveValue::NotSet,
channel_id: ActiveValue::Set(channel_id),
user_id: ActiveValue::Set(user_id),
connection_id: ActiveValue::Set(connection_id.id as i32),
connection_server_id: ActiveValue::Set(ServerId(connection_id.owner_id as i32)),
}
.insert(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn channel_chat_connection_lost(
&self,
connection_id: ConnectionId,
tx: &DatabaseTransaction,
) -> Result<()> {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id)),
)
.exec(tx)
.await?;
Ok(())
}
pub async fn leave_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id))
.add(channel_chat_participant::Column::ChannelId.eq(channel_id)),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn get_channel_messages(
&self,
channel_id: ChannelId,
user_id: UserId,
count: usize,
before_message_id: Option<MessageId>,
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut condition =
Condition::all().add(channel_message::Column::ChannelId.eq(channel_id));
if let Some(before_message_id) = before_message_id {
condition = condition.add(channel_message::Column::Id.lt(before_message_id));
}
let rows = channel_message::Entity::find()
.filter(condition)
.order_by_desc(channel_message::Column::Id)
.limit(count as u64)
.all(&*tx)
.await?;
self.load_channel_messages(rows, &*tx).await
})
.await
}
pub async fn get_channel_messages_by_id(
&self,
user_id: UserId,
message_ids: &[MessageId],
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let rows = channel_message::Entity::find()
.filter(channel_message::Column::Id.is_in(message_ids.iter().copied()))
.order_by_desc(channel_message::Column::Id)
.all(&*tx)
.await?;
let mut channels = HashMap::<ChannelId, channel::Model>::default();
for row in &rows {
channels.insert(
row.channel_id,
self.get_channel_internal(row.channel_id, &*tx).await?,
);
}
for (_, channel) in channels {
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
}
let messages = self.load_channel_messages(rows, &*tx).await?;
Ok(messages)
})
.await
}
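
/// Converts message rows into protos and attaches their mentions,
/// returning the messages in ascending id order.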
async fn load_channel_messages(
&self,
rows: Vec<channel_message::Model>,
tx: &DatabaseTransaction,
) -> Result<Vec<proto::ChannelMessage>> {
let mut messages = rows
.into_iter()
.map(|row| {
let nonce = row.nonce.as_u64_pair();
proto::ChannelMessage {
id: row.id.to_proto(),
sender_id: row.sender_id.to_proto(),
body: row.body,
timestamp: row.sent_at.assume_utc().unix_timestamp() as u64,
mentions: vec![],
nonce: Some(proto::Nonce {
upper_half: nonce.0,
lower_half: nonce.1,
}),
}
})
.collect::<Vec<_>>();
messages.reverse();
let mut mentions = channel_message_mention::Entity::find()
.filter(channel_message_mention::Column::MessageId.is_in(messages.iter().map(|m| m.id)))
.order_by_asc(channel_message_mention::Column::MessageId)
.order_by_asc(channel_message_mention::Column::StartOffset)
.stream(&*tx)
.await?;
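// Both `messages` and `mentions` are sorted by message id, so a single
// two-pointer pass is enough to attach each mention to its message.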
let mut message_ix = 0;
while let Some(mention) = mentions.next().await {
let mention = mention?;
let message_id = mention.message_id.to_proto();
while let Some(message) = messages.get_mut(message_ix) {
if message.id < message_id {
message_ix += 1;
} else {
if message.id == message_id {
message.mentions.push(proto::ChatMention {
range: Some(proto::Range {
start: mention.start_offset as u64,
end: mention.end_offset as u64,
}),
user_id: mention.user_id.to_proto(),
});
}
break;
}
}
}
Ok(messages)
}
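
/// Inserts a new message into `channel_id` on behalf of `user_id`,
/// records its mentions, and creates mention notifications. The sender
/// must have joined the channel's chat. Returns the message id along
/// with the connections and channel members to notify.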
pub async fn create_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
body: &str,
mentions: &[proto::ChatMention],
timestamp: OffsetDateTime,
nonce: u128,
) -> Result<CreatedChannelMessage> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
let mut participant_user_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_user_ids.push(row.user_id);
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
let timestamp = timestamp.to_offset(time::UtcOffset::UTC);
let timestamp = time::PrimitiveDateTime::new(timestamp.date(), timestamp.time());
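// The insert is deduplicated on (sender_id, nonce): a retried send with
// the same nonce conflicts and inserts nothing, and the existing message
// id is looked up in the `_` match arm below instead.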
let result = channel_message::Entity::insert(channel_message::ActiveModel {
channel_id: ActiveValue::Set(channel_id),
sender_id: ActiveValue::Set(user_id),
body: ActiveValue::Set(body.to_string()),
sent_at: ActiveValue::Set(timestamp),
nonce: ActiveValue::Set(Uuid::from_u128(nonce)),
id: ActiveValue::NotSet,
})
.on_conflict(
OnConflict::columns([
channel_message::Column::SenderId,
channel_message::Column::Nonce,
])
.do_nothing()
.to_owned(),
)
.do_nothing()
.exec(&*tx)
.await?;
let message_id;
let mut notifications = Vec::new();
match result {
TryInsertResult::Inserted(result) => {
message_id = result.last_insert_id;
let mentioned_user_ids =
mentions.iter().map(|m| m.user_id).collect::<HashSet<_>>();
let mentions = mentions
.iter()
.filter_map(|mention| {
let range = mention.range.as_ref()?;
if !body.is_char_boundary(range.start as usize)
|| !body.is_char_boundary(range.end as usize)
{
return None;
}
Some(channel_message_mention::ActiveModel {
message_id: ActiveValue::Set(message_id),
start_offset: ActiveValue::Set(range.start as i32),
end_offset: ActiveValue::Set(range.end as i32),
user_id: ActiveValue::Set(UserId::from_proto(mention.user_id)),
})
})
.collect::<Vec<_>>();
if !mentions.is_empty() {
channel_message_mention::Entity::insert_many(mentions)
.exec(&*tx)
.await?;
}
for mentioned_user in mentioned_user_ids {
notifications.extend(
self.create_notification(
UserId::from_proto(mentioned_user),
rpc::Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: user_id.to_proto(),
channel_id: channel_id.to_proto(),
},
false,
&*tx,
)
.await?,
);
}
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
}
_ => {
message_id = channel_message::Entity::find()
.filter(channel_message::Column::Nonce.eq(Uuid::from_u128(nonce)))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("failed to insert message"))?
.id;
}
}
let mut channel_members = self.get_channel_participants(&channel, &*tx).await?;
channel_members.retain(|member| !participant_user_ids.contains(member));
Ok(CreatedChannelMessage {
message_id,
participant_connection_ids,
channel_members,
notifications,
})
})
.await
}
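
/// Records that `user_id` has seen messages up to `message_id` in the
/// channel, and marks the corresponding mention notification as read.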
pub async fn observe_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
let mut batch = NotificationBatch::default();
batch.extend(
self.mark_notification_as_read(
user_id,
&Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: Default::default(),
channel_id: Default::default(),
},
&*tx,
)
.await?,
);
Ok(batch)
})
.await
}
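
/// Upserts the user's high-water mark for the channel; the conditional
/// update ensures the stored message id only ever moves forward.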
async fn observe_channel_message_internal(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
tx: &DatabaseTransaction,
) -> Result<()> {
observed_channel_messages::Entity::insert(observed_channel_messages::ActiveModel {
user_id: ActiveValue::Set(user_id),
channel_id: ActiveValue::Set(channel_id),
channel_message_id: ActiveValue::Set(message_id),
})
.on_conflict(
OnConflict::columns([
observed_channel_messages::Column::ChannelId,
observed_channel_messages::Column::UserId,
])
.update_column(observed_channel_messages::Column::ChannelMessageId)
.action_cond_where(observed_channel_messages::Column::ChannelMessageId.lt(message_id))
.to_owned(),
)
// TODO: Try to upgrade SeaORM so we don't have to do this hack around their bug
.exec_without_returning(&*tx)
.await?;
Ok(())
}
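
/// For each of `channel_ids`, returns the newest message's id unless it
/// matches the message `user_id` last observed in that channel.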
pub async fn unseen_channel_messages(
&self,
user_id: UserId,
channel_ids: &[ChannelId],
tx: &DatabaseTransaction,
) -> Result<Vec<proto::UnseenChannelMessage>> {
let mut observed_messages_by_channel_id = HashMap::default();
let mut rows = observed_channel_messages::Entity::find()
.filter(observed_channel_messages::Column::UserId.eq(user_id))
.filter(observed_channel_messages::Column::ChannelId.is_in(channel_ids.iter().copied()))
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
observed_messages_by_channel_id.insert(row.channel_id, row);
}
drop(rows);
let mut values = String::new();
for id in channel_ids {
if !values.is_empty() {
values.push_str(", ");
}
write!(&mut values, "({})", id).unwrap();
}
if values.is_empty() {
return Ok(Default::default());
}
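// Rank messages per channel with a window function and keep only
// row_number = 1, i.e. the newest message in each channel.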
let sql = format!(
r#"
SELECT
*
FROM (
SELECT
*,
row_number() OVER (
PARTITION BY channel_id
ORDER BY id DESC
) as row_number
FROM channel_messages
WHERE
channel_id in ({values})
) AS messages
WHERE
row_number = 1
"#,
);
let stmt = Statement::from_string(self.pool.get_database_backend(), sql);
let last_messages = channel_message::Model::find_by_statement(stmt)
.all(&*tx)
.await?;
let mut changes = Vec::new();
for last_message in last_messages {
if let Some(observed_message) =
observed_messages_by_channel_id.get(&last_message.channel_id)
{
if observed_message.channel_message_id == last_message.id {
continue;
}
}
changes.push(proto::UnseenChannelMessage {
channel_id: last_message.channel_id.to_proto(),
message_id: last_message.id.to_proto(),
});
}
Ok(changes)
}
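
/// Deletes a message, allowed if `user_id` is its sender or, failing
/// that, an admin of the channel. Returns the connection ids of the
/// chat participants to notify.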
pub async fn remove_channel_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
user_id: UserId,
) -> Result<Vec<ConnectionId>> {
self.transaction(|tx| async move {
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
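// First try to delete the message as its sender; if nothing matched,
// fall back to deleting as a channel admin below.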
let result = channel_message::Entity::delete_by_id(message_id)
.filter(channel_message::Column::SenderId.eq(user_id))
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
if self
.check_user_is_channel_admin(&channel, user_id, &*tx)
.await
.is_ok()
{
let result = channel_message::Entity::delete_by_id(message_id)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such message"))?;
}
} else {
Err(anyhow!("operation could not be completed"))?;
}
}
Ok(participant_connection_ids)
})
.await
}
}
