From 06f6d02579cb7891ecbaa89bbeccf8712e51b10c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 19 Dec 2022 16:05:22 +0100 Subject: [PATCH 01/56] Stop counting extensions in worktree --- crates/project/src/worktree.rs | 52 +--------------------------------- 1 file changed, 1 insertion(+), 51 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 4781e17541..77d055d5cd 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -125,7 +125,6 @@ pub struct LocalSnapshot { removed_entry_ids: HashMap, next_entry_id: Arc, snapshot: Snapshot, - extension_counts: HashMap, } impl Clone for LocalSnapshot { @@ -136,7 +135,6 @@ impl Clone for LocalSnapshot { removed_entry_ids: self.removed_entry_ids.clone(), next_entry_id: self.next_entry_id.clone(), snapshot: self.snapshot.clone(), - extension_counts: self.extension_counts.clone(), } } } @@ -427,7 +425,6 @@ impl LocalWorktree { scan_id: 0, is_complete: true, }, - extension_counts: Default::default(), }; if let Some(metadata) = metadata { let entry = Entry::new( @@ -1363,10 +1360,6 @@ impl Snapshot { } impl LocalSnapshot { - pub fn extension_counts(&self) -> &HashMap { - &self.extension_counts - } - // Gives the most specific git repository for a given path pub(crate) fn repo_for(&self, path: &Path) -> Option { self.git_repositories @@ -1508,11 +1501,6 @@ impl LocalSnapshot { &(), ); - if let Some(removed_entry) = removed_entry { - self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored); - } - self.inc_extension_count(&entry.path, entry.is_ignored); - entry } @@ -1573,7 +1561,6 @@ impl LocalSnapshot { for mut entry in entries { self.reuse_entry_id(&mut entry); - self.inc_extension_count(&entry.path, entry.is_ignored); entries_by_id_edits.push(Edit::Insert(PathEntry { id: entry.id, path: entry.path.clone(), @@ -1584,33 +1571,7 @@ impl LocalSnapshot { } self.entries_by_path.edit(entries_by_path_edits, &()); - let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &()); - - for removed_entry in removed_entries { - self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored); - } - } - - fn inc_extension_count(&mut self, path: &Path, ignored: bool) { - if !ignored { - if let Some(extension) = path.extension() { - if let Some(count) = self.extension_counts.get_mut(extension) { - *count += 1; - } else { - self.extension_counts.insert(extension.into(), 1); - } - } - } - } - - fn dec_extension_count(&mut self, path: &Path, ignored: bool) { - if !ignored { - if let Some(extension) = path.extension() { - if let Some(count) = self.extension_counts.get_mut(extension) { - *count -= 1; - } - } - } + self.entries_by_id.edit(entries_by_id_edits, &()); } fn reuse_entry_id(&mut self, entry: &mut Entry) { @@ -1640,7 +1601,6 @@ impl LocalSnapshot { .or_insert(entry.id); *removed_entry_id = cmp::max(*removed_entry_id, entry.id); entries_by_id_edits.push(Edit::Remove(entry.id)); - self.dec_extension_count(&entry.path, entry.is_ignored); } self.entries_by_id.edit(entries_by_id_edits, &()); @@ -3481,7 +3441,6 @@ mod tests { scan_id: 0, is_complete: true, }, - extension_counts: Default::default(), }; initial_snapshot.insert_entry( Entry::new( @@ -3763,15 +3722,6 @@ mod tests { .entry_for_path(ignore_parent_path.join(&*GITIGNORE)) .is_some()); } - - // Ensure extension counts are correct. 
- let mut expected_extension_counts = HashMap::default(); - for extension in self.entries(false).filter_map(|e| e.path.extension()) { - *expected_extension_counts - .entry(extension.into()) - .or_insert(0) += 1; - } - assert_eq!(self.extension_counts, expected_extension_counts); } fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> { From d8219545c96c867df7483deb0a1a94c34e385915 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 19 Dec 2022 16:17:27 +0100 Subject: [PATCH 02/56] :lipstick: --- crates/project/src/worktree.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 77d055d5cd..ee042ef9a3 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -36,7 +36,7 @@ use std::{ any::Any, cmp::{self, Ordering}, convert::TryFrom, - ffi::{OsStr, OsString}, + ffi::OsStr, fmt, future::Future, mem, @@ -1489,9 +1489,9 @@ impl LocalSnapshot { } } - self.entries_by_path.insert_or_replace(entry.clone(), &()); let scan_id = self.scan_id; - let removed_entry = self.entries_by_id.insert_or_replace( + self.entries_by_path.insert_or_replace(entry.clone(), &()); + self.entries_by_id.insert_or_replace( PathEntry { id: entry.id, path: entry.path.clone(), From 0ede89d82a0df42457278fcde9eb3bb69e4c82ff Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 19 Dec 2022 20:05:00 +0100 Subject: [PATCH 03/56] WIP --- ...219181850_project_reconnection_support.sql | 3 +++ crates/collab/src/integration_tests.rs | 1 + crates/rpc/proto/zed.proto | 25 +++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 crates/collab/migrations/20221219181850_project_reconnection_support.sql diff --git a/crates/collab/migrations/20221219181850_project_reconnection_support.sql b/crates/collab/migrations/20221219181850_project_reconnection_support.sql new file mode 100644 index 0000000000..6efef5571c --- /dev/null +++ b/crates/collab/migrations/20221219181850_project_reconnection_support.sql @@ -0,0 +1,3 @@ +ALTER TABLE "worktree_entries" + ADD COLUMN "scan_id" INT8, + ADD COLUMN "is_deleted" BOOL; diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 0c26486667..4d08205b81 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -6077,6 +6077,7 @@ async fn test_random_collaboration( let mut user_ids = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; + let mut can_disconnect = rng.lock().gen_bool(0.2); let mut operations = 0; while operations < max_operations { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 9528bd10b7..6da9d0a7fc 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -253,6 +253,15 @@ message ShareProjectResponse { uint64 project_id = 1; } +message ReshareProject { + uint64 id = 1; + repeated WorktreeMetadata worktrees = 2; +} + +message ReshareProjectResponse { + repeated Collaborator collaborators = 1; +} + message UnshareProject { uint64 project_id = 1; } @@ -273,6 +282,22 @@ message JoinProjectResponse { repeated LanguageServer language_servers = 4; } +message RejoinProject { + uint64 project_id = 1; + repeated RejoinWorktree worktrees = 2; +} + +message RejoinWorktree { + uint64 id = 1; + uint64 scan_id = 2; +} + +message RejoinProjectResponse { + repeated WorktreeMetadata worktrees = 1; + repeated Collaborator collaborators = 2; + repeated LanguageServer language_servers = 3; +} + 
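The three messages added above are the core of the reconnection protocol: a rejoining guest reports, per worktree, the last scan_id it observed, so the server can reply with only what changed while the guest was disconnected. As a minimal sketch of how a guest might assemble that request (mirroring the client-side code a later patch in this series adds to Room::rejoin, and assuming the prost-generated types from this file are in scope as `proto`):

    // Sketch only: `project_id` and the (worktree id, scan_id) pairs would come
    // from the guest's in-memory project state.
    fn rejoin_request(project_id: u64, worktrees: &[(u64, u64)]) -> proto::RejoinProject {
        proto::RejoinProject {
            project_id,
            worktrees: worktrees
                .iter()
                .map(|&(id, scan_id)| proto::RejoinWorktree { id, scan_id })
                .collect(),
        }
    }
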
message LeaveProject { uint64 project_id = 1; } From 67b265b3d540466fa5a529040246f1ef538f5e09 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 16 Dec 2022 16:24:42 -0800 Subject: [PATCH 04/56] Add failing integration test for resharing projects on reconnect --- crates/collab/src/integration_tests.rs | 94 +++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 4d08205b81..eeb9015876 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -15,7 +15,7 @@ use editor::{ self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, ExcerptRange, MultiBuffer, Redo, Rename, ToOffset, ToggleCodeActions, Undo, }; -use fs::{FakeFs, Fs as _, HomeDir, LineEnding}; +use fs::{FakeFs, Fs as _, HomeDir, LineEnding, RemoveOptions}; use futures::{channel::oneshot, StreamExt as _}; use gpui::{ executor::Deterministic, geometry::vector::vec2f, test::EmptyView, ModelHandle, Task, @@ -1306,6 +1306,98 @@ async fn test_host_disconnect( project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); } +#[gpui::test(iterations = 10)] +async fn test_host_reconnect( + deterministic: Arc, + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, +) { + cx_b.update(editor::init); + deterministic.forbid_parking(); + let mut server = TestServer::start(&deterministic).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + client_a + .fs + .insert_tree( + "/root", + json!({ + "dir1": { + "a.txt": "a-contents", + "b.txt": "b-contents", + "subdir1": { + "c.txt": "c-contents", + "d.txt": "d-contents", + "e.txt": "e-contents", + } + }, + "dir2": { + "x.txt": "x-contents", + }, + }), + ) + .await; + + let active_call_a = cx_a.read(ActiveCall::global); + let (project_a, _) = client_a.build_local_project("/root/dir1", cx_a).await; + let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.build_remote_project(project_id, cx_b).await; + assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + + // Drop client A's connection. + server.forbid_connections(); + server.disconnect_client(client_a.peer_id().unwrap()); + deterministic.advance_clock(RECEIVE_TIMEOUT); + project_a.read_with(cx_a, |project, _| { + assert!(project.collaborators().is_empty()) + }); + project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); + project_b.read_with(cx_b, |project, _| assert!(project.is_read_only())); + worktree_a.read_with(cx_a, |tree, _| { + assert!(!tree.as_local().unwrap().is_shared()) + }); + + // While disconnected, add and remove files from the client A's project. + client_a + .fs + .insert_tree( + "/root/dir1/subdir2", + json!({ + "f.txt": "f-contents", + "g.txt": "g-contents", + "h.txt": "h-contents", + "i.txt": "i-contents", + }), + ) + .await; + client_a + .fs + .remove_dir( + "/root/dir1/subdir1".as_ref(), + RemoveOptions { + recursive: true, + ..Default::default() + }, + ) + .await + .unwrap(); + + // Client A reconnects. Their project is re-shared, and client B re-joins it. 
+ server.allow_connections(); + deterministic.advance_clock(RECEIVE_TIMEOUT); + project_a.read_with(cx_a, |project, _| assert!(project.is_shared())); + project_b.read_with(cx_b, |project, _| assert!(!project.is_read_only())); +} + #[gpui::test(iterations = 10)] async fn test_active_call_events( deterministic: Arc, From af85db9ea56af09856181b957b53d612a8411aaf Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 16 Dec 2022 17:33:30 -0800 Subject: [PATCH 05/56] WIP - Retain hosts' project state when they disconnect --- crates/call/src/room.rs | 18 ++++++++++++--- crates/collab/src/db.rs | 51 +++++++++++++++++++---------------------- 2 files changed, 39 insertions(+), 30 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 8e2c38b3f8..1d279717f7 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -43,6 +43,7 @@ pub struct Room { id: u64, live_kit: Option, status: RoomStatus, + shared_projects: HashSet>, local_participant: LocalParticipant, remote_participants: BTreeMap, pending_participants: Vec>, @@ -132,6 +133,7 @@ impl Room { id, live_kit: live_kit_room, status: RoomStatus::Online, + shared_projects: Default::default(), participant_user_ids: Default::default(), local_participant: Default::default(), remote_participants: Default::default(), @@ -291,9 +293,18 @@ impl Room { .ok_or_else(|| anyhow!("room was dropped"))? .update(&mut cx, |this, cx| { this.status = RoomStatus::Online; - this.apply_room_update(room_proto, cx) - })?; - anyhow::Ok(()) + this.apply_room_update(room_proto, cx)?; + this.shared_projects.retain(|project| { + let Some(project) = project.upgrade(cx) else { return false }; + project.update(cx, |project, cx| { + if let Some(remote_id) = project.remote_id() { + project.shared(remote_id, cx).detach() + } + }); + true + }); + anyhow::Ok(()) + }) }; if rejoin_room.await.log_err().is_some() { @@ -666,6 +677,7 @@ impl Room { // If the user's location is in this project, it changes from UnsharedProject to SharedProject. 
this.update(&mut cx, |this, cx| { + this.shared_projects.insert(project.downgrade()); let active_project = this.local_participant.active_project.as_ref(); if active_project.map_or(false, |location| *location == project) { this.set_location(Some(&project), cx) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index b1cbddc77e..6e9c365f0f 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1601,10 +1601,11 @@ impl Database { .exec(&*tx) .await?; - let collaborator_on_projects = project_collaborator::Entity::find() + let guest_collaborators_and_projects = project_collaborator::Entity::find() .find_also_related(project::Entity) .filter( Condition::all() + .add(project_collaborator::Column::IsHost.eq(false)) .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( project_collaborator::Column::ConnectionServerId @@ -1613,40 +1614,36 @@ impl Database { ) .all(&*tx) .await?; + project_collaborator::Entity::delete_many() .filter( - Condition::all() - .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) - .add( - project_collaborator::Column::ConnectionServerId - .eq(connection.owner_id as i32), - ), + project_collaborator::Column::Id + .is_in(guest_collaborators_and_projects.iter().map(|e| e.0.id)), ) .exec(&*tx) .await?; let mut left_projects = Vec::new(); - for (_, project) in collaborator_on_projects { - if let Some(project) = project { - let collaborators = project - .find_related(project_collaborator::Entity) - .all(&*tx) - .await?; - let connection_ids = collaborators - .into_iter() - .map(|collaborator| ConnectionId { - id: collaborator.connection_id as u32, - owner_id: collaborator.connection_server_id.0 as u32, - }) - .collect(); + for (_, project) in guest_collaborators_and_projects { + let Some(project) = project else { continue }; + let collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await?; + let connection_ids = collaborators + .into_iter() + .map(|collaborator| ConnectionId { + id: collaborator.connection_id as u32, + owner_id: collaborator.connection_server_id.0 as u32, + }) + .collect(); - left_projects.push(LeftProject { - id: project.id, - host_user_id: project.host_user_id, - host_connection_id: project.host_connection()?, - connection_ids, - }); - } + left_projects.push(LeftProject { + id: project.id, + host_user_id: project.host_user_id, + host_connection_id: project.host_connection()?, + connection_ids, + }); } project::Entity::delete_many() From 70dd586be971d3883641bc6d83fc569005d051f1 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 19 Dec 2022 17:50:43 -0800 Subject: [PATCH 06/56] Start work on rejoining rooms, supplying all project info at once Co-authored-by: Nathan Sobo --- crates/call/src/room.rs | 146 ++++++++++++++++++++---------- crates/collab_ui/src/collab_ui.rs | 4 + crates/project/src/project.rs | 103 +++++++++++++-------- crates/rpc/proto/zed.proto | 63 ++++++++----- crates/rpc/src/proto.rs | 3 + 5 files changed, 207 insertions(+), 112 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 1d279717f7..14a3109517 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -7,7 +7,7 @@ use client::{ proto::{self, PeerId}, Client, TypedEnvelope, User, UserStore, }; -use collections::{BTreeMap, HashSet}; +use collections::{BTreeMap, HashMap, HashSet}; use futures::{FutureExt, StreamExt}; use gpui::{ AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle, @@ -44,6 +44,7 @@ 
pub struct Room { live_kit: Option, status: RoomStatus, shared_projects: HashSet>, + joined_projects: HashSet>, local_participant: LocalParticipant, remote_participants: BTreeMap, pending_participants: Vec>, @@ -134,6 +135,7 @@ impl Room { live_kit: live_kit_room, status: RoomStatus::Online, shared_projects: Default::default(), + joined_projects: Default::default(), participant_user_ids: Default::default(), local_participant: Default::default(), remote_participants: Default::default(), @@ -259,16 +261,15 @@ impl Room { .next() .await .map_or(false, |s| s.is_connected()); + // Even if we're initially connected, any future change of the status means we momentarily disconnected. if !is_connected || client_status.next().await.is_some() { log::info!("detected client disconnection"); - let room_id = this - .upgrade(&cx) + this.upgrade(&cx) .ok_or_else(|| anyhow!("room was dropped"))? .update(&mut cx, |this, cx| { this.status = RoomStatus::Rejoining; cx.notify(); - this.id }); // Wait for client to re-establish a connection to the server. @@ -281,40 +282,21 @@ impl Room { "waiting for client status change, remaining attempts {}", remaining_attempts ); - if let Some(status) = client_status.next().await { - if status.is_connected() { - log::info!("client reconnected, attempting to rejoin room"); - let rejoin_room = async { - let response = - client.request(proto::JoinRoom { id: room_id }).await?; - let room_proto = - response.room.ok_or_else(|| anyhow!("invalid room"))?; - this.upgrade(&cx) - .ok_or_else(|| anyhow!("room was dropped"))? - .update(&mut cx, |this, cx| { - this.status = RoomStatus::Online; - this.apply_room_update(room_proto, cx)?; - this.shared_projects.retain(|project| { - let Some(project) = project.upgrade(cx) else { return false }; - project.update(cx, |project, cx| { - if let Some(remote_id) = project.remote_id() { - project.shared(remote_id, cx).detach() - } - }); - true - }); - anyhow::Ok(()) - }) - }; + let Some(status) = client_status.next().await else { break }; + if status.is_connected() { + log::info!("client reconnected, attempting to rejoin room"); - if rejoin_room.await.log_err().is_some() { - return true; - } else { - remaining_attempts -= 1; - } + let Some(this) = this.upgrade(&cx) else { break }; + if this + .update(&mut cx, |this, cx| this.rejoin(cx)) + .await + .log_err() + .is_some() + { + return true; + } else { + remaining_attempts -= 1; } - } else { - return false; } } false @@ -351,6 +333,73 @@ impl Room { } } + fn rejoin(&mut self, cx: &mut ModelContext) -> Task> { + let mut projects = HashMap::default(); + let mut reshared_projects = Vec::new(); + let mut rejoined_projects = Vec::new(); + self.shared_projects.retain(|project| { + if let Some(handle) = project.upgrade(cx) { + let project = handle.read(cx); + if let Some(project_id) = project.remote_id() { + projects.insert(project_id, handle.clone()); + reshared_projects.push(proto::UpdateProject { + project_id, + worktrees: project.worktree_metadata_protos(cx), + }); + return true; + } + } + false + }); + self.joined_projects.retain(|project| { + if let Some(handle) = project.upgrade(cx) { + let project = handle.read(cx); + if let Some(project_id) = project.remote_id() { + rejoined_projects.push(proto::RejoinProject { + project_id, + worktrees: project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + proto::RejoinWorktree { + id: worktree.id().to_proto(), + scan_id: worktree.scan_id() as u64, + } + }) + .collect(), + }); + } + return true; + } + false + }); + + let response = 
self.client.request(proto::RejoinRoom { + id: self.id, + reshared_projects, + rejoined_projects, + }); + + cx.spawn(|this, mut cx| async move { + let response = response.await?; + let room_proto = response.room.ok_or_else(|| anyhow!("invalid room"))?; + this.update(&mut cx, |this, cx| { + this.status = RoomStatus::Online; + this.apply_room_update(room_proto, cx)?; + + for shared_project in response.reshared_projects { + if let Some(project) = projects.get(&shared_project.id) { + project.update(cx, |project, cx| { + project.reshared(shared_project, cx).log_err(); + }); + } + } + + anyhow::Ok(()) + }) + }) + } + pub fn id(&self) -> u64 { self.id } @@ -641,6 +690,17 @@ impl Room { }) } + pub fn joined_project(&mut self, project: ModelHandle, cx: &mut ModelContext) { + self.joined_projects.retain(|project| { + if let Some(project) = project.upgrade(cx) { + !project.read(cx).is_read_only() + } else { + false + } + }); + self.joined_projects.insert(project.downgrade()); + } + pub(crate) fn share_project( &mut self, project: ModelHandle, @@ -652,19 +712,7 @@ impl Room { let request = self.client.request(proto::ShareProject { room_id: self.id(), - worktrees: project - .read(cx) - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - proto::WorktreeMetadata { - id: worktree.id().to_proto(), - root_name: worktree.root_name().into(), - visible: worktree.is_visible(), - abs_path: worktree.abs_path().to_string_lossy().into(), - } - }) - .collect(), + worktrees: project.read(cx).worktree_metadata_protos(cx), }); cx.spawn(|this, mut cx| async move { let response = request.await?; diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index 1041382515..4984b84a81 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -68,6 +68,10 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { workspace.update(&mut cx, |workspace, cx| { if let Some(room) = ActiveCall::global(cx).read(cx).room().cloned() { + room.update(cx, |room, cx| { + room.joined_project(workspace.project().clone(), cx); + }); + let follow_peer_id = room .read(cx) .remote_participants() diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 7f2fcb516f..e71971c3d7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,7 +13,7 @@ use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ channel::{mpsc, oneshot}, future::Shared, - AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, + select_biased, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; use gpui::{ AnyModelHandle, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, @@ -151,7 +151,6 @@ enum ProjectClientState { remote_id: u64, metadata_changed: mpsc::UnboundedSender>, _maintain_metadata: Task<()>, - _detect_unshare: Task>, }, Remote { sharing_has_stopped: bool, @@ -552,16 +551,12 @@ impl Project { user_store .update(&mut cx, |user_store, cx| user_store.get_users(user_ids, cx)) .await?; - let mut collaborators = HashMap::default(); - for message in response.collaborators { - let collaborator = Collaborator::from_proto(message)?; - collaborators.insert(collaborator.peer_id, collaborator); - } - this.update(&mut cx, |this, _| { - this.collaborators = collaborators; + this.update(&mut cx, |this, cx| { + this.set_collaborators_from_proto(response.collaborators, cx)?; this.client_subscriptions.push(subscription); - }); + anyhow::Ok(()) + })?; Ok(this) } @@ -1055,49 +1050,39 @@ impl Project { remote_id: 
project_id, metadata_changed: metadata_changed_tx, _maintain_metadata: cx.spawn_weak(move |this, cx| async move { - while let Some(tx) = metadata_changed_rx.next().await { - let mut txs = vec![tx]; - while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { - txs.push(next_tx); + let mut txs = Vec::new(); + loop { + select_biased! { + tx = metadata_changed_rx.next().fuse() => { + let Some(tx) = tx else { break }; + txs.push(tx); + while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { + txs.push(next_tx); + } + } + status = status.next().fuse() => { + let Some(status) = status else { break }; + if !status.is_connected() { + continue + } + } } let Some(this) = this.upgrade(&cx) else { break }; this.read_with(&cx, |this, cx| { - let worktrees = this - .worktrees - .iter() - .filter_map(|worktree| { - worktree.upgrade(cx).map(|worktree| { - worktree.read(cx).as_local().unwrap().metadata_proto() - }) - }) - .collect(); this.client.request(proto::UpdateProject { project_id, - worktrees, + worktrees: this.worktree_metadata_protos(cx), }) }) .await .log_err(); - for tx in txs { + for tx in txs.drain(..) { let _ = tx.send(()); } } }), - _detect_unshare: cx.spawn_weak(move |this, mut cx| { - async move { - let is_connected = status.next().await.map_or(false, |s| s.is_connected()); - // Even if we're initially connected, any future change of the status means we momentarily disconnected. - if !is_connected || status.next().await.is_some() { - if let Some(this) = this.upgrade(&cx) { - let _ = this.update(&mut cx, |this, cx| this.unshare(cx)); - } - } - Ok(()) - } - .log_err() - }), }); cx.foreground().spawn(async move { @@ -1106,6 +1091,29 @@ impl Project { }) } + pub fn reshared( + &mut self, + message: proto::ResharedProject, + cx: &mut ModelContext, + ) -> Result<()> { + self.set_collaborators_from_proto(message.collaborators, cx)?; + Ok(()) + } + + pub fn worktree_metadata_protos(&self, cx: &AppContext) -> Vec { + self.worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + proto::WorktreeMetadata { + id: worktree.id().to_proto(), + root_name: worktree.root_name().into(), + visible: worktree.is_visible(), + abs_path: worktree.abs_path().to_string_lossy().into(), + } + }) + .collect() + } + pub fn unshare(&mut self, cx: &mut ModelContext) -> Result<()> { if self.is_remote() { return Err(anyhow!("attempted to unshare a remote project")); @@ -5637,6 +5645,25 @@ impl Project { }) } + fn set_collaborators_from_proto( + &mut self, + messages: Vec, + cx: &mut ModelContext, + ) -> Result<()> { + let mut collaborators = HashMap::default(); + for message in messages { + let collaborator = Collaborator::from_proto(message)?; + collaborators.insert(collaborator.peer_id, collaborator); + } + for old_peer_id in self.collaborators.keys() { + if !collaborators.contains_key(old_peer_id) { + cx.emit(Event::CollaboratorLeft(*old_peer_id)); + } + } + self.collaborators = collaborators; + Ok(()) + } + fn deserialize_symbol( &self, serialized_symbol: proto::Symbol, diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 6da9d0a7fc..b3322b2923 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -21,6 +21,8 @@ message Envelope { CreateRoomResponse create_room_response = 10; JoinRoom join_room = 11; JoinRoomResponse join_room_response = 12; + RejoinRoom rejoin_room = 108; + RejoinRoomResponse rejoin_room_response = 109; LeaveRoom leave_room = 13; Call call = 14; IncomingCall incoming_call = 15; @@ -161,6 +163,42 @@ message JoinRoomResponse { optional 
LiveKitConnectionInfo live_kit_connection_info = 2; } +message RejoinRoom { + uint64 id = 1; + repeated UpdateProject reshared_projects = 2; + repeated RejoinProject rejoined_projects = 3; + // relay open buffers and their vector clock +} + +message RejoinProject { + uint64 project_id = 1; + repeated RejoinWorktree worktrees = 2; +} + +message RejoinWorktree { + uint64 id = 1; + uint64 scan_id = 2; +} + +message RejoinRoomResponse { + Room room = 1; + repeated ResharedProject reshared_projects = 2; + repeated RejoinedProject rejoined_projects = 3; +} + +message ResharedProject { + uint64 id = 1; + repeated Collaborator collaborators = 2; +} + +message RejoinedProject { + uint64 id = 1; + uint32 replica_id = 2; + repeated WorktreeMetadata worktrees = 3; + repeated Collaborator collaborators = 4; + repeated LanguageServer language_servers = 5; +} + message LeaveRoom {} message Room { @@ -253,15 +291,6 @@ message ShareProjectResponse { uint64 project_id = 1; } -message ReshareProject { - uint64 id = 1; - repeated WorktreeMetadata worktrees = 2; -} - -message ReshareProjectResponse { - repeated Collaborator collaborators = 1; -} - message UnshareProject { uint64 project_id = 1; } @@ -282,22 +311,6 @@ message JoinProjectResponse { repeated LanguageServer language_servers = 4; } -message RejoinProject { - uint64 project_id = 1; - repeated RejoinWorktree worktrees = 2; -} - -message RejoinWorktree { - uint64 id = 1; - uint64 scan_id = 2; -} - -message RejoinProjectResponse { - repeated WorktreeMetadata worktrees = 1; - repeated Collaborator collaborators = 2; - repeated LanguageServer language_servers = 3; -} - message LeaveProject { uint64 project_id = 1; } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index f833db514d..ca70b7dbd9 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -188,6 +188,8 @@ messages!( (PrepareRename, Background), (PrepareRenameResponse, Background), (ProjectEntryResponse, Foreground), + (RejoinRoom, Foreground), + (RejoinRoomResponse, Foreground), (RemoveContact, Foreground), (ReloadBuffers, Foreground), (ReloadBuffersResponse, Foreground), @@ -254,6 +256,7 @@ request_messages!( (JoinChannel, JoinChannelResponse), (JoinProject, JoinProjectResponse), (JoinRoom, JoinRoomResponse), + (RejoinRoom, RejoinRoomResponse), (IncomingCall, Ack), (OpenBufferById, OpenBufferResponse), (OpenBufferByPath, OpenBufferResponse), From 1aec691b35f8208cba6dad8aadc3a13a04980fb8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 20 Dec 2022 12:03:43 +0100 Subject: [PATCH 07/56] Sketch out project reconnection routine on the server --- crates/collab/src/db.rs | 111 ++++++++++++++++++---- crates/collab/src/rpc.rs | 190 ++++++++++++++++++++++++++++++------- crates/rpc/proto/zed.proto | 14 ++- crates/rpc/src/proto.rs | 2 + 4 files changed, 257 insertions(+), 60 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6e9c365f0f..6679922855 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1319,15 +1319,7 @@ impl Database { Condition::all() .add(room_participant::Column::RoomId.eq(room_id)) .add(room_participant::Column::UserId.eq(user_id)) - .add( - Condition::any() - .add(room_participant::Column::AnsweringConnectionId.is_null()) - .add(room_participant::Column::AnsweringConnectionLost.eq(true)) - .add( - room_participant::Column::AnsweringConnectionServerId - .ne(connection.owner_id as i32), - ), - ), + .add(room_participant::Column::AnsweringConnectionId.is_null()), ) .set(room_participant::ActiveModel { 
answering_connection_id: ActiveValue::set(Some(connection.id as i32)), @@ -1349,6 +1341,15 @@ impl Database { .await } + pub async fn rejoin_room( + &self, + room_id: proto::RejoinRoom, + user_id: UserId, + connection_id: ConnectionId, + ) -> Result { + todo!() + } + pub async fn leave_room( &self, connection: ConnectionId, @@ -2287,7 +2288,18 @@ impl Database { let room_id = project.room_id; let project = Project { - collaborators, + collaborators: collaborators + .into_iter() + .map(|collaborator| ProjectCollaborator { + connection_id: ConnectionId { + owner_id: collaborator.connection_server_id.0 as u32, + id: collaborator.connection_id as u32, + }, + user_id: collaborator.user_id, + replica_id: collaborator.replica_id, + is_host: collaborator.is_host, + }) + .collect(), worktrees, language_servers: language_servers .into_iter() @@ -2354,8 +2366,8 @@ impl Database { pub async fn project_collaborators( &self, project_id: ProjectId, - connection: ConnectionId, - ) -> Result>> { + connection_id: ConnectionId, + ) -> Result>> { self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) .one(&*tx) @@ -2364,15 +2376,23 @@ impl Database { let collaborators = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.eq(project_id)) .all(&*tx) - .await?; + .await? + .into_iter() + .map(|collaborator| ProjectCollaborator { + connection_id: ConnectionId { + owner_id: collaborator.connection_server_id.0 as u32, + id: collaborator.connection_id as u32, + }, + user_id: collaborator.user_id, + replica_id: collaborator.replica_id, + is_host: collaborator.is_host, + }) + .collect::>(); - if collaborators.iter().any(|collaborator| { - let collaborator_connection = ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }; - collaborator_connection == connection - }) { + if collaborators + .iter() + .any(|collaborator| collaborator.connection_id == connection_id) + { Ok((project.room_id, collaborators)) } else { Err(anyhow!("no such project"))? 
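The rejoin_room routine above is still a stub, but the shape of the data it has to produce follows from the migration earlier in this series: worktree_entries now carry scan_id and is_deleted, so the server can diff stored entries against the scan_id a guest reports in RejoinWorktree. A rough sketch of that per-worktree delta, using hypothetical in-memory row types rather than the real SeaORM query (the actual collab implementation comes later in the series):

    // Hypothetical row shape: a stored entry plus the new scan_id/is_deleted columns.
    struct EntryRow {
        entry: proto::Entry,
        scan_id: u64,
        is_deleted: bool,
    }

    // Entries touched since the guest's last scan become updated_entries or
    // removed_entries on the RejoinedWorktree struct added below.
    fn worktree_delta(rows: &[EntryRow], guest_scan_id: u64) -> (Vec<proto::Entry>, Vec<u64>) {
        let mut updated = Vec::new();
        let mut removed = Vec::new();
        for row in rows.iter().filter(|row| row.scan_id > guest_scan_id) {
            if row.is_deleted {
                removed.push(row.entry.id);
            } else {
                updated.push(row.entry.clone());
            }
        }
        (updated, removed)
    }
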
@@ -2846,6 +2866,38 @@ id_type!(ServerId); id_type!(SignupId); id_type!(UserId); +pub struct RejoinedRoom { + pub room: proto::Room, + pub rejoined_projects: Vec, + pub reshared_projects: Vec, +} + +pub struct ResharedProject { + pub id: ProjectId, + pub old_connection_id: ConnectionId, + pub collaborators: Vec, +} + +pub struct RejoinedProject { + pub id: ProjectId, + pub old_connection_id: ConnectionId, + pub collaborators: Vec, + pub worktrees: Vec, + pub language_servers: Vec, +} + +pub struct RejoinedWorktree { + pub id: u64, + pub abs_path: String, + pub root_name: String, + pub visible: bool, + pub updated_entries: Vec, + pub removed_entries: Vec, + pub diagnostic_summaries: Vec, + pub scan_id: u64, + pub is_complete: bool, +} + pub struct LeftRoom { pub room: proto::Room, pub left_projects: HashMap, @@ -2859,11 +2911,28 @@ pub struct RefreshedRoom { } pub struct Project { - pub collaborators: Vec, + pub collaborators: Vec, pub worktrees: BTreeMap, pub language_servers: Vec, } +pub struct ProjectCollaborator { + pub connection_id: ConnectionId, + pub user_id: UserId, + pub replica_id: ReplicaId, + pub is_host: bool, +} + +impl ProjectCollaborator { + pub fn to_proto(&self) -> proto::Collaborator { + proto::Collaborator { + peer_id: Some(self.connection_id.into()), + replica_id: self.replica_id.0 as u32, + user_id: self.user_id.to_proto(), + } + } +} + pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 03e6eb50e2..beeb666da6 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -184,6 +184,7 @@ impl Server { .add_request_handler(ping) .add_request_handler(create_room) .add_request_handler(join_room) + .add_request_handler(rejoin_room) .add_message_handler(leave_room) .add_request_handler(call) .add_request_handler(cancel_call) @@ -941,6 +942,148 @@ async fn join_room( Ok(()) } +async fn rejoin_room( + request: proto::RejoinRoom, + response: Response, + session: Session, +) -> Result<()> { + let mut rejoined_room = session + .db() + .await + .rejoin_room(request, session.user_id, session.connection_id) + .await?; + + response.send(proto::RejoinRoomResponse { + room: Some(rejoined_room.room.clone()), + reshared_projects: rejoined_room + .reshared_projects + .iter() + .map(|project| proto::ResharedProject { + id: project.id.to_proto(), + collaborators: project + .collaborators + .iter() + .map(|collaborator| collaborator.to_proto()) + .collect(), + }) + .collect(), + rejoined_projects: rejoined_room + .rejoined_projects + .iter() + .map(|rejoined_project| proto::RejoinedProject { + id: rejoined_project.id.to_proto(), + worktrees: rejoined_project + .worktrees + .iter() + .map(|worktree| proto::WorktreeMetadata { + id: worktree.id, + root_name: worktree.root_name.clone(), + visible: worktree.visible, + abs_path: worktree.abs_path.clone(), + }) + .collect(), + collaborators: rejoined_project + .collaborators + .iter() + .map(|collaborator| collaborator.to_proto()) + .collect(), + language_servers: rejoined_project.language_servers.clone(), + }) + .collect(), + })?; + room_updated(&rejoined_room.room, &session.peer); + + // Notify other participants about this peer's reconnection to projects. 
+ for project in &rejoined_room.reshared_projects { + for collaborator in &project.collaborators { + if collaborator.connection_id != session.connection_id { + session + .peer + .send( + collaborator.connection_id, + proto::UpdateProjectCollaborator { + project_id: project.id.to_proto(), + old_peer_id: Some(project.old_connection_id.into()), + new_peer_id: Some(session.connection_id.into()), + }, + ) + .trace_err(); + } + } + } + for project in &rejoined_room.rejoined_projects { + for collaborator in &project.collaborators { + if collaborator.connection_id != session.connection_id { + session + .peer + .send( + collaborator.connection_id, + proto::UpdateProjectCollaborator { + project_id: project.id.to_proto(), + old_peer_id: Some(project.old_connection_id.into()), + new_peer_id: Some(session.connection_id.into()), + }, + ) + .trace_err(); + } + } + } + + for project in &mut rejoined_room.rejoined_projects { + for worktree in mem::take(&mut project.worktrees) { + #[cfg(any(test, feature = "test-support"))] + const MAX_CHUNK_SIZE: usize = 2; + #[cfg(not(any(test, feature = "test-support")))] + const MAX_CHUNK_SIZE: usize = 256; + + // Stream this worktree's entries. + let message = proto::UpdateWorktree { + project_id: project.id.to_proto(), + worktree_id: worktree.id, + abs_path: worktree.abs_path.clone(), + root_name: worktree.root_name, + updated_entries: worktree.updated_entries, + removed_entries: worktree.removed_entries, + scan_id: worktree.scan_id, + is_last_update: worktree.is_complete, + }; + for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { + session.peer.send(session.connection_id, update.clone())?; + } + + // Stream this worktree's diagnostics. + for summary in worktree.diagnostic_summaries { + session.peer.send( + session.connection_id, + proto::UpdateDiagnosticSummary { + project_id: project.id.to_proto(), + worktree_id: worktree.id, + summary: Some(summary), + }, + )?; + } + } + + for language_server in &project.language_servers { + session.peer.send( + session.connection_id, + proto::UpdateLanguageServer { + project_id: project.id.to_proto(), + language_server_id: language_server.id, + variant: Some( + proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated( + proto::LspDiskBasedDiagnosticsUpdated {}, + ), + ), + }, + )?; + } + } + + update_user_contacts(session.user_id, &session).await?; + Ok(()) +} + async fn leave_room(_message: proto::LeaveRoom, session: Session) -> Result<()> { leave_room_for_session(&session).await } @@ -1160,18 +1303,8 @@ async fn join_project( let collaborators = project .collaborators .iter() - .map(|collaborator| { - let peer_id = proto::PeerId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }; - proto::Collaborator { - peer_id: Some(peer_id), - replica_id: collaborator.replica_id.0 as u32, - user_id: collaborator.user_id.to_proto(), - } - }) - .filter(|collaborator| collaborator.peer_id != Some(session.connection_id.into())) + .filter(|collaborator| collaborator.connection_id != session.connection_id) + .map(|collaborator| collaborator.to_proto()) .collect::>(); let worktrees = project .worktrees @@ -1413,14 +1546,11 @@ where .await .project_collaborators(project_id, session.connection_id) .await?; - let host = collaborators + collaborators .iter() .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - ConnectionId { - owner_id: host.connection_server_id.0 as u32, - id: host.connection_id as u32, - } + .ok_or_else(|| 
anyhow!("host not found"))? + .connection_id }; let payload = session @@ -1444,14 +1574,11 @@ async fn save_buffer( .await .project_collaborators(project_id, session.connection_id) .await?; - let host = collaborators + collaborators .iter() .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - ConnectionId { - owner_id: host.connection_server_id.0 as u32, - id: host.connection_id as u32, - } + .ok_or_else(|| anyhow!("host not found"))? + .connection_id }; let response_payload = session .peer @@ -1463,17 +1590,10 @@ async fn save_buffer( .await .project_collaborators(project_id, session.connection_id) .await?; - collaborators.retain(|collaborator| { - let collaborator_connection = ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }; - collaborator_connection != session.connection_id - }); - let project_connection_ids = collaborators.iter().map(|collaborator| ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }); + collaborators.retain(|collaborator| collaborator.connection_id != session.connection_id); + let project_connection_ids = collaborators + .iter() + .map(|collaborator| collaborator.connection_id); broadcast(host_connection_id, project_connection_ids, |conn_id| { session .peer diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index b3322b2923..51cc8aa6cd 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -39,6 +39,7 @@ message Envelope { JoinProjectResponse join_project_response = 25; LeaveProject leave_project = 26; AddProjectCollaborator add_project_collaborator = 27; + UpdateProjectCollaborator update_project_collaborator = 110; RemoveProjectCollaborator remove_project_collaborator = 28; GetDefinition get_definition = 29; @@ -193,10 +194,9 @@ message ResharedProject { message RejoinedProject { uint64 id = 1; - uint32 replica_id = 2; - repeated WorktreeMetadata worktrees = 3; - repeated Collaborator collaborators = 4; - repeated LanguageServer language_servers = 5; + repeated WorktreeMetadata worktrees = 2; + repeated Collaborator collaborators = 3; + repeated LanguageServer language_servers = 4; } message LeaveRoom {} @@ -360,6 +360,12 @@ message AddProjectCollaborator { Collaborator collaborator = 2; } +message UpdateProjectCollaborator { + uint64 project_id = 1; + PeerId old_peer_id = 2; + PeerId new_peer_id = 3; +} + message RemoveProjectCollaborator { uint64 project_id = 1; PeerId peer_id = 2; diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index ca70b7dbd9..b2017b839a 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -219,6 +219,7 @@ messages!( (UpdateLanguageServer, Foreground), (UpdateParticipantLocation, Foreground), (UpdateProject, Foreground), + (UpdateProjectCollaborator, Foreground), (UpdateWorktree, Foreground), (UpdateDiffBase, Background), (GetPrivateUserInfo, Foreground), @@ -322,6 +323,7 @@ entity_messages!( UpdateFollowers, UpdateLanguageServer, UpdateProject, + UpdateProjectCollaborator, UpdateWorktree, UpdateDiffBase ); From 1a3940a12ef676e8dfd2e4ab73774694596159e5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 20 Dec 2022 14:51:46 +0100 Subject: [PATCH 08/56] Fix project reconnection test to ensure rooms actually reconnects --- crates/call/src/room.rs | 15 ++++- crates/collab/src/db.rs | 67 ++--------------------- crates/collab/src/integration_tests.rs | 76 +++++++++++++++++++++++--- crates/collab/src/rpc.rs 
| 9 +-- crates/project/src/project.rs | 25 +++------ 5 files changed, 94 insertions(+), 98 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 14a3109517..9480297c0e 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -355,6 +355,7 @@ impl Room { if let Some(handle) = project.upgrade(cx) { let project = handle.read(cx); if let Some(project_id) = project.remote_id() { + projects.insert(project_id, handle.clone()); rejoined_projects.push(proto::RejoinProject { project_id, worktrees: project @@ -387,10 +388,18 @@ impl Room { this.status = RoomStatus::Online; this.apply_room_update(room_proto, cx)?; - for shared_project in response.reshared_projects { - if let Some(project) = projects.get(&shared_project.id) { + for reshared_project in response.reshared_projects { + if let Some(project) = projects.get(&reshared_project.id) { project.update(cx, |project, cx| { - project.reshared(shared_project, cx).log_err(); + project.reshared(reshared_project, cx).log_err(); + }); + } + } + + for rejoined_project in response.rejoined_projects { + if let Some(project) = projects.get(&rejoined_project.id) { + project.update(cx, |project, cx| { + project.rejoined(rejoined_project, cx).log_err(); }); } } diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6679922855..14be4e2732 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1347,6 +1347,7 @@ impl Database { user_id: UserId, connection_id: ConnectionId, ) -> Result { + println!("=============="); todo!() } @@ -1573,11 +1574,8 @@ impl Database { .await } - pub async fn connection_lost( - &self, - connection: ConnectionId, - ) -> Result>> { - self.room_transaction(|tx| async move { + pub async fn connection_lost(&self, connection: ConnectionId) -> Result<()> { + self.transaction(|tx| async move { let participant = room_participant::Entity::find() .filter( Condition::all() @@ -1593,7 +1591,6 @@ impl Database { .one(&*tx) .await? 
.ok_or_else(|| anyhow!("not a participant in any room"))?; - let room_id = participant.room_id; room_participant::Entity::update(room_participant::ActiveModel { answering_connection_lost: ActiveValue::set(true), @@ -1602,63 +1599,7 @@ impl Database { .exec(&*tx) .await?; - let guest_collaborators_and_projects = project_collaborator::Entity::find() - .find_also_related(project::Entity) - .filter( - Condition::all() - .add(project_collaborator::Column::IsHost.eq(false)) - .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) - .add( - project_collaborator::Column::ConnectionServerId - .eq(connection.owner_id as i32), - ), - ) - .all(&*tx) - .await?; - - project_collaborator::Entity::delete_many() - .filter( - project_collaborator::Column::Id - .is_in(guest_collaborators_and_projects.iter().map(|e| e.0.id)), - ) - .exec(&*tx) - .await?; - - let mut left_projects = Vec::new(); - for (_, project) in guest_collaborators_and_projects { - let Some(project) = project else { continue }; - let collaborators = project - .find_related(project_collaborator::Entity) - .all(&*tx) - .await?; - let connection_ids = collaborators - .into_iter() - .map(|collaborator| ConnectionId { - id: collaborator.connection_id as u32, - owner_id: collaborator.connection_server_id.0 as u32, - }) - .collect(); - - left_projects.push(LeftProject { - id: project.id, - host_user_id: project.host_user_id, - host_connection_id: project.host_connection()?, - connection_ids, - }); - } - - project::Entity::delete_many() - .filter( - Condition::all() - .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add( - project::Column::HostConnectionServerId.eq(connection.owner_id as i32), - ), - ) - .exec(&*tx) - .await?; - - Ok((room_id, left_projects)) + Ok(()) }) .await } diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index eeb9015876..2b41143d37 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1351,19 +1351,27 @@ async fn test_host_reconnect( .unwrap(); let project_b = client_b.build_remote_project(project_id, cx_b).await; - assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + deterministic.run_until_parked(); + + let worktree_id = worktree_a.read_with(cx_a, |worktree, _| { + assert!(worktree.as_local().unwrap().is_shared()); + worktree.id() + }); // Drop client A's connection. server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); deterministic.advance_clock(RECEIVE_TIMEOUT); project_a.read_with(cx_a, |project, _| { - assert!(project.collaborators().is_empty()) + assert!(project.is_shared()); + assert_eq!(project.collaborators().len(), 1); + }); + project_b.read_with(cx_b, |project, _| { + assert!(!project.is_read_only()); + assert_eq!(project.collaborators().len(), 1); }); - project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); - project_b.read_with(cx_b, |project, _| assert!(project.is_read_only())); worktree_a.read_with(cx_a, |tree, _| { - assert!(!tree.as_local().unwrap().is_shared()) + assert!(tree.as_local().unwrap().is_shared()) }); // While disconnected, add and remove files from the client A's project. @@ -1393,9 +1401,60 @@ async fn test_host_reconnect( // Client A reconnects. Their project is re-shared, and client B re-joins it. 
server.allow_connections(); - deterministic.advance_clock(RECEIVE_TIMEOUT); - project_a.read_with(cx_a, |project, _| assert!(project.is_shared())); - project_b.read_with(cx_b, |project, _| assert!(!project.is_read_only())); + client_a + .authenticate_and_connect(false, &cx_a.to_async()) + .await + .unwrap(); + deterministic.run_until_parked(); + project_a.read_with(cx_a, |project, cx| { + assert!(project.is_shared()); + assert_eq!( + worktree_a + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec![ + "a.txt", + "b.txt", + "subdir1", + "subdir1/c.txt", + "subdir1/d.txt", + "subdir1/e.txt", + "subdir2", + "subdir2/f.txt", + "subdir2/g.txt", + "subdir2/h.txt", + "subdir2/i.txt" + ] + ); + }); + project_b.read_with(cx_b, |project, cx| { + assert!(!project.is_read_only()); + let worktree_b = project.worktree_for_id(worktree_id, cx).unwrap(); + assert_eq!( + worktree_b + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec![ + "a.txt", + "b.txt", + "subdir1", + "subdir1/c.txt", + "subdir1/d.txt", + "subdir1/e.txt", + "subdir2", + "subdir2/f.txt", + "subdir2/g.txt", + "subdir2/h.txt", + "subdir2/i.txt" + ] + ); + }); } #[gpui::test(iterations = 10)] @@ -6169,7 +6228,6 @@ async fn test_random_collaboration( let mut user_ids = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; - let mut can_disconnect = rng.lock().gen_bool(0.2); let mut operations = 0; while operations < max_operations { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index beeb666da6..d75605d49a 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -799,17 +799,12 @@ async fn sign_out( .await .remove_connection(session.connection_id)?; - if let Some(mut left_projects) = session + session .db() .await .connection_lost(session.connection_id) .await - .trace_err() - { - for left_project in mem::take(&mut *left_projects) { - project_left(&left_project, &session); - } - } + .trace_err(); futures::select_biased! { _ = executor.sleep(RECONNECT_TIMEOUT).fuse() => { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index e71971c3d7..c260fd9449 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -156,7 +156,6 @@ enum ProjectClientState { sharing_has_stopped: bool, remote_id: u64, replica_id: ReplicaId, - _detect_unshare: Task>, }, } @@ -495,21 +494,6 @@ impl Project { sharing_has_stopped: false, remote_id, replica_id, - _detect_unshare: cx.spawn_weak(move |this, mut cx| { - async move { - let mut status = client.status(); - let is_connected = - status.next().await.map_or(false, |s| s.is_connected()); - // Even if we're initially connected, any future change of the status means we momentarily disconnected. 
- if !is_connected || status.next().await.is_some() { - if let Some(this) = this.upgrade(&cx) { - this.update(&mut cx, |this, cx| this.disconnected_from_host(cx)) - } - } - Ok(()) - } - .log_err() - }), }), language_servers: Default::default(), language_server_ids: Default::default(), @@ -1100,6 +1084,15 @@ impl Project { Ok(()) } + pub fn rejoined( + &mut self, + message: proto::RejoinedProject, + cx: &mut ModelContext, + ) -> Result<()> { + self.set_collaborators_from_proto(message.collaborators, cx)?; + Ok(()) + } + pub fn worktree_metadata_protos(&self, cx: &AppContext) -> Vec { self.worktrees(cx) .map(|worktree| { From 52babc51a04d9d3224cbfbab790ec99b903ba986 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 20 Dec 2022 17:30:58 +0100 Subject: [PATCH 09/56] Make host reconnection test pass when mutating worktree while offline --- crates/call/src/room.rs | 2 +- crates/collab/src/db.rs | 157 ++++++++++--- crates/collab/src/db/project_collaborator.rs | 10 + crates/collab/src/rpc.rs | 222 ++++++++++--------- crates/project/src/project.rs | 11 + crates/project/src/worktree.rs | 112 +++++----- crates/rpc/proto/zed.proto | 2 +- 7 files changed, 319 insertions(+), 197 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 9480297c0e..4549497962 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -357,7 +357,7 @@ impl Room { if let Some(project_id) = project.remote_id() { projects.insert(project_id, handle.clone()); rejoined_projects.push(proto::RejoinProject { - project_id, + id: project_id, worktrees: project .worktrees(cx) .map(|worktree| { diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 14be4e2732..0d0d30edc1 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1343,12 +1343,116 @@ impl Database { pub async fn rejoin_room( &self, - room_id: proto::RejoinRoom, + rejoin_room: proto::RejoinRoom, user_id: UserId, - connection_id: ConnectionId, - ) -> Result { - println!("=============="); - todo!() + connection: ConnectionId, + ) -> Result> { + self.room_transaction(|tx| async { + let tx = tx; + let room_id = RoomId::from_proto(rejoin_room.id); + let participant_update = room_participant::Entity::update_many() + .filter( + Condition::all() + .add(room_participant::Column::RoomId.eq(room_id)) + .add(room_participant::Column::UserId.eq(user_id)) + .add(room_participant::Column::AnsweringConnectionId.is_not_null()) + .add( + Condition::any() + .add(room_participant::Column::AnsweringConnectionLost.eq(true)) + .add( + room_participant::Column::AnsweringConnectionServerId + .ne(connection.owner_id as i32), + ), + ), + ) + .set(room_participant::ActiveModel { + answering_connection_id: ActiveValue::set(Some(connection.id as i32)), + answering_connection_server_id: ActiveValue::set(Some(ServerId( + connection.owner_id as i32, + ))), + answering_connection_lost: ActiveValue::set(false), + ..Default::default() + }) + .exec(&*tx) + .await?; + if participant_update.rows_affected == 0 { + Err(anyhow!("room does not exist or was already joined"))? + } else { + let mut reshared_projects = Vec::new(); + for reshared_project in &rejoin_room.reshared_projects { + let project_id = ProjectId::from_proto(reshared_project.project_id); + let project = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? 
+ .ok_or_else(|| anyhow!("project does not exist"))?; + if project.host_user_id != user_id { + return Err(anyhow!("no such project"))?; + } + + let mut collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await?; + let host_ix = collaborators + .iter() + .position(|collaborator| { + collaborator.user_id == user_id && collaborator.is_host + }) + .ok_or_else(|| anyhow!("host not found among collaborators"))?; + let host = collaborators.swap_remove(host_ix); + let old_connection_id = host.connection(); + + project::Entity::update(project::ActiveModel { + host_connection_id: ActiveValue::set(Some(connection.id as i32)), + host_connection_server_id: ActiveValue::set(Some(ServerId( + connection.owner_id as i32, + ))), + ..project.into_active_model() + }) + .exec(&*tx) + .await?; + project_collaborator::Entity::update(project_collaborator::ActiveModel { + connection_id: ActiveValue::set(connection.id as i32), + connection_server_id: ActiveValue::set(ServerId( + connection.owner_id as i32, + )), + ..host.into_active_model() + }) + .exec(&*tx) + .await?; + + reshared_projects.push(ResharedProject { + id: project_id, + old_connection_id, + collaborators: collaborators + .iter() + .map(|collaborator| ProjectCollaborator { + connection_id: collaborator.connection(), + user_id: collaborator.user_id, + replica_id: collaborator.replica_id, + is_host: collaborator.is_host, + }) + .collect(), + worktrees: reshared_project.worktrees.clone(), + }); + } + + // TODO: handle unshared projects + // TODO: handle left projects + + let room = self.get_room(room_id, &tx).await?; + Ok(( + room_id, + RejoinedRoom { + room, + // TODO: handle rejoined projects + rejoined_projects: Default::default(), + reshared_projects, + }, + )) + } + }) + .await } pub async fn leave_room( @@ -1447,10 +1551,7 @@ impl Database { host_connection_id: Default::default(), }); - let collaborator_connection_id = ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }; + let collaborator_connection_id = collaborator.connection(); if collaborator_connection_id != connection { left_project.connection_ids.push(collaborator_connection_id); } @@ -2232,10 +2333,7 @@ impl Database { collaborators: collaborators .into_iter() .map(|collaborator| ProjectCollaborator { - connection_id: ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }, + connection_id: collaborator.connection(), user_id: collaborator.user_id, replica_id: collaborator.replica_id, is_host: collaborator.is_host, @@ -2287,10 +2385,7 @@ impl Database { .await?; let connection_ids = collaborators .into_iter() - .map(|collaborator| ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }) + .map(|collaborator| collaborator.connection()) .collect(); let left_project = LeftProject { @@ -2320,10 +2415,7 @@ impl Database { .await? .into_iter() .map(|collaborator| ProjectCollaborator { - connection_id: ConnectionId { - owner_id: collaborator.connection_server_id.0 as u32, - id: collaborator.connection_id as u32, - }, + connection_id: collaborator.connection(), user_id: collaborator.user_id, replica_id: collaborator.replica_id, is_host: collaborator.is_host, @@ -2352,18 +2444,15 @@ impl Database { .one(&*tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; - let mut participants = project_collaborator::Entity::find() + let mut collaborators = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.eq(project_id)) .stream(&*tx) .await?; let mut connection_ids = HashSet::default(); - while let Some(participant) = participants.next().await { - let participant = participant?; - connection_ids.insert(ConnectionId { - owner_id: participant.connection_server_id.0 as u32, - id: participant.connection_id as u32, - }); + while let Some(collaborator) = collaborators.next().await { + let collaborator = collaborator?; + connection_ids.insert(collaborator.connection()); } if connection_ids.contains(&connection_id) { @@ -2380,7 +2469,7 @@ impl Database { project_id: ProjectId, tx: &DatabaseTransaction, ) -> Result> { - let mut participants = project_collaborator::Entity::find() + let mut collaborators = project_collaborator::Entity::find() .filter( project_collaborator::Column::ProjectId .eq(project_id) @@ -2390,12 +2479,9 @@ impl Database { .await?; let mut guest_connection_ids = Vec::new(); - while let Some(participant) = participants.next().await { - let participant = participant?; - guest_connection_ids.push(ConnectionId { - owner_id: participant.connection_server_id.0 as u32, - id: participant.connection_id as u32, - }); + while let Some(collaborator) = collaborators.next().await { + let collaborator = collaborator?; + guest_connection_ids.push(collaborator.connection()); } Ok(guest_connection_ids) } @@ -2817,6 +2903,7 @@ pub struct ResharedProject { pub id: ProjectId, pub old_connection_id: ConnectionId, pub collaborators: Vec, + pub worktrees: Vec, } pub struct RejoinedProject { diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index a1a99d1170..60b5f284e9 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -1,4 +1,5 @@ use super::{ProjectCollaboratorId, ProjectId, ReplicaId, ServerId, UserId}; +use rpc::ConnectionId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] @@ -14,6 +15,15 @@ pub struct Model { pub is_host: bool, } +impl Model { + pub fn connection(&self) -> ConnectionId { + ConnectionId { + owner_id: self.connection_server_id.0 as u32, + id: self.connection_id as u32, + } + } +} + #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] pub enum Relation { #[sea_orm( diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d75605d49a..87876ed651 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -942,56 +942,89 @@ async fn rejoin_room( response: Response, session: Session, ) -> Result<()> { - let mut rejoined_room = session - .db() - .await - .rejoin_room(request, session.user_id, session.connection_id) - .await?; + { + let mut rejoined_room = session + .db() + .await + .rejoin_room(request, session.user_id, session.connection_id) + .await?; - response.send(proto::RejoinRoomResponse { - room: Some(rejoined_room.room.clone()), - reshared_projects: rejoined_room - .reshared_projects - .iter() - .map(|project| proto::ResharedProject { - id: project.id.to_proto(), - collaborators: project + response.send(proto::RejoinRoomResponse { + room: Some(rejoined_room.room.clone()), + reshared_projects: rejoined_room + .reshared_projects + .iter() + .map(|project| proto::ResharedProject { + id: project.id.to_proto(), + collaborators: project + .collaborators + .iter() + .map(|collaborator| 
collaborator.to_proto()) + .collect(), + }) + .collect(), + rejoined_projects: rejoined_room + .rejoined_projects + .iter() + .map(|rejoined_project| proto::RejoinedProject { + id: rejoined_project.id.to_proto(), + worktrees: rejoined_project + .worktrees + .iter() + .map(|worktree| proto::WorktreeMetadata { + id: worktree.id, + root_name: worktree.root_name.clone(), + visible: worktree.visible, + abs_path: worktree.abs_path.clone(), + }) + .collect(), + collaborators: rejoined_project + .collaborators + .iter() + .map(|collaborator| collaborator.to_proto()) + .collect(), + language_servers: rejoined_project.language_servers.clone(), + }) + .collect(), + })?; + room_updated(&rejoined_room.room, &session.peer); + + for project in &rejoined_room.reshared_projects { + for collaborator in &project.collaborators { + session + .peer + .send( + collaborator.connection_id, + proto::UpdateProjectCollaborator { + project_id: project.id.to_proto(), + old_peer_id: Some(project.old_connection_id.into()), + new_peer_id: Some(session.connection_id.into()), + }, + ) + .trace_err(); + } + + broadcast( + session.connection_id, + project .collaborators .iter() - .map(|collaborator| collaborator.to_proto()) - .collect(), - }) - .collect(), - rejoined_projects: rejoined_room - .rejoined_projects - .iter() - .map(|rejoined_project| proto::RejoinedProject { - id: rejoined_project.id.to_proto(), - worktrees: rejoined_project - .worktrees - .iter() - .map(|worktree| proto::WorktreeMetadata { - id: worktree.id, - root_name: worktree.root_name.clone(), - visible: worktree.visible, - abs_path: worktree.abs_path.clone(), - }) - .collect(), - collaborators: rejoined_project - .collaborators - .iter() - .map(|collaborator| collaborator.to_proto()) - .collect(), - language_servers: rejoined_project.language_servers.clone(), - }) - .collect(), - })?; - room_updated(&rejoined_room.room, &session.peer); + .map(|collaborator| collaborator.connection_id), + |connection_id| { + session.peer.forward_send( + session.connection_id, + connection_id, + proto::UpdateProject { + project_id: project.id.to_proto(), + worktrees: project.worktrees.clone(), + }, + ) + }, + ); + } - // Notify other participants about this peer's reconnection to projects. - for project in &rejoined_room.reshared_projects { - for collaborator in &project.collaborators { - if collaborator.connection_id != session.connection_id { + for project in &rejoined_room.rejoined_projects { + for collaborator in &project.collaborators { session .peer .send( @@ -1005,74 +1038,57 @@ async fn rejoin_room( .trace_err(); } } - } - for project in &rejoined_room.rejoined_projects { - for collaborator in &project.collaborators { - if collaborator.connection_id != session.connection_id { - session - .peer - .send( - collaborator.connection_id, - proto::UpdateProjectCollaborator { + + for project in &mut rejoined_room.rejoined_projects { + for worktree in mem::take(&mut project.worktrees) { + #[cfg(any(test, feature = "test-support"))] + const MAX_CHUNK_SIZE: usize = 2; + #[cfg(not(any(test, feature = "test-support")))] + const MAX_CHUNK_SIZE: usize = 256; + + // Stream this worktree's entries. 
+ let message = proto::UpdateWorktree { + project_id: project.id.to_proto(), + worktree_id: worktree.id, + abs_path: worktree.abs_path.clone(), + root_name: worktree.root_name, + updated_entries: worktree.updated_entries, + removed_entries: worktree.removed_entries, + scan_id: worktree.scan_id, + is_last_update: worktree.is_complete, + }; + for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { + session.peer.send(session.connection_id, update.clone())?; + } + + // Stream this worktree's diagnostics. + for summary in worktree.diagnostic_summaries { + session.peer.send( + session.connection_id, + proto::UpdateDiagnosticSummary { project_id: project.id.to_proto(), - old_peer_id: Some(project.old_connection_id.into()), - new_peer_id: Some(session.connection_id.into()), + worktree_id: worktree.id, + summary: Some(summary), }, - ) - .trace_err(); - } - } - } - - for project in &mut rejoined_room.rejoined_projects { - for worktree in mem::take(&mut project.worktrees) { - #[cfg(any(test, feature = "test-support"))] - const MAX_CHUNK_SIZE: usize = 2; - #[cfg(not(any(test, feature = "test-support")))] - const MAX_CHUNK_SIZE: usize = 256; - - // Stream this worktree's entries. - let message = proto::UpdateWorktree { - project_id: project.id.to_proto(), - worktree_id: worktree.id, - abs_path: worktree.abs_path.clone(), - root_name: worktree.root_name, - updated_entries: worktree.updated_entries, - removed_entries: worktree.removed_entries, - scan_id: worktree.scan_id, - is_last_update: worktree.is_complete, - }; - for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { - session.peer.send(session.connection_id, update.clone())?; + )?; + } } - // Stream this worktree's diagnostics. - for summary in worktree.diagnostic_summaries { + for language_server in &project.language_servers { session.peer.send( session.connection_id, - proto::UpdateDiagnosticSummary { + proto::UpdateLanguageServer { project_id: project.id.to_proto(), - worktree_id: worktree.id, - summary: Some(summary), + language_server_id: language_server.id, + variant: Some( + proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated( + proto::LspDiskBasedDiagnosticsUpdated {}, + ), + ), }, )?; } } - - for language_server in &project.language_servers { - session.peer.send( - session.connection_id, - proto::UpdateLanguageServer { - project_id: project.id.to_proto(), - language_server_id: language_server.id, - variant: Some( - proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated( - proto::LspDiskBasedDiagnosticsUpdated {}, - ), - ), - }, - )?; - } } update_user_contacts(session.user_id, &session).await?; diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index c260fd9449..182a169dd0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1081,6 +1081,17 @@ impl Project { cx: &mut ModelContext, ) -> Result<()> { self.set_collaborators_from_proto(message.collaborators, cx)?; + for worktree in self.worktrees.iter() { + if let Some(worktree) = worktree.upgrade(&cx) { + worktree.update(cx, |worktree, _| { + if let Some(worktree) = worktree.as_local_mut() { + worktree.reshare() + } else { + Ok(()) + } + })?; + } + } Ok(()) } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index ee042ef9a3..7961c05506 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -166,6 +166,7 @@ enum ScanState { struct ShareState { project_id: u64, snapshots_tx: watch::Sender, + reshared: watch::Sender<()>, 
_maintain_remote_snapshot: Task>, } @@ -967,9 +968,11 @@ impl LocalWorktree { let (share_tx, share_rx) = oneshot::channel(); if self.share.is_some() { - let _ = share_tx.send(Ok(())); + let _ = share_tx.send(()); } else { let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot()); + let (reshared_tx, mut reshared_rx) = watch::channel(); + let _ = reshared_rx.try_recv(); let worktree_id = cx.model_id() as u64; for (path, summary) in self.diagnostic_summaries.iter() { @@ -982,47 +985,48 @@ impl LocalWorktree { } } - let maintain_remote_snapshot = cx.background().spawn({ - let rpc = self.client.clone(); + let _maintain_remote_snapshot = cx.background().spawn({ + let client = self.client.clone(); async move { - let mut prev_snapshot = match snapshots_rx.recv().await { - Some(snapshot) => { - let update = proto::UpdateWorktree { - project_id, - worktree_id, - abs_path: snapshot.abs_path().to_string_lossy().into(), - root_name: snapshot.root_name().to_string(), - updated_entries: snapshot - .entries_by_path - .iter() - .map(Into::into) - .collect(), - removed_entries: Default::default(), - scan_id: snapshot.scan_id as u64, - is_last_update: true, - }; - if let Err(error) = send_worktree_update(&rpc, update).await { - let _ = share_tx.send(Err(error)); - return Err(anyhow!("failed to send initial update worktree")); - } else { - let _ = share_tx.send(Ok(())); - snapshot + let mut share_tx = Some(share_tx); + let mut prev_snapshot = LocalSnapshot { + ignores_by_parent_abs_path: Default::default(), + git_repositories: Default::default(), + removed_entry_ids: Default::default(), + next_entry_id: Default::default(), + snapshot: Snapshot { + id: WorktreeId(worktree_id as usize), + abs_path: Path::new("").into(), + root_name: Default::default(), + root_char_bag: Default::default(), + entries_by_path: Default::default(), + entries_by_id: Default::default(), + scan_id: 0, + is_complete: true, + }, + }; + while let Some(snapshot) = snapshots_rx.recv().await { + #[cfg(any(test, feature = "test-support"))] + const MAX_CHUNK_SIZE: usize = 2; + #[cfg(not(any(test, feature = "test-support")))] + const MAX_CHUNK_SIZE: usize = 256; + + let update = + snapshot.build_update(&prev_snapshot, project_id, worktree_id, true); + for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) { + while let Err(error) = client.request(update.clone()).await { + log::error!("failed to send worktree update: {}", error); + log::info!("waiting for worktree to be reshared"); + if reshared_rx.next().await.is_none() { + return Ok(()); + } } } - None => { - share_tx - .send(Err(anyhow!("worktree dropped before share completed"))) - .ok(); - return Err(anyhow!("failed to send initial update worktree")); - } - }; - while let Some(snapshot) = snapshots_rx.recv().await { - send_worktree_update( - &rpc, - snapshot.build_update(&prev_snapshot, project_id, worktree_id, true), - ) - .await?; + if let Some(share_tx) = share_tx.take() { + let _ = share_tx.send(()); + } + prev_snapshot = snapshot; } @@ -1034,21 +1038,28 @@ impl LocalWorktree { self.share = Some(ShareState { project_id, snapshots_tx, - _maintain_remote_snapshot: maintain_remote_snapshot, + reshared: reshared_tx, + _maintain_remote_snapshot, }); } - cx.foreground().spawn(async move { - share_rx - .await - .unwrap_or_else(|_| Err(anyhow!("share ended"))) - }) + cx.foreground() + .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) }) } pub fn unshare(&mut self) { self.share.take(); } + pub fn reshare(&mut self) -> Result<()> { + let share = 
self + .share + .as_mut() + .ok_or_else(|| anyhow!("can't reshare a worktree that wasn't shared"))?; + *share.reshared.borrow_mut() = (); + Ok(()) + } + pub fn is_shared(&self) -> bool { self.share.is_some() } @@ -2936,19 +2947,6 @@ impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry { } } -async fn send_worktree_update(client: &Arc, update: proto::UpdateWorktree) -> Result<()> { - #[cfg(any(test, feature = "test-support"))] - const MAX_CHUNK_SIZE: usize = 2; - #[cfg(not(any(test, feature = "test-support")))] - const MAX_CHUNK_SIZE: usize = 256; - - for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) { - client.request(update).await?; - } - - Ok(()) -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 51cc8aa6cd..a010ddf2ea 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -172,7 +172,7 @@ message RejoinRoom { } message RejoinProject { - uint64 project_id = 1; + uint64 id = 1; repeated RejoinWorktree worktrees = 2; } From d31fd9bbf21a31cd4ded128083f1f273bb7cc066 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 20 Dec 2022 17:42:08 +0100 Subject: [PATCH 10/56] Support adding worktrees to project while host is offline --- crates/collab/src/db.rs | 68 +++++++++++++++----------- crates/collab/src/integration_tests.rs | 54 +++++++++++++++++--- crates/project/src/project.rs | 33 +++++-------- 3 files changed, 96 insertions(+), 59 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 0d0d30edc1..55dbb3bd3d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1421,6 +1421,9 @@ impl Database { .exec(&*tx) .await?; + self.update_project_worktrees(project_id, &reshared_project.worktrees, &tx) + .await?; + reshared_projects.push(ResharedProject { id: project_id, old_connection_id, @@ -1970,35 +1973,7 @@ impl Database { .await? 
.ok_or_else(|| anyhow!("no such project"))?; - if !worktrees.is_empty() { - worktree::Entity::insert_many(worktrees.iter().map(|worktree| { - worktree::ActiveModel { - id: ActiveValue::set(worktree.id as i64), - project_id: ActiveValue::set(project.id), - abs_path: ActiveValue::set(worktree.abs_path.clone()), - root_name: ActiveValue::set(worktree.root_name.clone()), - visible: ActiveValue::set(worktree.visible), - scan_id: ActiveValue::set(0), - is_complete: ActiveValue::set(false), - } - })) - .on_conflict( - OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id]) - .update_column(worktree::Column::RootName) - .to_owned(), - ) - .exec(&*tx) - .await?; - } - - worktree::Entity::delete_many() - .filter( - worktree::Column::ProjectId.eq(project.id).and( - worktree::Column::Id - .is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)), - ), - ) - .exec(&*tx) + self.update_project_worktrees(project.id, worktrees, &tx) .await?; let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?; @@ -2008,6 +1983,41 @@ impl Database { .await } + async fn update_project_worktrees( + &self, + project_id: ProjectId, + worktrees: &[proto::WorktreeMetadata], + tx: &DatabaseTransaction, + ) -> Result<()> { + if !worktrees.is_empty() { + worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i64), + project_id: ActiveValue::set(project_id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + })) + .on_conflict( + OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id]) + .update_column(worktree::Column::RootName) + .to_owned(), + ) + .exec(&*tx) + .await?; + } + + worktree::Entity::delete_many() + .filter(worktree::Column::ProjectId.eq(project_id).and( + worktree::Column::Id.is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)), + )) + .exec(&*tx) + .await?; + + Ok(()) + } + pub async fn update_worktree( &self, update: &proto::UpdateWorktree, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 2b41143d37..03aba6aab5 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1336,7 +1336,9 @@ async fn test_host_reconnect( } }, "dir2": { - "x.txt": "x-contents", + "x": "x-contents", + "y": "y-contents", + "z": "z-contents", }, }), ) @@ -1344,7 +1346,8 @@ async fn test_host_reconnect( let active_call_a = cx_a.read(ActiveCall::global); let (project_a, _) = client_a.build_local_project("/root/dir1", cx_a).await; - let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); + let worktree_a1 = + project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); let project_id = active_call_a .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) .await @@ -1353,7 +1356,7 @@ async fn test_host_reconnect( let project_b = client_b.build_remote_project(project_id, cx_b).await; deterministic.run_until_parked(); - let worktree_id = worktree_a.read_with(cx_a, |worktree, _| { + let worktree1_id = worktree_a1.read_with(cx_a, |worktree, _| { assert!(worktree.as_local().unwrap().is_shared()); worktree.id() }); @@ -1370,11 +1373,11 @@ async fn test_host_reconnect( assert!(!project.is_read_only()); assert_eq!(project.collaborators().len(), 1); }); - 
worktree_a.read_with(cx_a, |tree, _| { + worktree_a1.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()) }); - // While disconnected, add and remove files from the client A's project. + // While disconnected, add/remove files and worktrees from client A's project. client_a .fs .insert_tree( @@ -1398,6 +1401,20 @@ async fn test_host_reconnect( ) .await .unwrap(); + let (worktree_a2, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root/dir2", true, cx) + }) + .await + .unwrap(); + worktree_a2 + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| { + assert!(tree.as_local().unwrap().is_shared()); + tree.id() + }); + deterministic.run_until_parked(); // Client A reconnects. Their project is re-shared, and client B re-joins it. server.allow_connections(); @@ -1409,7 +1426,7 @@ async fn test_host_reconnect( project_a.read_with(cx_a, |project, cx| { assert!(project.is_shared()); assert_eq!( - worktree_a + worktree_a1 .read(cx) .snapshot() .paths() @@ -1429,12 +1446,22 @@ async fn test_host_reconnect( "subdir2/i.txt" ] ); + assert_eq!( + worktree_a2 + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec!["x", "y", "z"] + ); }); project_b.read_with(cx_b, |project, cx| { assert!(!project.is_read_only()); - let worktree_b = project.worktree_for_id(worktree_id, cx).unwrap(); assert_eq!( - worktree_b + project + .worktree_for_id(worktree1_id, cx) + .unwrap() .read(cx) .snapshot() .paths() @@ -1454,6 +1481,17 @@ async fn test_host_reconnect( "subdir2/i.txt" ] ); + assert_eq!( + project + .worktree_for_id(worktree2_id, cx) + .unwrap() + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec!["x", "y", "z"] + ); }); } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 182a169dd0..4f4b6fda7d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1028,28 +1028,16 @@ impl Project { cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); - let mut status = self.client.status(); let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, metadata_changed: metadata_changed_tx, _maintain_metadata: cx.spawn_weak(move |this, cx| async move { let mut txs = Vec::new(); - loop { - select_biased! 
{ - tx = metadata_changed_rx.next().fuse() => { - let Some(tx) = tx else { break }; - txs.push(tx); - while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { - txs.push(next_tx); - } - } - status = status.next().fuse() => { - let Some(status) = status else { break }; - if !status.is_connected() { - continue - } - } + while let Some(tx) = metadata_changed_rx.next().await { + txs.push(tx); + while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { + txs.push(next_tx); } let Some(this) = this.upgrade(&cx) else { break }; @@ -4284,12 +4272,13 @@ impl Project { if let Some(project_id) = project.read_with(&cx, |project, _| project.remote_id()) { - worktree - .update(&mut cx, |worktree, cx| { - worktree.as_local_mut().unwrap().share(project_id, cx) - }) - .await - .log_err(); + worktree.update(&mut cx, |worktree, cx| { + worktree + .as_local_mut() + .unwrap() + .share(project_id, cx) + .detach_and_log_err(cx); + }); } Ok(worktree) From 9d15b3d2950007da304dc294a1c25f7e1db527d9 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 20 Dec 2022 17:47:14 +0100 Subject: [PATCH 11/56] Remove unused import --- crates/project/src/project.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 4f4b6fda7d..73ec7927e4 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,7 +13,7 @@ use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ channel::{mpsc, oneshot}, future::Shared, - select_biased, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, + AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; use gpui::{ AnyModelHandle, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, From 55ebfe83210d93b7359fa35c46772d3be0bb55dc Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 11:10:46 -0800 Subject: [PATCH 12/56] Handle unshared projects when rejoining a room Also, construct remote projects via the room, to guarantee that the room can manage the projects' sharing lifecycle. 
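Concretely, call sites no longer construct a remote project themselves and then notify the room; they ask the active room to join it, so the room retains a weak handle in `joined_projects` and can disconnect the project when the call ends or the host unshares it. A rough sketch of the resulting call shape, mirroring the collab_ui change in this patch (argument lists abbreviated, error handling elided):

    // Previously: build the project directly, then tell the room about it afterwards.
    // let project = Project::remote(project_id, client, user_store, languages, fs, cx).await?;
    // room.update(cx, |room, cx| room.joined_project(project.clone(), cx));

    // Now: the room performs the join and owns the project's sharing lifecycle.
    let project = room
        .update(&mut cx, |room, cx| {
            room.join_project(
                project_id,
                app_state.languages.clone(),
                app_state.fs.clone(),
                cx,
            )
        })
        .await?;
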
Co-authored-by: Antonio Scandurra --- Cargo.lock | 2 + crates/call/Cargo.toml | 4 ++ crates/call/src/room.rs | 49 +++++++++++++++---- crates/collab/src/db.rs | 16 ++++++- crates/collab/src/integration_tests.rs | 66 +++++++++++++++++--------- crates/collab_ui/src/collab_ui.rs | 29 +++++------ crates/project/src/project.rs | 2 +- 7 files changed, 120 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5063e35c70..e892961e59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -820,8 +820,10 @@ dependencies = [ "async-broadcast", "client", "collections", + "fs", "futures 0.3.25", "gpui", + "language", "live_kit_client", "log", "media", diff --git a/crates/call/Cargo.toml b/crates/call/Cargo.toml index c0a6cedc62..e6aa357bef 100644 --- a/crates/call/Cargo.toml +++ b/crates/call/Cargo.toml @@ -23,6 +23,8 @@ collections = { path = "../collections" } gpui = { path = "../gpui" } log = "0.4" live_kit_client = { path = "../live_kit_client" } +fs = { path = "../fs" } +language = { path = "../language" } media = { path = "../media" } project = { path = "../project" } util = { path = "../util" } @@ -34,6 +36,8 @@ postage = { version = "0.4.1", features = ["futures-traits"] } [dev-dependencies] client = { path = "../client", features = ["test-support"] } +fs = { path = "../fs", features = ["test-support"] } +language = { path = "../language", features = ["test-support"] } collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } live_kit_client = { path = "../live_kit_client", features = ["test-support"] } diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 4549497962..f94e71d9a2 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -8,10 +8,12 @@ use client::{ Client, TypedEnvelope, User, UserStore, }; use collections::{BTreeMap, HashMap, HashSet}; +use fs::Fs; use futures::{FutureExt, StreamExt}; use gpui::{ AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle, }; +use language::LanguageRegistry; use live_kit_client::{LocalTrackPublication, LocalVideoTrack, RemoteVideoTrackUpdate}; use postage::stream::Stream; use project::Project; @@ -523,6 +525,20 @@ impl Room { } for unshared_project_id in old_projects.difference(&new_projects) { + this.joined_projects.retain(|project| { + if let Some(project) = project.upgrade(cx) { + project.update(cx, |project, cx| { + if project.remote_id() == Some(*unshared_project_id) { + project.disconnected_from_host(cx); + false + } else { + true + } + }) + } else { + false + } + }); cx.emit(Event::RemoteProjectUnshared { project_id: *unshared_project_id, }); @@ -699,15 +715,30 @@ impl Room { }) } - pub fn joined_project(&mut self, project: ModelHandle, cx: &mut ModelContext) { - self.joined_projects.retain(|project| { - if let Some(project) = project.upgrade(cx) { - !project.read(cx).is_read_only() - } else { - false - } - }); - self.joined_projects.insert(project.downgrade()); + pub fn join_project( + &mut self, + id: u64, + language_registry: Arc, + fs: Arc, + cx: &mut ModelContext, + ) -> Task>> { + let client = self.client.clone(); + let user_store = self.user_store.clone(); + cx.spawn(|this, mut cx| async move { + let project = + Project::remote(id, client, user_store, language_registry, fs, cx.clone()).await?; + this.update(&mut cx, |this, cx| { + this.joined_projects.retain(|project| { + if let Some(project) = project.upgrade(cx) { + !project.read(cx).is_read_only() + } else { + false + } + }); + 
this.joined_projects.insert(project.downgrade()); + }); + Ok(project) + }) } pub(crate) fn share_project( diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 55dbb3bd3d..29c5cfba12 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1440,9 +1440,20 @@ impl Database { }); } - // TODO: handle unshared projects - // TODO: handle left projects + project::Entity::delete_many() + .filter( + Condition::all() + .add(project::Column::RoomId.eq(room_id)) + .add(project::Column::HostUserId.eq(user_id)) + .add( + project::Column::Id + .is_not_in(reshared_projects.iter().map(|project| project.id)), + ), + ) + .exec(&*tx) + .await?; + // TODO: handle left projects let room = self.get_room(room_id, &tx).await?; Ok(( room_id, @@ -2971,6 +2982,7 @@ impl ProjectCollaborator { } } +#[derive(Debug)] pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 03aba6aab5..e4498fcc35 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1324,7 +1324,7 @@ async fn test_host_reconnect( client_a .fs .insert_tree( - "/root", + "/root-1", json!({ "dir1": { "a.txt": "a-contents", @@ -1343,17 +1343,32 @@ async fn test_host_reconnect( }), ) .await; + client_a + .fs + .insert_tree( + "/root-2", + json!({ + "1.txt": "1-contents", + }), + ) + .await; let active_call_a = cx_a.read(ActiveCall::global); - let (project_a, _) = client_a.build_local_project("/root/dir1", cx_a).await; + let (project_a1, _) = client_a.build_local_project("/root-1/dir1", cx_a).await; + let (project_a2, _) = client_a.build_local_project("/root-2", cx_a).await; let worktree_a1 = - project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); - let project_id = active_call_a - .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + project_a1.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); + let project1_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a1.clone(), cx)) + .await + .unwrap(); + let project2_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a2.clone(), cx)) .await .unwrap(); - let project_b = client_b.build_remote_project(project_id, cx_b).await; + let project_b1 = client_b.build_remote_project(project1_id, cx_b).await; + let project_b2 = client_b.build_remote_project(project2_id, cx_b).await; deterministic.run_until_parked(); let worktree1_id = worktree_a1.read_with(cx_a, |worktree, _| { @@ -1365,11 +1380,11 @@ async fn test_host_reconnect( server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); deterministic.advance_clock(RECEIVE_TIMEOUT); - project_a.read_with(cx_a, |project, _| { + project_a1.read_with(cx_a, |project, _| { assert!(project.is_shared()); assert_eq!(project.collaborators().len(), 1); }); - project_b.read_with(cx_b, |project, _| { + project_b1.read_with(cx_b, |project, _| { assert!(!project.is_read_only()); assert_eq!(project.collaborators().len(), 1); }); @@ -1377,11 +1392,11 @@ async fn test_host_reconnect( assert!(tree.as_local().unwrap().is_shared()) }); - // While disconnected, add/remove files and worktrees from client A's project. + // While disconnected, add and remove files from client A's project. 
client_a .fs .insert_tree( - "/root/dir1/subdir2", + "/root-1/dir1/subdir2", json!({ "f.txt": "f-contents", "g.txt": "g-contents", @@ -1393,7 +1408,7 @@ async fn test_host_reconnect( client_a .fs .remove_dir( - "/root/dir1/subdir1".as_ref(), + "/root-1/dir1/subdir1".as_ref(), RemoveOptions { recursive: true, ..Default::default() @@ -1401,9 +1416,11 @@ async fn test_host_reconnect( ) .await .unwrap(); - let (worktree_a2, _) = project_a + + // While disconnected, add a worktree to client A's project. + let (worktree_a2, _) = project_a1 .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root/dir2", true, cx) + p.find_or_create_local_worktree("/root-1/dir2", true, cx) }) .await .unwrap(); @@ -1416,6 +1433,9 @@ async fn test_host_reconnect( }); deterministic.run_until_parked(); + // While disconnected, close project 2 + cx_a.update(|_| drop(project_a2)); + // Client A reconnects. Their project is re-shared, and client B re-joins it. server.allow_connections(); client_a @@ -1423,7 +1443,7 @@ async fn test_host_reconnect( .await .unwrap(); deterministic.run_until_parked(); - project_a.read_with(cx_a, |project, cx| { + project_a1.read_with(cx_a, |project, cx| { assert!(project.is_shared()); assert_eq!( worktree_a1 @@ -1456,7 +1476,7 @@ async fn test_host_reconnect( vec!["x", "y", "z"] ); }); - project_b.read_with(cx_b, |project, cx| { + project_b1.read_with(cx_b, |project, cx| { assert!(!project.is_read_only()); assert_eq!( project @@ -1493,6 +1513,7 @@ async fn test_host_reconnect( vec!["x", "y", "z"] ); }); + project_b2.read_with(cx_b, |project, _| assert!(project.is_read_only())); } #[gpui::test(iterations = 10)] @@ -6930,17 +6951,18 @@ impl TestClient { host_project_id: u64, guest_cx: &mut TestAppContext, ) -> ModelHandle { - let project_b = guest_cx.spawn(|cx| { - Project::remote( + let active_call = guest_cx.read(ActiveCall::global); + let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone()); + room.update(guest_cx, |room, cx| { + room.join_project( host_project_id, - self.client.clone(), - self.user_store.clone(), self.language_registry.clone(), - FakeFs::new(cx.background()), + self.fs.clone(), cx, ) - }); - project_b.await.unwrap() + }) + .await + .unwrap() } fn build_workspace( diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index 4984b84a81..b19bc92455 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -7,10 +7,10 @@ mod incoming_call_notification; mod notifications; mod project_shared_notification; +use anyhow::anyhow; use call::ActiveCall; pub use collab_titlebar_item::{CollabTitlebarItem, ToggleCollaborationMenu}; use gpui::MutableAppContext; -use project::Project; use std::sync::Arc; use workspace::{AppState, JoinProject, ToggleFollow, Workspace}; @@ -39,15 +39,20 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let workspace = if let Some(existing_workspace) = existing_workspace { existing_workspace } else { - let project = Project::remote( - project_id, - app_state.client.clone(), - app_state.user_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx.clone(), - ) - .await?; + let active_call = cx.read(ActiveCall::global); + let room = active_call + .read_with(&cx, |call, _| call.room().cloned()) + .ok_or_else(|| anyhow!("not in a call"))?; + let project = room + .update(&mut cx, |room, cx| { + room.join_project( + project_id, + app_state.languages.clone(), + app_state.fs.clone(), + cx, + ) + }) + .await?; let (_, workspace) = 
cx.add_window((app_state.build_window_options)(), |cx| { let mut workspace = Workspace::new( @@ -68,10 +73,6 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { workspace.update(&mut cx, |workspace, cx| { if let Some(room) = ActiveCall::global(cx).read(cx).room().cloned() { - room.update(cx, |room, cx| { - room.joined_project(workspace.project().clone(), cx); - }); - let follow_peer_id = room .read(cx) .remote_participants() diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 73ec7927e4..582323c35f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1146,7 +1146,7 @@ impl Project { } } - fn disconnected_from_host(&mut self, cx: &mut ModelContext) { + pub fn disconnected_from_host(&mut self, cx: &mut ModelContext) { if let Some(ProjectClientState::Remote { sharing_has_stopped, .. From 6542b30d1f0a2c378bcb5a6f1c3f7a7c5d175deb Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 15:02:26 -0800 Subject: [PATCH 13/56] Implement rejoining projects as guest when rejoining a room Co-authored-by: Julia Risley --- .../20221109000000_test_schema.sql | 2 + crates/collab/src/db.rs | 126 ++++++++++++++- crates/collab/src/db/worktree_entry.rs | 2 + crates/collab/src/integration_tests.rs | 120 +++++++++++++- crates/db/src/db.rs | 138 +++++++++------- crates/project/src/project.rs | 147 ++++++++++-------- crates/workspace/src/persistence.rs | 78 +++++----- 7 files changed, 447 insertions(+), 166 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index d002c8a135..78a7043fb7 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -65,6 +65,7 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( "project_id" INTEGER NOT NULL, "worktree_id" INTEGER NOT NULL, + "scan_id" INTEGER NOT NULL, "id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, @@ -73,6 +74,7 @@ CREATE TABLE "worktree_entries" ( "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, + "is_deleted" BOOL NOT NULL, PRIMARY KEY(project_id, worktree_id, id), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 29c5cfba12..576fa8fa2a 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1453,14 +1453,124 @@ impl Database { .exec(&*tx) .await?; - // TODO: handle left projects + let mut rejoined_projects = Vec::new(); + for rejoined_project in &rejoin_room.rejoined_projects { + let project_id = ProjectId::from_proto(rejoined_project.id); + let Some(project) = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? 
else { + continue + }; + + let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; + let mut worktrees = Vec::new(); + for db_worktree in db_worktrees { + let mut worktree = RejoinedWorktree { + id: db_worktree.id as u64, + abs_path: db_worktree.abs_path, + root_name: db_worktree.root_name, + visible: db_worktree.visible, + updated_entries: Default::default(), + removed_entries: Default::default(), + diagnostic_summaries: Default::default(), + scan_id: db_worktree.scan_id as u64, + is_complete: db_worktree.is_complete, + }; + + let rejoined_worktree = rejoined_project + .worktrees + .iter() + .find(|worktree| worktree.id == db_worktree.id as u64); + + let entry_filter = if let Some(rejoined_worktree) = rejoined_worktree { + Condition::all() + .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) + .add(worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id)) + } else { + Condition::all() + .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) + .add(worktree_entry::Column::IsDeleted.eq(false)) + }; + + let mut db_entries = worktree_entry::Entity::find() + .filter(entry_filter) + .stream(&*tx) + .await?; + + while let Some(db_entry) = db_entries.next().await { + let db_entry = db_entry?; + + if db_entry.is_deleted { + worktree.removed_entries.push(db_entry.id as u64); + } else { + worktree.updated_entries.push(proto::Entry { + id: db_entry.id as u64, + is_dir: db_entry.is_dir, + path: db_entry.path, + inode: db_entry.inode as u64, + mtime: Some(proto::Timestamp { + seconds: db_entry.mtime_seconds as u64, + nanos: db_entry.mtime_nanos as u32, + }), + is_symlink: db_entry.is_symlink, + is_ignored: db_entry.is_ignored, + }); + } + } + + worktrees.push(worktree); + } + + let language_servers = project + .find_related(language_server::Entity) + .all(&*tx) + .await? + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id as u64, + name: language_server.name, + }) + .collect::>(); + + let mut collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await? 
+ .into_iter() + .map(|collaborator| ProjectCollaborator { + connection_id: collaborator.connection(), + user_id: collaborator.user_id, + replica_id: collaborator.replica_id, + is_host: collaborator.is_host, + }) + .collect::>(); + + let old_connection_id; + if let Some(self_collaborator_ix) = collaborators + .iter() + .position(|collaborator| collaborator.user_id == user_id) + { + let self_collaborator = collaborators.swap_remove(self_collaborator_ix); + old_connection_id = self_collaborator.connection_id; + } else { + continue; + } + + rejoined_projects.push(RejoinedProject { + id: project_id, + old_connection_id, + collaborators, + worktrees, + language_servers, + }); + } + let room = self.get_room(room_id, &tx).await?; Ok(( room_id, RejoinedRoom { room, - // TODO: handle rejoined projects - rejoined_projects: Default::default(), + rejoined_projects, reshared_projects, }, )) @@ -2079,6 +2189,8 @@ impl Database { mtime_nanos: ActiveValue::set(mtime.nanos as i32), is_symlink: ActiveValue::set(entry.is_symlink), is_ignored: ActiveValue::set(entry.is_ignored), + is_deleted: ActiveValue::set(false), + scan_id: ActiveValue::set(update.scan_id as i64), } })) .on_conflict( @@ -2103,7 +2215,7 @@ impl Database { } if !update.removed_entries.is_empty() { - worktree_entry::Entity::delete_many() + worktree_entry::Entity::update_many() .filter( worktree_entry::Column::ProjectId .eq(project_id) @@ -2113,6 +2225,11 @@ impl Database { .is_in(update.removed_entries.iter().map(|id| *id as i64)), ), ) + .set(worktree_entry::ActiveModel { + is_deleted: ActiveValue::Set(true), + scan_id: ActiveValue::Set(update.scan_id as i64), + ..Default::default() + }) .exec(&*tx) .await?; } @@ -2935,6 +3052,7 @@ pub struct RejoinedProject { pub language_servers: Vec, } +#[derive(Debug)] pub struct RejoinedWorktree { pub id: u64, pub abs_path: String, diff --git a/crates/collab/src/db/worktree_entry.rs b/crates/collab/src/db/worktree_entry.rs index 413821201a..4eb1648b81 100644 --- a/crates/collab/src/db/worktree_entry.rs +++ b/crates/collab/src/db/worktree_entry.rs @@ -17,6 +17,8 @@ pub struct Model { pub mtime_nanos: i32, pub is_symlink: bool, pub is_ignored: bool, + pub is_deleted: bool, + pub scan_id: i64, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index e4498fcc35..7ca225f91b 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1307,7 +1307,7 @@ async fn test_host_disconnect( } #[gpui::test(iterations = 10)] -async fn test_host_reconnect( +async fn test_project_reconnect( deterministic: Arc, cx_a: &mut TestAppContext, cx_b: &mut TestAppContext, @@ -1336,9 +1336,12 @@ async fn test_host_reconnect( } }, "dir2": { - "x": "x-contents", - "y": "y-contents", - "z": "z-contents", + "x.txt": "x-contents", + "y.txt": "y-contents", + "z.txt": "z-contents", + }, + "dir3": { + "w.txt": "w-contents", }, }), ) @@ -1348,7 +1351,16 @@ async fn test_host_reconnect( .insert_tree( "/root-2", json!({ - "1.txt": "1-contents", + "2.txt": "2-contents", + }), + ) + .await; + client_a + .fs + .insert_tree( + "/root-3", + json!({ + "3.txt": "3-contents", }), ) .await; @@ -1356,6 +1368,7 @@ async fn test_host_reconnect( let active_call_a = cx_a.read(ActiveCall::global); let (project_a1, _) = client_a.build_local_project("/root-1/dir1", cx_a).await; let (project_a2, _) = client_a.build_local_project("/root-2", cx_a).await; + let (project_a3, _) = client_a.build_local_project("/root-3", 
cx_a).await; let worktree_a1 = project_a1.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); let project1_id = active_call_a @@ -1366,9 +1379,14 @@ async fn test_host_reconnect( .update(cx_a, |call, cx| call.share_project(project_a2.clone(), cx)) .await .unwrap(); + let project3_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a3.clone(), cx)) + .await + .unwrap(); let project_b1 = client_b.build_remote_project(project1_id, cx_b).await; let project_b2 = client_b.build_remote_project(project2_id, cx_b).await; + let project_b3 = client_b.build_remote_project(project3_id, cx_b).await; deterministic.run_until_parked(); let worktree1_id = worktree_a1.read_with(cx_a, |worktree, _| { @@ -1473,7 +1491,7 @@ async fn test_host_reconnect( .paths() .map(|p| p.to_str().unwrap()) .collect::>(), - vec!["x", "y", "z"] + vec!["x.txt", "y.txt", "z.txt"] ); }); project_b1.read_with(cx_b, |project, cx| { @@ -1510,10 +1528,98 @@ async fn test_host_reconnect( .paths() .map(|p| p.to_str().unwrap()) .collect::>(), - vec!["x", "y", "z"] + vec!["x.txt", "y.txt", "z.txt"] ); }); project_b2.read_with(cx_b, |project, _| assert!(project.is_read_only())); + project_b3.read_with(cx_b, |project, _| assert!(!project.is_read_only())); + + // Drop client B's connection. + server.forbid_connections(); + server.disconnect_client(client_b.peer_id().unwrap()); + deterministic.advance_clock(RECEIVE_TIMEOUT); + + // While client B is disconnected, add and remove files from client A's project + client_a + .fs + .insert_file("/root-1/dir1/subdir2/j.txt", "j-contents".into()) + .await; + client_a + .fs + .remove_file("/root-1/dir1/subdir2/i.txt".as_ref(), Default::default()) + .await + .unwrap(); + + // While client B is disconnected, add and remove worktrees from client A's project. + let (worktree_a3, _) = project_a1 + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1/dir3", true, cx) + }) + .await + .unwrap(); + worktree_a3 + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let worktree3_id = worktree_a3.read_with(cx_a, |tree, _| { + assert!(tree.as_local().unwrap().is_shared()); + tree.id() + }); + project_a1 + .update(cx_a, |project, cx| { + project.remove_worktree(worktree2_id, cx) + }) + .await; + deterministic.run_until_parked(); + + // While disconnected, close project 3 + cx_a.update(|_| drop(project_a3)); + + // Client B reconnects. They re-join the room and the remaining shared project. 
+ server.allow_connections(); + client_b + .authenticate_and_connect(false, &cx_b.to_async()) + .await + .unwrap(); + deterministic.run_until_parked(); + project_b1.read_with(cx_b, |project, cx| { + assert!(!project.is_read_only()); + assert_eq!( + project + .worktree_for_id(worktree1_id, cx) + .unwrap() + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec![ + "a.txt", + "b.txt", + "subdir1", + "subdir1/c.txt", + "subdir1/d.txt", + "subdir1/e.txt", + "subdir2", + "subdir2/f.txt", + "subdir2/g.txt", + "subdir2/h.txt", + "subdir2/j.txt" + ] + ); + assert!(project.worktree_for_id(worktree2_id, cx).is_none()); + assert_eq!( + project + .worktree_for_id(worktree3_id, cx) + .unwrap() + .read(cx) + .snapshot() + .paths() + .map(|p| p.to_str().unwrap()) + .collect::>(), + vec!["w.txt"] + ); + }); + project_b3.read_with(cx_b, |project, _| assert!(project.is_read_only())); } #[gpui::test(iterations = 10)] diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 704ac4049d..ab036786d0 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -20,8 +20,8 @@ use std::fs::create_dir_all; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; -use util::{async_iife, ResultExt}; use util::channel::ReleaseChannel; +use util::{async_iife, ResultExt}; const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA foreign_keys=TRUE; @@ -41,14 +41,17 @@ const DB_FILE_NAME: &'static str = "db.sqlite"; lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); - pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. /// This will retry a couple times if there are failures. If opening fails once, the db directory /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. /// In either case, static variables are set so that the user can be notified. -pub async fn open_db(db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { +pub async fn open_db( + db_dir: &Path, + release_channel: &ReleaseChannel, +) -> ThreadSafeConnection { let release_channel_name = release_channel.dev_name(); let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); @@ -117,10 +120,10 @@ pub async fn open_db(db_dir: &Path, release_channel: &Rel if let Some(connection) = connection { return connection; } - + // Set another static ref so that we can escalate the notification ALL_FILE_DB_FAILED.store(true, Ordering::Release); - + // If still failed, create an in memory db with a known name open_fallback_db().await } @@ -174,15 +177,15 @@ macro_rules! define_connection { &self.0 } } - + impl $crate::sqlez::domain::Domain for $t { fn name() -> &'static str { stringify!($t) } - + fn migrations() -> &'static [&'static str] { $migrations - } + } } #[cfg(any(test, feature = "test-support"))] @@ -205,15 +208,15 @@ macro_rules! define_connection { &self.0 } } - + impl $crate::sqlez::domain::Domain for $t { fn name() -> &'static str { stringify!($t) } - + fn migrations() -> &'static [&'static str] { $migrations - } + } } #[cfg(any(test, feature = "test-support"))] @@ -232,134 +235,157 @@ macro_rules! 
define_connection { mod tests { use std::{fs, thread}; - use sqlez::{domain::Domain, connection::Connection}; + use sqlez::{connection::Connection, domain::Domain}; use sqlez_macros::sql; use tempdir::TempDir; use crate::{open_db, DB_FILE_NAME}; - + // Test bad migration panics #[gpui::test] #[should_panic] async fn test_bad_migration_panics() { enum BadDB {} - + impl Domain for BadDB { fn name() -> &'static str { "db_tests" } - + fn migrations() -> &'static [&'static str] { - &[sql!(CREATE TABLE test(value);), + &[ + sql!(CREATE TABLE test(value);), // failure because test already exists - sql!(CREATE TABLE test(value);)] + sql!(CREATE TABLE test(value);), + ] } } - + let tempdir = TempDir::new("DbTests").unwrap(); let _bad_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; } - + /// Test that DB exists but corrupted (causing recreate) #[gpui::test] async fn test_db_corruption() { enum CorruptedDB {} - + impl Domain for CorruptedDB { fn name() -> &'static str { "db_tests" } - + fn migrations() -> &'static [&'static str] { &[sql!(CREATE TABLE test(value);)] } } - + enum GoodDB {} - + impl Domain for GoodDB { fn name() -> &'static str { "db_tests" //Notice same name } - + fn migrations() -> &'static [&'static str] { &[sql!(CREATE TABLE test2(value);)] //But different migration } } - + let tempdir = TempDir::new("DbTests").unwrap(); { - let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let corrupt_db = + open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(corrupt_db.persistent()); } - - + let good_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); - - let mut corrupted_backup_dir = fs::read_dir( - tempdir.path() - ).unwrap().find(|entry| { - !entry.as_ref().unwrap().file_name().to_str().unwrap().starts_with("0") - } - ).unwrap().unwrap().path(); + assert!( + good_db.select_row::("SELECT * FROM test2").unwrap()() + .unwrap() + .is_none() + ); + + let mut corrupted_backup_dir = fs::read_dir(tempdir.path()) + .unwrap() + .find(|entry| { + !entry + .as_ref() + .unwrap() + .file_name() + .to_str() + .unwrap() + .starts_with("0") + }) + .unwrap() + .unwrap() + .path(); corrupted_backup_dir.push(DB_FILE_NAME); - + dbg!(&corrupted_backup_dir); - + let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); - assert!(backup.select_row::("SELECT * FROM test").unwrap()().unwrap().is_none()); + assert!(backup.select_row::("SELECT * FROM test").unwrap()() + .unwrap() + .is_none()); } - + /// Test that DB exists but corrupted (causing recreate) #[gpui::test] async fn test_simultaneous_db_corruption() { enum CorruptedDB {} - + impl Domain for CorruptedDB { fn name() -> &'static str { "db_tests" } - + fn migrations() -> &'static [&'static str] { &[sql!(CREATE TABLE test(value);)] } } - + enum GoodDB {} - + impl Domain for GoodDB { fn name() -> &'static str { "db_tests" //Notice same name } - + fn migrations() -> &'static [&'static str] { &[sql!(CREATE TABLE test2(value);)] //But different migration } } - + let tempdir = TempDir::new("DbTests").unwrap(); { // Setup the bad database - let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let corrupt_db = + open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(corrupt_db.persistent()); } - + // Try to connect to it a bunch of times at once let mut guards = vec![]; for _ in 0..10 { let 
tmp_path = tempdir.path().to_path_buf(); let guard = thread::spawn(move || { - let good_db = smol::block_on(open_db::(tmp_path.as_path(), &util::channel::ReleaseChannel::Dev)); - assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); + let good_db = smol::block_on(open_db::( + tmp_path.as_path(), + &util::channel::ReleaseChannel::Dev, + )); + assert!( + good_db.select_row::("SELECT * FROM test2").unwrap()() + .unwrap() + .is_none() + ); }); - + guards.push(guard); - } - - for guard in guards.into_iter() { - assert!(guard.join().is_ok()); - } + + for guard in guards.into_iter() { + assert!(guard.join().is_ok()); + } } } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 582323c35f..97a1f1c537 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1088,7 +1088,26 @@ impl Project { message: proto::RejoinedProject, cx: &mut ModelContext, ) -> Result<()> { + self.set_worktrees_from_proto(message.worktrees, cx)?; self.set_collaborators_from_proto(message.collaborators, cx)?; + + self.language_server_statuses = message + .language_servers + .into_iter() + .map(|server| { + ( + server.id as usize, + LanguageServerStatus { + name: server.name, + pending_work: Default::default(), + has_pending_diagnostic_updates: false, + progress_tokens: Default::default(), + }, + ) + }) + .collect(); + + cx.notify(); Ok(()) } @@ -4647,39 +4666,11 @@ impl Project { async fn handle_update_project( this: ModelHandle, envelope: TypedEnvelope, - client: Arc, + _: Arc, mut cx: AsyncAppContext, ) -> Result<()> { this.update(&mut cx, |this, cx| { - let replica_id = this.replica_id(); - let remote_id = this.remote_id().ok_or_else(|| anyhow!("invalid project"))?; - - let mut old_worktrees_by_id = this - .worktrees - .drain(..) - .filter_map(|worktree| { - let worktree = worktree.upgrade(cx)?; - Some((worktree.read(cx).id(), worktree)) - }) - .collect::>(); - - for worktree in envelope.payload.worktrees { - if let Some(old_worktree) = - old_worktrees_by_id.remove(&WorktreeId::from_proto(worktree.id)) - { - this.worktrees.push(WorktreeHandle::Strong(old_worktree)); - } else { - let worktree = - Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx); - let _ = this.add_worktree(&worktree, cx); - } - } - - let _ = this.metadata_changed(cx); - for (id, _) in old_worktrees_by_id { - cx.emit(Event::WorktreeRemoved(id)); - } - + this.set_worktrees_from_proto(envelope.payload.worktrees, cx)?; Ok(()) }) } @@ -4871,14 +4862,15 @@ impl Project { _: Arc, mut cx: AsyncAppContext, ) -> Result<()> { - let language_server_id = envelope.payload.language_server_id as usize; - match envelope - .payload - .variant - .ok_or_else(|| anyhow!("invalid variant"))? - { - proto::update_language_server::Variant::WorkStart(payload) => { - this.update(&mut cx, |this, cx| { + this.update(&mut cx, |this, cx| { + let language_server_id = envelope.payload.language_server_id as usize; + + match envelope + .payload + .variant + .ok_or_else(|| anyhow!("invalid variant"))? 
+ { + proto::update_language_server::Variant::WorkStart(payload) => { this.on_lsp_work_start( language_server_id, payload.token, @@ -4889,10 +4881,9 @@ impl Project { }, cx, ); - }) - } - proto::update_language_server::Variant::WorkProgress(payload) => { - this.update(&mut cx, |this, cx| { + } + + proto::update_language_server::Variant::WorkProgress(payload) => { this.on_lsp_work_progress( language_server_id, payload.token, @@ -4903,26 +4894,23 @@ impl Project { }, cx, ); - }) - } - proto::update_language_server::Variant::WorkEnd(payload) => { - this.update(&mut cx, |this, cx| { - this.on_lsp_work_end(language_server_id, payload.token, cx); - }) - } - proto::update_language_server::Variant::DiskBasedDiagnosticsUpdating(_) => { - this.update(&mut cx, |this, cx| { - this.disk_based_diagnostics_started(language_server_id, cx); - }) - } - proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated(_) => { - this.update(&mut cx, |this, cx| { - this.disk_based_diagnostics_finished(language_server_id, cx) - }); - } - } + } - Ok(()) + proto::update_language_server::Variant::WorkEnd(payload) => { + this.on_lsp_work_end(language_server_id, payload.token, cx); + } + + proto::update_language_server::Variant::DiskBasedDiagnosticsUpdating(_) => { + this.disk_based_diagnostics_started(language_server_id, cx); + } + + proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated(_) => { + this.disk_based_diagnostics_finished(language_server_id, cx) + } + } + + Ok(()) + }) } async fn handle_update_buffer( @@ -5638,6 +5626,43 @@ impl Project { }) } + fn set_worktrees_from_proto( + &mut self, + worktrees: Vec, + cx: &mut ModelContext, + ) -> Result<()> { + let replica_id = self.replica_id(); + let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?; + + let mut old_worktrees_by_id = self + .worktrees + .drain(..) + .filter_map(|worktree| { + let worktree = worktree.upgrade(cx)?; + Some((worktree.read(cx).id(), worktree)) + }) + .collect::>(); + + for worktree in worktrees { + if let Some(old_worktree) = + old_worktrees_by_id.remove(&WorktreeId::from_proto(worktree.id)) + { + self.worktrees.push(WorktreeHandle::Strong(old_worktree)); + } else { + let worktree = + Worktree::remote(remote_id, replica_id, worktree, self.client.clone(), cx); + let _ = self.add_worktree(&worktree, cx); + } + } + + let _ = self.metadata_changed(cx); + for (id, _) in old_worktrees_by_id { + cx.emit(Event::WorktreeRemoved(id)); + } + + Ok(()) + } + fn set_collaborators_from_proto( &mut self, messages: Vec, diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 9f957f6e18..b8dda0eb99 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -8,7 +8,7 @@ use anyhow::{anyhow, bail, Context, Result}; use db::{define_connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; -use util::{ unzip_option, ResultExt}; +use util::{unzip_option, ResultExt}; use crate::dock::DockPosition; use crate::WorkspaceId; @@ -31,7 +31,7 @@ define_connection! { timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; - + CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -43,7 +43,7 @@ define_connection! { ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -52,7 +52,7 @@ define_connection! 
{ ON DELETE CASCADE ON UPDATE CASCADE ) STRICT; - + CREATE TABLE center_panes( pane_id INTEGER PRIMARY KEY, parent_group_id INTEGER, // NULL means that this is a root pane @@ -61,7 +61,7 @@ define_connection! { ON DELETE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE items( item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique workspace_id INTEGER NOT NULL, @@ -119,7 +119,7 @@ impl WorkspaceDb { .context("Getting center group") .log_err()?, dock_position, - left_sidebar_open + left_sidebar_open, }) } @@ -158,7 +158,12 @@ impl WorkspaceDb { dock_visible = ?4, dock_anchor = ?5, timestamp = CURRENT_TIMESTAMP - ))?((workspace.id, &workspace.location, workspace.left_sidebar_open, workspace.dock_position)) + ))?(( + workspace.id, + &workspace.location, + workspace.left_sidebar_open, + workspace.dock_position, + )) .context("Updating workspace")?; // Save center pane group and dock pane @@ -191,10 +196,10 @@ impl WorkspaceDb { query! { pub fn recent_workspaces(limit: usize) -> Result> { - SELECT workspace_id, workspace_location + SELECT workspace_id, workspace_location FROM workspaces WHERE workspace_location IS NOT NULL - ORDER BY timestamp DESC + ORDER BY timestamp DESC LIMIT ? } } @@ -210,10 +215,16 @@ impl WorkspaceDb { } fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result { - Ok(self.get_pane_group(workspace_id, None)? + Ok(self + .get_pane_group(workspace_id, None)? .into_iter() .next() - .unwrap_or_else(|| SerializedPaneGroup::Pane(SerializedPane { active: true, children: vec![] }))) + .unwrap_or_else(|| { + SerializedPaneGroup::Pane(SerializedPane { + active: true, + children: vec![], + }) + })) } fn get_pane_group( @@ -225,7 +236,7 @@ impl WorkspaceDb { type GroupOrPane = (Option, Option, Option, Option); self.select_bound::(sql!( SELECT group_id, axis, pane_id, active - FROM (SELECT + FROM (SELECT group_id, axis, NULL as pane_id, @@ -233,18 +244,18 @@ impl WorkspaceDb { position, parent_group_id, workspace_id - FROM pane_groups + FROM pane_groups UNION - SELECT + SELECT + NULL, NULL, - NULL, center_panes.pane_id, panes.active as active, position, parent_group_id, panes.workspace_id as workspace_id FROM center_panes - JOIN panes ON center_panes.pane_id = panes.pane_id) + JOIN panes ON center_panes.pane_id = panes.pane_id) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position ))?((group_id, workspace_id))? @@ -267,13 +278,12 @@ impl WorkspaceDb { // Filter out panes and pane groups which don't have any children or items .filter(|pane_group| match pane_group { Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(), - Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), + Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), _ => true, }) .collect::>() } - fn save_pane_group( conn: &Connection, workspace_id: WorkspaceId, @@ -285,15 +295,10 @@ impl WorkspaceDb { let (parent_id, position) = unzip_option(parent); let group_id = conn.select_row_bound::<_, i64>(sql!( - INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) - VALUES (?, ?, ?, ?) + INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) + VALUES (?, ?, ?, ?) RETURNING group_id - ))?(( - workspace_id, - parent_id, - position, - *axis, - ))? + ))?((workspace_id, parent_id, position, *axis))? 
.ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { @@ -314,9 +319,7 @@ impl WorkspaceDb { SELECT pane_id, active FROM panes WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?) - ))?( - workspace_id, - )? + ))?(workspace_id)? .context("No dock pane for workspace")?; Ok(SerializedPane::new( @@ -333,8 +336,8 @@ impl WorkspaceDb { dock: bool, ) -> Result { let pane_id = conn.select_row_bound::<_, i64>(sql!( - INSERT INTO panes(workspace_id, active) - VALUES (?, ?) + INSERT INTO panes(workspace_id, active) + VALUES (?, ?) RETURNING pane_id ))?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; @@ -376,14 +379,13 @@ impl WorkspaceDb { Ok(()) } - query!{ + query! { pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> { UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ? } } - } #[cfg(test)] @@ -472,7 +474,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), - left_sidebar_open: true + left_sidebar_open: true, }; let mut workspace_2 = SerializedWorkspace { @@ -481,7 +483,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), - left_sidebar_open: false + left_sidebar_open: false, }; db.save_workspace(workspace_1.clone()).await; @@ -587,7 +589,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, - left_sidebar_open: true + left_sidebar_open: true, }; db.save_workspace(workspace.clone()).await; @@ -660,7 +662,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), - left_sidebar_open: false + left_sidebar_open: false, }; db.save_workspace(workspace_3.clone()).await; @@ -695,7 +697,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, - left_sidebar_open: true + left_sidebar_open: true, } } From bb00134f5f34ad5a2706beb0b6c6d474179744ee Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 16:44:39 -0800 Subject: [PATCH 14/56] Clean up projects when leaving a room --- crates/call/src/room.rs | 16 ++++++++++++++++ crates/collab/src/integration_tests.rs | 10 ++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index f94e71d9a2..84f15e0602 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -240,6 +240,22 @@ impl Room { cx.notify(); cx.emit(Event::Left); log::info!("leaving room"); + + for project in self.shared_projects.drain() { + if let Some(project) = project.upgrade(cx) { + project.update(cx, |project, cx| { + project.unshare(cx).log_err(); + }); + } + } + for project in self.joined_projects.drain() { + if let Some(project) = project.upgrade(cx) { + project.update(cx, |project, cx| { + project.disconnected_from_host(cx); + }); + } + } + self.status = RoomStatus::Offline; self.remote_participants.clear(); self.pending_participants.clear(); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 1e27c0e107..e8198aea6b 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1301,8 +1301,9 @@ async fn test_host_disconnect( .unwrap(); 
// Drop client A's connection again. We should still unshare it successfully. + server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); } @@ -2920,7 +2921,7 @@ async fn test_leaving_project( // Drop client B's connection and ensure client A and client C observe client B leaving. client_b.disconnect(&cx_b.to_async()).unwrap(); - deterministic.run_until_parked(); + deterministic.advance_clock(RECONNECT_TIMEOUT); project_a.read_with(cx_a, |project, _| { assert_eq!(project.collaborators().len(), 1); }); @@ -2947,8 +2948,9 @@ async fn test_leaving_project( // Simulate connection loss for client C and ensure client A observes client C leaving the project. client_c.wait_for_current_user(cx_c).await; + server.forbid_connections(); server.disconnect_client(client_c.peer_id().unwrap()); - deterministic.advance_clock(RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); deterministic.run_until_parked(); project_a.read_with(cx_a, |project, _| { assert_eq!(project.collaborators().len(), 0); @@ -5784,7 +5786,7 @@ async fn test_following( // Following interrupts when client B disconnects. client_b.disconnect(&cx_b.to_async()).unwrap(); - deterministic.run_until_parked(); + deterministic.advance_clock(RECONNECT_TIMEOUT); assert_eq!( workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), None From cb8962691a90c0fb9b85a7ccd7a5a031d040058a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 16:58:19 -0800 Subject: [PATCH 15/56] Remove unnecessary UnshareProject message sent to clients leaving a project --- crates/collab/src/rpc.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 87876ed651..510f864aa4 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -2234,16 +2234,6 @@ fn project_left(project: &db::LeftProject, session: &Session) { .trace_err(); } } - - session - .peer - .send( - session.connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); } pub trait ResultExt { From 6a2066af6ccc4ff0298249998707648645165a2b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 17:16:56 -0800 Subject: [PATCH 16/56] :lipstick: Reduce indentation in Database::rejoin_room --- crates/collab/src/db.rs | 358 ++++++++++++++++++++-------------------- 1 file changed, 178 insertions(+), 180 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 576fa8fa2a..5af4833cfd 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1376,205 +1376,203 @@ impl Database { .exec(&*tx) .await?; if participant_update.rows_affected == 0 { - Err(anyhow!("room does not exist or was already joined"))? - } else { - let mut reshared_projects = Vec::new(); - for reshared_project in &rejoin_room.reshared_projects { - let project_id = ProjectId::from_proto(reshared_project.project_id); - let project = project::Entity::find_by_id(project_id) - .one(&*tx) - .await? 
- .ok_or_else(|| anyhow!("project does not exist"))?; - if project.host_user_id != user_id { - return Err(anyhow!("no such project"))?; - } + return Err(anyhow!("room does not exist or was already joined"))?; + } - let mut collaborators = project - .find_related(project_collaborator::Entity) - .all(&*tx) - .await?; - let host_ix = collaborators - .iter() - .position(|collaborator| { - collaborator.user_id == user_id && collaborator.is_host - }) - .ok_or_else(|| anyhow!("host not found among collaborators"))?; - let host = collaborators.swap_remove(host_ix); - let old_connection_id = host.connection(); - - project::Entity::update(project::ActiveModel { - host_connection_id: ActiveValue::set(Some(connection.id as i32)), - host_connection_server_id: ActiveValue::set(Some(ServerId( - connection.owner_id as i32, - ))), - ..project.into_active_model() - }) - .exec(&*tx) - .await?; - project_collaborator::Entity::update(project_collaborator::ActiveModel { - connection_id: ActiveValue::set(connection.id as i32), - connection_server_id: ActiveValue::set(ServerId( - connection.owner_id as i32, - )), - ..host.into_active_model() - }) - .exec(&*tx) - .await?; - - self.update_project_worktrees(project_id, &reshared_project.worktrees, &tx) - .await?; - - reshared_projects.push(ResharedProject { - id: project_id, - old_connection_id, - collaborators: collaborators - .iter() - .map(|collaborator| ProjectCollaborator { - connection_id: collaborator.connection(), - user_id: collaborator.user_id, - replica_id: collaborator.replica_id, - is_host: collaborator.is_host, - }) - .collect(), - worktrees: reshared_project.worktrees.clone(), - }); + let mut reshared_projects = Vec::new(); + for reshared_project in &rejoin_room.reshared_projects { + let project_id = ProjectId::from_proto(reshared_project.project_id); + let project = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? 
+ .ok_or_else(|| anyhow!("project does not exist"))?; + if project.host_user_id != user_id { + return Err(anyhow!("no such project"))?; } - project::Entity::delete_many() - .filter( - Condition::all() - .add(project::Column::RoomId.eq(room_id)) - .add(project::Column::HostUserId.eq(user_id)) - .add( - project::Column::Id - .is_not_in(reshared_projects.iter().map(|project| project.id)), - ), - ) - .exec(&*tx) + let mut collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await?; + let host_ix = collaborators + .iter() + .position(|collaborator| { + collaborator.user_id == user_id && collaborator.is_host + }) + .ok_or_else(|| anyhow!("host not found among collaborators"))?; + let host = collaborators.swap_remove(host_ix); + let old_connection_id = host.connection(); + + project::Entity::update(project::ActiveModel { + host_connection_id: ActiveValue::set(Some(connection.id as i32)), + host_connection_server_id: ActiveValue::set(Some(ServerId( + connection.owner_id as i32, + ))), + ..project.into_active_model() + }) + .exec(&*tx) + .await?; + project_collaborator::Entity::update(project_collaborator::ActiveModel { + connection_id: ActiveValue::set(connection.id as i32), + connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), + ..host.into_active_model() + }) + .exec(&*tx) + .await?; + + self.update_project_worktrees(project_id, &reshared_project.worktrees, &tx) .await?; - let mut rejoined_projects = Vec::new(); - for rejoined_project in &rejoin_room.rejoined_projects { - let project_id = ProjectId::from_proto(rejoined_project.id); - let Some(project) = project::Entity::find_by_id(project_id) - .one(&*tx) - .await? else { - continue - }; - - let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; - let mut worktrees = Vec::new(); - for db_worktree in db_worktrees { - let mut worktree = RejoinedWorktree { - id: db_worktree.id as u64, - abs_path: db_worktree.abs_path, - root_name: db_worktree.root_name, - visible: db_worktree.visible, - updated_entries: Default::default(), - removed_entries: Default::default(), - diagnostic_summaries: Default::default(), - scan_id: db_worktree.scan_id as u64, - is_complete: db_worktree.is_complete, - }; - - let rejoined_worktree = rejoined_project - .worktrees - .iter() - .find(|worktree| worktree.id == db_worktree.id as u64); - - let entry_filter = if let Some(rejoined_worktree) = rejoined_worktree { - Condition::all() - .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) - .add(worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id)) - } else { - Condition::all() - .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) - .add(worktree_entry::Column::IsDeleted.eq(false)) - }; - - let mut db_entries = worktree_entry::Entity::find() - .filter(entry_filter) - .stream(&*tx) - .await?; - - while let Some(db_entry) = db_entries.next().await { - let db_entry = db_entry?; - - if db_entry.is_deleted { - worktree.removed_entries.push(db_entry.id as u64); - } else { - worktree.updated_entries.push(proto::Entry { - id: db_entry.id as u64, - is_dir: db_entry.is_dir, - path: db_entry.path, - inode: db_entry.inode as u64, - mtime: Some(proto::Timestamp { - seconds: db_entry.mtime_seconds as u64, - nanos: db_entry.mtime_nanos as u32, - }), - is_symlink: db_entry.is_symlink, - is_ignored: db_entry.is_ignored, - }); - } - } - - worktrees.push(worktree); - } - - let language_servers = project - .find_related(language_server::Entity) - .all(&*tx) - .await? 
- .into_iter() - .map(|language_server| proto::LanguageServer { - id: language_server.id as u64, - name: language_server.name, - }) - .collect::>(); - - let mut collaborators = project - .find_related(project_collaborator::Entity) - .all(&*tx) - .await? - .into_iter() + reshared_projects.push(ResharedProject { + id: project_id, + old_connection_id, + collaborators: collaborators + .iter() .map(|collaborator| ProjectCollaborator { connection_id: collaborator.connection(), user_id: collaborator.user_id, replica_id: collaborator.replica_id, is_host: collaborator.is_host, }) - .collect::>(); + .collect(), + worktrees: reshared_project.worktrees.clone(), + }); + } - let old_connection_id; - if let Some(self_collaborator_ix) = collaborators + project::Entity::delete_many() + .filter( + Condition::all() + .add(project::Column::RoomId.eq(room_id)) + .add(project::Column::HostUserId.eq(user_id)) + .add( + project::Column::Id + .is_not_in(reshared_projects.iter().map(|project| project.id)), + ), + ) + .exec(&*tx) + .await?; + + let mut rejoined_projects = Vec::new(); + for rejoined_project in &rejoin_room.rejoined_projects { + let project_id = ProjectId::from_proto(rejoined_project.id); + let Some(project) = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? else { + continue + }; + + let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; + let mut worktrees = Vec::new(); + for db_worktree in db_worktrees { + let mut worktree = RejoinedWorktree { + id: db_worktree.id as u64, + abs_path: db_worktree.abs_path, + root_name: db_worktree.root_name, + visible: db_worktree.visible, + updated_entries: Default::default(), + removed_entries: Default::default(), + diagnostic_summaries: Default::default(), + scan_id: db_worktree.scan_id as u64, + is_complete: db_worktree.is_complete, + }; + + let rejoined_worktree = rejoined_project + .worktrees .iter() - .position(|collaborator| collaborator.user_id == user_id) - { - let self_collaborator = collaborators.swap_remove(self_collaborator_ix); - old_connection_id = self_collaborator.connection_id; + .find(|worktree| worktree.id == db_worktree.id as u64); + + let entry_filter = if let Some(rejoined_worktree) = rejoined_worktree { + Condition::all() + .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) + .add(worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id)) } else { - continue; + Condition::all() + .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) + .add(worktree_entry::Column::IsDeleted.eq(false)) + }; + + let mut db_entries = worktree_entry::Entity::find() + .filter(entry_filter) + .stream(&*tx) + .await?; + + while let Some(db_entry) = db_entries.next().await { + let db_entry = db_entry?; + + if db_entry.is_deleted { + worktree.removed_entries.push(db_entry.id as u64); + } else { + worktree.updated_entries.push(proto::Entry { + id: db_entry.id as u64, + is_dir: db_entry.is_dir, + path: db_entry.path, + inode: db_entry.inode as u64, + mtime: Some(proto::Timestamp { + seconds: db_entry.mtime_seconds as u64, + nanos: db_entry.mtime_nanos as u32, + }), + is_symlink: db_entry.is_symlink, + is_ignored: db_entry.is_ignored, + }); + } } - rejoined_projects.push(RejoinedProject { - id: project_id, - old_connection_id, - collaborators, - worktrees, - language_servers, - }); + worktrees.push(worktree); } - let room = self.get_room(room_id, &tx).await?; - Ok(( - room_id, - RejoinedRoom { - room, - rejoined_projects, - reshared_projects, - }, - )) + let language_servers = project + .find_related(language_server::Entity) 
+ .all(&*tx) + .await? + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id as u64, + name: language_server.name, + }) + .collect::>(); + + let mut collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await? + .into_iter() + .map(|collaborator| ProjectCollaborator { + connection_id: collaborator.connection(), + user_id: collaborator.user_id, + replica_id: collaborator.replica_id, + is_host: collaborator.is_host, + }) + .collect::>(); + + let old_connection_id; + if let Some(self_collaborator_ix) = collaborators + .iter() + .position(|collaborator| collaborator.user_id == user_id) + { + let self_collaborator = collaborators.swap_remove(self_collaborator_ix); + old_connection_id = self_collaborator.connection_id; + } else { + continue; + } + + rejoined_projects.push(RejoinedProject { + id: project_id, + old_connection_id, + collaborators, + worktrees, + language_servers, + }); } + + let room = self.get_room(room_id, &tx).await?; + Ok(( + room_id, + RejoinedRoom { + room, + rejoined_projects, + reshared_projects, + }, + )) }) .await } From 55800fc6960e29df5817e6968fd12eb6d9b0f240 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 17:20:08 -0800 Subject: [PATCH 17/56] :lipstick: Avoid repeated sql condition in rejoin_room --- crates/collab/src/db.rs | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 5af4833cfd..4cdf1d38ef 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1461,8 +1461,8 @@ impl Database { continue }; - let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; let mut worktrees = Vec::new(); + let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; for db_worktree in db_worktrees { let mut worktree = RejoinedWorktree { id: db_worktree.id as u64, @@ -1480,25 +1480,23 @@ impl Database { .worktrees .iter() .find(|worktree| worktree.id == db_worktree.id as u64); - let entry_filter = if let Some(rejoined_worktree) = rejoined_worktree { - Condition::all() - .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) - .add(worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id)) + worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id) } else { - Condition::all() - .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) - .add(worktree_entry::Column::IsDeleted.eq(false)) + worktree_entry::Column::IsDeleted.eq(false) }; let mut db_entries = worktree_entry::Entity::find() - .filter(entry_filter) + .filter( + Condition::all() + .add(worktree_entry::Column::WorktreeId.eq(worktree.id)) + .add(entry_filter), + ) .stream(&*tx) .await?; while let Some(db_entry) = db_entries.next().await { let db_entry = db_entry?; - if db_entry.is_deleted { worktree.removed_entries.push(db_entry.id as u64); } else { @@ -1544,16 +1542,15 @@ impl Database { }) .collect::>(); - let old_connection_id; - if let Some(self_collaborator_ix) = collaborators + let old_connection_id = if let Some(self_collaborator_ix) = collaborators .iter() .position(|collaborator| collaborator.user_id == user_id) { let self_collaborator = collaborators.swap_remove(self_collaborator_ix); - old_connection_id = self_collaborator.connection_id; + self_collaborator.connection_id } else { continue; - } + }; rejoined_projects.push(RejoinedProject { id: project_id, From 213be3d6bd2400a6d0d1e456f5a819b43cbe6746 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 17:27:27 -0800 
Subject: [PATCH 18/56] Delete stale projects after cleanup interval, via server foreign key cascade --- crates/collab/src/db.rs | 30 +----------------------------- crates/collab/src/rpc.rs | 12 +----------- 2 files changed, 2 insertions(+), 40 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 4cdf1d38ef..7ef7edd8cc 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -123,34 +123,6 @@ impl Database { .await } - pub async fn delete_stale_projects( - &self, - environment: &str, - new_server_id: ServerId, - ) -> Result<()> { - self.transaction(|tx| async move { - let stale_server_epochs = self - .stale_server_ids(environment, new_server_id, &tx) - .await?; - project_collaborator::Entity::delete_many() - .filter( - project_collaborator::Column::ConnectionServerId - .is_in(stale_server_epochs.iter().copied()), - ) - .exec(&*tx) - .await?; - project::Entity::delete_many() - .filter( - project::Column::HostConnectionServerId - .is_in(stale_server_epochs.iter().copied()), - ) - .exec(&*tx) - .await?; - Ok(()) - }) - .await - } - pub async fn stale_room_ids( &self, environment: &str, @@ -235,8 +207,8 @@ impl Database { pub async fn delete_stale_servers( &self, - new_server_id: ServerId, environment: &str, + new_server_id: ServerId, ) -> Result<()> { self.transaction(|tx| async move { server::Entity::delete_many() diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 510f864aa4..d7cff90d61 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -250,16 +250,6 @@ impl Server { let live_kit_client = self.app_state.live_kit_client.clone(); let span = info_span!("start server"); - let span_enter = span.enter(); - - tracing::info!("begin deleting stale projects"); - app_state - .db - .delete_stale_projects(&app_state.config.zed_environment, server_id) - .await?; - tracing::info!("finish deleting stale projects"); - - drop(span_enter); self.executor.spawn_detached( async move { tracing::info!("waiting for cleanup timeout"); @@ -355,7 +345,7 @@ impl Server { app_state .db - .delete_stale_servers(server_id, &app_state.config.zed_environment) + .delete_stale_servers(&app_state.config.zed_environment, server_id) .await .trace_err(); } From ec6f2a3ad47763c5e492e15162c232ce525ad14c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 17:32:42 -0800 Subject: [PATCH 19/56] :lipstick: Reorder private Project method --- crates/project/src/project.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 97a1f1c537..5a2a56f8ae 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1111,20 +1111,6 @@ impl Project { Ok(()) } - pub fn worktree_metadata_protos(&self, cx: &AppContext) -> Vec { - self.worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - proto::WorktreeMetadata { - id: worktree.id().to_proto(), - root_name: worktree.root_name().into(), - visible: worktree.is_visible(), - abs_path: worktree.abs_path().to_string_lossy().into(), - } - }) - .collect() - } - pub fn unshare(&mut self, cx: &mut ModelContext) -> Result<()> { if self.is_remote() { return Err(anyhow!("attempted to unshare a remote project")); @@ -5626,6 +5612,20 @@ impl Project { }) } + pub fn worktree_metadata_protos(&self, cx: &AppContext) -> Vec { + self.worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + proto::WorktreeMetadata { + id: worktree.id().to_proto(), + root_name: 
worktree.root_name().into(), + visible: worktree.is_visible(), + abs_path: worktree.abs_path().to_string_lossy().into(), + } + }) + .collect() + } + fn set_worktrees_from_proto( &mut self, worktrees: Vec, From 15f666a50acc5cab4ac8468d3991cbc021458c51 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 20 Dec 2022 18:03:33 -0800 Subject: [PATCH 20/56] Refresh project collaborator connection id for rejoined projects --- crates/collab/src/db.rs | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 7ef7edd8cc..8b275bb680 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1428,10 +1428,8 @@ impl Database { for rejoined_project in &rejoin_room.rejoined_projects { let project_id = ProjectId::from_proto(rejoined_project.id); let Some(project) = project::Entity::find_by_id(project_id) - .one(&*tx) - .await? else { - continue - }; + .one(&*tx) + .await? else { continue }; let mut worktrees = Vec::new(); let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; @@ -1504,7 +1502,25 @@ impl Database { let mut collaborators = project .find_related(project_collaborator::Entity) .all(&*tx) - .await? + .await?; + let self_collaborator = if let Some(self_collaborator_ix) = collaborators + .iter() + .position(|collaborator| collaborator.user_id == user_id) + { + collaborators.swap_remove(self_collaborator_ix) + } else { + continue; + }; + let old_connection_id = self_collaborator.connection(); + project_collaborator::Entity::update(project_collaborator::ActiveModel { + connection_id: ActiveValue::set(connection.id as i32), + connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), + ..self_collaborator.into_active_model() + }) + .exec(&*tx) + .await?; + + let collaborators = collaborators .into_iter() .map(|collaborator| ProjectCollaborator { connection_id: collaborator.connection(), @@ -1514,16 +1530,6 @@ impl Database { }) .collect::>(); - let old_connection_id = if let Some(self_collaborator_ix) = collaborators - .iter() - .position(|collaborator| collaborator.user_id == user_id) - { - let self_collaborator = collaborators.swap_remove(self_collaborator_ix); - self_collaborator.connection_id - } else { - continue; - }; - rejoined_projects.push(RejoinedProject { id: project_id, old_connection_id, From 59d7f06c57cce1986b922ee9b718ae4c5b2a82ea Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 21 Dec 2022 11:09:27 +0100 Subject: [PATCH 21/56] Handle `proto::UpdateProjectCollaborator` message in `Project` --- crates/project/src/project.rs | 40 ++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 5a2a56f8ae..8dd4584ca2 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -182,6 +182,10 @@ pub enum Event { }, RemoteIdChanged(Option), DisconnectedFromHost, + CollaboratorUpdated { + old_peer_id: proto::PeerId, + new_peer_id: proto::PeerId, + }, CollaboratorLeft(proto::PeerId), } @@ -368,11 +372,12 @@ impl FormatTrigger { impl Project { pub fn init(client: &Arc) { client.add_model_message_handler(Self::handle_add_collaborator); + client.add_model_message_handler(Self::handle_update_collaborator); + client.add_model_message_handler(Self::handle_remove_collaborator); client.add_model_message_handler(Self::handle_buffer_reloaded); client.add_model_message_handler(Self::handle_buffer_saved); 
client.add_model_message_handler(Self::handle_start_language_server); client.add_model_message_handler(Self::handle_update_language_server); - client.add_model_message_handler(Self::handle_remove_collaborator); client.add_model_message_handler(Self::handle_update_project); client.add_model_message_handler(Self::handle_unshare_project); client.add_model_message_handler(Self::handle_create_buffer_for_peer); @@ -4620,6 +4625,39 @@ impl Project { Ok(()) } + async fn handle_update_collaborator( + this: ModelHandle, + envelope: TypedEnvelope, + _: Arc, + mut cx: AsyncAppContext, + ) -> Result<()> { + let old_peer_id = envelope + .payload + .old_peer_id + .ok_or_else(|| anyhow!("missing old peer id"))?; + let new_peer_id = envelope + .payload + .new_peer_id + .ok_or_else(|| anyhow!("missing new peer id"))?; + this.update(&mut cx, |this, cx| { + let collaborator = this + .collaborators + .remove(&old_peer_id) + .ok_or_else(|| anyhow!("received UpdateProjectCollaborator for unknown peer"))?; + this.collaborators.insert(new_peer_id, collaborator); + if let Some(buffers) = this.shared_buffers.remove(&old_peer_id) { + this.shared_buffers.insert(new_peer_id, buffers); + } + + cx.emit(Event::CollaboratorUpdated { + old_peer_id, + new_peer_id, + }); + cx.notify(); + Ok(()) + }) + } + async fn handle_remove_collaborator( this: ModelHandle, envelope: TypedEnvelope, From ecd80c553c70d686ea59192ea5a570288a714030 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 21 Dec 2022 11:47:01 +0100 Subject: [PATCH 22/56] Verify removing worktrees while host is offline --- crates/collab/src/integration_tests.rs | 85 ++++++++++++++++++-------- crates/project/src/project.rs | 1 + 2 files changed, 59 insertions(+), 27 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index e8198aea6b..f17ad95086 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1328,21 +1328,24 @@ async fn test_project_reconnect( "/root-1", json!({ "dir1": { - "a.txt": "a-contents", - "b.txt": "b-contents", + "a.txt": "a", + "b.txt": "b", "subdir1": { - "c.txt": "c-contents", - "d.txt": "d-contents", - "e.txt": "e-contents", + "c.txt": "c", + "d.txt": "d", + "e.txt": "e", } }, "dir2": { - "x.txt": "x-contents", - "y.txt": "y-contents", - "z.txt": "z-contents", + "v.txt": "v", }, "dir3": { - "w.txt": "w-contents", + "w.txt": "w", + "x.txt": "x", + "y.txt": "y", + }, + "dir4": { + "z.txt": "z", }, }), ) @@ -1352,7 +1355,7 @@ async fn test_project_reconnect( .insert_tree( "/root-2", json!({ - "2.txt": "2-contents", + "2.txt": "2", }), ) .await; @@ -1361,7 +1364,7 @@ async fn test_project_reconnect( .insert_tree( "/root-3", json!({ - "3.txt": "3-contents", + "3.txt": "3", }), ) .await; @@ -1394,6 +1397,23 @@ async fn test_project_reconnect( assert!(worktree.as_local().unwrap().is_shared()); worktree.id() }); + let (worktree_a2, _) = project_a1 + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1/dir2", true, cx) + }) + .await + .unwrap(); + worktree_a2 + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| { + assert!(tree.as_local().unwrap().is_shared()); + tree.id() + }); + deterministic.run_until_parked(); + project_b1.read_with(cx_b, |project, cx| { + assert!(project.worktree_for_id(worktree2_id, cx).is_some()) + }); // Drop client A's connection. 
server.forbid_connections(); @@ -1436,17 +1456,22 @@ async fn test_project_reconnect( .await .unwrap(); - // While disconnected, add a worktree to client A's project. - let (worktree_a2, _) = project_a1 + // While disconnected, add and remove worktrees from client A's project. + project_a1 + .update(cx_a, |project, cx| { + project.remove_worktree(worktree2_id, cx) + }) + .await; + let (worktree_a3, _) = project_a1 .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1/dir2", true, cx) + p.find_or_create_local_worktree("/root-1/dir3", true, cx) }) .await .unwrap(); - worktree_a2 + worktree_a3 .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| { + let worktree3_id = worktree_a3.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() }); @@ -1455,6 +1480,11 @@ async fn test_project_reconnect( // While disconnected, close project 2 cx_a.update(|_| drop(project_a2)); + // While disconnected, mutate a buffer on both the host and the guest. + buffer_a1.update(cx_a, |buf, cx| buf.edit([(0..0, "X")], None, cx)); + buffer_b1.update(cx_b, |buf, cx| buf.edit([(1..1, "Y")], None, cx)); + deterministic.run_until_parked(); + // Client A reconnects. Their project is re-shared, and client B re-joins it. server.allow_connections(); client_a @@ -1486,13 +1516,13 @@ async fn test_project_reconnect( ] ); assert_eq!( - worktree_a2 + worktree_a3 .read(cx) .snapshot() .paths() .map(|p| p.to_str().unwrap()) .collect::>(), - vec!["x.txt", "y.txt", "z.txt"] + vec!["w.txt", "x.txt", "y.txt"] ); }); project_b1.read_with(cx_b, |project, cx| { @@ -1520,16 +1550,17 @@ async fn test_project_reconnect( "subdir2/i.txt" ] ); + assert!(project.worktree_for_id(worktree2_id, cx).is_none()); assert_eq!( project - .worktree_for_id(worktree2_id, cx) + .worktree_for_id(worktree3_id, cx) .unwrap() .read(cx) .snapshot() .paths() .map(|p| p.to_str().unwrap()) .collect::>(), - vec!["x.txt", "y.txt", "z.txt"] + vec!["w.txt", "x.txt", "y.txt"] ); }); project_b2.read_with(cx_b, |project, _| assert!(project.is_read_only())); @@ -1552,22 +1583,22 @@ async fn test_project_reconnect( .unwrap(); // While client B is disconnected, add and remove worktrees from client A's project. 
- let (worktree_a3, _) = project_a1 + let (worktree_a4, _) = project_a1 .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1/dir3", true, cx) + p.find_or_create_local_worktree("/root-1/dir4", true, cx) }) .await .unwrap(); - worktree_a3 + worktree_a4 .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let worktree3_id = worktree_a3.read_with(cx_a, |tree, _| { + let worktree4_id = worktree_a4.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() }); project_a1 .update(cx_a, |project, cx| { - project.remove_worktree(worktree2_id, cx) + project.remove_worktree(worktree3_id, cx) }) .await; deterministic.run_until_parked(); @@ -1610,14 +1641,14 @@ async fn test_project_reconnect( assert!(project.worktree_for_id(worktree2_id, cx).is_none()); assert_eq!( project - .worktree_for_id(worktree3_id, cx) + .worktree_for_id(worktree4_id, cx) .unwrap() .read(cx) .snapshot() .paths() .map(|p| p.to_str().unwrap()) .collect::>(), - vec!["w.txt"] + vec!["z.txt"] ); }); project_b3.read_with(cx_b, |project, _| assert!(project.is_read_only())); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 8dd4584ca2..17a38e9cfb 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4645,6 +4645,7 @@ impl Project { .remove(&old_peer_id) .ok_or_else(|| anyhow!("received UpdateProjectCollaborator for unknown peer"))?; this.collaborators.insert(new_peer_id, collaborator); + if let Some(buffers) = this.shared_buffers.remove(&old_peer_id) { this.shared_buffers.insert(new_peer_id, buffers); } From b0336cd27e9d69774e956a02dad0f7097b646290 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 21 Dec 2022 11:56:15 +0100 Subject: [PATCH 23/56] Add failing test for buffer synchronization after disconnecting --- crates/collab/src/integration_tests.rs | 30 ++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index f17ad95086..2014b5172b 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1415,6 +1415,15 @@ async fn test_project_reconnect( assert!(project.worktree_for_id(worktree2_id, cx).is_some()) }); + let buffer_a1 = project_a1 + .update(cx_a, |p, cx| p.open_buffer((worktree1_id, "a.txt"), cx)) + .await + .unwrap(); + let buffer_b1 = project_b1 + .update(cx_b, |p, cx| p.open_buffer((worktree1_id, "a.txt"), cx)) + .await + .unwrap(); + // Drop client A's connection. server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); @@ -1431,7 +1440,7 @@ async fn test_project_reconnect( assert!(tree.as_local().unwrap().is_shared()) }); - // While disconnected, add and remove files from client A's project. + // While client A is disconnected, add and remove files from client A's project. client_a .fs .insert_tree( @@ -1456,7 +1465,7 @@ async fn test_project_reconnect( .await .unwrap(); - // While disconnected, add and remove worktrees from client A's project. + // While client A is disconnected, add and remove worktrees from client A's project. project_a1 .update(cx_a, |project, cx| { project.remove_worktree(worktree2_id, cx) @@ -1477,12 +1486,12 @@ async fn test_project_reconnect( }); deterministic.run_until_parked(); - // While disconnected, close project 2 + // While client A is disconnected, close project 2 cx_a.update(|_| drop(project_a2)); - // While disconnected, mutate a buffer on both the host and the guest. 
- buffer_a1.update(cx_a, |buf, cx| buf.edit([(0..0, "X")], None, cx)); - buffer_b1.update(cx_b, |buf, cx| buf.edit([(1..1, "Y")], None, cx)); + // While client A is disconnected, mutate a buffer on both the host and the guest. + buffer_a1.update(cx_a, |buf, cx| buf.edit([(0..0, "W")], None, cx)); + buffer_b1.update(cx_b, |buf, cx| buf.edit([(1..1, "Z")], None, cx)); deterministic.run_until_parked(); // Client A reconnects. Their project is re-shared, and client B re-joins it. @@ -1565,6 +1574,8 @@ async fn test_project_reconnect( }); project_b2.read_with(cx_b, |project, _| assert!(project.is_read_only())); project_b3.read_with(cx_b, |project, _| assert!(!project.is_read_only())); + buffer_a1.read_with(cx_a, |buffer, _| assert_eq!(buffer.text(), "WaZ")); + buffer_b1.read_with(cx_b, |buffer, _| assert_eq!(buffer.text(), "WaZ")); // Drop client B's connection. server.forbid_connections(); @@ -1603,6 +1614,11 @@ async fn test_project_reconnect( .await; deterministic.run_until_parked(); + // While client B is disconnected, mutate a buffer on both the host and the guest. + buffer_a1.update(cx_a, |buf, cx| buf.edit([(1..1, "X")], None, cx)); + buffer_b1.update(cx_b, |buf, cx| buf.edit([(2..2, "Y")], None, cx)); + deterministic.run_until_parked(); + // While disconnected, close project 3 cx_a.update(|_| drop(project_a3)); @@ -1652,6 +1668,8 @@ async fn test_project_reconnect( ); }); project_b3.read_with(cx_b, |project, _| assert!(project.is_read_only())); + buffer_a1.read_with(cx_a, |buffer, _| assert_eq!(buffer.text(), "WXaYZ")); + buffer_b1.read_with(cx_b, |buffer, _| assert_eq!(buffer.text(), "WXaYZ")); } #[gpui::test(iterations = 10)] From b5fb8e6b8b325503f1c2c10e798d26e5d0bba6a5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 21 Dec 2022 13:10:07 +0100 Subject: [PATCH 24/56] Remove unused `JoinProjectError` --- crates/project/src/project.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 17a38e9cfb..73f11f09b0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -62,7 +62,6 @@ use std::{ time::Instant, }; use terminal::{Terminal, TerminalBuilder}; -use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; pub use fs::*; @@ -123,18 +122,6 @@ pub struct Project { _maintain_buffer_languages: Task<()>, } -#[derive(Error, Debug)] -pub enum JoinProjectError { - #[error("host declined join request")] - HostDeclined, - #[error("host closed the project")] - HostClosedProject, - #[error("host went offline")] - HostWentOffline, - #[error("{0}")] - Other(#[from] anyhow::Error), -} - enum OpenBuffer { Strong(ModelHandle), Weak(WeakModelHandle), @@ -457,7 +444,7 @@ impl Project { languages: Arc, fs: Arc, mut cx: AsyncAppContext, - ) -> Result, JoinProjectError> { + ) -> Result> { client.authenticate_and_connect(true, &cx).await?; let subscription = client.subscribe_to_entity(remote_id); From 47348542efeab4ec4d041da1bb41dae00bc94073 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 21 Dec 2022 14:20:56 +0100 Subject: [PATCH 25/56] Synchronize buffers when either the host or a guest reconnects --- crates/collab/src/rpc.rs | 1 + crates/editor/src/multi_buffer.rs | 2 +- crates/language/src/buffer.rs | 8 +- crates/language/src/buffer_tests.rs | 10 +- crates/project/src/project.rs | 176 +++++++++++++++++++++++++--- crates/rpc/proto/zed.proto | 16 +++ crates/rpc/src/proto.rs | 4 + 7 files changed, 194 insertions(+), 23 deletions(-) diff 
--git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d7cff90d61..9014ef7f40 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -216,6 +216,7 @@ impl Server { .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) diff --git a/crates/editor/src/multi_buffer.rs b/crates/editor/src/multi_buffer.rs index 0f6e357ddd..0a55fc1f4e 100644 --- a/crates/editor/src/multi_buffer.rs +++ b/crates/editor/src/multi_buffer.rs @@ -3651,7 +3651,7 @@ mod tests { let state = host_buffer.read(cx).to_proto(); let ops = cx .background() - .block(host_buffer.read(cx).serialize_ops(cx)); + .block(host_buffer.read(cx).serialize_ops(None, cx)); let mut buffer = Buffer::from_proto(1, state, None).unwrap(); buffer .apply_ops( diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index a78bb4af79..41bc2a8bab 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -398,7 +398,11 @@ impl Buffer { } } - pub fn serialize_ops(&self, cx: &AppContext) -> Task> { + pub fn serialize_ops( + &self, + since: Option, + cx: &AppContext, + ) -> Task> { let mut operations = Vec::new(); operations.extend(self.deferred_ops.iter().map(proto::serialize_operation)); operations.extend(self.remote_selections.iter().map(|(_, set)| { @@ -422,9 +426,11 @@ impl Buffer { let text_operations = self.text.operations().clone(); cx.background().spawn(async move { + let since = since.unwrap_or_default(); operations.extend( text_operations .iter() + .filter(|(_, op)| !since.observed(op.local_timestamp())) .map(|(_, op)| proto::serialize_operation(&Operation::Buffer(op.clone()))), ); operations.sort_unstable_by_key(proto::lamport_timestamp_for_operation); diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index 5f2fdf6e8e..e0b7d080cb 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -1275,7 +1275,9 @@ fn test_serialization(cx: &mut gpui::MutableAppContext) { assert_eq!(buffer1.read(cx).text(), "abcDF"); let state = buffer1.read(cx).to_proto(); - let ops = cx.background().block(buffer1.read(cx).serialize_ops(cx)); + let ops = cx + .background() + .block(buffer1.read(cx).serialize_ops(None, cx)); let buffer2 = cx.add_model(|cx| { let mut buffer = Buffer::from_proto(1, state, None).unwrap(); buffer @@ -1316,7 +1318,7 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) { let state = base_buffer.read(cx).to_proto(); let ops = cx .background() - .block(base_buffer.read(cx).serialize_ops(cx)); + .block(base_buffer.read(cx).serialize_ops(None, cx)); let mut buffer = Buffer::from_proto(i as ReplicaId, state, None).unwrap(); buffer .apply_ops( @@ -1413,7 +1415,9 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) { } 50..=59 if replica_ids.len() < max_peers => { let old_buffer_state = buffer.read(cx).to_proto(); - let old_buffer_ops = cx.background().block(buffer.read(cx).serialize_ops(cx)); + let old_buffer_ops = cx + .background() + .block(buffer.read(cx).serialize_ops(None, cx)); let new_replica_id = (0..=replica_ids.len() as ReplicaId) .filter(|replica_id| *replica_id != buffer.read(cx).replica_id()) .choose(&mut rng) diff --git a/crates/project/src/project.rs 
b/crates/project/src/project.rs index 73f11f09b0..7a6bce3b9d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -379,6 +379,7 @@ impl Project { client.add_model_request_handler(Self::handle_apply_additional_edits_for_completion); client.add_model_request_handler(Self::handle_apply_code_action); client.add_model_request_handler(Self::handle_reload_buffers); + client.add_model_request_handler(Self::handle_synchronize_buffers); client.add_model_request_handler(Self::handle_format_buffers); client.add_model_request_handler(Self::handle_get_code_actions); client.add_model_request_handler(Self::handle_get_completions); @@ -1082,7 +1083,6 @@ impl Project { ) -> Result<()> { self.set_worktrees_from_proto(message.worktrees, cx)?; self.set_collaborators_from_proto(message.collaborators, cx)?; - self.language_server_statuses = message .language_servers .into_iter() @@ -1098,6 +1098,7 @@ impl Project { ) }) .collect(); + self.synchronize_remote_buffers(cx).detach_and_log_err(cx); cx.notify(); Ok(()) @@ -4631,12 +4632,17 @@ impl Project { .collaborators .remove(&old_peer_id) .ok_or_else(|| anyhow!("received UpdateProjectCollaborator for unknown peer"))?; + let is_host = collaborator.replica_id == 0; this.collaborators.insert(new_peer_id, collaborator); if let Some(buffers) = this.shared_buffers.remove(&old_peer_id) { this.shared_buffers.insert(new_peer_id, buffers); } + if is_host { + this.synchronize_remote_buffers(cx).detach_and_log_err(cx); + } + cx.emit(Event::CollaboratorUpdated { old_peer_id, new_peer_id, @@ -5131,6 +5137,55 @@ impl Project { }) } + async fn handle_synchronize_buffers( + this: ModelHandle, + envelope: TypedEnvelope, + _: Arc, + cx: AsyncAppContext, + ) -> Result { + let project_id = envelope.payload.project_id; + let mut response = proto::SynchronizeBuffersResponse { + buffers: Default::default(), + }; + + this.read_with(&cx, |this, cx| { + for buffer in envelope.payload.buffers { + let buffer_id = buffer.id; + let remote_version = language::proto::deserialize_version(buffer.version); + if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { + let buffer = buffer.read(cx); + response.buffers.push(proto::BufferVersion { + id: buffer_id, + version: language::proto::serialize_version(&buffer.version), + }); + + let operations = buffer.serialize_ops(Some(remote_version), cx); + let client = this.client.clone(); + cx.background() + .spawn( + async move { + let operations = operations.await; + for chunk in split_operations(operations) { + client + .request(proto::UpdateBuffer { + project_id, + buffer_id, + operations: chunk, + }) + .await?; + } + anyhow::Ok(()) + } + .log_err(), + ) + .detach(); + } + } + }); + + Ok(response) + } + async fn handle_format_buffers( this: ModelHandle, envelope: TypedEnvelope, @@ -5557,12 +5612,12 @@ impl Project { if shared_buffers.insert(buffer_id) { let buffer = buffer.read(cx); let state = buffer.to_proto(); - let operations = buffer.serialize_ops(cx); + let operations = buffer.serialize_ops(None, cx); let client = self.client.clone(); cx.background() .spawn( async move { - let mut operations = operations.await; + let operations = operations.await; client.send(proto::CreateBufferForPeer { project_id, @@ -5570,17 +5625,9 @@ impl Project { variant: Some(proto::create_buffer_for_peer::Variant::State(state)), })?; - loop { - #[cfg(any(test, feature = "test-support"))] - const CHUNK_SIZE: usize = 5; - - #[cfg(not(any(test, feature = "test-support")))] - const CHUNK_SIZE: usize = 100; - - let chunk = operations - 
.drain(..cmp::min(CHUNK_SIZE, operations.len())) - .collect(); - let is_last = operations.is_empty(); + let mut chunks = split_operations(operations).peekable(); + while let Some(chunk) = chunks.next() { + let is_last = chunks.peek().is_none(); client.send(proto::CreateBufferForPeer { project_id, peer_id: Some(peer_id), @@ -5592,10 +5639,6 @@ impl Project { }, )), })?; - - if is_last { - break; - } } Ok(()) @@ -5638,6 +5681,81 @@ impl Project { }) } + fn synchronize_remote_buffers(&mut self, cx: &mut ModelContext) -> Task> { + let project_id = match self.client_state.as_ref() { + Some(ProjectClientState::Remote { + sharing_has_stopped, + remote_id, + .. + }) => { + if *sharing_has_stopped { + return Task::ready(Err(anyhow!( + "can't synchronize remote buffers on a readonly project" + ))); + } else { + *remote_id + } + } + Some(ProjectClientState::Local { .. }) | None => { + return Task::ready(Err(anyhow!( + "can't synchronize remote buffers on a local project" + ))) + } + }; + + let client = self.client.clone(); + cx.spawn(|this, cx| async move { + let buffers = this.read_with(&cx, |this, cx| { + this.opened_buffers + .iter() + .filter_map(|(id, buffer)| { + let buffer = buffer.upgrade(cx)?; + Some(proto::BufferVersion { + id: *id, + version: language::proto::serialize_version(&buffer.read(cx).version), + }) + }) + .collect() + }); + let response = client + .request(proto::SynchronizeBuffers { + project_id, + buffers, + }) + .await?; + + let send_updates_for_buffers = response.buffers.into_iter().map(|buffer| { + let client = client.clone(); + let buffer_id = buffer.id; + let remote_version = language::proto::deserialize_version(buffer.version); + this.read_with(&cx, |this, cx| { + if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { + let operations = buffer.read(cx).serialize_ops(Some(remote_version), cx); + cx.background().spawn(async move { + let operations = operations.await; + for chunk in split_operations(operations) { + client + .request(proto::UpdateBuffer { + project_id, + buffer_id, + operations: chunk, + }) + .await?; + } + anyhow::Ok(()) + }) + } else { + Task::ready(Ok(())) + } + }) + }); + futures::future::join_all(send_updates_for_buffers) + .await + .into_iter() + .collect() + }) + } + pub fn worktree_metadata_protos(&self, cx: &AppContext) -> Vec { self.worktrees(cx) .map(|worktree| { @@ -6126,6 +6244,28 @@ impl> From<(WorktreeId, P)> for ProjectPath { } } +fn split_operations( + mut operations: Vec, +) -> impl Iterator> { + #[cfg(any(test, feature = "test-support"))] + const CHUNK_SIZE: usize = 5; + + #[cfg(not(any(test, feature = "test-support")))] + const CHUNK_SIZE: usize = 100; + + std::iter::from_fn(move || { + if operations.is_empty() { + return None; + } + + Some( + operations + .drain(..cmp::min(CHUNK_SIZE, operations.len())) + .collect(), + ) + }) +} + fn serialize_symbol(symbol: &Symbol) -> proto::Symbol { proto::Symbol { language_server_name: symbol.language_server_name.0.to_string(), diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index f3b8a41a1d..740ac1467c 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -79,6 +79,8 @@ message Envelope { BufferReloaded buffer_reloaded = 61; ReloadBuffers reload_buffers = 62; ReloadBuffersResponse reload_buffers_response = 63; + SynchronizeBuffers synchronize_buffers = 200; + SynchronizeBuffersResponse synchronize_buffers_response = 201; FormatBuffers format_buffers = 64; FormatBuffersResponse format_buffers_response = 65; GetCompletions get_completions = 66; @@ -538,6 
+540,20 @@ message ReloadBuffersResponse { ProjectTransaction transaction = 1; } +message SynchronizeBuffers { + uint64 project_id = 1; + repeated BufferVersion buffers = 2; +} + +message SynchronizeBuffersResponse { + repeated BufferVersion buffers = 1; +} + +message BufferVersion { + uint64 id = 1; + repeated VectorClockEntry version = 2; +} + enum FormatTrigger { Save = 0; Manual = 1; diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index b2017b839a..14541e4b66 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -207,6 +207,8 @@ messages!( (ShareProjectResponse, Foreground), (ShowContacts, Foreground), (StartLanguageServer, Foreground), + (SynchronizeBuffers, Foreground), + (SynchronizeBuffersResponse, Foreground), (Test, Foreground), (Unfollow, Foreground), (UnshareProject, Foreground), @@ -274,6 +276,7 @@ request_messages!( (SearchProject, SearchProjectResponse), (SendChannelMessage, SendChannelMessageResponse), (ShareProject, ShareProjectResponse), + (SynchronizeBuffers, SynchronizeBuffersResponse), (Test, Test), (UpdateBuffer, Ack), (UpdateParticipantLocation, Ack), @@ -315,6 +318,7 @@ entity_messages!( SaveBuffer, SearchProject, StartLanguageServer, + SynchronizeBuffers, Unfollow, UnshareProject, UpdateBuffer, From d8ccdff9fc8fd9d2e45da263bbe09f5916854d5b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 21 Dec 2022 11:26:24 -0800 Subject: [PATCH 26/56] Move randomized integration test into its own file --- crates/collab/src/lib.rs | 5 +- crates/collab/src/tests.rs | 466 ++++++ .../src/{ => tests}/integration_tests.rs | 1360 +---------------- .../src/tests/randomized_integration_tests.rs | 919 +++++++++++ 4 files changed, 1402 insertions(+), 1348 deletions(-) create mode 100644 crates/collab/src/tests.rs rename crates/collab/src/{ => tests}/integration_tests.rs (79%) create mode 100644 crates/collab/src/tests/randomized_integration_tests.rs diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 27f49f5b1e..1a83193bdf 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -3,10 +3,11 @@ pub mod auth; pub mod db; pub mod env; pub mod executor; -#[cfg(test)] -mod integration_tests; pub mod rpc; +#[cfg(test)] +mod tests; + use axum::{http::StatusCode, response::IntoResponse}; use db::Database; use serde::Deserialize; diff --git a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs new file mode 100644 index 0000000000..8dc29f3d60 --- /dev/null +++ b/crates/collab/src/tests.rs @@ -0,0 +1,466 @@ +use crate::{ + db::{NewUserParams, TestDb, UserId}, + executor::Executor, + rpc::{Server, CLEANUP_TIMEOUT}, + AppState, +}; +use anyhow::anyhow; +use call::ActiveCall; +use client::{ + self, proto::PeerId, test::FakeHttpClient, Client, Connection, Credentials, + EstablishConnectionError, UserStore, +}; +use collections::{HashMap, HashSet}; +use fs::{FakeFs, HomeDir}; +use futures::{channel::oneshot, StreamExt as _}; +use gpui::{ + executor::Deterministic, test::EmptyView, ModelHandle, Task, TestAppContext, ViewHandle, +}; +use language::LanguageRegistry; +use parking_lot::Mutex; +use project::{Project, WorktreeId}; +use settings::Settings; +use std::{ + env, + ops::Deref, + path::{Path, PathBuf}, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, + Arc, + }, +}; +use theme::ThemeRegistry; +use workspace::Workspace; + +mod integration_tests; +mod randomized_integration_tests; + +struct TestServer { + app_state: Arc, + server: Arc, + connection_killers: Arc>>>, + forbid_connections: Arc, + _test_db: TestDb, + 
test_live_kit_server: Arc, +} + +impl TestServer { + async fn start(deterministic: &Arc) -> Self { + static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0); + + let use_postgres = env::var("USE_POSTGRES").ok(); + let use_postgres = use_postgres.as_deref(); + let test_db = if use_postgres == Some("true") || use_postgres == Some("1") { + TestDb::postgres(deterministic.build_background()) + } else { + TestDb::sqlite(deterministic.build_background()) + }; + let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst); + let live_kit_server = live_kit_client::TestServer::create( + format!("http://livekit.{}.test", live_kit_server_id), + format!("devkey-{}", live_kit_server_id), + format!("secret-{}", live_kit_server_id), + deterministic.build_background(), + ) + .unwrap(); + let app_state = Self::build_app_state(&test_db, &live_kit_server).await; + let epoch = app_state + .db + .create_server(&app_state.config.zed_environment) + .await + .unwrap(); + let server = Server::new( + epoch, + app_state.clone(), + Executor::Deterministic(deterministic.build_background()), + ); + server.start().await.unwrap(); + // Advance clock to ensure the server's cleanup task is finished. + deterministic.advance_clock(CLEANUP_TIMEOUT); + Self { + app_state, + server, + connection_killers: Default::default(), + forbid_connections: Default::default(), + _test_db: test_db, + test_live_kit_server: live_kit_server, + } + } + + async fn reset(&self) { + self.app_state.db.reset(); + let epoch = self + .app_state + .db + .create_server(&self.app_state.config.zed_environment) + .await + .unwrap(); + self.server.reset(epoch); + } + + async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { + cx.update(|cx| { + cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); + + let mut settings = Settings::test(cx); + settings.projects_online_by_default = false; + cx.set_global(settings); + }); + + let http = FakeHttpClient::with_404_response(); + let user_id = if let Ok(Some(user)) = self + .app_state + .db + .get_user_by_github_account(name, None) + .await + { + user.id + } else { + self.app_state + .db + .create_user( + &format!("{name}@example.com"), + false, + NewUserParams { + github_login: name.into(), + github_user_id: 0, + invite_count: 0, + }, + ) + .await + .expect("creating user failed") + .user_id + }; + let client_name = name.to_string(); + let mut client = cx.read(|cx| Client::new(http.clone(), cx)); + let server = self.server.clone(); + let db = self.app_state.db.clone(); + let connection_killers = self.connection_killers.clone(); + let forbid_connections = self.forbid_connections.clone(); + + Arc::get_mut(&mut client) + .unwrap() + .set_id(user_id.0 as usize) + .override_authenticate(move |cx| { + cx.spawn(|_| async move { + let access_token = "the-token".to_string(); + Ok(Credentials { + user_id: user_id.0 as u64, + access_token, + }) + }) + }) + .override_establish_connection(move |credentials, cx| { + assert_eq!(credentials.user_id, user_id.0 as u64); + assert_eq!(credentials.access_token, "the-token"); + + let server = server.clone(); + let db = db.clone(); + let connection_killers = connection_killers.clone(); + let forbid_connections = forbid_connections.clone(); + let client_name = client_name.clone(); + cx.spawn(move |cx| async move { + if forbid_connections.load(SeqCst) { + Err(EstablishConnectionError::other(anyhow!( + "server is forbidding connections" + ))) + } else { + let (client_conn, server_conn, killed) = + Connection::in_memory(cx.background()); + 
let (connection_id_tx, connection_id_rx) = oneshot::channel(); + let user = db + .get_user_by_id(user_id) + .await + .expect("retrieving user failed") + .unwrap(); + cx.background() + .spawn(server.handle_connection( + server_conn, + client_name, + user, + Some(connection_id_tx), + Executor::Deterministic(cx.background()), + )) + .detach(); + let connection_id = connection_id_rx.await.unwrap(); + connection_killers + .lock() + .insert(connection_id.into(), killed); + Ok(client_conn) + } + }) + }); + + let fs = FakeFs::new(cx.background()); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); + let app_state = Arc::new(workspace::AppState { + client: client.clone(), + user_store: user_store.clone(), + languages: Arc::new(LanguageRegistry::new(Task::ready(()))), + themes: ThemeRegistry::new((), cx.font_cache()), + fs: fs.clone(), + build_window_options: Default::default, + initialize_workspace: |_, _, _| unimplemented!(), + dock_default_item_factory: |_, _| unimplemented!(), + }); + + Project::init(&client); + cx.update(|cx| { + workspace::init(app_state.clone(), cx); + call::init(client.clone(), user_store.clone(), cx); + }); + + client + .authenticate_and_connect(false, &cx.to_async()) + .await + .unwrap(); + + let client = TestClient { + client, + username: name.to_string(), + local_projects: Default::default(), + remote_projects: Default::default(), + next_root_dir_id: 0, + user_store, + fs, + language_registry: Arc::new(LanguageRegistry::test()), + buffers: Default::default(), + }; + client.wait_for_current_user(cx).await; + client + } + + fn disconnect_client(&self, peer_id: PeerId) { + self.connection_killers + .lock() + .remove(&peer_id) + .unwrap() + .store(true, SeqCst); + } + + fn forbid_connections(&self) { + self.forbid_connections.store(true, SeqCst); + } + + fn allow_connections(&self) { + self.forbid_connections.store(false, SeqCst); + } + + async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) { + for ix in 1..clients.len() { + let (left, right) = clients.split_at_mut(ix); + let (client_a, cx_a) = left.last_mut().unwrap(); + for (client_b, cx_b) in right { + client_a + .user_store + .update(*cx_a, |store, cx| { + store.request_contact(client_b.user_id().unwrap(), cx) + }) + .await + .unwrap(); + cx_a.foreground().run_until_parked(); + client_b + .user_store + .update(*cx_b, |store, cx| { + store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx) + }) + .await + .unwrap(); + } + } + } + + async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) { + self.make_contacts(clients).await; + + let (left, right) = clients.split_at_mut(1); + let (_client_a, cx_a) = &mut left[0]; + let active_call_a = cx_a.read(ActiveCall::global); + + for (client_b, cx_b) in right { + let user_id_b = client_b.current_user_id(*cx_b).to_proto(); + active_call_a + .update(*cx_a, |call, cx| call.invite(user_id_b, None, cx)) + .await + .unwrap(); + + cx_b.foreground().run_until_parked(); + let active_call_b = cx_b.read(ActiveCall::global); + active_call_b + .update(*cx_b, |call, cx| call.accept_incoming(cx)) + .await + .unwrap(); + } + } + + async fn build_app_state( + test_db: &TestDb, + fake_server: &live_kit_client::TestServer, + ) -> Arc { + Arc::new(AppState { + db: test_db.db().clone(), + live_kit_client: Some(Arc::new(fake_server.create_api_client())), + config: Default::default(), + }) + } +} + +impl Deref for TestServer { + type Target = Server; + + fn deref(&self) -> &Self::Target { + &self.server + } +} 
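A minimal sketch of how the harness above is typically driven from a test, assuming only the `TestServer`/`TestClient` APIs introduced in this patch (the test name and usernames are illustrative, not part of the change):

    #[gpui::test]
    async fn test_example_call(
        deterministic: Arc<Deterministic>,
        cx_a: &mut TestAppContext,
        cx_b: &mut TestAppContext,
    ) {
        deterministic.forbid_parking();
        let mut server = TestServer::start(&deterministic).await;
        // Each client gets its own TestAppContext and connects through the in-memory server.
        let client_a = server.create_client(cx_a, "user_a").await;
        let client_b = server.create_client(cx_b, "user_b").await;
        // Make the two users contacts and place them in a shared room so they can collaborate.
        server
            .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
            .await;
    }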
+
+impl Drop for TestServer {
+    fn drop(&mut self) {
+        self.server.teardown();
+        self.test_live_kit_server.teardown().unwrap();
+    }
+}
+
+struct TestClient {
+    client: Arc<Client>,
+    username: String,
+    local_projects: Vec<ModelHandle<Project>>,
+    remote_projects: Vec<ModelHandle<Project>>,
+    next_root_dir_id: usize,
+    pub user_store: ModelHandle<UserStore>,
+    language_registry: Arc<LanguageRegistry>,
+    fs: Arc<FakeFs>,
+    buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
+}
+
+impl Deref for TestClient {
+    type Target = Arc<Client>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.client
+    }
+}
+
+struct ContactsSummary {
+    pub current: Vec<String>,
+    pub outgoing_requests: Vec<String>,
+    pub incoming_requests: Vec<String>,
+}
+
+impl TestClient {
+    pub fn current_user_id(&self, cx: &TestAppContext) -> UserId {
+        UserId::from_proto(
+            self.user_store
+                .read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
+        )
+    }
+
+    async fn wait_for_current_user(&self, cx: &TestAppContext) {
+        let mut authed_user = self
+            .user_store
+            .read_with(cx, |user_store, _| user_store.watch_current_user());
+        while authed_user.next().await.unwrap().is_none() {}
+    }
+
+    async fn clear_contacts(&self, cx: &mut TestAppContext) {
+        self.user_store
+            .update(cx, |store, _| store.clear_contacts())
+            .await;
+    }
+
+    fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
+        self.user_store.read_with(cx, |store, _| ContactsSummary {
+            current: store
+                .contacts()
+                .iter()
+                .map(|contact| contact.user.github_login.clone())
+                .collect(),
+            outgoing_requests: store
+                .outgoing_contact_requests()
+                .iter()
+                .map(|user| user.github_login.clone())
+                .collect(),
+            incoming_requests: store
+                .incoming_contact_requests()
+                .iter()
+                .map(|user| user.github_login.clone())
+                .collect(),
+        })
+    }
+
+    async fn build_local_project(
+        &self,
+        root_path: impl AsRef<Path>,
+        cx: &mut TestAppContext,
+    ) -> (ModelHandle<Project>, WorktreeId) {
+        let project = cx.update(|cx| {
+            Project::local(
+                self.client.clone(),
+                self.user_store.clone(),
+                self.language_registry.clone(),
+                self.fs.clone(),
+                cx,
+            )
+        });
+        let (worktree, _) = project
+            .update(cx, |p, cx| {
+                p.find_or_create_local_worktree(root_path, true, cx)
+            })
+            .await
+            .unwrap();
+        worktree
+            .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete())
+            .await;
+        (project, worktree.read_with(cx, |tree, _| tree.id()))
+    }
+
+    async fn build_remote_project(
+        &self,
+        host_project_id: u64,
+        guest_cx: &mut TestAppContext,
+    ) -> ModelHandle<Project> {
+        let active_call = guest_cx.read(ActiveCall::global);
+        let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
+        room.update(guest_cx, |room, cx| {
+            room.join_project(
+                host_project_id,
+                self.language_registry.clone(),
+                self.fs.clone(),
+                cx,
+            )
+        })
+        .await
+        .unwrap()
+    }
+
+    fn build_workspace(
+        &self,
+        project: &ModelHandle<Project>,
+        cx: &mut TestAppContext,
+    ) -> ViewHandle<Workspace> {
+        let (_, root_view) = cx.add_window(|_| EmptyView);
+        cx.add_view(&root_view, |cx| {
+            Workspace::new(
+                Default::default(),
+                0,
+                project.clone(),
+                |_, _| unimplemented!(),
+                cx,
+            )
+        })
+    }
+
+    fn create_new_root_dir(&mut self) -> PathBuf {
+        format!(
+            "/{}-root-{}",
+            self.username,
+            util::post_inc(&mut self.next_root_dir_id)
+        )
+        .into()
+    }
+}
+
+impl Drop for TestClient {
+    fn drop(&mut self) {
+        self.client.teardown();
+    }
+}
diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs
similarity index 79%
rename from crates/collab/src/integration_tests.rs
rename to crates/collab/src/tests/integration_tests.rs
index 2014b5172b..4a1aaf64d1 100644
--- a/crates/collab/src/integration_tests.rs
+++ b/crates/collab/src/tests/integration_tests.rs @@ -1,51 +1,37 @@ use crate::{ - db::{self, NewUserParams, TestDb, UserId}, - executor::Executor, - rpc::{Server, CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, - AppState, + rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, + tests::{TestClient, TestServer}, }; -use anyhow::anyhow; use call::{room, ActiveCall, ParticipantLocation, Room}; -use client::{ - self, proto::PeerId, test::FakeHttpClient, Client, Connection, Credentials, - EstablishConnectionError, User, UserStore, RECEIVE_TIMEOUT, -}; -use collections::{BTreeMap, HashMap, HashSet}; +use client::{User, RECEIVE_TIMEOUT}; +use collections::HashSet; use editor::{ - self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, ExcerptRange, MultiBuffer, - Redo, Rename, ToOffset, ToggleCodeActions, Undo, + ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, ExcerptRange, MultiBuffer, Redo, + Rename, ToOffset, ToggleCodeActions, Undo, }; -use fs::{FakeFs, Fs as _, HomeDir, LineEnding, RemoveOptions}; -use futures::{channel::oneshot, StreamExt as _}; +use fs::{FakeFs, Fs as _, LineEnding, RemoveOptions}; +use futures::StreamExt as _; use gpui::{ - executor::Deterministic, geometry::vector::vec2f, test::EmptyView, ModelHandle, Task, - TestAppContext, ViewHandle, + executor::Deterministic, geometry::vector::vec2f, test::EmptyView, ModelHandle, TestAppContext, + ViewHandle, }; use language::{ - range_to_lsp, tree_sitter_rust, Anchor, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language, - LanguageConfig, LanguageRegistry, OffsetRangeExt, Point, PointUtf16, Rope, + tree_sitter_rust, Anchor, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language, + LanguageConfig, OffsetRangeExt, Point, Rope, }; use live_kit_client::MacOSDisplay; -use lsp::{self, FakeLanguageServer}; -use parking_lot::Mutex; -use project::{search::SearchQuery, DiagnosticSummary, Project, ProjectPath, WorktreeId}; +use project::{search::SearchQuery, DiagnosticSummary, Project, ProjectPath}; use rand::prelude::*; use serde_json::json; use settings::{Formatter, Settings}; use std::{ cell::{Cell, RefCell}, env, future, mem, - ops::Deref, path::{Path, PathBuf}, rc::Rc, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, - Arc, - }, + sync::Arc, }; -use theme::ThemeRegistry; use unindent::Unindent as _; -use util::post_inc; use workspace::{item::Item, shared_screen::SharedScreen, SplitDirection, ToggleFollow, Workspace}; #[ctor::ctor] @@ -6384,1324 +6370,6 @@ async fn test_peers_simultaneously_following_each_other( }); } -#[gpui::test(iterations = 100)] -async fn test_random_collaboration( - cx: &mut TestAppContext, - deterministic: Arc, - rng: StdRng, -) { - deterministic.forbid_parking(); - let rng = Arc::new(Mutex::new(rng)); - - let max_peers = env::var("MAX_PEERS") - .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) - .unwrap_or(5); - - let max_operations = env::var("OPERATIONS") - .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) - .unwrap_or(10); - - let mut server = TestServer::start(&deterministic).await; - let db = server.app_state.db.clone(); - - let mut available_guests = Vec::new(); - for ix in 0..max_peers { - let username = format!("guest-{}", ix + 1); - let user_id = db - .create_user( - &format!("{username}@example.com"), - false, - NewUserParams { - github_login: username.clone(), - github_user_id: (ix + 1) as i32, - invite_count: 0, - }, - ) - .await - .unwrap() - .user_id; - available_guests.push((user_id, username)); - } - - for (ix, (user_id_a, _)) in 
available_guests.iter().enumerate() { - for (user_id_b, _) in &available_guests[ix + 1..] { - server - .app_state - .db - .send_contact_request(*user_id_a, *user_id_b) - .await - .unwrap(); - server - .app_state - .db - .respond_to_contact_request(*user_id_b, *user_id_a, true) - .await - .unwrap(); - } - } - - let mut clients = Vec::new(); - let mut user_ids = Vec::new(); - let mut op_start_signals = Vec::new(); - let mut next_entity_id = 100000; - - let mut operations = 0; - while operations < max_operations { - let distribution = rng.lock().gen_range(0..100); - match distribution { - 0..=19 if !available_guests.is_empty() => { - let guest_ix = rng.lock().gen_range(0..available_guests.len()); - let (_, guest_username) = available_guests.remove(guest_ix); - log::info!("Adding new connection for {}", guest_username); - next_entity_id += 100000; - let mut guest_cx = TestAppContext::new( - cx.foreground_platform(), - cx.platform(), - deterministic.build_foreground(next_entity_id), - deterministic.build_background(), - cx.font_cache(), - cx.leak_detector(), - next_entity_id, - cx.function_name.clone(), - ); - - let op_start_signal = futures::channel::mpsc::unbounded(); - let guest = server.create_client(&mut guest_cx, &guest_username).await; - user_ids.push(guest.current_user_id(&guest_cx)); - op_start_signals.push(op_start_signal.0); - clients.push(guest_cx.foreground().spawn(guest.simulate( - guest_username.clone(), - op_start_signal.1, - rng.clone(), - guest_cx, - ))); - - log::info!("Added connection for {}", guest_username); - operations += 1; - } - 20..=24 if clients.len() > 1 => { - let guest_ix = rng.lock().gen_range(1..clients.len()); - log::info!( - "Simulating full disconnection of guest {}", - user_ids[guest_ix] - ); - let removed_guest_id = user_ids.remove(guest_ix); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(removed_guest_id) - .collect::>(); - assert_eq!(user_connection_ids.len(), 1); - let removed_peer_id = user_connection_ids[0].into(); - let guest = clients.remove(guest_ix); - op_start_signals.remove(guest_ix); - server.forbid_connections(); - server.disconnect_client(removed_peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - deterministic.start_waiting(); - log::info!("Waiting for guest {} to exit...", removed_guest_id); - let (guest, mut guest_cx) = guest.await; - deterministic.finish_waiting(); - server.allow_connections(); - - for project in &guest.remote_projects { - project.read_with(&guest_cx, |project, _| assert!(project.is_read_only())); - } - for user_id in &user_ids { - let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); - let pool = server.connection_pool.lock(); - for contact in contacts { - if let db::Contact::Accepted { user_id, .. 
} = contact { - if pool.is_user_online(user_id) { - assert_ne!( - user_id, removed_guest_id, - "removed guest is still a contact of another peer" - ); - } - } - } - } - - log::info!("{} removed", guest.username); - available_guests.push((removed_guest_id, guest.username.clone())); - guest_cx.update(|cx| { - cx.clear_globals(); - drop(guest); - }); - - operations += 1; - } - 25..=29 if clients.len() > 1 => { - let guest_ix = rng.lock().gen_range(1..clients.len()); - let user_id = user_ids[guest_ix]; - log::info!("Simulating temporary disconnection of guest {}", user_id); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(user_id) - .collect::>(); - assert_eq!(user_connection_ids.len(), 1); - let peer_id = user_connection_ids[0].into(); - server.disconnect_client(peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - operations += 1; - } - 30..=34 => { - log::info!("Simulating server restart"); - server.reset().await; - deterministic.advance_clock(RECEIVE_TIMEOUT); - server.start().await.unwrap(); - deterministic.advance_clock(CLEANUP_TIMEOUT); - let environment = &server.app_state.config.zed_environment; - let stale_room_ids = server - .app_state - .db - .stale_room_ids(environment, server.id()) - .await - .unwrap(); - assert_eq!(stale_room_ids, vec![]); - } - _ if !op_start_signals.is_empty() => { - while operations < max_operations && rng.lock().gen_bool(0.7) { - op_start_signals - .choose(&mut *rng.lock()) - .unwrap() - .unbounded_send(()) - .unwrap(); - operations += 1; - } - - if rng.lock().gen_bool(0.8) { - deterministic.run_until_parked(); - } - } - _ => {} - } - } - - drop(op_start_signals); - deterministic.start_waiting(); - let clients = futures::future::join_all(clients).await; - deterministic.finish_waiting(); - deterministic.run_until_parked(); - - for (guest_client, guest_cx) in &clients { - for guest_project in &guest_client.remote_projects { - guest_project.read_with(guest_cx, |guest_project, cx| { - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client.local_projects.iter().find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == guest_project.remote_id() - }) - })?; - Some((project, cx)) - }); - - if !guest_project.is_read_only() { - if let Some((host_project, host_cx)) = host_project { - let host_worktree_snapshots = - host_project.read_with(host_cx, |host_project, cx| { - host_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>() - }); - let guest_worktree_snapshots = guest_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>(); - - assert_eq!( - guest_worktree_snapshots.keys().collect::>(), - host_worktree_snapshots.keys().collect::>(), - "{} has different worktrees than the host", - guest_client.username - ); - - for (id, host_snapshot) in &host_worktree_snapshots { - let guest_snapshot = &guest_worktree_snapshots[id]; - assert_eq!( - guest_snapshot.root_name(), - host_snapshot.root_name(), - "{} has different root name than the host for worktree {}", - guest_client.username, - id - ); - assert_eq!( - guest_snapshot.abs_path(), - host_snapshot.abs_path(), - "{} has different abs path than the host for worktree {}", - guest_client.username, - id - ); - assert_eq!( - guest_snapshot.entries(false).collect::>(), - host_snapshot.entries(false).collect::>(), - "{} has 
different snapshot than the host for worktree {}", - guest_client.username, - id - ); - assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); - } - } - } - - guest_project.check_invariants(cx); - }); - } - - for (guest_project, guest_buffers) in &guest_client.buffers { - let project_id = if guest_project.read_with(guest_cx, |project, _| { - project.is_local() || project.is_read_only() - }) { - continue; - } else { - guest_project - .read_with(guest_cx, |project, _| project.remote_id()) - .unwrap() - }; - - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client.local_projects.iter().find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == Some(project_id) - }) - })?; - Some((project, cx)) - }); - - let (host_project, host_cx) = if let Some((host_project, host_cx)) = host_project { - (host_project, host_cx) - } else { - continue; - }; - - for guest_buffer in guest_buffers { - let buffer_id = guest_buffer.read_with(guest_cx, |buffer, _| buffer.remote_id()); - let host_buffer = host_project.read_with(host_cx, |project, cx| { - project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { - panic!( - "host does not have buffer for guest:{}, peer:{:?}, id:{}", - guest_client.username, - guest_client.peer_id(), - buffer_id - ) - }) - }); - let path = host_buffer - .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); - - assert_eq!( - guest_buffer.read_with(guest_cx, |buffer, _| buffer.deferred_ops_len()), - 0, - "{}, buffer {}, path {:?} has deferred operations", - guest_client.username, - buffer_id, - path, - ); - assert_eq!( - guest_buffer.read_with(guest_cx, |buffer, _| buffer.text()), - host_buffer.read_with(host_cx, |buffer, _| buffer.text()), - "{}, buffer {}, path {:?}, differs from the host's buffer", - guest_client.username, - buffer_id, - path - ); - } - } - } - - for (client, mut cx) in clients { - cx.update(|cx| { - cx.clear_globals(); - drop(client); - }); - } -} - -struct TestServer { - app_state: Arc, - server: Arc, - connection_killers: Arc>>>, - forbid_connections: Arc, - _test_db: TestDb, - test_live_kit_server: Arc, -} - -impl TestServer { - async fn start(deterministic: &Arc) -> Self { - static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0); - - let use_postgres = env::var("USE_POSTGRES").ok(); - let use_postgres = use_postgres.as_deref(); - let test_db = if use_postgres == Some("true") || use_postgres == Some("1") { - TestDb::postgres(deterministic.build_background()) - } else { - TestDb::sqlite(deterministic.build_background()) - }; - let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst); - let live_kit_server = live_kit_client::TestServer::create( - format!("http://livekit.{}.test", live_kit_server_id), - format!("devkey-{}", live_kit_server_id), - format!("secret-{}", live_kit_server_id), - deterministic.build_background(), - ) - .unwrap(); - let app_state = Self::build_app_state(&test_db, &live_kit_server).await; - let epoch = app_state - .db - .create_server(&app_state.config.zed_environment) - .await - .unwrap(); - let server = Server::new( - epoch, - app_state.clone(), - Executor::Deterministic(deterministic.build_background()), - ); - server.start().await.unwrap(); - // Advance clock to ensure the server's cleanup task is finished. 
- deterministic.advance_clock(CLEANUP_TIMEOUT); - Self { - app_state, - server, - connection_killers: Default::default(), - forbid_connections: Default::default(), - _test_db: test_db, - test_live_kit_server: live_kit_server, - } - } - - async fn reset(&self) { - self.app_state.db.reset(); - let epoch = self - .app_state - .db - .create_server(&self.app_state.config.zed_environment) - .await - .unwrap(); - self.server.reset(epoch); - } - - async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { - cx.update(|cx| { - cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); - - let mut settings = Settings::test(cx); - settings.projects_online_by_default = false; - cx.set_global(settings); - }); - - let http = FakeHttpClient::with_404_response(); - let user_id = if let Ok(Some(user)) = self - .app_state - .db - .get_user_by_github_account(name, None) - .await - { - user.id - } else { - self.app_state - .db - .create_user( - &format!("{name}@example.com"), - false, - NewUserParams { - github_login: name.into(), - github_user_id: 0, - invite_count: 0, - }, - ) - .await - .expect("creating user failed") - .user_id - }; - let client_name = name.to_string(); - let mut client = cx.read(|cx| Client::new(http.clone(), cx)); - let server = self.server.clone(); - let db = self.app_state.db.clone(); - let connection_killers = self.connection_killers.clone(); - let forbid_connections = self.forbid_connections.clone(); - - Arc::get_mut(&mut client) - .unwrap() - .set_id(user_id.0 as usize) - .override_authenticate(move |cx| { - cx.spawn(|_| async move { - let access_token = "the-token".to_string(); - Ok(Credentials { - user_id: user_id.0 as u64, - access_token, - }) - }) - }) - .override_establish_connection(move |credentials, cx| { - assert_eq!(credentials.user_id, user_id.0 as u64); - assert_eq!(credentials.access_token, "the-token"); - - let server = server.clone(); - let db = db.clone(); - let connection_killers = connection_killers.clone(); - let forbid_connections = forbid_connections.clone(); - let client_name = client_name.clone(); - cx.spawn(move |cx| async move { - if forbid_connections.load(SeqCst) { - Err(EstablishConnectionError::other(anyhow!( - "server is forbidding connections" - ))) - } else { - let (client_conn, server_conn, killed) = - Connection::in_memory(cx.background()); - let (connection_id_tx, connection_id_rx) = oneshot::channel(); - let user = db - .get_user_by_id(user_id) - .await - .expect("retrieving user failed") - .unwrap(); - cx.background() - .spawn(server.handle_connection( - server_conn, - client_name, - user, - Some(connection_id_tx), - Executor::Deterministic(cx.background()), - )) - .detach(); - let connection_id = connection_id_rx.await.unwrap(); - connection_killers - .lock() - .insert(connection_id.into(), killed); - Ok(client_conn) - } - }) - }); - - let fs = FakeFs::new(cx.background()); - let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); - let app_state = Arc::new(workspace::AppState { - client: client.clone(), - user_store: user_store.clone(), - languages: Arc::new(LanguageRegistry::new(Task::ready(()))), - themes: ThemeRegistry::new((), cx.font_cache()), - fs: fs.clone(), - build_window_options: Default::default, - initialize_workspace: |_, _, _| unimplemented!(), - dock_default_item_factory: |_, _| unimplemented!(), - }); - - Project::init(&client); - cx.update(|cx| { - workspace::init(app_state.clone(), cx); - call::init(client.clone(), user_store.clone(), cx); - }); - - client - 
.authenticate_and_connect(false, &cx.to_async()) - .await - .unwrap(); - - let client = TestClient { - client, - username: name.to_string(), - local_projects: Default::default(), - remote_projects: Default::default(), - next_root_dir_id: 0, - user_store, - fs, - language_registry: Arc::new(LanguageRegistry::test()), - buffers: Default::default(), - }; - client.wait_for_current_user(cx).await; - client - } - - fn disconnect_client(&self, peer_id: PeerId) { - self.connection_killers - .lock() - .remove(&peer_id) - .unwrap() - .store(true, SeqCst); - } - - fn forbid_connections(&self) { - self.forbid_connections.store(true, SeqCst); - } - - fn allow_connections(&self) { - self.forbid_connections.store(false, SeqCst); - } - - async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) { - for ix in 1..clients.len() { - let (left, right) = clients.split_at_mut(ix); - let (client_a, cx_a) = left.last_mut().unwrap(); - for (client_b, cx_b) in right { - client_a - .user_store - .update(*cx_a, |store, cx| { - store.request_contact(client_b.user_id().unwrap(), cx) - }) - .await - .unwrap(); - cx_a.foreground().run_until_parked(); - client_b - .user_store - .update(*cx_b, |store, cx| { - store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx) - }) - .await - .unwrap(); - } - } - } - - async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) { - self.make_contacts(clients).await; - - let (left, right) = clients.split_at_mut(1); - let (_client_a, cx_a) = &mut left[0]; - let active_call_a = cx_a.read(ActiveCall::global); - - for (client_b, cx_b) in right { - let user_id_b = client_b.current_user_id(*cx_b).to_proto(); - active_call_a - .update(*cx_a, |call, cx| call.invite(user_id_b, None, cx)) - .await - .unwrap(); - - cx_b.foreground().run_until_parked(); - let active_call_b = cx_b.read(ActiveCall::global); - active_call_b - .update(*cx_b, |call, cx| call.accept_incoming(cx)) - .await - .unwrap(); - } - } - - async fn build_app_state( - test_db: &TestDb, - fake_server: &live_kit_client::TestServer, - ) -> Arc { - Arc::new(AppState { - db: test_db.db().clone(), - live_kit_client: Some(Arc::new(fake_server.create_api_client())), - config: Default::default(), - }) - } -} - -impl Deref for TestServer { - type Target = Server; - - fn deref(&self) -> &Self::Target { - &self.server - } -} - -impl Drop for TestServer { - fn drop(&mut self) { - self.server.teardown(); - self.test_live_kit_server.teardown().unwrap(); - } -} - -struct TestClient { - client: Arc, - username: String, - local_projects: Vec>, - remote_projects: Vec>, - next_root_dir_id: usize, - pub user_store: ModelHandle, - language_registry: Arc, - fs: Arc, - buffers: HashMap, HashSet>>, -} - -impl Deref for TestClient { - type Target = Arc; - - fn deref(&self) -> &Self::Target { - &self.client - } -} - -struct ContactsSummary { - pub current: Vec, - pub outgoing_requests: Vec, - pub incoming_requests: Vec, -} - -impl TestClient { - pub fn current_user_id(&self, cx: &TestAppContext) -> UserId { - UserId::from_proto( - self.user_store - .read_with(cx, |user_store, _| user_store.current_user().unwrap().id), - ) - } - - async fn wait_for_current_user(&self, cx: &TestAppContext) { - let mut authed_user = self - .user_store - .read_with(cx, |user_store, _| user_store.watch_current_user()); - while authed_user.next().await.unwrap().is_none() {} - } - - async fn clear_contacts(&self, cx: &mut TestAppContext) { - self.user_store - .update(cx, |store, _| store.clear_contacts()) - .await; - } - - 
fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary { - self.user_store.read_with(cx, |store, _| ContactsSummary { - current: store - .contacts() - .iter() - .map(|contact| contact.user.github_login.clone()) - .collect(), - outgoing_requests: store - .outgoing_contact_requests() - .iter() - .map(|user| user.github_login.clone()) - .collect(), - incoming_requests: store - .incoming_contact_requests() - .iter() - .map(|user| user.github_login.clone()) - .collect(), - }) - } - - async fn build_local_project( - &self, - root_path: impl AsRef, - cx: &mut TestAppContext, - ) -> (ModelHandle, WorktreeId) { - let project = cx.update(|cx| { - Project::local( - self.client.clone(), - self.user_store.clone(), - self.language_registry.clone(), - self.fs.clone(), - cx, - ) - }); - let (worktree, _) = project - .update(cx, |p, cx| { - p.find_or_create_local_worktree(root_path, true, cx) - }) - .await - .unwrap(); - worktree - .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - (project, worktree.read_with(cx, |tree, _| tree.id())) - } - - async fn build_remote_project( - &self, - host_project_id: u64, - guest_cx: &mut TestAppContext, - ) -> ModelHandle { - let active_call = guest_cx.read(ActiveCall::global); - let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone()); - room.update(guest_cx, |room, cx| { - room.join_project( - host_project_id, - self.language_registry.clone(), - self.fs.clone(), - cx, - ) - }) - .await - .unwrap() - } - - fn build_workspace( - &self, - project: &ModelHandle, - cx: &mut TestAppContext, - ) -> ViewHandle { - let (_, root_view) = cx.add_window(|_| EmptyView); - cx.add_view(&root_view, |cx| { - Workspace::new( - Default::default(), - 0, - project.clone(), - |_, _| unimplemented!(), - cx, - ) - }) - } - - pub async fn simulate( - mut self, - username: String, - mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, - rng: Arc>, - mut cx: TestAppContext, - ) -> (Self, TestAppContext) { - async fn tick( - client: &mut TestClient, - username: &str, - rng: Arc>, - cx: &mut TestAppContext, - ) -> anyhow::Result<()> { - let active_call = cx.read(ActiveCall::global); - if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - if rng.lock().gen() { - log::info!("{}: accepting incoming call", username); - active_call - .update(cx, |call, cx| call.accept_incoming(cx)) - .await?; - } else { - log::info!("{}: declining incoming call", username); - active_call.update(cx, |call, _| call.decline_incoming())?; - } - } else { - let available_contacts = client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - - let distribution = rng.lock().gen_range(0..100); - match distribution { - 0..=29 if !available_contacts.is_empty() => { - let contact = available_contacts.choose(&mut *rng.lock()).unwrap(); - log::info!("{}: inviting {}", username, contact.user.github_login); - active_call - .update(cx, |call, cx| call.invite(contact.user.id, None, cx)) - .await?; - } - 30..=39 if active_call.read_with(cx, |call, _| call.room().is_some()) => { - log::info!("{}: hanging up", username); - active_call.update(cx, |call, cx| call.hang_up(cx))?; - } - _ => {} - } - } - - let remote_projects = - if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) { - room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| 
participant.projects.clone()) - .collect::>() - }) - } else { - Default::default() - }; - let project = if remote_projects.is_empty() || rng.lock().gen() { - if client.local_projects.is_empty() || rng.lock().gen() { - let dir_paths = client.fs.directories().await; - let local_project = if dir_paths.is_empty() || rng.lock().gen() { - let root_path = format!( - "/{}-root-{}", - username, - post_inc(&mut client.next_root_dir_id) - ); - let root_path = Path::new(&root_path); - client.fs.create_dir(root_path).await.unwrap(); - client - .fs - .create_file(&root_path.join("main.rs"), Default::default()) - .await - .unwrap(); - log::info!("{}: opening local project at {:?}", username, root_path); - client.build_local_project(root_path, cx).await.0 - } else { - let root_path = dir_paths.choose(&mut *rng.lock()).unwrap(); - log::info!("{}: opening local project at {:?}", username, root_path); - client.build_local_project(root_path, cx).await.0 - }; - client.local_projects.push(local_project.clone()); - local_project - } else { - client - .local_projects - .choose(&mut *rng.lock()) - .unwrap() - .clone() - } - } else { - if client.remote_projects.is_empty() || rng.lock().gen() { - let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id; - let remote_project = if let Some(project) = - client.remote_projects.iter().find(|project| { - project.read_with(cx, |project, _| { - project.remote_id() == Some(remote_project_id) - }) - }) { - project.clone() - } else { - log::info!("{}: opening remote project {}", username, remote_project_id); - let remote_project = Project::remote( - remote_project_id, - client.client.clone(), - client.user_store.clone(), - client.language_registry.clone(), - FakeFs::new(cx.background()), - cx.to_async(), - ) - .await?; - client.remote_projects.push(remote_project.clone()); - remote_project - }; - - remote_project - } else { - client - .remote_projects - .choose(&mut *rng.lock()) - .unwrap() - .clone() - } - }; - - if active_call.read_with(cx, |call, _| call.room().is_some()) { - if let Err(error) = active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - log::error!("{}: error sharing project, {:?}", username, error); - } - } - - let buffers = client.buffers.entry(project.clone()).or_default(); - let buffer = if buffers.is_empty() || rng.lock().gen() { - let worktree = if let Some(worktree) = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) - }) - .choose(&mut *rng.lock()) - }) { - worktree - } else { - cx.background().simulate_random_delay().await; - return Ok(()); - }; - - let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| { - let entry = worktree - .entries(false) - .filter(|e| e.is_file()) - .choose(&mut *rng.lock()) - .unwrap(); - ( - worktree.root_name().to_string(), - (worktree.id(), entry.path.clone()), - ) - }); - log::info!( - "{}: opening path {:?} in worktree {} ({})", - username, - project_path.1, - project_path.0, - worktree_root_name, - ); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer(project_path.clone(), cx) - }) - .await?; - log::info!( - "{}: opened path {:?} in worktree {} ({}) with buffer id {}", - username, - project_path.1, - project_path.0, - worktree_root_name, - buffer.read_with(cx, |buffer, _| buffer.remote_id()) - ); - buffers.insert(buffer.clone()); - buffer - } else { - 
buffers.iter().choose(&mut *rng.lock()).unwrap().clone() - }; - - let choice = rng.lock().gen_range(0..100); - match choice { - 0..=9 => { - cx.update(|cx| { - log::info!( - "{}: dropping buffer {:?}", - username, - buffer.read(cx).file().unwrap().full_path(cx) - ); - buffers.remove(&buffer); - drop(buffer); - }); - } - 10..=19 => { - let completions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting completions for buffer {} ({:?})", - username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.completions(&buffer, offset, cx) - }); - let completions = cx.background().spawn(async move { - completions - .await - .map_err(|err| anyhow!("completions request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching completions request", username); - cx.update(|cx| completions.detach_and_log_err(cx)); - } else { - completions.await?; - } - } - 20..=29 => { - let code_actions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting code actions for buffer {} ({:?})", - username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); - project.code_actions(&buffer, range, cx) - }); - let code_actions = cx.background().spawn(async move { - code_actions - .await - .map_err(|err| anyhow!("code actions request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching code actions request", username); - cx.update(|cx| code_actions.detach_and_log_err(cx)); - } else { - code_actions.await?; - } - } - 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { - let (requested_version, save) = buffer.update(cx, |buffer, cx| { - log::info!( - "{}: saving buffer {} ({:?})", - username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - (buffer.version(), buffer.save(cx)) - }); - let save = cx.background().spawn(async move { - let (saved_version, _, _) = save - .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - Ok::<_, anyhow::Error>(()) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching save request", username); - cx.update(|cx| save.detach_and_log_err(cx)); - } else { - save.await?; - } - } - 40..=44 => { - let prepare_rename = project.update(cx, |project, cx| { - log::info!( - "{}: preparing rename for buffer {} ({:?})", - username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.prepare_rename(buffer, offset, cx) - }); - let prepare_rename = cx.background().spawn(async move { - prepare_rename - .await - .map_err(|err| anyhow!("prepare rename request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching prepare rename request", username); - cx.update(|cx| prepare_rename.detach_and_log_err(cx)); - } else { - prepare_rename.await?; - } - } - 45..=49 => { - let definitions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting definitions for buffer {} ({:?})", - username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.definition(&buffer, offset, cx) - }); - let definitions = cx.background().spawn(async move { - definitions - .await - 
.map_err(|err| anyhow!("definitions request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching definitions request", username); - cx.update(|cx| definitions.detach_and_log_err(cx)); - } else { - buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer)); - } - } - 50..=54 => { - let highlights = project.update(cx, |project, cx| { - log::info!( - "{}: requesting highlights for buffer {} ({:?})", - username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.document_highlights(&buffer, offset, cx) - }); - let highlights = cx.background().spawn(async move { - highlights - .await - .map_err(|err| anyhow!("highlights request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching highlights request", username); - cx.update(|cx| highlights.detach_and_log_err(cx)); - } else { - highlights.await?; - } - } - 55..=59 => { - let search = project.update(cx, |project, cx| { - let query = rng.lock().gen_range('a'..='z'); - log::info!("{}: project-wide search {:?}", username, query); - project.search(SearchQuery::text(query, false, false), cx) - }); - let search = cx.background().spawn(async move { - search - .await - .map_err(|err| anyhow!("search request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching search request", username); - cx.update(|cx| search.detach_and_log_err(cx)); - } else { - buffers.extend(search.await?.into_keys()); - } - } - 60..=79 => { - let worktree = project - .read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() - && worktree.entries(false).any(|e| e.is_file()) - && worktree.root_entry().map_or(false, |e| e.is_dir()) - }) - .choose(&mut *rng.lock()) - }) - .unwrap(); - let (worktree_id, worktree_root_name) = worktree - .read_with(cx, |worktree, _| { - (worktree.id(), worktree.root_name().to_string()) - }); - - let mut new_name = String::new(); - for _ in 0..10 { - let letter = rng.lock().gen_range('a'..='z'); - new_name.push(letter); - } - - let is_dir = rng.lock().gen::(); - let mut new_path = PathBuf::new(); - new_path.push(new_name); - if !is_dir { - new_path.set_extension("rs"); - } - log::info!( - "{}: creating {:?} in worktree {} ({})", - username, - new_path, - worktree_id, - worktree_root_name, - ); - project - .update(cx, |project, cx| { - project.create_entry((worktree_id, new_path), is_dir, cx) - }) - .unwrap() - .await?; - } - _ => { - buffer.update(cx, |buffer, cx| { - log::info!( - "{}: updating buffer {} ({:?})", - username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - if rng.lock().gen_bool(0.7) { - buffer.randomly_edit(&mut *rng.lock(), 5, cx); - } else { - buffer.randomly_undo_redo(&mut *rng.lock(), cx); - } - }); - } - } - - Ok(()) - } - - // Setup language server - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - None, - ); - let _fake_language_servers = language - .set_fake_lsp_adapter(Arc::new(FakeLspAdapter { - name: "the-fake-language-server", - capabilities: lsp::LanguageServer::full_capabilities(), - initializer: Some(Box::new({ - let rng = rng.clone(); - let fs = self.fs.clone(); - move |fake_server: &mut FakeLanguageServer| { - fake_server.handle_request::( - |_, _| async move { - 
Ok(Some(lsp::CompletionResponse::Array(vec![ - lsp::CompletionItem { - text_edit: Some(lsp::CompletionTextEdit::Edit( - lsp::TextEdit { - range: lsp::Range::new( - lsp::Position::new(0, 0), - lsp::Position::new(0, 0), - ), - new_text: "the-new-text".to_string(), - }, - )), - ..Default::default() - }, - ]))) - }, - ); - - fake_server.handle_request::( - |_, _| async move { - Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( - lsp::CodeAction { - title: "the-code-action".to_string(), - ..Default::default() - }, - )])) - }, - ); - - fake_server.handle_request::( - |params, _| async move { - Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( - params.position, - params.position, - )))) - }, - ); - - fake_server.handle_request::({ - let fs = fs.clone(); - let rng = rng.clone(); - move |_, _| { - let fs = fs.clone(); - let rng = rng.clone(); - async move { - let files = fs.files().await; - let mut rng = rng.lock(); - let count = rng.gen_range::(1..3); - let files = (0..count) - .map(|_| files.choose(&mut *rng).unwrap()) - .collect::>(); - log::info!("LSP: Returning definitions in files {:?}", &files); - Ok(Some(lsp::GotoDefinitionResponse::Array( - files - .into_iter() - .map(|file| lsp::Location { - uri: lsp::Url::from_file_path(file).unwrap(), - range: Default::default(), - }) - .collect(), - ))) - } - } - }); - - fake_server.handle_request::( - { - let rng = rng.clone(); - move |_, _| { - let mut highlights = Vec::new(); - let highlight_count = rng.lock().gen_range(1..=5); - for _ in 0..highlight_count { - let start_row = rng.lock().gen_range(0..100); - let start_column = rng.lock().gen_range(0..100); - let start = PointUtf16::new(start_row, start_column); - let end_row = rng.lock().gen_range(0..100); - let end_column = rng.lock().gen_range(0..100); - let end = PointUtf16::new(end_row, end_column); - let range = - if start > end { end..start } else { start..end }; - highlights.push(lsp::DocumentHighlight { - range: range_to_lsp(range.clone()), - kind: Some(lsp::DocumentHighlightKind::READ), - }); - } - highlights.sort_unstable_by_key(|highlight| { - (highlight.range.start, highlight.range.end) - }); - async move { Ok(Some(highlights)) } - } - }, - ); - } - })), - ..Default::default() - })) - .await; - self.language_registry.add(Arc::new(language)); - - while op_start_signal.next().await.is_some() { - if let Err(error) = tick(&mut self, &username, rng.clone(), &mut cx).await { - log::error!("{} error: {:?}", username, error); - } - - cx.background().simulate_random_delay().await; - } - log::info!("{}: done", username); - - (self, cx) - } -} - -impl Drop for TestClient { - fn drop(&mut self) { - self.client.teardown(); - } -} - #[derive(Debug, Eq, PartialEq)] struct RoomParticipants { remote: Vec, diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs new file mode 100644 index 0000000000..6d1df1bee9 --- /dev/null +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -0,0 +1,919 @@ +use crate::{ + db::{self, NewUserParams}, + rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, + tests::{TestClient, TestServer}, +}; +use anyhow::anyhow; +use call::ActiveCall; +use client::RECEIVE_TIMEOUT; +use collections::BTreeMap; +use fs::{FakeFs, Fs as _}; +use futures::StreamExt as _; +use gpui::{executor::Deterministic, TestAppContext}; +use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; +use lsp::FakeLanguageServer; +use parking_lot::Mutex; +use project::{search::SearchQuery, Project}; +use 
rand::prelude::*;
+use std::{env, path::PathBuf, sync::Arc};
+
+#[gpui::test(iterations = 100)]
+async fn test_random_collaboration(
+    cx: &mut TestAppContext,
+    deterministic: Arc<Deterministic>,
+    rng: StdRng,
+) {
+    deterministic.forbid_parking();
+    let rng = Arc::new(Mutex::new(rng));
+
+    let max_peers = env::var("MAX_PEERS")
+        .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
+        .unwrap_or(5);
+
+    let max_operations = env::var("OPERATIONS")
+        .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
+        .unwrap_or(10);
+
+    let mut server = TestServer::start(&deterministic).await;
+    let db = server.app_state.db.clone();
+
+    let mut available_guests = Vec::new();
+    for ix in 0..max_peers {
+        let username = format!("guest-{}", ix + 1);
+        let user_id = db
+            .create_user(
+                &format!("{username}@example.com"),
+                false,
+                NewUserParams {
+                    github_login: username.clone(),
+                    github_user_id: (ix + 1) as i32,
+                    invite_count: 0,
+                },
+            )
+            .await
+            .unwrap()
+            .user_id;
+        available_guests.push((user_id, username));
+    }
+
+    for (ix, (user_id_a, _)) in available_guests.iter().enumerate() {
+        for (user_id_b, _) in &available_guests[ix + 1..] {
+            server
+                .app_state
+                .db
+                .send_contact_request(*user_id_a, *user_id_b)
+                .await
+                .unwrap();
+            server
+                .app_state
+                .db
+                .respond_to_contact_request(*user_id_b, *user_id_a, true)
+                .await
+                .unwrap();
+        }
+    }
+
+    let mut clients = Vec::new();
+    let mut user_ids = Vec::new();
+    let mut op_start_signals = Vec::new();
+    let mut next_entity_id = 100000;
+
+    let mut operations = 0;
+    while operations < max_operations {
+        let distribution = rng.lock().gen_range(0..100);
+        match distribution {
+            0..=19 if !available_guests.is_empty() => {
+                let guest_ix = rng.lock().gen_range(0..available_guests.len());
+                let (_, guest_username) = available_guests.remove(guest_ix);
+                log::info!("Adding new connection for {}", guest_username);
+                next_entity_id += 100000;
+                let mut guest_cx = TestAppContext::new(
+                    cx.foreground_platform(),
+                    cx.platform(),
+                    deterministic.build_foreground(next_entity_id),
+                    deterministic.build_background(),
+                    cx.font_cache(),
+                    cx.leak_detector(),
+                    next_entity_id,
+                    cx.function_name.clone(),
+                );
+
+                let op_start_signal = futures::channel::mpsc::unbounded();
+                let guest = server.create_client(&mut guest_cx, &guest_username).await;
+                user_ids.push(guest.current_user_id(&guest_cx));
+                op_start_signals.push(op_start_signal.0);
+                clients.push(guest_cx.foreground().spawn(simulate_client(
+                    guest,
+                    op_start_signal.1,
+                    rng.clone(),
+                    guest_cx,
+                )));
+
+                log::info!("Added connection for {}", guest_username);
+                operations += 1;
+            }
+            20..=24 if clients.len() > 1 => {
+                let guest_ix = rng.lock().gen_range(1..clients.len());
+                log::info!(
+                    "Simulating full disconnection of guest {}",
+                    user_ids[guest_ix]
+                );
+                let removed_guest_id = user_ids.remove(guest_ix);
+                let user_connection_ids = server
+                    .connection_pool
+                    .lock()
+                    .user_connection_ids(removed_guest_id)
+                    .collect::<Vec<_>>();
+                assert_eq!(user_connection_ids.len(), 1);
+                let removed_peer_id = user_connection_ids[0].into();
+                let guest = clients.remove(guest_ix);
+                op_start_signals.remove(guest_ix);
+                server.forbid_connections();
+                server.disconnect_client(removed_peer_id);
+                deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+                deterministic.start_waiting();
+                log::info!("Waiting for guest {} to exit...", removed_guest_id);
+                let (guest, mut guest_cx) = guest.await;
+                deterministic.finish_waiting();
+                server.allow_connections();
+
+                for project in &guest.remote_projects {
+
project.read_with(&guest_cx, |project, _| assert!(project.is_read_only())); + } + for user_id in &user_ids { + let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); + let pool = server.connection_pool.lock(); + for contact in contacts { + if let db::Contact::Accepted { user_id, .. } = contact { + if pool.is_user_online(user_id) { + assert_ne!( + user_id, removed_guest_id, + "removed guest is still a contact of another peer" + ); + } + } + } + } + + log::info!("{} removed", guest.username); + available_guests.push((removed_guest_id, guest.username.clone())); + guest_cx.update(|cx| { + cx.clear_globals(); + drop(guest); + }); + + operations += 1; + } + 25..=29 if clients.len() > 1 => { + let guest_ix = rng.lock().gen_range(1..clients.len()); + let user_id = user_ids[guest_ix]; + log::info!("Simulating temporary disconnection of guest {}", user_id); + let user_connection_ids = server + .connection_pool + .lock() + .user_connection_ids(user_id) + .collect::>(); + assert_eq!(user_connection_ids.len(), 1); + let peer_id = user_connection_ids[0].into(); + server.disconnect_client(peer_id); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + operations += 1; + } + 30..=34 => { + log::info!("Simulating server restart"); + server.reset().await; + deterministic.advance_clock(RECEIVE_TIMEOUT); + server.start().await.unwrap(); + deterministic.advance_clock(CLEANUP_TIMEOUT); + let environment = &server.app_state.config.zed_environment; + let stale_room_ids = server + .app_state + .db + .stale_room_ids(environment, server.id()) + .await + .unwrap(); + assert_eq!(stale_room_ids, vec![]); + } + _ if !op_start_signals.is_empty() => { + while operations < max_operations && rng.lock().gen_bool(0.7) { + op_start_signals + .choose(&mut *rng.lock()) + .unwrap() + .unbounded_send(()) + .unwrap(); + operations += 1; + } + + if rng.lock().gen_bool(0.8) { + deterministic.run_until_parked(); + } + } + _ => {} + } + } + + drop(op_start_signals); + deterministic.start_waiting(); + let clients = futures::future::join_all(clients).await; + deterministic.finish_waiting(); + deterministic.run_until_parked(); + + for (guest_client, guest_cx) in &clients { + for guest_project in &guest_client.remote_projects { + guest_project.read_with(guest_cx, |guest_project, cx| { + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client.local_projects.iter().find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == guest_project.remote_id() + }) + })?; + Some((project, cx)) + }); + + if !guest_project.is_read_only() { + if let Some((host_project, host_cx)) = host_project { + let host_worktree_snapshots = + host_project.read_with(host_cx, |host_project, cx| { + host_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>() + }); + let guest_worktree_snapshots = guest_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>(); + + assert_eq!( + guest_worktree_snapshots.keys().collect::>(), + host_worktree_snapshots.keys().collect::>(), + "{} has different worktrees than the host", + guest_client.username + ); + + for (id, host_snapshot) in &host_worktree_snapshots { + let guest_snapshot = &guest_worktree_snapshots[id]; + assert_eq!( + guest_snapshot.root_name(), + host_snapshot.root_name(), + "{} has different root name than the host for worktree {}", + 
guest_client.username, + id + ); + assert_eq!( + guest_snapshot.abs_path(), + host_snapshot.abs_path(), + "{} has different abs path than the host for worktree {}", + guest_client.username, + id + ); + assert_eq!( + guest_snapshot.entries(false).collect::>(), + host_snapshot.entries(false).collect::>(), + "{} has different snapshot than the host for worktree {}", + guest_client.username, + id + ); + assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); + } + } + } + + guest_project.check_invariants(cx); + }); + } + + for (guest_project, guest_buffers) in &guest_client.buffers { + let project_id = if guest_project.read_with(guest_cx, |project, _| { + project.is_local() || project.is_read_only() + }) { + continue; + } else { + guest_project + .read_with(guest_cx, |project, _| project.remote_id()) + .unwrap() + }; + + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client.local_projects.iter().find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == Some(project_id) + }) + })?; + Some((project, cx)) + }); + + let (host_project, host_cx) = if let Some((host_project, host_cx)) = host_project { + (host_project, host_cx) + } else { + continue; + }; + + for guest_buffer in guest_buffers { + let buffer_id = guest_buffer.read_with(guest_cx, |buffer, _| buffer.remote_id()); + let host_buffer = host_project.read_with(host_cx, |project, cx| { + project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { + panic!( + "host does not have buffer for guest:{}, peer:{:?}, id:{}", + guest_client.username, + guest_client.peer_id(), + buffer_id + ) + }) + }); + let path = host_buffer + .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + + assert_eq!( + guest_buffer.read_with(guest_cx, |buffer, _| buffer.deferred_ops_len()), + 0, + "{}, buffer {}, path {:?} has deferred operations", + guest_client.username, + buffer_id, + path, + ); + assert_eq!( + guest_buffer.read_with(guest_cx, |buffer, _| buffer.text()), + host_buffer.read_with(host_cx, |buffer, _| buffer.text()), + "{}, buffer {}, path {:?}, differs from the host's buffer", + guest_client.username, + buffer_id, + path + ); + } + } + } + + for (client, mut cx) in clients { + cx.update(|cx| { + cx.clear_globals(); + drop(client); + }); + } +} + +async fn simulate_client( + mut client: TestClient, + mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + rng: Arc>, + mut cx: TestAppContext, +) -> (TestClient, TestAppContext) { + // Setup language server + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + None, + ); + let _fake_language_servers = language + .set_fake_lsp_adapter(Arc::new(FakeLspAdapter { + name: "the-fake-language-server", + capabilities: lsp::LanguageServer::full_capabilities(), + initializer: Some(Box::new({ + let rng = rng.clone(); + let fs = client.fs.clone(); + move |fake_server: &mut FakeLanguageServer| { + fake_server.handle_request::( + |_, _| async move { + Ok(Some(lsp::CompletionResponse::Array(vec![ + lsp::CompletionItem { + text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { + range: lsp::Range::new( + lsp::Position::new(0, 0), + lsp::Position::new(0, 0), + ), + new_text: "the-new-text".to_string(), + })), + ..Default::default() + }, + ]))) + }, + ); + + fake_server.handle_request::( + |_, _| async move { + Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( + lsp::CodeAction { + title: "the-code-action".to_string(), 
+ ..Default::default() + }, + )])) + }, + ); + + fake_server.handle_request::( + |params, _| async move { + Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( + params.position, + params.position, + )))) + }, + ); + + fake_server.handle_request::({ + let fs = fs.clone(); + let rng = rng.clone(); + move |_, _| { + let fs = fs.clone(); + let rng = rng.clone(); + async move { + let files = fs.files().await; + let mut rng = rng.lock(); + let count = rng.gen_range::(1..3); + let files = (0..count) + .map(|_| files.choose(&mut *rng).unwrap()) + .collect::>(); + log::info!("LSP: Returning definitions in files {:?}", &files); + Ok(Some(lsp::GotoDefinitionResponse::Array( + files + .into_iter() + .map(|file| lsp::Location { + uri: lsp::Url::from_file_path(file).unwrap(), + range: Default::default(), + }) + .collect(), + ))) + } + } + }); + + fake_server.handle_request::({ + let rng = rng.clone(); + move |_, _| { + let mut highlights = Vec::new(); + let highlight_count = rng.lock().gen_range(1..=5); + for _ in 0..highlight_count { + let start_row = rng.lock().gen_range(0..100); + let start_column = rng.lock().gen_range(0..100); + let start = PointUtf16::new(start_row, start_column); + let end_row = rng.lock().gen_range(0..100); + let end_column = rng.lock().gen_range(0..100); + let end = PointUtf16::new(end_row, end_column); + let range = if start > end { end..start } else { start..end }; + highlights.push(lsp::DocumentHighlight { + range: range_to_lsp(range.clone()), + kind: Some(lsp::DocumentHighlightKind::READ), + }); + } + highlights.sort_unstable_by_key(|highlight| { + (highlight.range.start, highlight.range.end) + }); + async move { Ok(Some(highlights)) } + } + }); + } + })), + ..Default::default() + })) + .await; + client.language_registry.add(Arc::new(language)); + + while op_start_signal.next().await.is_some() { + if let Err(error) = randomly_mutate_client(&mut client, rng.clone(), &mut cx).await { + log::error!("{} error: {:?}", client.username, error); + } + + cx.background().simulate_random_delay().await; + } + log::info!("{}: done", client.username); + + (client, cx) +} + +async fn randomly_mutate_client( + client: &mut TestClient, + rng: Arc>, + cx: &mut TestAppContext, +) -> anyhow::Result<()> { + let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + if rng.lock().gen() { + log::info!("{}: accepting incoming call", client.username); + active_call + .update(cx, |call, cx| call.accept_incoming(cx)) + .await?; + } else { + log::info!("{}: declining incoming call", client.username); + active_call.update(cx, |call, _| call.decline_incoming())?; + } + } else { + let available_contacts = client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + + let distribution = rng.lock().gen_range(0..100); + match distribution { + 0..=29 if !available_contacts.is_empty() => { + let contact = available_contacts.choose(&mut *rng.lock()).unwrap(); + log::info!( + "{}: inviting {}", + client.username, + contact.user.github_login + ); + active_call + .update(cx, |call, cx| call.invite(contact.user.id, None, cx)) + .await?; + } + 30..=39 if active_call.read_with(cx, |call, _| call.room().is_some()) => { + log::info!("{}: hanging up", client.username); + active_call.update(cx, |call, cx| call.hang_up(cx))?; + } + _ => {} + } + } + + let remote_projects = + if let Some(room) = active_call.read_with(cx, |call, _| 
call.room().cloned()) { + room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| participant.projects.clone()) + .collect::>() + }) + } else { + Default::default() + }; + + let project = if remote_projects.is_empty() || rng.lock().gen() { + if client.local_projects.is_empty() || rng.lock().gen() { + let dir_paths = client.fs.directories().await; + let local_project = if dir_paths.is_empty() || rng.lock().gen() { + let root_path = client.create_new_root_dir(); + client.fs.create_dir(&root_path).await.unwrap(); + client + .fs + .create_file(&root_path.join("main.rs"), Default::default()) + .await + .unwrap(); + log::info!( + "{}: opening local project at {:?}", + client.username, + root_path + ); + client.build_local_project(root_path, cx).await.0 + } else { + let root_path = dir_paths.choose(&mut *rng.lock()).unwrap(); + log::info!( + "{}: opening local project at {:?}", + client.username, + root_path + ); + client.build_local_project(root_path, cx).await.0 + }; + client.local_projects.push(local_project.clone()); + local_project + } else { + client + .local_projects + .choose(&mut *rng.lock()) + .unwrap() + .clone() + } + } else { + if client.remote_projects.is_empty() || rng.lock().gen() { + let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id; + let remote_project = if let Some(project) = + client.remote_projects.iter().find(|project| { + project.read_with(cx, |project, _| { + project.remote_id() == Some(remote_project_id) + }) + }) { + project.clone() + } else { + log::info!( + "{}: opening remote project {}", + client.username, + remote_project_id + ); + let remote_project = Project::remote( + remote_project_id, + client.client.clone(), + client.user_store.clone(), + client.language_registry.clone(), + FakeFs::new(cx.background()), + cx.to_async(), + ) + .await?; + client.remote_projects.push(remote_project.clone()); + remote_project + }; + + remote_project + } else { + client + .remote_projects + .choose(&mut *rng.lock()) + .unwrap() + .clone() + } + }; + + if active_call.read_with(cx, |call, _| call.room().is_some()) { + if let Err(error) = active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + log::error!("{}: error sharing project, {:?}", client.username, error); + } + } + + let buffers = client.buffers.entry(project.clone()).or_default(); + let buffer = if buffers.is_empty() || rng.lock().gen() { + let worktree = if let Some(worktree) = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) + }) + .choose(&mut *rng.lock()) + }) { + worktree + } else { + cx.background().simulate_random_delay().await; + return Ok(()); + }; + + let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| { + let entry = worktree + .entries(false) + .filter(|e| e.is_file()) + .choose(&mut *rng.lock()) + .unwrap(); + ( + worktree.root_name().to_string(), + (worktree.id(), entry.path.clone()), + ) + }); + log::info!( + "{}: opening path {:?} in worktree {} ({})", + client.username, + project_path.1, + project_path.0, + worktree_root_name, + ); + let buffer = project + .update(cx, |project, cx| { + project.open_buffer(project_path.clone(), cx) + }) + .await?; + log::info!( + "{}: opened path {:?} in worktree {} ({}) with buffer id {}", + client.username, + project_path.1, + project_path.0, + worktree_root_name, + buffer.read_with(cx, |buffer, _| 
buffer.remote_id()) + ); + buffers.insert(buffer.clone()); + buffer + } else { + buffers.iter().choose(&mut *rng.lock()).unwrap().clone() + }; + + let choice = rng.lock().gen_range(0..100); + match choice { + 0..=9 => { + cx.update(|cx| { + log::info!( + "{}: dropping buffer {:?}", + client.username, + buffer.read(cx).file().unwrap().full_path(cx) + ); + buffers.remove(&buffer); + drop(buffer); + }); + } + 10..=19 => { + let completions = project.update(cx, |project, cx| { + log::info!( + "{}: requesting completions for buffer {} ({:?})", + client.username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.completions(&buffer, offset, cx) + }); + let completions = cx.background().spawn(async move { + completions + .await + .map_err(|err| anyhow!("completions request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching completions request", client.username); + cx.update(|cx| completions.detach_and_log_err(cx)); + } else { + completions.await?; + } + } + 20..=29 => { + let code_actions = project.update(cx, |project, cx| { + log::info!( + "{}: requesting code actions for buffer {} ({:?})", + client.username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); + project.code_actions(&buffer, range, cx) + }); + let code_actions = cx.background().spawn(async move { + code_actions + .await + .map_err(|err| anyhow!("code actions request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching code actions request", client.username); + cx.update(|cx| code_actions.detach_and_log_err(cx)); + } else { + code_actions.await?; + } + } + 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { + let (requested_version, save) = buffer.update(cx, |buffer, cx| { + log::info!( + "{}: saving buffer {} ({:?})", + client.username, + buffer.remote_id(), + buffer.file().unwrap().full_path(cx) + ); + (buffer.version(), buffer.save(cx)) + }); + let save = cx.background().spawn(async move { + let (saved_version, _, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + Ok::<_, anyhow::Error>(()) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } + } + 40..=44 => { + let prepare_rename = project.update(cx, |project, cx| { + log::info!( + "{}: preparing rename for buffer {} ({:?})", + client.username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.prepare_rename(buffer, offset, cx) + }); + let prepare_rename = cx.background().spawn(async move { + prepare_rename + .await + .map_err(|err| anyhow!("prepare rename request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching prepare rename request", client.username); + cx.update(|cx| prepare_rename.detach_and_log_err(cx)); + } else { + prepare_rename.await?; + } + } + 45..=49 => { + let definitions = project.update(cx, |project, cx| { + log::info!( + "{}: requesting definitions for buffer {} ({:?})", + client.username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = 
rng.lock().gen_range(0..=buffer.read(cx).len()); + project.definition(&buffer, offset, cx) + }); + let definitions = cx.background().spawn(async move { + definitions + .await + .map_err(|err| anyhow!("definitions request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching definitions request", client.username); + cx.update(|cx| definitions.detach_and_log_err(cx)); + } else { + buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer)); + } + } + 50..=54 => { + let highlights = project.update(cx, |project, cx| { + log::info!( + "{}: requesting highlights for buffer {} ({:?})", + client.username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.document_highlights(&buffer, offset, cx) + }); + let highlights = cx.background().spawn(async move { + highlights + .await + .map_err(|err| anyhow!("highlights request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching highlights request", client.username); + cx.update(|cx| highlights.detach_and_log_err(cx)); + } else { + highlights.await?; + } + } + 55..=59 => { + let search = project.update(cx, |project, cx| { + let query = rng.lock().gen_range('a'..='z'); + log::info!("{}: project-wide search {:?}", client.username, query); + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search + .await + .map_err(|err| anyhow!("search request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching search request", client.username); + cx.update(|cx| search.detach_and_log_err(cx)); + } else { + buffers.extend(search.await?.into_keys()); + } + } + 60..=79 => { + let worktree = project + .read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + && worktree.root_entry().map_or(false, |e| e.is_dir()) + }) + .choose(&mut *rng.lock()) + }) + .unwrap(); + let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| { + (worktree.id(), worktree.root_name().to_string()) + }); + + let mut new_name = String::new(); + for _ in 0..10 { + let letter = rng.lock().gen_range('a'..='z'); + new_name.push(letter); + } + + let is_dir = rng.lock().gen::(); + let mut new_path = PathBuf::new(); + new_path.push(new_name); + if !is_dir { + new_path.set_extension("rs"); + } + log::info!( + "{}: creating {:?} in worktree {} ({})", + client.username, + new_path, + worktree_id, + worktree_root_name, + ); + project + .update(cx, |project, cx| { + project.create_entry((worktree_id, new_path), is_dir, cx) + }) + .unwrap() + .await?; + } + _ => { + buffer.update(cx, |buffer, cx| { + log::info!( + "{}: updating buffer {} ({:?})", + client.username, + buffer.remote_id(), + buffer.file().unwrap().full_path(cx) + ); + if rng.lock().gen_bool(0.7) { + buffer.randomly_edit(&mut *rng.lock(), 5, cx); + } else { + buffer.randomly_undo_redo(&mut *rng.lock(), cx); + } + }); + } + } + + Ok(()) +} From 8cd94060bb12dcfa5ea2a53da0e18eba673447b4 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 21 Dec 2022 11:37:18 -0800 Subject: [PATCH 27/56] :lipstick: Avoid referring to all clients as guests in random integration test --- .../src/tests/randomized_integration_tests.rs | 110 +++++++++--------- 1 file changed, 57 insertions(+), 53 deletions(-) diff --git 
a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 6d1df1bee9..b2057bf7ee 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -37,9 +37,9 @@ async fn test_random_collaboration( let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); - let mut available_guests = Vec::new(); + let mut available_users = Vec::new(); for ix in 0..max_peers { - let username = format!("guest-{}", ix + 1); + let username = format!("user-{}", ix + 1); let user_id = db .create_user( &format!("{username}@example.com"), @@ -53,11 +53,11 @@ async fn test_random_collaboration( .await .unwrap() .user_id; - available_guests.push((user_id, username)); + available_users.push((user_id, username)); } - for (ix, (user_id_a, _)) in available_guests.iter().enumerate() { - for (user_id_b, _) in &available_guests[ix + 1..] { + for (ix, (user_id_a, _)) in available_users.iter().enumerate() { + for (user_id_b, _) in &available_users[ix + 1..] { server .app_state .db @@ -82,12 +82,12 @@ async fn test_random_collaboration( while operations < max_operations { let distribution = rng.lock().gen_range(0..100); match distribution { - 0..=19 if !available_guests.is_empty() => { - let guest_ix = rng.lock().gen_range(0..available_guests.len()); - let (_, guest_username) = available_guests.remove(guest_ix); - log::info!("Adding new connection for {}", guest_username); + 0..=19 if !available_users.is_empty() => { + let client_ix = rng.lock().gen_range(0..available_users.len()); + let (_, username) = available_users.remove(client_ix); + log::info!("Adding new connection for {}", username); next_entity_id += 100000; - let mut guest_cx = TestAppContext::new( + let mut client_cx = TestAppContext::new( cx.foreground_platform(), cx.platform(), deterministic.build_foreground(next_entity_id), @@ -99,46 +99,47 @@ async fn test_random_collaboration( ); let op_start_signal = futures::channel::mpsc::unbounded(); - let guest = server.create_client(&mut guest_cx, &guest_username).await; - user_ids.push(guest.current_user_id(&guest_cx)); + let client = server.create_client(&mut client_cx, &username).await; + user_ids.push(client.current_user_id(&client_cx)); op_start_signals.push(op_start_signal.0); - clients.push(guest_cx.foreground().spawn(simulate_client( - guest, + clients.push(client_cx.foreground().spawn(simulate_client( + client, op_start_signal.1, rng.clone(), - guest_cx, + client_cx, ))); - log::info!("Added connection for {}", guest_username); + log::info!("Added connection for {}", username); operations += 1; } + 20..=24 if clients.len() > 1 => { - let guest_ix = rng.lock().gen_range(1..clients.len()); + let client_ix = rng.lock().gen_range(1..clients.len()); log::info!( - "Simulating full disconnection of guest {}", - user_ids[guest_ix] + "Simulating full disconnection of user {}", + user_ids[client_ix] ); - let removed_guest_id = user_ids.remove(guest_ix); + let removed_user_id = user_ids.remove(client_ix); let user_connection_ids = server .connection_pool .lock() - .user_connection_ids(removed_guest_id) + .user_connection_ids(removed_user_id) .collect::>(); assert_eq!(user_connection_ids.len(), 1); let removed_peer_id = user_connection_ids[0].into(); - let guest = clients.remove(guest_ix); - op_start_signals.remove(guest_ix); + let client = clients.remove(client_ix); + op_start_signals.remove(client_ix); server.forbid_connections(); 
server.disconnect_client(removed_peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); deterministic.start_waiting(); - log::info!("Waiting for guest {} to exit...", removed_guest_id); - let (guest, mut guest_cx) = guest.await; + log::info!("Waiting for user {} to exit...", removed_user_id); + let (client, mut client_cx) = client.await; deterministic.finish_waiting(); server.allow_connections(); - for project in &guest.remote_projects { - project.read_with(&guest_cx, |project, _| assert!(project.is_read_only())); + for project in &client.remote_projects { + project.read_with(&client_cx, |project, _| assert!(project.is_read_only())); } for user_id in &user_ids { let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); @@ -147,27 +148,28 @@ async fn test_random_collaboration( if let db::Contact::Accepted { user_id, .. } = contact { if pool.is_user_online(user_id) { assert_ne!( - user_id, removed_guest_id, - "removed guest is still a contact of another peer" + user_id, removed_user_id, + "removed client is still a contact of another peer" ); } } } } - log::info!("{} removed", guest.username); - available_guests.push((removed_guest_id, guest.username.clone())); - guest_cx.update(|cx| { + log::info!("{} removed", client.username); + available_users.push((removed_user_id, client.username.clone())); + client_cx.update(|cx| { cx.clear_globals(); - drop(guest); + drop(client); }); operations += 1; } + 25..=29 if clients.len() > 1 => { - let guest_ix = rng.lock().gen_range(1..clients.len()); - let user_id = user_ids[guest_ix]; - log::info!("Simulating temporary disconnection of guest {}", user_id); + let client_ix = rng.lock().gen_range(1..clients.len()); + let user_id = user_ids[client_ix]; + log::info!("Simulating temporary disconnection of user {}", user_id); let user_connection_ids = server .connection_pool .lock() @@ -179,6 +181,7 @@ async fn test_random_collaboration( deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); operations += 1; } + 30..=34 => { log::info!("Simulating server restart"); server.reset().await; @@ -194,6 +197,7 @@ async fn test_random_collaboration( .unwrap(); assert_eq!(stale_room_ids, vec![]); } + _ if !op_start_signals.is_empty() => { while operations < max_operations && rng.lock().gen_bool(0.7) { op_start_signals @@ -218,9 +222,9 @@ async fn test_random_collaboration( deterministic.finish_waiting(); deterministic.run_until_parked(); - for (guest_client, guest_cx) in &clients { - for guest_project in &guest_client.remote_projects { - guest_project.read_with(guest_cx, |guest_project, cx| { + for (client, client_cx) in &clients { + for guest_project in &client.remote_projects { + guest_project.read_with(client_cx, |guest_project, cx| { let host_project = clients.iter().find_map(|(client, cx)| { let project = client.local_projects.iter().find(|host_project| { host_project.read_with(cx, |host_project, _| { @@ -254,7 +258,7 @@ async fn test_random_collaboration( guest_worktree_snapshots.keys().collect::>(), host_worktree_snapshots.keys().collect::>(), "{} has different worktrees than the host", - guest_client.username + client.username ); for (id, host_snapshot) in &host_worktree_snapshots { @@ -263,21 +267,21 @@ async fn test_random_collaboration( guest_snapshot.root_name(), host_snapshot.root_name(), "{} has different root name than the host for worktree {}", - guest_client.username, + client.username, id ); assert_eq!( guest_snapshot.abs_path(), host_snapshot.abs_path(), "{} has different abs path than the host for 
worktree {}", - guest_client.username, + client.username, id ); assert_eq!( guest_snapshot.entries(false).collect::>(), host_snapshot.entries(false).collect::>(), "{} has different snapshot than the host for worktree {}", - guest_client.username, + client.username, id ); assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); @@ -289,14 +293,14 @@ async fn test_random_collaboration( }); } - for (guest_project, guest_buffers) in &guest_client.buffers { - let project_id = if guest_project.read_with(guest_cx, |project, _| { + for (guest_project, guest_buffers) in &client.buffers { + let project_id = if guest_project.read_with(client_cx, |project, _| { project.is_local() || project.is_read_only() }) { continue; } else { guest_project - .read_with(guest_cx, |project, _| project.remote_id()) + .read_with(client_cx, |project, _| project.remote_id()) .unwrap() }; @@ -316,13 +320,13 @@ async fn test_random_collaboration( }; for guest_buffer in guest_buffers { - let buffer_id = guest_buffer.read_with(guest_cx, |buffer, _| buffer.remote_id()); + let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); let host_buffer = host_project.read_with(host_cx, |project, cx| { project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { panic!( "host does not have buffer for guest:{}, peer:{:?}, id:{}", - guest_client.username, - guest_client.peer_id(), + client.username, + client.peer_id(), buffer_id ) }) @@ -331,18 +335,18 @@ async fn test_random_collaboration( .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); assert_eq!( - guest_buffer.read_with(guest_cx, |buffer, _| buffer.deferred_ops_len()), + guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()), 0, "{}, buffer {}, path {:?} has deferred operations", - guest_client.username, + client.username, buffer_id, path, ); assert_eq!( - guest_buffer.read_with(guest_cx, |buffer, _| buffer.text()), + guest_buffer.read_with(client_cx, |buffer, _| buffer.text()), host_buffer.read_with(host_cx, |buffer, _| buffer.text()), "{}, buffer {}, path {:?}, differs from the host's buffer", - guest_client.username, + client.username, buffer_id, path ); From 89da738fae212ed1e512c4037f02dc616d799716 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 21 Dec 2022 14:13:43 -0800 Subject: [PATCH 28/56] In randomized test, open remote projects via the room Co-authored-by: Nathan Sobo --- .../src/tests/randomized_integration_tests.rs | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index b2057bf7ee..b3f34fe51c 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -13,7 +13,7 @@ use gpui::{executor::Deterministic, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; -use project::{search::SearchQuery, Project}; +use project::search::SearchQuery; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; @@ -139,7 +139,13 @@ async fn test_random_collaboration( server.allow_connections(); for project in &client.remote_projects { - project.read_with(&client_cx, |project, _| assert!(project.is_read_only())); + project.read_with(&client_cx, |project, _| { + assert!( + project.is_read_only(), + "project {:?} should be read only", + project.remote_id() + ) + }); } for user_id in &user_ids 
{ let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); @@ -602,15 +608,18 @@ async fn randomly_mutate_client( client.username, remote_project_id ); - let remote_project = Project::remote( - remote_project_id, - client.client.clone(), - client.user_store.clone(), - client.language_registry.clone(), - FakeFs::new(cx.background()), - cx.to_async(), - ) - .await?; + let call = cx.read(ActiveCall::global); + let room = call.read_with(cx, |call, _| call.room().unwrap().clone()); + let remote_project = room + .update(cx, |room, cx| { + room.join_project( + remote_project_id, + client.language_registry.clone(), + FakeFs::new(cx.background().clone()), + cx, + ) + }) + .await?; client.remote_projects.push(remote_project.clone()); remote_project }; From c321f5d94a754977586b224f009689e7b791bcfc Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 21 Dec 2022 15:38:44 -0800 Subject: [PATCH 29/56] Assert that buffers' file state matches in randomized collab test Co-authored-by: Nathan Sobo --- .../src/tests/randomized_integration_tests.rs | 13 +++++++++++++ crates/editor/src/items.rs | 2 +- crates/editor/src/multi_buffer.rs | 2 +- crates/fs/src/fs.rs | 15 +++++++++++---- crates/language/src/buffer.rs | 8 ++++---- crates/project/src/worktree.rs | 2 +- crates/rpc/proto/zed.proto | 1 - 7 files changed, 31 insertions(+), 12 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index b3f34fe51c..0699684418 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -356,6 +356,19 @@ async fn test_random_collaboration( buffer_id, path ); + + let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned()); + let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); + match (host_file, guest_file) { + (Some(host_file), Some(guest_file)) => { + assert_eq!(host_file.mtime(), guest_file.mtime()); + assert_eq!(host_file.path(), guest_file.path()); + assert_eq!(host_file.is_deleted(), guest_file.is_deleted()); + } + (None, None) => {} + (None, _) => panic!("host's file is None, guest's isn't "), + (_, None) => panic!("guest's file is None, hosts's isn't "), + } } } } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 9bf7106c68..ec678c6180 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1114,7 +1114,7 @@ fn path_for_buffer<'a>( cx: &'a AppContext, ) -> Option> { let file = buffer.read(cx).as_singleton()?.read(cx).file()?; - path_for_file(file, height, include_filename, cx) + path_for_file(file.as_ref(), height, include_filename, cx) } fn path_for_file<'a>( diff --git a/crates/editor/src/multi_buffer.rs b/crates/editor/src/multi_buffer.rs index 0a55fc1f4e..2347d9a63d 100644 --- a/crates/editor/src/multi_buffer.rs +++ b/crates/editor/src/multi_buffer.rs @@ -1311,7 +1311,7 @@ impl MultiBuffer { .and_then(|(buffer, offset)| buffer.read(cx).language_at(offset)) } - pub fn files<'a>(&'a self, cx: &'a AppContext) -> SmallVec<[&'a dyn File; 2]> { + pub fn files<'a>(&'a self, cx: &'a AppContext) -> SmallVec<[&'a Arc; 2]> { let buffers = self.buffers.borrow(); buffers .values() diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index d44aebce0f..7b1287247c 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -23,7 +23,7 @@ use std::{ time::{Duration, SystemTime}, }; use tempfile::NamedTempFile; -use util::ResultExt; +use util::{post_inc, ResultExt}; 
#[cfg(any(test, feature = "test-support"))] use collections::{btree_map, BTreeMap}; @@ -389,6 +389,7 @@ pub struct FakeFs { struct FakeFsState { root: Arc>, next_inode: u64, + next_mtime: SystemTime, event_txs: Vec>>, } @@ -521,6 +522,7 @@ impl FakeFs { entries: Default::default(), git_repo_state: None, })), + next_mtime: SystemTime::UNIX_EPOCH, next_inode: 1, event_txs: Default::default(), }), @@ -531,10 +533,12 @@ impl FakeFs { let mut state = self.state.lock().await; let path = path.as_ref(); let inode = state.next_inode; + let mtime = state.next_mtime; state.next_inode += 1; + state.next_mtime += Duration::from_millis(1); let file = Arc::new(Mutex::new(FakeFsEntry::File { inode, - mtime: SystemTime::now(), + mtime, content, })); state @@ -816,6 +820,9 @@ impl Fs for FakeFs { let source = normalize_path(source); let target = normalize_path(target); let mut state = self.state.lock().await; + let mtime = state.next_mtime; + let inode = post_inc(&mut state.next_inode); + state.next_mtime += Duration::from_millis(1); let source_entry = state.read_path(&source).await?; let content = source_entry.lock().await.file_content(&source)?.clone(); let entry = state @@ -831,8 +838,8 @@ impl Fs for FakeFs { } btree_map::Entry::Vacant(e) => Ok(Some( e.insert(Arc::new(Mutex::new(FakeFsEntry::File { - inode: 0, - mtime: SystemTime::now(), + inode, + mtime, content: String::new(), }))) .clone(), diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 41bc2a8bab..66f8651e9d 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -514,8 +514,8 @@ impl Buffer { self.text.snapshot() } - pub fn file(&self) -> Option<&dyn File> { - self.file.as_deref() + pub fn file(&self) -> Option<&Arc> { + self.file.as_ref() } pub fn save( @@ -2373,8 +2373,8 @@ impl BufferSnapshot { self.selections_update_count } - pub fn file(&self) -> Option<&dyn File> { - self.file.as_deref() + pub fn file(&self) -> Option<&Arc> { + self.file.as_ref() } pub fn resolve_file_path(&self, cx: &AppContext, include_root: bool) -> Option { diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 7961c05506..816e75a8ea 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1981,7 +1981,7 @@ impl File { }) } - pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> { + pub fn from_dyn(file: Option<&Arc>) -> Option<&Self> { file.and_then(|f| f.as_any().downcast_ref()) } diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 740ac1467c..8626806abe 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -170,7 +170,6 @@ message RejoinRoom { uint64 id = 1; repeated UpdateProject reshared_projects = 2; repeated RejoinProject rejoined_projects = 3; - // relay open buffers and their vector clock } message RejoinProject { From d750b02a7c0bad06a8068bfd80b7c8410abc116f Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 21 Dec 2022 15:39:57 -0800 Subject: [PATCH 30/56] Handle file and diff updates to incomplete buffers Co-authored-by: Antonio Scandurra --- crates/project/src/project.rs | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 7a6bce3b9d..dfd7b4fbf1 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5034,14 +5034,14 @@ impl Project { this.update(&mut cx, |this, cx| { let buffer_id = envelope.payload.buffer_id; let diff_base = 
envelope.payload.diff_base; - let buffer = this + if let Some(buffer) = this .opened_buffers .get_mut(&buffer_id) .and_then(|b| b.upgrade(cx)) - .ok_or_else(|| anyhow!("No such buffer {}", buffer_id))?; - - buffer.update(cx, |buffer, cx| buffer.set_diff_base(diff_base, cx)); - + .or_else(|| this.incomplete_buffers.get(&buffer_id).cloned()) + { + buffer.update(cx, |buffer, cx| buffer.set_diff_base(diff_base, cx)); + } Ok(()) }) } @@ -5055,20 +5055,22 @@ impl Project { this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); let buffer_id = payload.buffer_id; - let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; - let worktree = this - .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx) - .ok_or_else(|| anyhow!("no such worktree"))?; - let file = File::from_proto(file, worktree, cx)?; - let buffer = this + if let Some(buffer) = this .opened_buffers .get_mut(&buffer_id) .and_then(|b| b.upgrade(cx)) - .ok_or_else(|| anyhow!("no such buffer"))?; - buffer.update(cx, |buffer, cx| { - buffer.file_updated(Arc::new(file), cx).detach(); - }); - this.assign_language_to_buffer(&buffer, cx); + .or_else(|| this.incomplete_buffers.get(&buffer_id).cloned()) + { + let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; + let worktree = this + .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx) + .ok_or_else(|| anyhow!("no such worktree"))?; + let file = File::from_proto(file, worktree, cx)?; + buffer.update(cx, |buffer, cx| { + buffer.file_updated(Arc::new(file), cx).detach(); + }); + this.assign_language_to_buffer(&buffer, cx); + } Ok(()) }) } From 559e14799c7a3210309ac265eebcb7d434ee3762 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 22 Dec 2022 17:23:26 +0100 Subject: [PATCH 31/56] Restructure randomized test to be a bit clearer and test more stuff --- .../src/tests/randomized_integration_tests.rs | 257 +++++++++++++----- crates/fs/src/fs.rs | 15 + 2 files changed, 209 insertions(+), 63 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0699684418..ed58b3371e 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -3,17 +3,17 @@ use crate::{ rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, tests::{TestClient, TestServer}, }; -use anyhow::anyhow; +use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::BTreeMap; use fs::{FakeFs, Fs as _}; use futures::StreamExt as _; -use gpui::{executor::Deterministic, TestAppContext}; +use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; -use project::search::SearchQuery; +use project::{search::SearchQuery, Project}; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; @@ -77,6 +77,9 @@ async fn test_random_collaboration( let mut user_ids = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; + let allow_server_restarts = rng.lock().gen_bool(0.7); + let allow_client_reconnection = rng.lock().gen_bool(0.7); + let allow_client_disconnection = rng.lock().gen_bool(0.1); let mut operations = 0; while operations < max_operations { @@ -105,6 +108,7 @@ async fn test_random_collaboration( clients.push(client_cx.foreground().spawn(simulate_client( client, op_start_signal.1, + allow_client_disconnection, 
rng.clone(), client_cx, ))); @@ -113,7 +117,7 @@ async fn test_random_collaboration( operations += 1; } - 20..=24 if clients.len() > 1 => { + 20..=24 if clients.len() > 1 && allow_client_disconnection => { let client_ix = rng.lock().gen_range(1..clients.len()); log::info!( "Simulating full disconnection of user {}", @@ -172,7 +176,7 @@ async fn test_random_collaboration( operations += 1; } - 25..=29 if clients.len() > 1 => { + 25..=29 if clients.len() > 1 && allow_client_reconnection => { let client_ix = rng.lock().gen_range(1..clients.len()); let user_id = user_ids[client_ix]; log::info!("Simulating temporary disconnection of user {}", user_id); @@ -188,7 +192,7 @@ async fn test_random_collaboration( operations += 1; } - 30..=34 => { + 30..=34 if allow_server_restarts => { log::info!("Simulating server restart"); server.reset().await; deterministic.advance_clock(RECEIVE_TIMEOUT); @@ -384,6 +388,7 @@ async fn test_random_collaboration( async fn simulate_client( mut client: TestClient, mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + can_hang_up: bool, rng: Arc>, mut cx: TestAppContext, ) -> (TestClient, TestAppContext) { @@ -500,7 +505,9 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while op_start_signal.next().await.is_some() { - if let Err(error) = randomly_mutate_client(&mut client, rng.clone(), &mut cx).await { + if let Err(error) = + randomly_mutate_client(&mut client, can_hang_up, rng.clone(), &mut cx).await + { log::error!("{} error: {:?}", client.username, error); } @@ -513,12 +520,35 @@ async fn simulate_client( async fn randomly_mutate_client( client: &mut TestClient, + can_hang_up: bool, rng: Arc>, cx: &mut TestAppContext, -) -> anyhow::Result<()> { +) -> Result<()> { + let choice = rng.lock().gen_range(0..100); + match choice { + 0..=19 => randomly_mutate_active_call(client, can_hang_up, &rng, cx).await?, + 20..=49 => randomly_mutate_projects(client, &rng, cx).await?, + 50..=59 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => { + randomly_mutate_worktrees(client, &rng, cx).await?; + } + 60..=84 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => { + randomly_query_and_mutate_buffers(client, &rng, cx).await?; + } + _ => randomly_mutate_fs(client, &rng).await, + } + + Ok(()) +} + +async fn randomly_mutate_active_call( + client: &mut TestClient, + can_hang_up: bool, + rng: &Mutex, + cx: &mut TestAppContext, +) -> Result<()> { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - if rng.lock().gen() { + if rng.lock().gen_bool(0.7) { log::info!("{}: accepting incoming call", client.username); active_call .update(cx, |call, cx| call.accept_incoming(cx)) @@ -550,7 +580,9 @@ async fn randomly_mutate_client( .update(cx, |call, cx| call.invite(contact.user.id, None, cx)) .await?; } - 30..=39 if active_call.read_with(cx, |call, _| call.room().is_some()) => { + 30..=39 + if can_hang_up && active_call.read_with(cx, |call, _| call.room().is_some()) => + { log::info!("{}: hanging up", client.username); active_call.update(cx, |call, cx| call.hang_up(cx))?; } @@ -558,6 +590,39 @@ async fn randomly_mutate_client( } } + Ok(()) +} + +async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex) { + let is_dir = rng.lock().gen::(); + let mut new_path = client + .fs + .directories() + .await + .choose(&mut *rng.lock()) + .unwrap() + .clone(); + new_path.push(gen_file_name(rng)); + if is_dir { + log::info!("{}: 
creating local dir at {:?}", client.username, new_path); + client.fs.create_dir(&new_path).await.unwrap(); + } else { + new_path.set_extension("rs"); + log::info!("{}: creating local file at {:?}", client.username, new_path); + client + .fs + .create_file(&new_path, Default::default()) + .await + .unwrap(); + } +} + +async fn randomly_mutate_projects( + client: &mut TestClient, + rng: &Mutex, + cx: &mut TestAppContext, +) -> Result<()> { + let active_call = cx.read(ActiveCall::global); let remote_projects = if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) { room.read_with(cx, |room, _| { @@ -572,8 +637,8 @@ async fn randomly_mutate_client( let project = if remote_projects.is_empty() || rng.lock().gen() { if client.local_projects.is_empty() || rng.lock().gen() { - let dir_paths = client.fs.directories().await; - let local_project = if dir_paths.is_empty() || rng.lock().gen() { + let paths = client.fs.paths().await; + let local_project = if paths.is_empty() || rng.lock().gen() { let root_path = client.create_new_root_dir(); client.fs.create_dir(&root_path).await.unwrap(); client @@ -588,7 +653,7 @@ async fn randomly_mutate_client( ); client.build_local_project(root_path, cx).await.0 } else { - let root_path = dir_paths.choose(&mut *rng.lock()).unwrap(); + let root_path = paths.choose(&mut *rng.lock()).unwrap(); log::info!( "{}: opening local project at {:?}", client.username, @@ -647,7 +712,9 @@ async fn randomly_mutate_client( } }; - if active_call.read_with(cx, |call, _| call.room().is_some()) { + if active_call.read_with(cx, |call, _| call.room().is_some()) + && project.read_with(cx, |project, _| project.is_local()) + { if let Err(error) = active_call .update(cx, |call, cx| call.share_project(project.clone(), cx)) .await @@ -656,9 +723,99 @@ async fn randomly_mutate_client( } } + let choice = rng.lock().gen_range(0..100); + match choice { + 0..=19 if project.read_with(cx, |project, _| project.is_local()) => { + let paths = client.fs.paths().await; + let path = paths.choose(&mut *rng.lock()).unwrap(); + log::info!( + "{}: find or create local worktree for path {:?}", + client.username, + path + ); + project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(&path, true, cx) + }) + .await + .unwrap(); + } + 20..=24 if project.read_with(cx, |project, _| project.is_remote()) => { + log::info!( + "{}: dropping remote project {}", + client.username, + project.read_with(cx, |project, _| project.remote_id().unwrap()) + ); + + cx.update(|_| { + client + .remote_projects + .retain(|remote_project| *remote_project != project); + client.buffers.remove(&project); + drop(project); + }); + } + _ => {} + } + + Ok(()) +} + +async fn randomly_mutate_worktrees( + client: &mut TestClient, + rng: &Mutex, + cx: &mut TestAppContext, +) -> Result<()> { + let project = choose_random_project(client, rng).unwrap(); + let Some(worktree) = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + && worktree.root_entry().map_or(false, |e| e.is_dir()) + }) + .choose(&mut *rng.lock()) + }) else { + return Ok(()) + }; + + let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| { + (worktree.id(), worktree.root_name().to_string()) + }); + + let is_dir = rng.lock().gen::(); + let mut new_path = PathBuf::new(); + new_path.push(gen_file_name(rng)); + if !is_dir { + new_path.set_extension("rs"); + } + log::info!( + 
"{}: creating {:?} in worktree {} ({})", + client.username, + new_path, + worktree_id, + worktree_root_name, + ); + project + .update(cx, |project, cx| { + project.create_entry((worktree_id, new_path), is_dir, cx) + }) + .unwrap() + .await?; + Ok(()) +} + +async fn randomly_query_and_mutate_buffers( + client: &mut TestClient, + rng: &Mutex, + cx: &mut TestAppContext, +) -> Result<()> { + let project = choose_random_project(client, rng).unwrap(); let buffers = client.buffers.entry(project.clone()).or_default(); let buffer = if buffers.is_empty() || rng.lock().gen() { - let worktree = if let Some(worktree) = project.read_with(cx, |project, cx| { + let Some(worktree) = project.read_with(cx, |project, cx| { project .worktrees(cx) .filter(|worktree| { @@ -666,10 +823,7 @@ async fn randomly_mutate_client( worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) }) .choose(&mut *rng.lock()) - }) { - worktree - } else { - cx.background().simulate_random_delay().await; + }) else { return Ok(()); }; @@ -880,50 +1034,6 @@ async fn randomly_mutate_client( buffers.extend(search.await?.into_keys()); } } - 60..=79 => { - let worktree = project - .read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() - && worktree.entries(false).any(|e| e.is_file()) - && worktree.root_entry().map_or(false, |e| e.is_dir()) - }) - .choose(&mut *rng.lock()) - }) - .unwrap(); - let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| { - (worktree.id(), worktree.root_name().to_string()) - }); - - let mut new_name = String::new(); - for _ in 0..10 { - let letter = rng.lock().gen_range('a'..='z'); - new_name.push(letter); - } - - let is_dir = rng.lock().gen::(); - let mut new_path = PathBuf::new(); - new_path.push(new_name); - if !is_dir { - new_path.set_extension("rs"); - } - log::info!( - "{}: creating {:?} in worktree {} ({})", - client.username, - new_path, - worktree_id, - worktree_root_name, - ); - project - .update(cx, |project, cx| { - project.create_entry((worktree_id, new_path), is_dir, cx) - }) - .unwrap() - .await?; - } _ => { buffer.update(cx, |buffer, cx| { log::info!( @@ -943,3 +1053,24 @@ async fn randomly_mutate_client( Ok(()) } + +fn choose_random_project( + client: &mut TestClient, + rng: &Mutex, +) -> Option> { + client + .local_projects + .iter() + .chain(&client.remote_projects) + .choose(&mut *rng.lock()) + .cloned() +} + +fn gen_file_name(rng: &Mutex) -> String { + let mut name = String::new(); + for _ in 0..10 { + let letter = rng.lock().gen_range('a'..='z'); + name.push(letter); + } + name +} diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 7b1287247c..97bb836b6d 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -635,6 +635,21 @@ impl FakeFs { } } + pub async fn paths(&self) -> Vec { + let mut result = Vec::new(); + let mut queue = collections::VecDeque::new(); + queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + while let Some((path, entry)) = queue.pop_front() { + if let FakeFsEntry::Dir { entries, .. 
} = &*entry.lock().await { + for (name, entry) in entries { + queue.push_back((path.join(name), entry.clone())); + } + } + result.push(path); + } + result + } + pub async fn directories(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); From 738e161bc6f948ed09599423e8d6af0396ec1fc2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 22 Dec 2022 18:32:21 +0100 Subject: [PATCH 32/56] WIP: failing test SEED=882 RUST_LOG=collab::tests::randomized_integration_tests=info MAX_PEERS=2 ITERATIONS=1 OPERATIONS=49 cargo test --package=collab random -- --nocapture --- .../src/tests/randomized_integration_tests.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index ed58b3371e..1deaafcba2 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -290,9 +290,10 @@ async fn test_random_collaboration( assert_eq!( guest_snapshot.entries(false).collect::>(), host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {}", + "{} has different snapshot than the host for worktree {} ({:?})", client.username, - id + id, + host_snapshot.abs_path() ); assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); } @@ -713,13 +714,18 @@ async fn randomly_mutate_projects( }; if active_call.read_with(cx, |call, _| call.room().is_some()) - && project.read_with(cx, |project, _| project.is_local()) + && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) { - if let Err(error) = active_call + match active_call .update(cx, |call, cx| call.share_project(project.clone(), cx)) .await { - log::error!("{}: error sharing project, {:?}", client.username, error); + Ok(project_id) => { + log::info!("{}: shared project with id {}", client.username, project_id); + } + Err(error) => { + log::error!("{}: error sharing project, {:?}", client.username, error); + } } } @@ -729,7 +735,7 @@ async fn randomly_mutate_projects( let paths = client.fs.paths().await; let path = paths.choose(&mut *rng.lock()).unwrap(); log::info!( - "{}: find or create local worktree for path {:?}", + "{}: finding/creating local worktree for path {:?}", client.username, path ); From 42e74e7eefffe6375cbf7a58792e984ad23193cd Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 22 Dec 2022 11:18:10 -0800 Subject: [PATCH 33/56] Excluded deleted entries when initially sending worktrees to guests Co-authored-by: Antonio Scandurra --- crates/collab/src/db.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 8b275bb680..31f6f9cd09 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2390,7 +2390,11 @@ impl Database { // Populate worktree entries. 
{ let mut db_entries = worktree_entry::Entity::find() - .filter(worktree_entry::Column::ProjectId.eq(project_id)) + .filter( + Condition::all() + .add(worktree_entry::Column::ProjectId.eq(project_id)) + .add(worktree_entry::Column::IsDeleted.eq(false)), + ) .stream(&*tx) .await?; while let Some(db_entry) = db_entries.next().await { From 75803d8dbbf2bc81244df12577db7468d1f66d0d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 23 Dec 2022 11:53:13 +0100 Subject: [PATCH 34/56] Respond with an error when client hasn't got a registered handle --- crates/client/src/client.rs | 2 ++ crates/rpc/src/peer.rs | 21 +++++++++++++++++++++ crates/rpc/src/proto.rs | 10 ++++++++++ 3 files changed, 33 insertions(+) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 6d9ec305b6..aa46d64fcb 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -1235,6 +1235,7 @@ impl Client { subscriber } else { log::info!("unhandled message {}", type_name); + self.peer.respond_with_unhandled_message(message).log_err(); return; }; @@ -1278,6 +1279,7 @@ impl Client { .detach(); } else { log::info!("unhandled message {}", type_name); + self.peer.respond_with_unhandled_message(message).log_err(); } } diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index d2a4e6e080..4740120fc4 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -494,6 +494,27 @@ impl Peer { Ok(()) } + pub fn respond_with_unhandled_message( + &self, + envelope: Box, + ) -> Result<()> { + let connection = self.connection_state(envelope.sender_id())?; + let response = proto::Error { + message: format!("message {} was not handled", envelope.payload_type_name()), + }; + let message_id = connection + .next_message_id + .fetch_add(1, atomic::Ordering::SeqCst); + connection + .outgoing_tx + .unbounded_send(proto::Message::Envelope(response.into_envelope( + message_id, + Some(envelope.message_id()), + None, + )))?; + Ok(()) + } + fn connection_state(&self, connection_id: ConnectionId) -> Result { let connections = self.connections.read(); let connection = connections diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 14541e4b66..6b09f07db4 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -42,6 +42,8 @@ pub trait AnyTypedEnvelope: 'static + Send + Sync { fn into_any(self: Box) -> Box; fn is_background(&self) -> bool; fn original_sender_id(&self) -> Option; + fn sender_id(&self) -> ConnectionId; + fn message_id(&self) -> u32; } pub enum MessagePriority { @@ -73,6 +75,14 @@ impl AnyTypedEnvelope for TypedEnvelope { fn original_sender_id(&self) -> Option { self.original_sender_id } + + fn sender_id(&self) -> ConnectionId { + self.sender_id + } + + fn message_id(&self) -> u32 { + self.message_id + } } impl PeerId { From 344d05045d208f55563c6ff513edc1e1698d141d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 23 Dec 2022 12:26:48 +0100 Subject: [PATCH 35/56] Avoid hanging waiting for operations when buffer has none --- crates/project/src/project.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index dfd7b4fbf1..ca1b988d05 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -6255,16 +6255,19 @@ fn split_operations( #[cfg(not(any(test, feature = "test-support")))] const CHUNK_SIZE: usize = 100; + let mut done = false; std::iter::from_fn(move || { - if operations.is_empty() { + if done { return None; } - Some( - 
operations - .drain(..cmp::min(CHUNK_SIZE, operations.len())) - .collect(), - ) + let operations = operations + .drain(..cmp::min(CHUNK_SIZE, operations.len())) + .collect::>(); + if operations.is_empty() { + done = true; + } + Some(operations) }) } From 6458a9144ea054abbf9d5fd970ade5594554ade0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 23 Dec 2022 15:02:06 +0100 Subject: [PATCH 36/56] WIP: failing randomized test SEED=175 MAX_PEERS=2 ITERATIONS=1 OPERATIONS=159 cargo test --package=collab random -- --nocapture --- Cargo.lock | 1 + crates/project/Cargo.toml | 1 + crates/project/src/project.rs | 14 +++++++++++++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 1dfc4bc24c..6debc9eb8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4437,6 +4437,7 @@ dependencies = [ "aho-corasick", "anyhow", "async-trait", + "backtrace", "client", "clock", "collections", diff --git a/crates/project/Cargo.toml b/crates/project/Cargo.toml index dd4d2be5b6..6d6560ea38 100644 --- a/crates/project/Cargo.toml +++ b/crates/project/Cargo.toml @@ -37,6 +37,7 @@ util = { path = "../util" } aho-corasick = "0.7" anyhow = "1.0.57" async-trait = "0.1" +backtrace = "0.3" futures = "0.3" ignore = "0.4" lazy_static = "1.4.0" diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index ca1b988d05..b09c1fd267 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4635,7 +4635,12 @@ impl Project { let is_host = collaborator.replica_id == 0; this.collaborators.insert(new_peer_id, collaborator); - if let Some(buffers) = this.shared_buffers.remove(&old_peer_id) { + let buffers = this.shared_buffers.remove(&old_peer_id); + println!( + "peer {} became {}. moving buffers {:?}", + old_peer_id, new_peer_id, &buffers + ); + if let Some(buffers) = buffers { this.shared_buffers.insert(new_peer_id, buffers); } @@ -5610,6 +5615,13 @@ impl Project { ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); if let Some(project_id) = self.remote_id() { + if buffer_id == 300015 { + println!( + "creating buffer for peer {}. {:?}", + peer_id, + backtrace::Backtrace::new() + ); + } let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); if shared_buffers.insert(buffer_id) { let buffer = buffer.read(cx); From 599acf0daa5581234157ab3f106d8666c4b33851 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 23 Dec 2022 17:34:13 -0800 Subject: [PATCH 37/56] WIP - Panic immediately when detecting non-determinism via a change to the execution trace --- crates/client/src/client.rs | 8 +- crates/collab/src/executor.rs | 8 ++ crates/collab/src/rpc.rs | 10 +- .../src/tests/randomized_integration_tests.rs | 2 +- crates/gpui/src/executor.rs | 110 ++++++++++++++++-- crates/gpui/src/test.rs | 16 ++- 6 files changed, 138 insertions(+), 16 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index aa46d64fcb..60af7e0d0b 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -851,6 +851,7 @@ impl Client { }) .detach(); + let t0 = Instant::now(); let this = self.clone(); let cx = cx.clone(); cx.foreground() @@ -867,7 +868,12 @@ impl Client { } } Err(err) => { - log::error!("connection error: {:?}", err); + // TODO - remove. Make the test's non-determinism more apparent by + // only sometimes formatting this stack trace. 
+ if Instant::now().duration_since(t0).as_nanos() % 2 == 0 { + log::error!("connection error: {:?}", err); + } + this.set_status(Status::ConnectionLost, &cx); } } diff --git a/crates/collab/src/executor.rs b/crates/collab/src/executor.rs index d2253f8ccb..cd3cc60d4a 100644 --- a/crates/collab/src/executor.rs +++ b/crates/collab/src/executor.rs @@ -33,4 +33,12 @@ impl Executor { } } } + + pub fn record_backtrace(&self) { + match self { + Executor::Production => {} + #[cfg(test)] + Executor::Deterministic(background) => background.record_backtrace(), + } + } } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 9014ef7f40..bba07d34ef 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -95,6 +95,7 @@ struct Session { peer: Arc, connection_pool: Arc>, live_kit_client: Option>, + executor: Executor, } impl Session { @@ -521,7 +522,8 @@ impl Server { db: Arc::new(tokio::sync::Mutex::new(DbHandle(this.app_state.db.clone()))), peer: this.peer.clone(), connection_pool: this.connection_pool.clone(), - live_kit_client: this.app_state.live_kit_client.clone() + live_kit_client: this.app_state.live_kit_client.clone(), + executor: executor.clone(), }; update_user_contacts(user_id, &session).await?; @@ -1515,6 +1517,7 @@ async fn update_language_server( request: proto::UpdateLanguageServer, session: Session, ) -> Result<()> { + session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = session .db() @@ -1541,6 +1544,7 @@ async fn forward_project_request( where T: EntityMessage + RequestMessage, { + session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.remote_entity_id()); let host_connection_id = { let collaborators = session @@ -1609,6 +1613,7 @@ async fn create_buffer_for_peer( request: proto::CreateBufferForPeer, session: Session, ) -> Result<()> { + session.executor.record_backtrace(); let peer_id = request.peer_id.ok_or_else(|| anyhow!("invalid peer id"))?; session .peer @@ -1621,6 +1626,7 @@ async fn update_buffer( response: Response, session: Session, ) -> Result<()> { + session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = session .db() @@ -1628,6 +1634,8 @@ async fn update_buffer( .project_connection_ids(project_id, session.connection_id) .await?; + session.executor.record_backtrace(); + broadcast( session.connection_id, project_connection_ids.iter().copied(), diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 1deaafcba2..6b6166ec08 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -17,7 +17,7 @@ use project::{search::SearchQuery, Project}; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; -#[gpui::test(iterations = 100)] +#[gpui::test(iterations = 100, detect_nondeterminism = true)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 876e48351d..a78a8c4b2e 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -4,7 +4,7 @@ use futures::channel::mpsc; use smol::{channel, prelude::*, Executor}; use std::{ any::Any, - fmt::{self, Display}, + fmt::{self, Display, Write as _}, marker::PhantomData, mem, pin::Pin, @@ -17,7 +17,8 @@ use std::{ use crate::{ platform::{self, Dispatcher}, - util, 
MutableAppContext, + util::{self, CwdBacktrace}, + MutableAppContext, }; pub enum Foreground { @@ -74,11 +75,18 @@ struct DeterministicState { pending_timers: Vec<(usize, std::time::Instant, postage::barrier::Sender)>, waiting_backtrace: Option, next_runnable_id: usize, - poll_history: Vec, + poll_history: Vec, + previous_poll_history: Option>, enable_runnable_backtraces: bool, runnable_backtraces: collections::HashMap, } +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum ExecutorEvent { + PollRunnable { id: usize }, + EnqueuRunnable { id: usize }, +} + #[cfg(any(test, feature = "test-support"))] struct ForegroundRunnable { id: usize, @@ -130,6 +138,7 @@ impl Deterministic { waiting_backtrace: None, next_runnable_id: 0, poll_history: Default::default(), + previous_poll_history: Default::default(), enable_runnable_backtraces: false, runnable_backtraces: Default::default(), })), @@ -137,10 +146,14 @@ impl Deterministic { }) } - pub fn runnable_history(&self) -> Vec { + pub fn execution_history(&self) -> Vec { self.state.lock().poll_history.clone() } + pub fn set_previous_execution_history(&self, history: Option>) { + self.state.lock().previous_poll_history = history; + } + pub fn enable_runnable_backtrace(&self) { self.state.lock().enable_runnable_backtraces = true; } @@ -185,6 +198,9 @@ impl Deterministic { let unparker = self.parker.lock().unparker(); let (runnable, task) = async_task::spawn_local(future, move |runnable| { let mut state = state.lock(); + state + .poll_history + .push(ExecutorEvent::EnqueuRunnable { id }); state .scheduled_from_foreground .entry(cx_id) @@ -212,6 +228,9 @@ impl Deterministic { let unparker = self.parker.lock().unparker(); let (runnable, task) = async_task::spawn(future, move |runnable| { let mut state = state.lock(); + state + .poll_history + .push(ExecutorEvent::EnqueuRunnable { id }); state .scheduled_from_background .push(BackgroundRunnable { id, runnable }); @@ -314,7 +333,9 @@ impl Deterministic { let background_len = state.scheduled_from_background.len(); let ix = state.rng.gen_range(0..background_len); let background_runnable = state.scheduled_from_background.remove(ix); - state.poll_history.push(background_runnable.id); + state.push_to_history(ExecutorEvent::PollRunnable { + id: background_runnable.id, + }); drop(state); background_runnable.runnable.run(); } else if !state.scheduled_from_foreground.is_empty() { @@ -332,7 +353,9 @@ impl Deterministic { if scheduled_from_cx.is_empty() { state.scheduled_from_foreground.remove(&cx_id_to_run); } - state.poll_history.push(foreground_runnable.id); + state.push_to_history(ExecutorEvent::PollRunnable { + id: foreground_runnable.id, + }); drop(state); @@ -366,7 +389,9 @@ impl Deterministic { let ix = state.rng.gen_range(0..=runnable_count); if ix < state.scheduled_from_background.len() { let background_runnable = state.scheduled_from_background.remove(ix); - state.poll_history.push(background_runnable.id); + state.push_to_history(ExecutorEvent::PollRunnable { + id: background_runnable.id, + }); drop(state); background_runnable.runnable.run(); } else { @@ -465,6 +490,25 @@ impl Deterministic { } } } + + pub fn record_backtrace(&self) { + let mut state = self.state.lock(); + if state.enable_runnable_backtraces { + let current_id = state + .poll_history + .iter() + .rev() + .find_map(|event| match event { + ExecutorEvent::PollRunnable { id } => Some(*id), + _ => None, + }); + if let Some(id) = current_id { + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } + } + } } 
impl Drop for Timer { @@ -506,6 +550,38 @@ impl Future for Timer { #[cfg(any(test, feature = "test-support"))] impl DeterministicState { + fn push_to_history(&mut self, event: ExecutorEvent) { + self.poll_history.push(event); + if let Some(prev_history) = &self.previous_poll_history { + let ix = self.poll_history.len() - 1; + let prev_event = prev_history[ix]; + if event != prev_event { + let mut message = String::new(); + writeln!( + &mut message, + "current runnable backtrace:\n{:?}", + self.runnable_backtraces.get_mut(&event.id()).map(|trace| { + trace.resolve(); + CwdBacktrace(trace) + }) + ) + .unwrap(); + writeln!( + &mut message, + "previous runnable backtrace:\n{:?}", + self.runnable_backtraces + .get_mut(&prev_event.id()) + .map(|trace| { + trace.resolve(); + CwdBacktrace(trace) + }) + ) + .unwrap(); + panic!("detected non-determinism after {ix}. {message}"); + } + } + } + fn will_park(&mut self) { if self.forbid_parking { let mut backtrace_message = String::new(); @@ -526,6 +602,16 @@ impl DeterministicState { } } +#[cfg(any(test, feature = "test-support"))] +impl ExecutorEvent { + pub fn id(&self) -> usize { + match self { + ExecutorEvent::PollRunnable { id } => *id, + ExecutorEvent::EnqueuRunnable { id } => *id, + } + } +} + impl Foreground { pub fn platform(dispatcher: Arc) -> Result { if dispatcher.is_main_thread() { @@ -755,6 +841,16 @@ impl Background { } } } + + #[cfg(any(test, feature = "test-support"))] + pub fn record_backtrace(&self) { + match self { + Self::Deterministic { executor, .. } => executor.record_backtrace(), + _ => { + panic!("this method can only be called on a deterministic executor") + } + } + } } impl Default for Background { diff --git a/crates/gpui/src/test.rs b/crates/gpui/src/test.rs index aade1054a8..eb992b638a 100644 --- a/crates/gpui/src/test.rs +++ b/crates/gpui/src/test.rs @@ -1,7 +1,10 @@ use crate::{ - elements::Empty, executor, platform, util::CwdBacktrace, Element, ElementBox, Entity, - FontCache, Handle, LeakDetector, MutableAppContext, Platform, RenderContext, Subscription, - TestAppContext, View, + elements::Empty, + executor::{self, ExecutorEvent}, + platform, + util::CwdBacktrace, + Element, ElementBox, Entity, FontCache, Handle, LeakDetector, MutableAppContext, Platform, + RenderContext, Subscription, TestAppContext, View, }; use futures::StreamExt; use parking_lot::Mutex; @@ -62,7 +65,7 @@ pub fn run_test( let platform = Arc::new(platform::test::platform()); let font_system = platform.fonts(); let font_cache = Arc::new(FontCache::new(font_system)); - let mut prev_runnable_history: Option> = None; + let mut prev_runnable_history: Option> = None; for _ in 0..num_iterations { let seed = atomic_seed.load(SeqCst); @@ -73,6 +76,7 @@ pub fn run_test( let deterministic = executor::Deterministic::new(seed); if detect_nondeterminism { + deterministic.set_previous_execution_history(prev_runnable_history.clone()); deterministic.enable_runnable_backtrace(); } @@ -98,7 +102,7 @@ pub fn run_test( leak_detector.lock().detect(); if detect_nondeterminism { - let curr_runnable_history = deterministic.runnable_history(); + let curr_runnable_history = deterministic.execution_history(); if let Some(prev_runnable_history) = prev_runnable_history { let mut prev_entries = prev_runnable_history.iter().fuse(); let mut curr_entries = curr_runnable_history.iter().fuse(); @@ -138,7 +142,7 @@ pub fn run_test( let last_common_backtrace = common_history_prefix .last() - .map(|runnable_id| deterministic.runnable_backtrace(*runnable_id)); + .map(|event| 
deterministic.runnable_backtrace(event.id())); writeln!( &mut error, From 273988b8d569aae0b989d126f688d7367020a1dd Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 27 Dec 2022 16:47:28 -0700 Subject: [PATCH 38/56] Set transaction group interval to ZERO by default in tests We were seeing non-deterministic behavior in randomized tests when generating backtraces took enough time to cause transactions to group in some cases, but not group in others. Tests will need to explicitly opt into grouping if they want it by setting the interval explicitly. We have tests in the text module that currently test the history grouping explicitly, but I'm not sure it's needed elsewhere. --- crates/client/src/client.rs | 8 +------- crates/collab/src/tests/randomized_integration_tests.rs | 2 +- crates/gpui/src/executor.rs | 4 +--- crates/text/src/tests.rs | 7 ++++--- crates/text/src/text.rs | 4 ++++ 5 files changed, 11 insertions(+), 14 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 60af7e0d0b..aa46d64fcb 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -851,7 +851,6 @@ impl Client { }) .detach(); - let t0 = Instant::now(); let this = self.clone(); let cx = cx.clone(); cx.foreground() @@ -868,12 +867,7 @@ impl Client { } } Err(err) => { - // TODO - remove. Make the test's non-determinism more apparent by - // only sometimes formatting this stack trace. - if Instant::now().duration_since(t0).as_nanos() % 2 == 0 { - log::error!("connection error: {:?}", err); - } - + log::error!("connection error: {:?}", err); this.set_status(Status::ConnectionLost, &cx); } } diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 6b6166ec08..1deaafcba2 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -17,7 +17,7 @@ use project::{search::SearchQuery, Project}; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; -#[gpui::test(iterations = 100, detect_nondeterminism = true)] +#[gpui::test(iterations = 100)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index a78a8c4b2e..faf2a9729f 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -198,9 +198,7 @@ impl Deterministic { let unparker = self.parker.lock().unparker(); let (runnable, task) = async_task::spawn_local(future, move |runnable| { let mut state = state.lock(); - state - .poll_history - .push(ExecutorEvent::EnqueuRunnable { id }); + state.push_to_history(ExecutorEvent::EnqueuRunnable { id }); state .scheduled_from_foreground .entry(cx_id) diff --git a/crates/text/src/tests.rs b/crates/text/src/tests.rs index ae91478f89..4b8266120a 100644 --- a/crates/text/src/tests.rs +++ b/crates/text/src/tests.rs @@ -45,7 +45,7 @@ fn test_random_edits(mut rng: StdRng) { let mut buffer = Buffer::new(0, 0, reference_string.clone()); LineEnding::normalize(&mut reference_string); - buffer.history.group_interval = Duration::from_millis(rng.gen_range(0..=200)); + buffer.set_group_interval(Duration::from_millis(rng.gen_range(0..=200))); let mut buffer_versions = Vec::new(); log::info!( "buffer text {:?}, version: {:?}", @@ -488,7 +488,7 @@ fn test_anchors_at_start_and_end() { fn test_undo_redo() { let mut buffer = Buffer::new(0, 0, "1234".into()); // Set group interval to zero so as to not group edits in the undo stack. 
- buffer.history.group_interval = Duration::from_secs(0); + buffer.set_group_interval(Duration::from_secs(0)); buffer.edit([(1..1, "abx")]); buffer.edit([(3..4, "yzef")]); @@ -524,6 +524,7 @@ fn test_undo_redo() { fn test_history() { let mut now = Instant::now(); let mut buffer = Buffer::new(0, 0, "123456".into()); + buffer.set_group_interval(Duration::from_millis(300)); let transaction_1 = buffer.start_transaction_at(now).unwrap(); buffer.edit([(2..4, "cd")]); @@ -535,7 +536,7 @@ fn test_history() { buffer.end_transaction_at(now).unwrap(); assert_eq!(buffer.text(), "12cde6"); - now += buffer.history.group_interval + Duration::from_millis(1); + now += buffer.transaction_group_interval() + Duration::from_millis(1); buffer.start_transaction_at(now); buffer.edit([(0..1, "a")]); buffer.edit([(1..1, "b")]); diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 5aa91ede8a..914023f305 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -115,6 +115,10 @@ impl History { undo_stack: Vec::new(), redo_stack: Vec::new(), transaction_depth: 0, + // Don't group transactions in tests unless we opt in, because it's a footgun. + #[cfg(any(test, feature = "test-support"))] + group_interval: Duration::ZERO, + #[cfg(not(any(test, feature = "test-support")))] group_interval: Duration::from_millis(300), } } From f3dee2d3321e53d582c97e858dc4d39c9017de8a Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 27 Dec 2022 16:54:45 -0700 Subject: [PATCH 39/56] Remove printlns, found a failure Failing seed: SEED=416 MAX_PEERS=2 ITERATIONS=5000 OPERATIONS=159 cargo +beta test --package=collab random -- --nocapture --- crates/project/src/project.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index b09c1fd267..d9dd42dae1 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4636,9 +4636,11 @@ impl Project { this.collaborators.insert(new_peer_id, collaborator); let buffers = this.shared_buffers.remove(&old_peer_id); - println!( + log::info!( "peer {} became {}. moving buffers {:?}", - old_peer_id, new_peer_id, &buffers + old_peer_id, + new_peer_id, + &buffers ); if let Some(buffers) = buffers { this.shared_buffers.insert(new_peer_id, buffers); @@ -5615,13 +5617,6 @@ impl Project { ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); if let Some(project_id) = self.remote_id() { - if buffer_id == 300015 { - println!( - "creating buffer for peer {}. 
{:?}", - peer_id, - backtrace::Backtrace::new() - ); - } let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); if shared_buffers.insert(buffer_id) { let buffer = buffer.read(cx); From 74843493f44d7ebcd32968483e77e180fd17334c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 2 Jan 2023 10:20:52 -0800 Subject: [PATCH 40/56] Assign fake fs entries' mtimes more consistently --- crates/fs/src/fs.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 97bb836b6d..d585195ab4 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -518,7 +518,7 @@ impl FakeFs { state: Mutex::new(FakeFsState { root: Arc::new(Mutex::new(FakeFsEntry::Dir { inode: 0, - mtime: SystemTime::now(), + mtime: SystemTime::UNIX_EPOCH, entries: Default::default(), git_repo_state: None, })), @@ -535,7 +535,7 @@ impl FakeFs { let inode = state.next_inode; let mtime = state.next_mtime; state.next_inode += 1; - state.next_mtime += Duration::from_millis(1); + state.next_mtime += Duration::from_nanos(1); let file = Arc::new(Mutex::new(FakeFsEntry::File { inode, mtime, @@ -745,6 +745,8 @@ impl Fs for FakeFs { } let inode = state.next_inode; + let mtime = state.next_mtime; + state.next_mtime += Duration::from_nanos(1); state.next_inode += 1; state .write_path(&cur_path, |entry| { @@ -752,7 +754,7 @@ impl Fs for FakeFs { created_dirs.push(cur_path.clone()); Arc::new(Mutex::new(FakeFsEntry::Dir { inode, - mtime: SystemTime::now(), + mtime, entries: Default::default(), git_repo_state: None, })) @@ -770,10 +772,12 @@ impl Fs for FakeFs { self.simulate_random_delay().await; let mut state = self.state.lock().await; let inode = state.next_inode; + let mtime = state.next_mtime; + state.next_mtime += Duration::from_nanos(1); state.next_inode += 1; let file = Arc::new(Mutex::new(FakeFsEntry::File { inode, - mtime: SystemTime::now(), + mtime, content: String::new(), })); state @@ -837,7 +841,7 @@ impl Fs for FakeFs { let mut state = self.state.lock().await; let mtime = state.next_mtime; let inode = post_inc(&mut state.next_inode); - state.next_mtime += Duration::from_millis(1); + state.next_mtime += Duration::from_nanos(1); let source_entry = state.read_path(&source).await?; let content = source_entry.lock().await.file_content(&source)?.clone(); let entry = state From a6ffcdd0cf2d82f5106dcf24ff94bd3532ed9519 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 2 Jan 2023 20:12:00 -0700 Subject: [PATCH 41/56] Track open buffers when handling sync requests When a host sends a buffer to a guest for the first time, they record that they have done so in a set tied to that guest's peer id. When the guest reconnects and syncs buffers, they do so under a different peer id, so we need to be sure we track which buffers we have sent them to avoid sending them the same buffer twice, which violates the guest's assumptions. 
--- crates/fs/src/fs.rs | 4 ++-- crates/gpui/src/executor.rs | 11 ++++++----- crates/project/src/project.rs | 28 +++++++++++++++++++++------- 3 files changed, 29 insertions(+), 14 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index d585195ab4..184b2f2e5a 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -23,7 +23,7 @@ use std::{ time::{Duration, SystemTime}, }; use tempfile::NamedTempFile; -use util::{post_inc, ResultExt}; +use util::ResultExt; #[cfg(any(test, feature = "test-support"))] use collections::{btree_map, BTreeMap}; @@ -840,7 +840,7 @@ impl Fs for FakeFs { let target = normalize_path(target); let mut state = self.state.lock().await; let mtime = state.next_mtime; - let inode = post_inc(&mut state.next_inode); + let inode = util::post_inc(&mut state.next_inode); state.next_mtime += Duration::from_nanos(1); let source_entry = state.read_path(&source).await?; let content = source_entry.lock().await.file_content(&source)?.clone(); diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index faf2a9729f..16afa987e9 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -4,7 +4,7 @@ use futures::channel::mpsc; use smol::{channel, prelude::*, Executor}; use std::{ any::Any, - fmt::{self, Display, Write as _}, + fmt::{self, Display}, marker::PhantomData, mem, pin::Pin, @@ -17,8 +17,7 @@ use std::{ use crate::{ platform::{self, Dispatcher}, - util::{self, CwdBacktrace}, - MutableAppContext, + util, MutableAppContext, }; pub enum Foreground { @@ -549,6 +548,8 @@ impl Future for Timer { #[cfg(any(test, feature = "test-support"))] impl DeterministicState { fn push_to_history(&mut self, event: ExecutorEvent) { + use std::fmt::Write as _; + self.poll_history.push(event); if let Some(prev_history) = &self.previous_poll_history { let ix = self.poll_history.len() - 1; @@ -560,7 +561,7 @@ impl DeterministicState { "current runnable backtrace:\n{:?}", self.runnable_backtraces.get_mut(&event.id()).map(|trace| { trace.resolve(); - CwdBacktrace(trace) + util::CwdBacktrace(trace) }) ) .unwrap(); @@ -571,7 +572,7 @@ impl DeterministicState { .get_mut(&prev_event.id()) .map(|trace| { trace.resolve(); - CwdBacktrace(trace) + util::CwdBacktrace(trace) }) ) .unwrap(); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index d9dd42dae1..be6dc18b35 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -62,7 +62,7 @@ use std::{ time::Instant, }; use terminal::{Terminal, TerminalBuilder}; -use util::{defer, post_inc, ResultExt, TryFutureExt as _}; +use util::{debug_panic, defer, post_inc, ResultExt, TryFutureExt as _}; pub use fs::*; pub use worktree::*; @@ -1501,16 +1501,20 @@ impl Project { } Some(OpenBuffer::Weak(existing_handle)) => { if existing_handle.upgrade(cx).is_some() { + debug_panic!("already registered buffer with remote id {}", remote_id); Err(anyhow!( "already registered buffer with remote id {}", remote_id ))? } } - Some(OpenBuffer::Strong(_)) => Err(anyhow!( - "already registered buffer with remote id {}", - remote_id - ))?, + Some(OpenBuffer::Strong(_)) => { + debug_panic!("already registered buffer with remote id {}", remote_id); + Err(anyhow!( + "already registered buffer with remote id {}", + remote_id + ))? 
+ } } cx.subscribe(buffer, |this, buffer, event, cx| { this.on_buffer_event(buffer, event, cx); @@ -5150,18 +5154,28 @@ impl Project { this: ModelHandle, envelope: TypedEnvelope, _: Arc, - cx: AsyncAppContext, + mut cx: AsyncAppContext, ) -> Result { let project_id = envelope.payload.project_id; let mut response = proto::SynchronizeBuffersResponse { buffers: Default::default(), }; - this.read_with(&cx, |this, cx| { + this.update(&mut cx, |this, cx| { + let Some(guest_id) = envelope.original_sender_id else { + log::error!("missing original_sender_id on SynchronizeBuffers request"); + return; + }; + for buffer in envelope.payload.buffers { let buffer_id = buffer.id; let remote_version = language::proto::deserialize_version(buffer.version); if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { + this.shared_buffers + .entry(guest_id) + .or_default() + .insert(buffer_id); + let buffer = buffer.read(cx); response.buffers.push(proto::BufferVersion { id: buffer_id, From 8d70a22fa3e1d233bf5fe1bf562b22e82b3f57f0 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 2 Jan 2023 21:12:39 -0700 Subject: [PATCH 42/56] Record failing seed --- crates/collab/src/tests/randomized_integration_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 1deaafcba2..ec191bd3da 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -17,7 +17,7 @@ use project::{search::SearchQuery, Project}; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; -#[gpui::test(iterations = 100)] +#[gpui::test(iterations = 1, seed = 4742)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, From 90fb9b53ad6d23d49cc123b4d541194f789dba00 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 3 Jan 2023 13:30:14 -0700 Subject: [PATCH 43/56] WIP --- ...ce_is_completed_with_completed_scan_id.sql | 3 +++ crates/collab/src/db.rs | 18 ++++++++++------- crates/collab/src/db/worktree.rs | 4 +++- crates/collab/src/rpc.rs | 4 ++-- .../src/tests/randomized_integration_tests.rs | 2 +- crates/project/src/worktree.rs | 20 ++++++++++--------- 6 files changed, 31 insertions(+), 20 deletions(-) create mode 100644 crates/collab/migrations/20230103200902_replace_is_completed_with_completed_scan_id.sql diff --git a/crates/collab/migrations/20230103200902_replace_is_completed_with_completed_scan_id.sql b/crates/collab/migrations/20230103200902_replace_is_completed_with_completed_scan_id.sql new file mode 100644 index 0000000000..e0f301b2e0 --- /dev/null +++ b/crates/collab/migrations/20230103200902_replace_is_completed_with_completed_scan_id.sql @@ -0,0 +1,3 @@ +ALTER TABLE worktrees + DROP COLUMN is_complete, + ADD COLUMN completed_scan_id INT8; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 31f6f9cd09..822b2dc762 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1443,7 +1443,7 @@ impl Database { removed_entries: Default::default(), diagnostic_summaries: Default::default(), scan_id: db_worktree.scan_id as u64, - is_complete: db_worktree.is_complete, + completed_scan_id: db_worktree.completed_scan_id as u64, }; let rejoined_worktree = rejoined_project @@ -1997,7 +1997,7 @@ impl Database { root_name: ActiveValue::set(worktree.root_name.clone()), visible: ActiveValue::set(worktree.visible), scan_id: ActiveValue::set(0), - is_complete: ActiveValue::set(false), + 
completed_scan_id: ActiveValue::set(0), } })) .exec(&*tx) @@ -2091,7 +2091,7 @@ impl Database { root_name: ActiveValue::set(worktree.root_name.clone()), visible: ActiveValue::set(worktree.visible), scan_id: ActiveValue::set(0), - is_complete: ActiveValue::set(false), + completed_scan_id: ActiveValue::set(0), })) .on_conflict( OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id]) @@ -2141,7 +2141,11 @@ impl Database { project_id: ActiveValue::set(project_id), root_name: ActiveValue::set(update.root_name.clone()), scan_id: ActiveValue::set(update.scan_id as i64), - is_complete: ActiveValue::set(update.is_last_update), + completed_scan_id: if update.is_last_update { + ActiveValue::set(update.scan_id as i64) + } else { + ActiveValue::default() + }, abs_path: ActiveValue::set(update.abs_path.clone()), ..Default::default() }) @@ -2381,7 +2385,7 @@ impl Database { entries: Default::default(), diagnostic_summaries: Default::default(), scan_id: db_worktree.scan_id as u64, - is_complete: db_worktree.is_complete, + completed_scan_id: db_worktree.completed_scan_id as u64, }, ) }) @@ -3039,7 +3043,7 @@ pub struct RejoinedWorktree { pub removed_entries: Vec, pub diagnostic_summaries: Vec, pub scan_id: u64, - pub is_complete: bool, + pub completed_scan_id: u64, } pub struct LeftRoom { @@ -3093,7 +3097,7 @@ pub struct Worktree { pub entries: Vec, pub diagnostic_summaries: Vec, pub scan_id: u64, - pub is_complete: bool, + pub completed_scan_id: u64, } #[cfg(test)] diff --git a/crates/collab/src/db/worktree.rs b/crates/collab/src/db/worktree.rs index b9f0f97dee..fce72722db 100644 --- a/crates/collab/src/db/worktree.rs +++ b/crates/collab/src/db/worktree.rs @@ -11,8 +11,10 @@ pub struct Model { pub abs_path: String, pub root_name: String, pub visible: bool, + /// The last scan for which we've observed entries. It may be in progress. pub scan_id: i64, - pub is_complete: bool, + /// The last scan that fully completed. 
+ pub completed_scan_id: i64, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index bba07d34ef..8e2e5c6041 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1048,7 +1048,7 @@ async fn rejoin_room( updated_entries: worktree.updated_entries, removed_entries: worktree.removed_entries, scan_id: worktree.scan_id, - is_last_update: worktree.is_complete, + is_last_update: worktree.completed_scan_id == worktree.scan_id, }; for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { session.peer.send(session.connection_id, update.clone())?; @@ -1361,7 +1361,7 @@ async fn join_project( updated_entries: worktree.entries, removed_entries: Default::default(), scan_id: worktree.scan_id, - is_last_update: worktree.is_complete, + is_last_update: worktree.scan_id == worktree.completed_scan_id, }; for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { session.peer.send(session.connection_id, update.clone())?; diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index ec191bd3da..1deaafcba2 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -17,7 +17,7 @@ use project::{search::SearchQuery, Project}; use rand::prelude::*; use std::{env, path::PathBuf, sync::Arc}; -#[gpui::test(iterations = 1, seed = 4742)] +#[gpui::test(iterations = 100)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 816e75a8ea..7ec9074b4c 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -94,7 +94,7 @@ pub struct Snapshot { entries_by_path: SumTree, entries_by_id: SumTree, scan_id: usize, - is_complete: bool, + completed_scan_id: usize, } #[derive(Clone)] @@ -230,7 +230,7 @@ impl Worktree { entries_by_path: Default::default(), entries_by_id: Default::default(), scan_id: 0, - is_complete: false, + completed_scan_id: 0, }; let (updates_tx, mut updates_rx) = mpsc::unbounded(); @@ -423,8 +423,8 @@ impl LocalWorktree { root_char_bag, entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 0, - is_complete: true, + scan_id: 1, + completed_scan_id: 0, }, }; if let Some(metadata) = metadata { @@ -1002,7 +1002,7 @@ impl LocalWorktree { entries_by_path: Default::default(), entries_by_id: Default::default(), scan_id: 0, - is_complete: true, + completed_scan_id: 0, }, }; while let Some(snapshot) = snapshots_rx.recv().await { @@ -1091,7 +1091,7 @@ impl RemoteWorktree { } fn observed_snapshot(&self, scan_id: usize) -> bool { - self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete) + self.completed_scan_id >= scan_id } fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future> { @@ -1254,7 +1254,9 @@ impl Snapshot { self.entries_by_path.edit(entries_by_path_edits, &()); self.entries_by_id.edit(entries_by_id_edits, &()); self.scan_id = update.scan_id as usize; - self.is_complete = update.is_last_update; + if update.is_last_update { + self.completed_scan_id = update.scan_id as usize; + } Ok(()) } @@ -1466,7 +1468,7 @@ impl LocalSnapshot { updated_entries, removed_entries, scan_id: self.scan_id as u64, - is_last_update: true, + is_last_update: self.completed_scan_id == self.scan_id, } } @@ -3437,7 +3439,7 @@ mod tests { root_name: Default::default(), root_char_bag: Default::default(), 
scan_id: 0, - is_complete: true, + completed_scan_id: 0, }, }; initial_snapshot.insert_entry( From 1dd085fc9235d9ef1170a18e5be02759e9981ce0 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 3 Jan 2023 18:26:57 -0700 Subject: [PATCH 44/56] Introduce completed_scan_id to worktree We need to know the most recent scan id we have actually completed. This is to handle the case where a guest disconnects when we're in the middle of streaming worktree entries to them. When they reconnect, they need to report a scan_id from before we started streaming the entries, because we have no record of when the stream was interrupted. Next failure: SEED=5051 ITERATIONS=1 OPERATIONS=200 cargo test --release --package=collab random -- --nocapture --- crates/call/src/room.rs | 2 +- .../20221109000000_test_schema.sql | 2 +- crates/project/src/worktree.rs | 30 +++++++++++++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 84f15e0602..6da8e55f7c 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -382,7 +382,7 @@ impl Room { let worktree = worktree.read(cx); proto::RejoinWorktree { id: worktree.id().to_proto(), - scan_id: worktree.scan_id() as u64, + scan_id: worktree.completed_scan_id() as u64, } }) .collect(), diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 78a7043fb7..f89e46f8c9 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -57,7 +57,7 @@ CREATE TABLE "worktrees" ( "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, "scan_id" INTEGER NOT NULL, - "is_complete" BOOL NOT NULL, + "completed_scan_id" INTEGER NOT NULL, PRIMARY KEY(project_id, id) ); CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 7ec9074b4c..3a8c69c704 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -344,6 +344,13 @@ impl Worktree { } } + pub fn completed_scan_id(&self) -> usize { + match self { + Worktree::Local(worktree) => worktree.snapshot.completed_scan_id, + Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id, + } + } + pub fn is_visible(&self) -> bool { match self { Worktree::Local(worktree) => worktree.visible, @@ -423,7 +430,7 @@ impl LocalWorktree { root_char_bag, entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 1, + scan_id: 0, completed_scan_id: 0, }, }; @@ -955,8 +962,9 @@ impl LocalWorktree { if let Some(old_path) = old_path { snapshot.remove_path(&old_path); } + snapshot.scan_started(); inserted_entry = snapshot.insert_entry(entry, fs.as_ref()); - snapshot.scan_id += 1; + snapshot.scan_completed(); } this.poll_snapshot(true, cx); Ok(inserted_entry) @@ -1345,6 +1353,14 @@ impl Snapshot { &self.root_name } + pub fn scan_started(&mut self) { + self.scan_id += 1; + } + + pub fn scan_completed(&mut self) { + self.completed_scan_id = self.scan_id; + } + pub fn scan_id(&self) -> usize { self.scan_id } @@ -2250,7 +2266,8 @@ impl BackgroundScanner { let is_dir; let next_entry_id; { - let snapshot = self.snapshot.lock(); + let mut snapshot = self.snapshot.lock(); + snapshot.scan_started(); root_char_bag = snapshot.root_char_bag; root_abs_path = snapshot.abs_path.clone(); root_inode = snapshot.root_entry().map(|e| e.inode); @@ -2316,6 +2333,8 @@ impl 
BackgroundScanner { } }) .await; + + self.snapshot.lock().scan_completed(); } Ok(()) @@ -2443,7 +2462,8 @@ impl BackgroundScanner { let root_abs_path; let next_entry_id; { - let snapshot = self.snapshot.lock(); + let mut snapshot = self.snapshot.lock(); + snapshot.scan_started(); root_char_bag = snapshot.root_char_bag; root_abs_path = snapshot.abs_path.clone(); next_entry_id = snapshot.next_entry_id.clone(); @@ -2468,7 +2488,6 @@ impl BackgroundScanner { let (scan_queue_tx, scan_queue_rx) = channel::unbounded(); { let mut snapshot = self.snapshot.lock(); - snapshot.scan_id += 1; for event in &events { if let Ok(path) = event.path.strip_prefix(&root_canonical_path) { snapshot.remove_path(path); @@ -2555,6 +2574,7 @@ impl BackgroundScanner { self.update_ignore_statuses().await; self.update_git_repositories(); + self.snapshot.lock().scan_completed(); true } From 789bbf15b7496e39962b750655f0a6b8ee91b1a5 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 4 Jan 2023 12:33:48 -0700 Subject: [PATCH 45/56] Update buffer files when synchronizing buffers It's possible that the host was disconnected when attempting to notify guests of a file save, so we need to transmit this in order to correctly update the file's mtime. Next failing seed OPERATIONS=200 SEED=6894 --- crates/collab/src/rpc.rs | 4 +-- .../src/tests/randomized_integration_tests.rs | 28 +++++++++++++------ crates/db/src/db.rs | 2 -- crates/project/src/project.rs | 15 ++++++++-- crates/workspace/src/persistence.rs | 2 +- 5 files changed, 35 insertions(+), 16 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 8e2e5c6041..62db247b9f 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -580,7 +580,7 @@ impl Server { drop(foreground_message_handlers); tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); - if let Err(error) = sign_out(session, teardown, executor).await { + if let Err(error) = connection_lost(session, teardown, executor).await { tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } @@ -781,7 +781,7 @@ pub async fn handle_metrics(Extension(server): Extension>) -> Result } #[instrument(err, skip(executor))] -async fn sign_out( +async fn connection_lost( session: Session, mut teardown: watch::Receiver<()>, executor: Executor, diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 1deaafcba2..530ca05af8 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -314,6 +314,7 @@ async fn test_random_collaboration( .read_with(client_cx, |project, _| project.remote_id()) .unwrap() }; + let guest_user_id = client.user_id().unwrap(); let host_project = clients.iter().find_map(|(client, cx)| { let project = client.local_projects.iter().find(|host_project| { @@ -321,14 +322,15 @@ async fn test_random_collaboration( host_project.remote_id() == Some(project_id) }) })?; - Some((project, cx)) + Some((client.user_id().unwrap(), project, cx)) }); - let (host_project, host_cx) = if let Some((host_project, host_cx)) = host_project { - (host_project, host_cx) - } else { - continue; - }; + let (host_user_id, host_project, host_cx) = + if let Some((host_user_id, host_project, host_cx)) = host_project { + (host_user_id, host_project, host_cx) + } else { + continue; + }; for guest_buffer in guest_buffers { let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); 
@@ -366,9 +368,17 @@ async fn test_random_collaboration( let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); match (host_file, guest_file) { (Some(host_file), Some(guest_file)) => { - assert_eq!(host_file.mtime(), guest_file.mtime()); - assert_eq!(host_file.path(), guest_file.path()); - assert_eq!(host_file.is_deleted(), guest_file.is_deleted()); + assert_eq!(guest_file.path(), host_file.path()); + assert_eq!(guest_file.is_deleted(), host_file.is_deleted()); + assert_eq!( + guest_file.mtime(), + host_file.mtime(), + "guest {} mtime does not match host {} for path {:?} in project {}", + guest_user_id, + host_user_id, + guest_file.path(), + project_id, + ); } (None, None) => {} (None, _) => panic!("host's file is None, guest's isn't "), diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 921b9c8a53..20f2300d89 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -327,8 +327,6 @@ mod tests { .path(); corrupted_backup_dir.push(DB_FILE_NAME); - dbg!(&corrupted_backup_dir); - let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); assert!(backup.select_row::("SELECT * FROM test").unwrap()() .unwrap() diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index be6dc18b35..b1a8c81b9a 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -359,7 +359,7 @@ impl FormatTrigger { impl Project { pub fn init(client: &Arc) { client.add_model_message_handler(Self::handle_add_collaborator); - client.add_model_message_handler(Self::handle_update_collaborator); + client.add_model_message_handler(Self::handle_update_project_collaborator); client.add_model_message_handler(Self::handle_remove_collaborator); client.add_model_message_handler(Self::handle_buffer_reloaded); client.add_model_message_handler(Self::handle_buffer_saved); @@ -4617,7 +4617,7 @@ impl Project { Ok(()) } - async fn handle_update_collaborator( + async fn handle_update_project_collaborator( this: ModelHandle, envelope: TypedEnvelope, _: Arc, @@ -5184,9 +5184,20 @@ impl Project { let operations = buffer.serialize_ops(Some(remote_version), cx); let client = this.client.clone(); + let file = buffer.file().cloned(); cx.background() .spawn( async move { + if let Some(file) = file { + client + .send(proto::UpdateBufferFile { + project_id, + buffer_id: buffer_id as u64, + file: Some(file.to_proto()), + }) + .log_err(); + } + let operations = operations.await; for chunk in split_operations(operations) { client diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index bd4e6653d9..cd425c3a28 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -216,7 +216,7 @@ impl WorkspaceDb { let mut result = Vec::new(); let mut delete_tasks = Vec::new(); for (id, location) in self.recent_workspaces()? { - if location.paths().iter().all(|path| dbg!(path).exists()) { + if location.paths().iter().all(|path| path.exists()) { result.push((id, location)); } else { delete_tasks.push(self.delete_stale_workspace(id)); From 1006ada4584f42d270e11b1f6ae1806ccd71b8db Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 4 Jan 2023 13:59:16 -0700 Subject: [PATCH 46/56] Update scan_id on worktree entries when there is a conflict Forgetting to do this meant we were unable to sync changes with reconnecting guests in some cases. 
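The failure mode is easiest to see with a toy model of the upsert: if the conflicting row keeps its old scan_id, a guest that rejoins and asks for entries changed since some scan never sees the update. A sketch with a Vec standing in for the worktree_entries table (illustrative names only; the real change is the on-conflict column list in the diff below):

    struct EntryRow {
        id: u64,
        mtime: u64,
        scan_id: u64,
    }

    fn upsert(table: &mut Vec<EntryRow>, incoming: EntryRow) {
        if let Some(row) = table.iter_mut().find(|row| row.id == incoming.id) {
            // ON CONFLICT DO UPDATE: the fix is to include scan_id in the update
            // set alongside the file metadata, so the row becomes visible to
            // "changed since scan N" queries from rejoining guests.
            row.mtime = incoming.mtime;
            row.scan_id = incoming.scan_id;
        } else {
            table.push(incoming);
        }
    }

    // What a rejoining guest effectively asks the server for.
    fn changed_since(table: &[EntryRow], scan_id: u64) -> Vec<u64> {
        table
            .iter()
            .filter(|row| row.scan_id > scan_id)
            .map(|row| row.id)
            .collect()
    }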
--- crates/collab/src/db.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 822b2dc762..1bbfac85d3 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2184,6 +2184,7 @@ impl Database { worktree_entry::Column::MtimeNanos, worktree_entry::Column::IsSymlink, worktree_entry::Column::IsIgnored, + worktree_entry::Column::ScanId, ]) .to_owned(), ) From 7a629769b79eab6ddbc5236ee554f8d81d5d372f Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 4 Jan 2023 16:00:43 -0700 Subject: [PATCH 47/56] Re-request incomplete remote buffers when syncing buffers Any buffers we requested but that haven't been fully sent will cause outstainding open requests to hang. If we re-request them, any waiting open requests will resume when the requested buffers finish being created. Co-authored-by: Max Brunsfeld Co-authored-by: Mikayla Maki --- crates/project/src/lsp_command.rs | 6 +- crates/project/src/project.rs | 91 ++++++++++++++++++++++--------- 2 files changed, 68 insertions(+), 29 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index a0eb845581..feec1ee0e4 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -524,7 +524,7 @@ async fn location_links_from_proto( Some(origin) => { let buffer = project .update(&mut cx, |this, cx| { - this.wait_for_buffer(origin.buffer_id, cx) + this.wait_for_remote_buffer(origin.buffer_id, cx) }) .await?; let start = origin @@ -549,7 +549,7 @@ async fn location_links_from_proto( let target = link.target.ok_or_else(|| anyhow!("missing target"))?; let buffer = project .update(&mut cx, |this, cx| { - this.wait_for_buffer(target.buffer_id, cx) + this.wait_for_remote_buffer(target.buffer_id, cx) }) .await?; let start = target @@ -814,7 +814,7 @@ impl LspCommand for GetReferences { for location in message.locations { let target_buffer = project .update(&mut cx, |this, cx| { - this.wait_for_buffer(location.buffer_id, cx) + this.wait_for_remote_buffer(location.buffer_id, cx) }) .await?; let start = location diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index b1a8c81b9a..dc41e68b4f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -107,7 +107,7 @@ pub struct Project { opened_buffer: (watch::Sender<()>, watch::Receiver<()>), shared_buffers: HashMap>, #[allow(clippy::type_complexity)] - loading_buffers: HashMap< + loading_buffers_by_path: HashMap< ProjectPath, postage::watch::Receiver, Arc>>>, >, @@ -115,7 +115,9 @@ pub struct Project { loading_local_worktrees: HashMap, Shared, Arc>>>>, opened_buffers: HashMap, - incomplete_buffers: HashMap>, + /// A mapping from a buffer ID to None means that we've started waiting for an ID but haven't finished loading it. 
+ /// Used for re-issuing buffer requests when peers temporarily disconnect + incomplete_remote_buffers: HashMap>>, buffer_snapshots: HashMap>, buffers_being_formatted: HashSet, nonce: u128, @@ -411,8 +413,8 @@ impl Project { collaborators: Default::default(), opened_buffers: Default::default(), shared_buffers: Default::default(), - incomplete_buffers: Default::default(), - loading_buffers: Default::default(), + incomplete_remote_buffers: Default::default(), + loading_buffers_by_path: Default::default(), loading_local_worktrees: Default::default(), buffer_snapshots: Default::default(), client_state: None, @@ -467,10 +469,10 @@ impl Project { let mut this = Self { worktrees: Vec::new(), - loading_buffers: Default::default(), + loading_buffers_by_path: Default::default(), opened_buffer: watch::channel(), shared_buffers: Default::default(), - incomplete_buffers: Default::default(), + incomplete_remote_buffers: Default::default(), loading_local_worktrees: Default::default(), active_entry: None, collaborators: Default::default(), @@ -1284,7 +1286,7 @@ impl Project { return Task::ready(Ok(existing_buffer)); } - let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) { + let mut loading_watch = match self.loading_buffers_by_path.entry(project_path.clone()) { // If the given path is already being loaded, then wait for that existing // task to complete and return the same buffer. hash_map::Entry::Occupied(e) => e.get().clone(), @@ -1304,7 +1306,7 @@ impl Project { let load_result = load_buffer.await; *tx.borrow_mut() = Some(this.update(&mut cx, |this, _| { // Record the fact that the buffer is no longer loading. - this.loading_buffers.remove(&project_path); + this.loading_buffers_by_path.remove(&project_path); let buffer = load_result.map_err(Arc::new)?; Ok(buffer) })); @@ -1364,7 +1366,7 @@ impl Project { }) .await?; this.update(&mut cx, |this, cx| { - this.wait_for_buffer(response.buffer_id, cx) + this.wait_for_remote_buffer(response.buffer_id, cx) }) .await }) @@ -1425,8 +1427,10 @@ impl Project { .request(proto::OpenBufferById { project_id, id }); cx.spawn(|this, mut cx| async move { let buffer_id = request.await?.buffer_id; - this.update(&mut cx, |this, cx| this.wait_for_buffer(buffer_id, cx)) - .await + this.update(&mut cx, |this, cx| { + this.wait_for_remote_buffer(buffer_id, cx) + }) + .await }) } else { Task::ready(Err(anyhow!("cannot open buffer while disconnected"))) @@ -3268,7 +3272,7 @@ impl Project { cx.spawn(|this, mut cx| async move { let response = request.await?; this.update(&mut cx, |this, cx| { - this.wait_for_buffer(response.buffer_id, cx) + this.wait_for_remote_buffer(response.buffer_id, cx) }) .await }) @@ -4124,7 +4128,7 @@ impl Project { for location in response.locations { let target_buffer = this .update(&mut cx, |this, cx| { - this.wait_for_buffer(location.buffer_id, cx) + this.wait_for_remote_buffer(location.buffer_id, cx) }) .await?; let start = location @@ -5005,19 +5009,21 @@ impl Project { let buffer = cx.add_model(|_| { Buffer::from_proto(this.replica_id(), state, buffer_file).unwrap() }); - this.incomplete_buffers.insert(buffer_id, buffer); + this.incomplete_remote_buffers + .insert(buffer_id, Some(buffer)); } proto::create_buffer_for_peer::Variant::Chunk(chunk) => { let buffer = this - .incomplete_buffers + .incomplete_remote_buffers .get(&chunk.buffer_id) + .cloned() + .flatten() .ok_or_else(|| { anyhow!( "received chunk for buffer {} without initial state", chunk.buffer_id ) - })? 
- .clone(); + })?; let operations = chunk .operations .into_iter() @@ -5026,7 +5032,7 @@ impl Project { buffer.update(cx, |buffer, cx| buffer.apply_ops(operations, cx))?; if chunk.is_last { - this.incomplete_buffers.remove(&chunk.buffer_id); + this.incomplete_remote_buffers.remove(&chunk.buffer_id); this.register_buffer(&buffer, cx)?; } } @@ -5049,7 +5055,12 @@ impl Project { .opened_buffers .get_mut(&buffer_id) .and_then(|b| b.upgrade(cx)) - .or_else(|| this.incomplete_buffers.get(&buffer_id).cloned()) + .or_else(|| { + this.incomplete_remote_buffers + .get(&buffer_id) + .cloned() + .flatten() + }) { buffer.update(cx, |buffer, cx| buffer.set_diff_base(diff_base, cx)); } @@ -5070,7 +5081,12 @@ impl Project { .opened_buffers .get_mut(&buffer_id) .and_then(|b| b.upgrade(cx)) - .or_else(|| this.incomplete_buffers.get(&buffer_id).cloned()) + .or_else(|| { + this.incomplete_remote_buffers + .get(&buffer_id) + .cloned() + .flatten() + }) { let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; let worktree = this @@ -5610,7 +5626,9 @@ impl Project { for (buffer_id, transaction) in message.buffer_ids.into_iter().zip(message.transactions) { let buffer = this - .update(&mut cx, |this, cx| this.wait_for_buffer(buffer_id, cx)) + .update(&mut cx, |this, cx| { + this.wait_for_remote_buffer(buffer_id, cx) + }) .await?; let transaction = language::proto::deserialize_transaction(transaction)?; project_transaction.0.insert(buffer, transaction); @@ -5686,12 +5704,13 @@ impl Project { buffer_id } - fn wait_for_buffer( - &self, + fn wait_for_remote_buffer( + &mut self, id: u64, cx: &mut ModelContext, ) -> Task>> { let mut opened_buffer_rx = self.opened_buffer.1.clone(); + cx.spawn(|this, mut cx| async move { let buffer = loop { let buffer = this.read_with(&cx, |this, cx| { @@ -5705,6 +5724,9 @@ impl Project { return Err(anyhow!("disconnected before buffer {} could be opened", id)); } + this.update(&mut cx, |this, _| { + this.incomplete_remote_buffers.entry(id).or_default(); + }); opened_buffer_rx .next() .await @@ -5739,8 +5761,9 @@ impl Project { let client = self.client.clone(); cx.spawn(|this, cx| async move { - let buffers = this.read_with(&cx, |this, cx| { - this.opened_buffers + let (buffers, incomplete_buffer_ids) = this.read_with(&cx, |this, cx| { + let buffers = this + .opened_buffers .iter() .filter_map(|(id, buffer)| { let buffer = buffer.upgrade(cx)?; @@ -5749,7 +5772,14 @@ impl Project { version: language::proto::serialize_version(&buffer.read(cx).version), }) }) - .collect() + .collect(); + let incomplete_buffer_ids = this + .incomplete_remote_buffers + .keys() + .copied() + .collect::>(); + + (buffers, incomplete_buffer_ids) }); let response = client .request(proto::SynchronizeBuffers { @@ -5783,6 +5813,15 @@ impl Project { } }) }); + + // Any incomplete buffers have open requests waiting. Request that the host sends + // creates these buffers for us again to unblock any waiting futures. 
+ for id in incomplete_buffer_ids { + cx.background() + .spawn(client.request(proto::OpenBufferById { project_id, id })) + .detach(); + } + futures::future::join_all(send_updates_for_buffers) .await .into_iter() From 77e322cb750ecefd5877f63034c7a33369be1452 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Thu, 5 Jan 2023 13:50:25 -0700 Subject: [PATCH 48/56] Wait for incomplete buffers when handling incoming buffer file updates --- crates/project/src/project.rs | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index dc41e68b4f..ff18fa0cbc 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5074,20 +5074,29 @@ impl Project { _: Arc, mut cx: AsyncAppContext, ) -> Result<()> { + let buffer_id = envelope.payload.buffer_id; + let is_incomplete = this.read_with(&cx, |this, _| { + this.incomplete_remote_buffers.contains_key(&buffer_id) + }); + + let buffer = if is_incomplete { + Some( + this.update(&mut cx, |this, cx| { + this.wait_for_remote_buffer(buffer_id, cx) + }) + .await?, + ) + } else { + None + }; + this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); - let buffer_id = payload.buffer_id; - if let Some(buffer) = this - .opened_buffers - .get_mut(&buffer_id) - .and_then(|b| b.upgrade(cx)) - .or_else(|| { - this.incomplete_remote_buffers - .get(&buffer_id) - .cloned() - .flatten() - }) - { + if let Some(buffer) = buffer.or_else(|| { + this.opened_buffers + .get(&buffer_id) + .and_then(|b| b.upgrade(cx)) + }) { let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; let worktree = this .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx) From 4256a960516499a76d588def89567cdf4d064926 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Thu, 5 Jan 2023 21:01:27 -0700 Subject: [PATCH 49/56] Avoid holding project handle on a call that could hang This fixes a leaked handle error. 
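The shape of the problem is a long-lived future capturing a strong model handle. A toy illustration of the fix using std's Weak in place of gpui's model handles (names and setup are illustrative, not the project code):

    use std::sync::Weak;

    struct Project;

    impl Project {
        fn apply(&self, response: String) -> String {
            response
        }
    }

    // Hold only a Weak across an await that may never resolve and upgrade it
    // afterwards; dropping the last strong handle elsewhere is then enough to
    // release the project even while the request is still pending.
    async fn forward_request(
        project: Weak<Project>,
        request: impl std::future::Future<Output = String>,
    ) -> Option<String> {
        let response = request.await;      // may block until the peer reconnects
        let project = project.upgrade()?;  // None if the project was dropped meanwhile
        Some(project.apply(response))
    }

A caller holding an Arc<Project> would pass Arc::downgrade(&project); the spawn_weak closures in the diff below do the same thing with this.upgrade(&cx).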
--- crates/project/src/project.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index ff18fa0cbc..79bada63f0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4185,8 +4185,11 @@ impl Project { } else if let Some(project_id) = self.remote_id() { let rpc = self.client.clone(); let message = request.to_proto(project_id, buffer); - return cx.spawn(|this, cx| async move { + return cx.spawn_weak(|this, cx| async move { let response = rpc.request(message).await?; + let this = this + .upgrade(&cx) + .ok_or_else(|| anyhow!("project dropped"))?; if this.read_with(&cx, |this, _| this.is_read_only()) { Err(anyhow!("disconnected before completing request")) } else { @@ -5720,8 +5723,11 @@ impl Project { ) -> Task>> { let mut opened_buffer_rx = self.opened_buffer.1.clone(); - cx.spawn(|this, mut cx| async move { + cx.spawn_weak(|this, mut cx| async move { let buffer = loop { + let Some(this) = this.upgrade(&cx) else { + return Err(anyhow!("project dropped")); + }; let buffer = this.read_with(&cx, |this, cx| { this.opened_buffers .get(&id) @@ -5736,6 +5742,7 @@ impl Project { this.update(&mut cx, |this, _| { this.incomplete_remote_buffers.entry(id).or_default(); }); + drop(this); opened_buffer_rx .next() .await From 5c05b7d41382a42e8389eb8a927957402d010d0d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 09:43:18 -0700 Subject: [PATCH 50/56] Ensure initial project metadata is sent when first sharing a project --- crates/project/src/project.rs | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 79bada63f0..bcf097c9ff 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -970,8 +970,6 @@ impl Project { return Task::ready(Err(anyhow!("project was already shared"))); } - let mut worktree_share_tasks = Vec::new(); - for open_buffer in self.opened_buffers.values_mut() { match open_buffer { OpenBuffer::Strong(_) => {} @@ -1007,21 +1005,11 @@ impl Project { .log_err(); } - for worktree in self.worktrees(cx).collect::>() { - worktree.update(cx, |worktree, cx| { - let worktree = worktree.as_local_mut().unwrap(); - worktree_share_tasks.push(worktree.share(project_id, cx)); - }); - } - self.client_subscriptions.push( self.client .subscribe_to_entity(project_id) .set_model(&cx.handle(), &mut cx.to_async()), ); - let _ = self.metadata_changed(cx); - cx.emit(Event::RemoteIdChanged(Some(project_id))); - cx.notify(); let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); self.client_state = Some(ProjectClientState::Local { @@ -1052,7 +1040,23 @@ impl Project { }), }); - cx.foreground().spawn(async move { + let metadata_changed = self.metadata_changed(cx); + cx.emit(Event::RemoteIdChanged(Some(project_id))); + cx.notify(); + + let worktrees = self.worktrees(cx).collect::>(); + cx.spawn_weak(|_, mut cx| async move { + // Wait for the initial project metadata to be sent before sharing the worktrees. 
+ metadata_changed.await; + + let mut worktree_share_tasks = Vec::new(); + for worktree in worktrees { + worktree.update(&mut cx, |worktree, cx| { + let worktree = worktree.as_local_mut().unwrap(); + worktree_share_tasks.push(worktree.share(project_id, cx)); + }); + } + futures::future::try_join_all(worktree_share_tasks).await?; Ok(()) }) From 8c5a0ca3a4e9f0ff1f154443b4518b9d3fb42052 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 10:31:36 -0700 Subject: [PATCH 51/56] Couple worktree sharing with project metadata updates --- crates/call/src/room.rs | 6 ++--- crates/project/src/project.rs | 46 ++++++++++------------------------- 2 files changed, 15 insertions(+), 37 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 6da8e55f7c..6460a0900f 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -774,10 +774,8 @@ impl Room { let response = request.await?; project.update(&mut cx, |project, cx| { - project - .shared(response.project_id, cx) - .detach_and_log_err(cx) - }); + project.shared(response.project_id, cx) + })?; // If the user's location is in this project, it changes from UnsharedProject to SharedProject. this.update(&mut cx, |this, cx| { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index bcf097c9ff..f88956ab5e 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -965,9 +965,9 @@ impl Project { } } - pub fn shared(&mut self, project_id: u64, cx: &mut ModelContext) -> Task> { + pub fn shared(&mut self, project_id: u64, cx: &mut ModelContext) -> Result<()> { if self.client_state.is_some() { - return Task::ready(Err(anyhow!("project was already shared"))); + return Err(anyhow!("project was already shared")); } for open_buffer in self.opened_buffers.values_mut() { @@ -1015,7 +1015,7 @@ impl Project { self.client_state = Some(ProjectClientState::Local { remote_id: project_id, metadata_changed: metadata_changed_tx, - _maintain_metadata: cx.spawn_weak(move |this, cx| async move { + _maintain_metadata: cx.spawn_weak(move |this, mut cx| async move { let mut txs = Vec::new(); while let Some(tx) = metadata_changed_rx.next().await { txs.push(tx); @@ -1024,6 +1024,8 @@ impl Project { } let Some(this) = this.upgrade(&cx) else { break }; + let worktrees = + this.read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); this.read_with(&cx, |this, cx| { this.client.request(proto::UpdateProject { project_id, @@ -1032,6 +1034,12 @@ impl Project { }) .await .log_err(); + for worktree in worktrees { + worktree.update(&mut cx, |worktree, cx| { + let worktree = &mut worktree.as_local_mut().unwrap(); + worktree.share(project_id, cx).detach_and_log_err(cx) + }); + } for tx in txs.drain(..) { let _ = tx.send(()); @@ -1040,26 +1048,10 @@ impl Project { }), }); - let metadata_changed = self.metadata_changed(cx); + let _ = self.metadata_changed(cx); cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); - - let worktrees = self.worktrees(cx).collect::>(); - cx.spawn_weak(|_, mut cx| async move { - // Wait for the initial project metadata to be sent before sharing the worktrees. 
- metadata_changed.await; - - let mut worktree_share_tasks = Vec::new(); - for worktree in worktrees { - worktree.update(&mut cx, |worktree, cx| { - let worktree = worktree.as_local_mut().unwrap(); - worktree_share_tasks.push(worktree.share(project_id, cx)); - }); - } - - futures::future::try_join_all(worktree_share_tasks).await?; - Ok(()) - }) + Ok(()) } pub fn reshared( @@ -4282,18 +4274,6 @@ impl Project { .update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)) .await; - if let Some(project_id) = - project.read_with(&cx, |project, _| project.remote_id()) - { - worktree.update(&mut cx, |worktree, cx| { - worktree - .as_local_mut() - .unwrap() - .share(project_id, cx) - .detach_and_log_err(cx); - }); - } - Ok(worktree) } .map_err(Arc::new) From 0d31c8c1c8773863be2862d26825149cf85d9b34 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 10:41:11 -0700 Subject: [PATCH 52/56] Only share worktrees when `UpdateProject` succeeded --- crates/project/src/project.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index f88956ab5e..0f750e3255 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1026,19 +1026,21 @@ impl Project { let Some(this) = this.upgrade(&cx) else { break }; let worktrees = this.read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); - this.read_with(&cx, |this, cx| { - this.client.request(proto::UpdateProject { - project_id, - worktrees: this.worktree_metadata_protos(cx), + let update_project = this + .read_with(&cx, |this, cx| { + this.client.request(proto::UpdateProject { + project_id, + worktrees: this.worktree_metadata_protos(cx), + }) }) - }) - .await - .log_err(); - for worktree in worktrees { - worktree.update(&mut cx, |worktree, cx| { - let worktree = &mut worktree.as_local_mut().unwrap(); - worktree.share(project_id, cx).detach_and_log_err(cx) - }); + .await; + if update_project.is_ok() { + for worktree in worktrees { + worktree.update(&mut cx, |worktree, cx| { + let worktree = &mut worktree.as_local_mut().unwrap(); + worktree.share(project_id, cx).detach_and_log_err(cx) + }); + } } for tx in txs.drain(..) { From 585c23e9f634e99c0cf1376c5dc1b8b00512175a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 11:48:34 -0700 Subject: [PATCH 53/56] Match guest's reported buffers on host when synchronizing after reconnect If the host thinks a guest has a buffer that they don't have, the host won't send it to them when they attempt to open it the next time. This can happen if the guest disconnected before they received the host's response to an initial open buffer request. 
Co-Authored-By: Nathan Sobo --- crates/project/src/project.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 0f750e3255..0dff99e77e 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5181,6 +5181,7 @@ impl Project { return; }; + this.shared_buffers.entry(guest_id).or_default().clear(); for buffer in envelope.payload.buffers { let buffer_id = buffer.id; let remote_version = language::proto::deserialize_version(buffer.version); From 8487ae77e70fb54056dc01bf54e5d1b25e08fced Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 12:58:19 -0700 Subject: [PATCH 54/56] Share new worktrees when resharing project Co-Authored-By: Nathan Sobo --- crates/call/src/room.rs | 2 +- crates/collab/src/rpc.rs | 63 ++++++++++--------- .../src/tests/randomized_integration_tests.rs | 5 +- crates/project/src/project.rs | 30 +++------ crates/project/src/worktree.rs | 24 +++---- 5 files changed, 54 insertions(+), 70 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 6460a0900f..7527a69326 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -66,7 +66,7 @@ impl Entity for Room { fn release(&mut self, _: &mut MutableAppContext) { if self.status.is_online() { log::info!("room was released, sending leave message"); - self.client.send(proto::LeaveRoom {}).log_err(); + let _ = self.client.send(proto::LeaveRoom {}); } } } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 62db247b9f..92d4935b23 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -672,15 +672,17 @@ impl<'a> Drop for ConnectionPoolGuard<'a> { } fn broadcast( - sender_id: ConnectionId, + sender_id: Option, receiver_ids: impl IntoIterator, mut f: F, ) where F: FnMut(ConnectionId) -> anyhow::Result<()>, { for receiver_id in receiver_ids { - if receiver_id != sender_id { - f(receiver_id).trace_err(); + if Some(receiver_id) != sender_id { + if let Err(error) = f(receiver_id) { + tracing::error!("failed to send to {:?} {}", receiver_id, error); + } } } } @@ -998,7 +1000,7 @@ async fn rejoin_room( } broadcast( - session.connection_id, + Some(session.connection_id), project .collaborators .iter() @@ -1279,7 +1281,7 @@ async fn unshare_project(message: proto::UnshareProject, session: Session) -> Re .await?; broadcast( - session.connection_id, + Some(session.connection_id), guest_connection_ids.iter().copied(), |conn_id| session.peer.send(conn_id, message.clone()), ); @@ -1430,7 +1432,7 @@ async fn update_project( .update_project(project_id, session.connection_id, &request.worktrees) .await?; broadcast( - session.connection_id, + Some(session.connection_id), guest_connection_ids.iter().copied(), |connection_id| { session @@ -1456,7 +1458,7 @@ async fn update_worktree( .await?; broadcast( - session.connection_id, + Some(session.connection_id), guest_connection_ids.iter().copied(), |connection_id| { session @@ -1479,7 +1481,7 @@ async fn update_diagnostic_summary( .await?; broadcast( - session.connection_id, + Some(session.connection_id), guest_connection_ids.iter().copied(), |connection_id| { session @@ -1502,7 +1504,7 @@ async fn start_language_server( .await?; broadcast( - session.connection_id, + Some(session.connection_id), guest_connection_ids.iter().copied(), |connection_id| { session @@ -1525,7 +1527,7 @@ async fn update_language_server( .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - session.connection_id, + 
Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -1600,11 +1602,15 @@ async fn save_buffer( let project_connection_ids = collaborators .iter() .map(|collaborator| collaborator.connection_id); - broadcast(host_connection_id, project_connection_ids, |conn_id| { - session - .peer - .forward_send(host_connection_id, conn_id, response_payload.clone()) - }); + broadcast( + Some(host_connection_id), + project_connection_ids, + |conn_id| { + session + .peer + .forward_send(host_connection_id, conn_id, response_payload.clone()) + }, + ); response.send(response_payload)?; Ok(()) } @@ -1637,7 +1643,7 @@ async fn update_buffer( session.executor.record_backtrace(); broadcast( - session.connection_id, + Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -1658,7 +1664,7 @@ async fn update_buffer_file(request: proto::UpdateBufferFile, session: Session) .await?; broadcast( - session.connection_id, + Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -1677,7 +1683,7 @@ async fn buffer_reloaded(request: proto::BufferReloaded, session: Session) -> Re .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - session.connection_id, + Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -1696,7 +1702,7 @@ async fn buffer_saved(request: proto::BufferSaved, session: Session) -> Result<( .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - session.connection_id, + Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -1988,7 +1994,7 @@ async fn update_diff_base(request: proto::UpdateDiffBase, session: Session) -> R .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - session.connection_id, + Some(session.connection_id), project_connection_ids.iter().copied(), |connection_id| { session @@ -2098,21 +2104,20 @@ fn contact_for_user( } fn room_updated(room: &proto::Room, peer: &Peer) { - for participant in &room.participants { - if let Some(peer_id) = participant - .peer_id - .ok_or_else(|| anyhow!("invalid participant peer id")) - .trace_err() - { + broadcast( + None, + room.participants + .iter() + .filter_map(|participant| Some(participant.peer_id?.into())), + |peer_id| { peer.send( peer_id.into(), proto::RoomUpdated { room: Some(room.clone()), }, ) - .trace_err(); - } - } + }, + ); } async fn update_user_contacts(user_id: UserId, session: &Session) -> Result<()> { diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 530ca05af8..a42d4f7d32 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -290,10 +290,11 @@ async fn test_random_collaboration( assert_eq!( guest_snapshot.entries(false).collect::>(), host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {} ({:?})", + "{} has different snapshot than the host for worktree {} ({:?}) and project {:?}", client.username, id, - host_snapshot.abs_path() + host_snapshot.abs_path(), + host_project.read_with(host_cx, |project, _| project.remote_id()) ); assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 0dff99e77e..271d0f242b 100644 --- a/crates/project/src/project.rs +++ 
b/crates/project/src/project.rs @@ -1037,7 +1037,7 @@ impl Project { if update_project.is_ok() { for worktree in worktrees { worktree.update(&mut cx, |worktree, cx| { - let worktree = &mut worktree.as_local_mut().unwrap(); + let worktree = worktree.as_local_mut().unwrap(); worktree.share(project_id, cx).detach_and_log_err(cx) }); } @@ -1062,17 +1062,7 @@ impl Project { cx: &mut ModelContext, ) -> Result<()> { self.set_collaborators_from_proto(message.collaborators, cx)?; - for worktree in self.worktrees.iter() { - if let Some(worktree) = worktree.upgrade(&cx) { - worktree.update(cx, |worktree, _| { - if let Some(worktree) = worktree.as_local_mut() { - worktree.reshare() - } else { - Ok(()) - } - })?; - } - } + let _ = self.metadata_changed(cx); Ok(()) } @@ -6259,18 +6249,14 @@ impl Entity for Project { fn release(&mut self, _: &mut gpui::MutableAppContext) { match &self.client_state { Some(ProjectClientState::Local { remote_id, .. }) => { - self.client - .send(proto::UnshareProject { - project_id: *remote_id, - }) - .log_err(); + let _ = self.client.send(proto::UnshareProject { + project_id: *remote_id, + }); } Some(ProjectClientState::Remote { remote_id, .. }) => { - self.client - .send(proto::LeaveProject { - project_id: *remote_id, - }) - .log_err(); + let _ = self.client.send(proto::LeaveProject { + project_id: *remote_id, + }); } _ => {} } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 3a8c69c704..f22e915785 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -166,7 +166,7 @@ enum ScanState { struct ShareState { project_id: u64, snapshots_tx: watch::Sender, - reshared: watch::Sender<()>, + resume_updates: watch::Sender<()>, _maintain_remote_snapshot: Task>, } @@ -975,12 +975,12 @@ impl LocalWorktree { pub fn share(&mut self, project_id: u64, cx: &mut ModelContext) -> Task> { let (share_tx, share_rx) = oneshot::channel(); - if self.share.is_some() { + if let Some(share) = self.share.as_mut() { let _ = share_tx.send(()); + *share.resume_updates.borrow_mut() = (); } else { let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot()); - let (reshared_tx, mut reshared_rx) = watch::channel(); - let _ = reshared_rx.try_recv(); + let (resume_updates_tx, mut resume_updates_rx) = watch::channel(); let worktree_id = cx.model_id() as u64; for (path, summary) in self.diagnostic_summaries.iter() { @@ -1022,10 +1022,11 @@ impl LocalWorktree { let update = snapshot.build_update(&prev_snapshot, project_id, worktree_id, true); for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) { + let _ = resume_updates_rx.try_recv(); while let Err(error) = client.request(update.clone()).await { log::error!("failed to send worktree update: {}", error); - log::info!("waiting for worktree to be reshared"); - if reshared_rx.next().await.is_none() { + log::info!("waiting to resume updates"); + if resume_updates_rx.next().await.is_none() { return Ok(()); } } @@ -1046,7 +1047,7 @@ impl LocalWorktree { self.share = Some(ShareState { project_id, snapshots_tx, - reshared: reshared_tx, + resume_updates: resume_updates_tx, _maintain_remote_snapshot, }); } @@ -1059,15 +1060,6 @@ impl LocalWorktree { self.share.take(); } - pub fn reshare(&mut self) -> Result<()> { - let share = self - .share - .as_mut() - .ok_or_else(|| anyhow!("can't reshare a worktree that wasn't shared"))?; - *share.reshared.borrow_mut() = (); - Ok(()) - } - pub fn is_shared(&self) -> bool { self.share.is_some() } From 83c98ce0492942668ca40a2132e732c26003da65 Mon 
Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 6 Jan 2023 13:40:32 -0700 Subject: [PATCH 55/56] Prevent making further requests after language server shut down Co-Authored-By: Nathan Sobo --- crates/lsp/src/lsp.rs | 71 +++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/crates/lsp/src/lsp.rs b/crates/lsp/src/lsp.rs index 6e7def92e9..b7199a5287 100644 --- a/crates/lsp/src/lsp.rs +++ b/crates/lsp/src/lsp.rs @@ -40,7 +40,7 @@ pub struct LanguageServer { name: String, capabilities: ServerCapabilities, notification_handlers: Arc>>, - response_handlers: Arc>>, + response_handlers: Arc>>>, executor: Arc, #[allow(clippy::type_complexity)] io_tasks: Mutex>, Task>)>>, @@ -170,12 +170,18 @@ impl LanguageServer { let (outbound_tx, outbound_rx) = channel::unbounded::>(); let notification_handlers = Arc::new(Mutex::new(HashMap::<_, NotificationHandler>::default())); - let response_handlers = Arc::new(Mutex::new(HashMap::<_, ResponseHandler>::default())); + let response_handlers = + Arc::new(Mutex::new(Some(HashMap::<_, ResponseHandler>::default()))); let input_task = cx.spawn(|cx| { let notification_handlers = notification_handlers.clone(); let response_handlers = response_handlers.clone(); async move { - let _clear_response_handlers = ClearResponseHandlers(response_handlers.clone()); + let _clear_response_handlers = util::defer({ + let response_handlers = response_handlers.clone(); + move || { + response_handlers.lock().take(); + } + }); let mut buffer = Vec::new(); loop { buffer.clear(); @@ -200,7 +206,11 @@ impl LanguageServer { } else if let Ok(AnyResponse { id, error, result }) = serde_json::from_slice(&buffer) { - if let Some(handler) = response_handlers.lock().remove(&id) { + if let Some(handler) = response_handlers + .lock() + .as_mut() + .and_then(|handlers| handlers.remove(&id)) + { if let Some(error) = error { handler(Err(error)); } else if let Some(result) = result { @@ -226,7 +236,12 @@ impl LanguageServer { let output_task = cx.background().spawn({ let response_handlers = response_handlers.clone(); async move { - let _clear_response_handlers = ClearResponseHandlers(response_handlers); + let _clear_response_handlers = util::defer({ + let response_handlers = response_handlers.clone(); + move || { + response_handlers.lock().take(); + } + }); let mut content_len_buffer = Vec::new(); while let Ok(message) = outbound_rx.recv().await { log::trace!("outgoing message:{}", String::from_utf8_lossy(&message)); @@ -366,7 +381,7 @@ impl LanguageServer { async move { log::debug!("language server shutdown started"); shutdown_request.await?; - response_handlers.lock().clear(); + response_handlers.lock().take(); exit?; output_done.recv().await; log::debug!("language server shutdown finished"); @@ -521,7 +536,7 @@ impl LanguageServer { fn request_internal( next_id: &AtomicUsize, - response_handlers: &Mutex>, + response_handlers: &Mutex>>, outbound_tx: &channel::Sender>, params: T::Params, ) -> impl 'static + Future> @@ -537,25 +552,31 @@ impl LanguageServer { }) .unwrap(); + let (tx, rx) = oneshot::channel(); + let handle_response = response_handlers + .lock() + .as_mut() + .ok_or_else(|| anyhow!("server shut down")) + .map(|handlers| { + handlers.insert( + id, + Box::new(move |result| { + let response = match result { + Ok(response) => serde_json::from_str(response) + .context("failed to deserialize response"), + Err(error) => Err(anyhow!("{}", error.message)), + }; + let _ = tx.send(response); + }), + ); + }); + let send = outbound_tx 
.try_send(message) .context("failed to write to language server's stdin"); - let (tx, rx) = oneshot::channel(); - response_handlers.lock().insert( - id, - Box::new(move |result| { - let response = match result { - Ok(response) => { - serde_json::from_str(response).context("failed to deserialize response") - } - Err(error) => Err(anyhow!("{}", error.message)), - }; - let _ = tx.send(response); - }), - ); - async move { + handle_response?; send?; rx.await? } @@ -762,14 +783,6 @@ impl FakeLanguageServer { } } -struct ClearResponseHandlers(Arc>>); - -impl Drop for ClearResponseHandlers { - fn drop(&mut self) { - self.0.lock().clear(); - } -} - #[cfg(test)] mod tests { use super::*; From 213658f1e9105628a7b137058973601ea030e015 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Fri, 6 Jan 2023 17:56:21 -0700 Subject: [PATCH 56/56] Fix tests that failed due to defaulting the grouping interval to zero in tests --- crates/collab/src/tests/integration_tests.rs | 7 ++++++- crates/editor/src/editor.rs | 2 ++ crates/editor/src/editor_tests.rs | 8 +++++++- crates/language/src/buffer_tests.rs | 11 ++++++++++- 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/crates/collab/src/tests/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs index 4a1aaf64d1..729da6d109 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/src/tests/integration_tests.rs @@ -1131,6 +1131,7 @@ async fn test_unshare_project( .unwrap(); let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap()); let project_b = client_b.build_remote_project(project_id, cx_b).await; + deterministic.run_until_parked(); assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); project_b @@ -1160,6 +1161,7 @@ async fn test_unshare_project( .await .unwrap(); let project_c2 = client_c.build_remote_project(project_id, cx_c).await; + deterministic.run_until_parked(); assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); project_c2 .update(cx_c, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) @@ -1213,6 +1215,7 @@ async fn test_host_disconnect( .unwrap(); let project_b = client_b.build_remote_project(project_id, cx_b).await; + deterministic.run_until_parked(); assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); let (_, workspace_b) = cx_b.add_window(|cx| { @@ -1467,7 +1470,7 @@ async fn test_project_reconnect( .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let worktree3_id = worktree_a3.read_with(cx_a, |tree, _| { - assert!(tree.as_local().unwrap().is_shared()); + assert!(!tree.as_local().unwrap().is_shared()); tree.id() }); deterministic.run_until_parked(); @@ -1489,6 +1492,7 @@ async fn test_project_reconnect( deterministic.run_until_parked(); project_a1.read_with(cx_a, |project, cx| { assert!(project.is_shared()); + assert!(worktree_a1.read(cx).as_local().unwrap().is_shared()); assert_eq!( worktree_a1 .read(cx) @@ -1510,6 +1514,7 @@ async fn test_project_reconnect( "subdir2/i.txt" ] ); + assert!(worktree_a3.read(cx).as_local().unwrap().is_shared()); assert_eq!( worktree_a3 .read(cx) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index d8ee49866b..85da12658a 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -3611,7 +3611,9 @@ impl Editor { } pub fn undo(&mut self, _: &Undo, cx: &mut ViewContext) { + dbg!("undo"); if let Some(tx_id) = self.buffer.update(cx, |buffer, cx| buffer.undo(cx)) { + 
dbg!(tx_id); if let Some((selections, _)) = self.selection_history.transaction(tx_id).cloned() { self.change_selections(None, cx, |s| { s.select_anchors(selections.to_vec()); diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 2fcc5f0014..b9f3c67f38 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -29,7 +29,11 @@ use workspace::{ #[gpui::test] fn test_edit_events(cx: &mut MutableAppContext) { cx.set_global(Settings::test(cx)); - let buffer = cx.add_model(|cx| language::Buffer::new(0, "123456", cx)); + let buffer = cx.add_model(|cx| { + let mut buffer = language::Buffer::new(0, "123456", cx); + buffer.set_group_interval(Duration::from_secs(1)); + buffer + }); let events = Rc::new(RefCell::new(Vec::new())); let (_, editor1) = cx.add_window(Default::default(), { @@ -3502,6 +3506,8 @@ async fn test_surround_with_pair(cx: &mut gpui::TestAppContext) { ] ); + view.undo(&Undo, cx); + view.undo(&Undo, cx); view.undo(&Undo, cx); assert_eq!( view.text(cx), diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index e0b7d080cb..09ccc5d621 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -289,6 +289,9 @@ async fn test_reparse(cx: &mut gpui::TestAppContext) { ); buffer.update(cx, |buf, cx| { + buf.undo(cx); + buf.undo(cx); + buf.undo(cx); buf.undo(cx); assert_eq!(buf.text(), "fn a() {}"); assert!(buf.is_parsing()); @@ -304,6 +307,9 @@ async fn test_reparse(cx: &mut gpui::TestAppContext) { ); buffer.update(cx, |buf, cx| { + buf.redo(cx); + buf.redo(cx); + buf.redo(cx); buf.redo(cx); assert_eq!(buf.text(), "fn a(b: C) { d.e::(f); }"); assert!(buf.is_parsing()); @@ -1022,8 +1028,11 @@ fn test_autoindent_block_mode(cx: &mut MutableAppContext) { .unindent() ); + // Grouping is disabled in tests, so we need 2 undos + buffer.undo(cx); // Undo the auto-indent + buffer.undo(cx); // Undo the original edit + // Insert the block at a deeper indent level. The entire block is outdented. - buffer.undo(cx); buffer.edit([(Point::new(2, 0)..Point::new(2, 0), " ")], None, cx); buffer.edit( [(Point::new(2, 8)..Point::new(2, 8), inserted_text)],