Clean up worktrees' shared state when unsharing

commit f51cf6b05e
parent 5415663a73
Author: Antonio Scandurra
Date:   2022-01-17 10:58:49 +01:00

4 changed files with 62 additions and 21 deletions


@@ -428,6 +428,11 @@ impl Project {
             rpc.send(proto::UnshareProject { project_id }).await?;
             this.update(&mut cx, |this, cx| {
                 this.collaborators.clear();
+                for worktree in &this.worktrees {
+                    worktree.update(cx, |worktree, _| {
+                        worktree.as_local_mut().unwrap().unshare();
+                    });
+                }
                 cx.notify()
             });
             Ok(())
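On the host, unsharing now clears each local worktree's share state along with the collaborator list. Reduced to its essentials (simplified names, not the actual Zed types), the per-worktree pattern this loop relies on is optional share state that gets dropped:

    // Fields (project_id, snapshots_tx, ...) elided for brevity.
    struct ShareState;

    struct LocalWorktree {
        share: Option<ShareState>,
    }

    impl LocalWorktree {
        fn unshare(&mut self) {
            // Dropping the state is the whole teardown; Option::take is idempotent.
            self.share.take();
        }

        fn is_shared(&self) -> bool {
            self.share.is_some()
        }
    }

    fn main() {
        let mut worktrees = vec![
            LocalWorktree { share: Some(ShareState) },
            LocalWorktree { share: Some(ShareState) },
        ];
        // Mirrors the loop in the hunk above: unshare every local worktree.
        for worktree in &mut worktrees {
            worktree.unshare();
        }
        assert!(worktrees.iter().all(|worktree| !worktree.is_shared()));
    }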


@@ -1009,6 +1009,7 @@ pub struct LocalWorktree {
 struct ShareState {
     project_id: u64,
     snapshots_tx: Sender<Snapshot>,
+    _maintain_remote_snapshot: Option<Task<()>>,
 }

 pub struct RemoteWorktree {
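The new field is never read; it exists so the `Task` it holds stays alive while the worktree is shared. The leading underscore is the usual Rust convention for such keep-alive fields: it tells readers and the `dead_code` lint alike that the field is intentionally unused. A generic illustration, nothing Zed-specific:

    struct ConnectionGuard {
        // Underscore-prefixed: intentionally never read. The field exists for
        // its drop side effect (here, printing; in the commit, cancelling a task).
        _keepalive: Logger,
    }

    struct Logger;

    impl Drop for Logger {
        fn drop(&mut self) {
            println!("background work stops here");
        }
    }

    fn main() {
        let guard = ConnectionGuard { _keepalive: Logger };
        println!("shared");
        drop(guard); // Logger::drop runs: "background work stops here"
    }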
@@ -1565,29 +1566,27 @@ impl LocalWorktree {
         let rpc = self.client.clone();
         let worktree_id = cx.model_id() as u64;
         let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
+        let maintain_remote_snapshot = cx.background().spawn({
+            let rpc = rpc.clone();
+            let snapshot = snapshot.clone();
+            async move {
+                let mut prev_snapshot = snapshot;
+                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
+                    let message =
+                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
+                    match rpc.send(message).await {
+                        Ok(()) => prev_snapshot = snapshot,
+                        Err(err) => log::error!("error sending snapshot diff {}", err),
+                    }
+                }
+            }
+        });
         self.share = Some(ShareState {
             project_id,
             snapshots_tx: snapshots_to_send_tx,
+            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
         });
-
-        cx.background()
-            .spawn({
-                let rpc = rpc.clone();
-                let snapshot = snapshot.clone();
-                async move {
-                    let mut prev_snapshot = snapshot;
-                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
-                        let message =
-                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
-                        match rpc.send(message).await {
-                            Ok(()) => prev_snapshot = snapshot,
-                            Err(err) => log::error!("error sending snapshot diff {}", err),
-                        }
-                    }
-                }
-            })
-            .detach();

         let diagnostic_summaries = self.diagnostic_summaries.clone();
         let share_message = cx.background().spawn(async move {
             proto::ShareWorktree {
@@ -1601,6 +1600,14 @@ impl LocalWorktree {
             Ok(())
         })
     }
+
+    pub fn unshare(&mut self) {
+        self.share.take();
+    }
+
+    pub fn is_shared(&self) -> bool {
+        self.share.is_some()
+    }
 }

 fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
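These two hunks are the heart of the change: the snapshot-sync loop used to be spawned and `.detach()`ed, so nothing owned it and it could only exit when its channel closed; now the `Task` handle lives in `ShareState`, so `unshare()` (via `Option::take`) drops the task and cancels the loop. A minimal, self-contained sketch of that drop-to-cancel behavior, using the `smol` crate in place of gpui's executor (names and types are illustrative, not Zed's):

    use std::time::Duration;

    struct ShareState {
        _maintain_remote_snapshot: smol::Task<()>,
    }

    fn main() {
        smol::block_on(async {
            let (snapshots_tx, snapshots_rx) = smol::channel::unbounded::<u64>();

            // Before this commit: smol::spawn(...).detach(), so nothing owns
            // the loop and it runs until the channel closes. After: the Task
            // lives in ShareState, so dropping the state cancels the loop.
            let share = ShareState {
                _maintain_remote_snapshot: smol::spawn(async move {
                    while let Ok(version) = snapshots_rx.recv().await {
                        println!("sent snapshot update {}", version);
                    }
                }),
            };

            snapshots_tx.send(1).await.unwrap();
            smol::Timer::after(Duration::from_millis(50)).await;

            // "unshare": dropping ShareState drops the Task, which cancels
            // the future and drops snapshots_rx with it.
            drop(share);
            smol::Timer::after(Duration::from_millis(50)).await;
            assert!(snapshots_tx.send(2).await.is_err()); // receiver is gone
        });
    }

The timers only give the global executor a chance to run the loop and observe the cancellation; the essential point is that with no `.detach()` there is no orphaned background task.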


@@ -1305,6 +1305,7 @@ mod tests {
             .update(&mut cx_a, |project, cx| project.share(cx))
             .await
             .unwrap();
+        assert!(worktree_a.read_with(&cx_a, |tree, _| tree.as_local().unwrap().is_shared()));

         // Join that project as client B
         let project_b = Project::remote(
@@ -1331,6 +1332,30 @@ mod tests {
         project_b
             .condition(&mut cx_b, |project, _| project.is_read_only())
             .await;
+        assert!(worktree_a.read_with(&cx_a, |tree, _| !tree.as_local().unwrap().is_shared()));
+        drop(project_b);
+
+        // Share the project again and ensure guests can still join.
+        project_a
+            .update(&mut cx_a, |project, cx| project.share(cx))
+            .await
+            .unwrap();
+        assert!(worktree_a.read_with(&cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
+
+        let project_c = Project::remote(
+            project_id,
+            client_b.clone(),
+            client_b.user_store.clone(),
+            lang_registry.clone(),
+            fs.clone(),
+            &mut cx_b.to_async(),
+        )
+        .await
+        .unwrap();
+        let worktree_c = project_c.read_with(&cx_b, |p, _| p.worktrees()[0].clone());
+        worktree_c
+            .update(&mut cx_b, |tree, cx| tree.open_buffer("a.txt", cx))
+            .await
+            .unwrap();
     }

     #[gpui::test]


@@ -63,7 +63,7 @@ pub struct JoinedProject<'a> {
     pub project: &'a Project,
 }

-pub struct UnsharedWorktree {
+pub struct UnsharedProject {
     pub connection_ids: Vec<ConnectionId>,
     pub authorized_user_ids: Vec<UserId>,
 }
@@ -348,7 +348,7 @@ impl Store {
         &mut self,
         project_id: u64,
         acting_connection_id: ConnectionId,
-    ) -> tide::Result<UnsharedWorktree> {
+    ) -> tide::Result<UnsharedProject> {
         let project = if let Some(project) = self.projects.get_mut(&project_id) {
             project
         } else {
@@ -368,10 +368,14 @@ impl Store {
             }
         }

+        for worktree in project.worktrees.values_mut() {
+            worktree.share.take();
+        }
+
         #[cfg(test)]
         self.check_invariants();

-        Ok(UnsharedWorktree {
+        Ok(UnsharedProject {
             connection_ids,
             authorized_user_ids,
         })
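On the server, the rename from `UnsharedWorktree` to `UnsharedProject` fixes a leftover name (the operation unshares a whole project), and the new loop clears each worktree's share data so that stale guest-visible state cannot survive into a later re-share, which is exactly what the new integration test exercises. A hedged sketch of that invariant (made-up field types; the real server-side `share` holds per-worktree snapshot state):

    use std::collections::HashMap;

    struct WorktreeShare {
        entries: Vec<String>, // stand-in for the entries guests can see
    }

    struct Worktree {
        share: Option<WorktreeShare>,
    }

    struct Project {
        worktrees: HashMap<u64, Worktree>,
    }

    fn unshare_project(project: &mut Project) {
        // Without this loop, a re-shared project would start from the old
        // share data instead of the fresh snapshots sent by the host.
        for worktree in project.worktrees.values_mut() {
            worktree.share.take();
        }
    }

    fn main() {
        let mut project = Project {
            worktrees: HashMap::from([(
                1,
                Worktree {
                    share: Some(WorktreeShare { entries: vec!["a.txt".into()] }),
                },
            )]),
        };
        assert_eq!(project.worktrees[&1].share.as_ref().unwrap().entries.len(), 1);
        unshare_project(&mut project);
        assert!(project.worktrees.values().all(|w| w.share.is_none()));
    }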