Add randomized test for mutating worktree during initial scan

Max Brunsfeld 2023-04-13 22:34:03 -07:00
parent debb694d97
commit bb1cfd51b8
2 changed files with 188 additions and 34 deletions


@@ -523,31 +523,7 @@ impl FakeFs {
}
pub async fn insert_file(&self, path: impl AsRef<Path>, content: String) {
let mut state = self.state.lock();
let path = path.as_ref();
let inode = state.next_inode;
let mtime = state.next_mtime;
state.next_inode += 1;
state.next_mtime += Duration::from_nanos(1);
let file = Arc::new(Mutex::new(FakeFsEntry::File {
inode,
mtime,
content,
}));
state
.write_path(path, move |entry| {
match entry {
btree_map::Entry::Vacant(e) => {
e.insert(file);
}
btree_map::Entry::Occupied(mut e) => {
*e.get_mut() = file;
}
}
Ok(())
})
.unwrap();
state.emit_event(&[path]);
self.write_file_internal(path, content).unwrap()
}
pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
@@ -569,6 +545,33 @@ impl FakeFs {
state.emit_event(&[path]);
}
fn write_file_internal(&self, path: impl AsRef<Path>, content: String) -> Result<()> {
let mut state = self.state.lock();
let path = path.as_ref();
let inode = state.next_inode;
let mtime = state.next_mtime;
state.next_inode += 1;
state.next_mtime += Duration::from_nanos(1);
let file = Arc::new(Mutex::new(FakeFsEntry::File {
inode,
mtime,
content,
}));
state.write_path(path, move |entry| {
match entry {
btree_map::Entry::Vacant(e) => {
e.insert(file);
}
btree_map::Entry::Occupied(mut e) => {
*e.get_mut() = file;
}
}
Ok(())
})?;
state.emit_event(&[path]);
Ok(())
}
pub async fn pause_events(&self) {
self.state.lock().events_paused = true;
}
@@ -952,7 +955,7 @@ impl Fs for FakeFs {
async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
self.simulate_random_delay().await;
let path = normalize_path(path.as_path());
self.insert_file(path, data.to_string()).await;
self.write_file_internal(path, data.to_string())?;
Ok(())
}
@@ -961,7 +964,7 @@
self.simulate_random_delay().await;
let path = normalize_path(path);
let content = chunks(text, line_ending).collect();
self.insert_file(path, content).await;
self.write_file_internal(path, content)?;
Ok(())
}
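
Taken together, the hunks above make write_file_internal the single synchronous write path inside FakeFs: insert_file now delegates to it, and the two Fs-trait write methods in the preceding hunks call it directly instead of awaiting insert_file, so every write bumps the fake inode and mtime counters and emits one event for the written path. A minimal usage sketch, assuming a gpui TestAppContext named cx as in the tests below (the path and contents are invented for illustration):

async fn fake_fs_write_example(cx: &mut gpui::TestAppContext) {
    let fs = FakeFs::new(cx.background());
    // Seed an empty root directory, as the randomized tests below do.
    fs.insert_tree("/root", serde_json::json!({})).await;
    // Async test helper: routed through write_file_internal, emits one event.
    fs.insert_file("/root/a.txt", "hello".to_string()).await;
    // Fs-trait write: also routed through write_file_internal now.
    let fs = fs as std::sync::Arc<dyn Fs>;
    fs.atomic_write("/root/a.txt".into(), "world".to_string())
        .await
        .unwrap();
}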


@@ -3523,6 +3523,83 @@ mod tests {
assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
}
#[gpui::test(iterations = 100)]
async fn test_random_worktree_operations_during_initial_scan(
cx: &mut TestAppContext,
mut rng: StdRng,
) {
let operations = env::var("OPERATIONS")
.map(|o| o.parse().unwrap())
.unwrap_or(5);
let initial_entries = env::var("INITIAL_ENTRIES")
.map(|o| o.parse().unwrap())
.unwrap_or(20);
let root_dir = Path::new("/test");
let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
fs.as_fake().insert_tree(root_dir, json!({})).await;
for _ in 0..initial_entries {
randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
}
log::info!("generated initial tree");
let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let worktree = Worktree::local(
client.clone(),
root_dir,
true,
fs.clone(),
Default::default(),
&mut cx.to_async(),
)
.await
.unwrap();
let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
for _ in 0..operations {
worktree
.update(cx, |worktree, cx| {
randomly_mutate_worktree(worktree, &mut rng, cx)
})
.await
.log_err();
worktree.read_with(cx, |tree, _| {
tree.as_local().unwrap().snapshot.check_invariants()
});
if rng.gen_bool(0.6) {
let new_snapshot =
worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
let update = new_snapshot.build_update(&snapshot, 0, 0, true);
snapshot.apply_remote_update(update.clone()).unwrap();
assert_eq!(
snapshot.to_vec(true),
new_snapshot.to_vec(true),
"incorrect snapshot after update {:?}",
update
);
}
}
worktree
.update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
.await;
worktree.read_with(cx, |tree, _| {
tree.as_local().unwrap().snapshot.check_invariants()
});
let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
let update = new_snapshot.build_update(&snapshot, 0, 0, true);
snapshot.apply_remote_update(update.clone()).unwrap();
assert_eq!(
snapshot.to_vec(true),
new_snapshot.to_vec(true),
"incorrect snapshot after update {:?}",
update
);
}
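// Note: OPERATIONS and INITIAL_ENTRIES (read above, defaulting to 5 and 20)
// let this randomized test be scaled up via environment variables, and the
// gpui::test attribute reruns it for 100 iterations, each with its own StdRng.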
#[gpui::test(iterations = 100)]
async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
let operations = env::var("OPERATIONS")
@@ -3536,18 +3613,17 @@ mod tests {
let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
fs.as_fake().insert_tree(root_dir, json!({})).await;
for _ in 0..initial_entries {
randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
}
log::info!("generated initial tree");
let next_entry_id = Arc::new(AtomicUsize::default());
let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let worktree = Worktree::local(
client.clone(),
root_dir,
true,
fs.clone(),
next_entry_id.clone(),
Default::default(),
&mut cx.to_async(),
)
.await
@@ -3603,14 +3679,14 @@ mod tests {
let mut snapshots = Vec::new();
let mut mutations_len = operations;
while mutations_len > 1 {
randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
let buffered_event_count = fs.as_fake().buffered_event_count().await;
if buffered_event_count > 0 && rng.gen_bool(0.3) {
let len = rng.gen_range(0..=buffered_event_count);
log::info!("flushing {} events", len);
fs.as_fake().flush_events(len).await;
} else {
randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
mutations_len -= 1;
}
@@ -3635,7 +3711,7 @@ mod tests {
root_dir,
true,
fs.clone(),
next_entry_id,
Default::default(),
&mut cx.to_async(),
)
.await
@@ -3679,7 +3755,67 @@ mod tests {
}
}
async fn randomly_mutate_tree(
fn randomly_mutate_worktree(
worktree: &mut Worktree,
rng: &mut impl Rng,
cx: &mut ModelContext<Worktree>,
) -> Task<Result<()>> {
let worktree = worktree.as_local_mut().unwrap();
let snapshot = worktree.snapshot();
let entry = snapshot.entries(false).choose(rng).unwrap();
match rng.gen_range(0_u32..100) {
0..=33 if entry.path.as_ref() != Path::new("") => {
log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
worktree.delete_entry(entry.id, cx).unwrap()
}
..=66 if entry.path.as_ref() != Path::new("") => {
let other_entry = snapshot.entries(false).choose(rng).unwrap();
let new_parent_path = if other_entry.is_dir() {
other_entry.path.clone()
} else {
other_entry.path.parent().unwrap().into()
};
let mut new_path = new_parent_path.join(gen_name(rng));
if new_path.starts_with(&entry.path) {
new_path = gen_name(rng).into();
}
log::info!(
"renaming entry {:?} ({}) to {:?}",
entry.path,
entry.id.0,
new_path
);
let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
cx.foreground().spawn(async move {
task.await?;
Ok(())
})
}
_ => {
let task = if entry.is_dir() {
let child_path = entry.path.join(gen_name(rng));
let is_dir = rng.gen_bool(0.3);
log::info!(
"creating {} at {:?}",
if is_dir { "dir" } else { "file" },
child_path,
);
worktree.create_entry(child_path, is_dir, cx)
} else {
log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
};
cx.foreground().spawn(async move {
task.await?;
Ok(())
})
}
}
}
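// The gen_name helper called above is used but not shown in this diff. A
// minimal, hypothetical sketch of such a helper, assuming the rand crate's
// Alphanumeric distribution (name length and character set are guesses, not
// taken from the repository):
fn gen_name(rng: &mut impl Rng) -> String {
    (0..6)
        .map(|_| rng.sample(rand::distributions::Alphanumeric))
        .map(char::from)
        .collect()
}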
async fn randomly_mutate_fs(
fs: &Arc<dyn Fs>,
root_path: &Path,
insertion_probability: f64,
@@ -3847,6 +3983,20 @@ mod tests {
impl LocalSnapshot {
fn check_invariants(&self) {
assert_eq!(
self.entries_by_path
.cursor::<()>()
.map(|e| (&e.path, e.id))
.collect::<Vec<_>>(),
self.entries_by_id
.cursor::<()>()
.map(|e| (&e.path, e.id))
.collect::<collections::BTreeSet<_>>()
.into_iter()
.collect::<Vec<_>>(),
"entries_by_path and entries_by_id are inconsistent"
);
let mut files = self.files(true, 0);
let mut visible_files = self.files(false, 0);
for entry in self.entries_by_path.cursor::<()>() {
@@ -3857,6 +4007,7 @@ mod tests {
}
}
}
assert!(files.next().is_none());
assert!(visible_files.next().is_none());
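
The strengthened check_invariants verifies two properties: entries_by_path and entries_by_id must describe exactly the same set of (path, id) pairs (collecting the id-keyed side into a BTreeSet re-sorts it by path, so both sides compare in path order), and the files and visible_files iterators, which the loop body elided above presumably advances in step with entries_by_path, must be fully exhausted once the loop ends. A simplified, self-contained sketch of the first check, using toy BTreeMap indices instead of the snapshot's cursor-based trees:

use std::collections::{BTreeMap, BTreeSet};

// Toy version of the consistency assertion: a path-ordered index and an
// id-keyed index must describe exactly the same entries.
fn check_indices_consistent(by_path: &BTreeMap<String, u64>, by_id: &BTreeMap<u64, String>) {
    let from_path: Vec<(&String, u64)> = by_path.iter().map(|(path, id)| (path, *id)).collect();
    let from_id: Vec<(&String, u64)> = by_id
        .iter()
        .map(|(id, path)| (path, *id))
        .collect::<BTreeSet<_>>() // re-sort by path, as the real assertion does
        .into_iter()
        .collect();
    assert_eq!(from_path, from_id, "entries_by_path and entries_by_id are inconsistent");
}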