working_copy: in LockedWorkingCopy::drop(), discard unsaved changes

In `LockedWorkingCopy::drop()`, we print a "BUG: Working copy lock was
dropped without being closed." error if the caller had not called
`finish()`. IIRC, the idea was both to find bugs where we forgot to
call `finish()` and to prevent continuing with a modified
`WorkingCopy` instance. The former hasn't been a problem in practice.
What has been a problem in practice is having to call `discard()`
just to avoid the message. To address that, we can make the `Drop`
implementation itself discard the changes (forcing a reload of the
state if the working copy is accessed again).
Author: Martin von Zweigbergk, 2023-09-01 09:57:50 -07:00 (committed by Martin von Zweigbergk)
parent f3a16eb964
commit b8f71a4b30
6 changed files with 5 additions and 22 deletions
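The behavior change boils down to the `impl Drop for LockedWorkingCopy` hunk below: instead of complaining when the lock guard is dropped without `finish()`, the guard now throws away its cached in-memory state so the next access reloads it from disk. The following is a minimal, self-contained sketch of that pattern, using a hypothetical `LockGuard` type and a plain `String` as stand-ins; these are not the actual jj types or API.

```rust
// Hypothetical stand-ins for LockedWorkingCopy and its cached TreeState.
struct LockGuard<'a> {
    // Cached in-memory state; `None` forces a reload from disk on next access.
    cached: &'a mut Option<String>,
    closed: bool,
}

impl LockGuard<'_> {
    // Persist changes (elided here) and mark the guard as cleanly closed.
    fn finish(mut self) {
        self.closed = true;
    }
}

impl Drop for LockGuard<'_> {
    fn drop(&mut self) {
        if !self.closed {
            // Undo the changes in memory instead of printing a "BUG" message.
            self.cached.take();
        }
    }
}

fn main() {
    let mut state = Some(String::from("modified, unsaved state"));
    let guard = LockGuard { cached: &mut state, closed: false };
    drop(guard); // dropped without finish(): the unsaved change is discarded
    assert!(state.is_none(), "next access has to reload from disk");

    let mut state = Some(String::from("modified, saved state"));
    LockGuard { cached: &mut state, closed: false }.finish();
    assert!(state.is_some(), "finish() keeps the in-memory state");
}
```

The only difference from the pre-commit behavior is in `drop()`: the old code kept the (possibly dirty) state and printed a warning, which forced callers to remember an explicit `discard()` on every error path.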

@@ -1275,7 +1275,6 @@ impl WorkspaceCommandHelper {
                 (repo, wc_commit)
             }
             Err(StaleWorkingCopyError::WorkingCopyStale) => {
-                locked_wc.discard();
                 return Err(user_error_with_hint(
                     format!(
                         "The working copy is stale (not updated since operation {}).",
@@ -1287,7 +1286,6 @@ See https://github.com/martinvonz/jj/blob/main/docs/working-copy.md#stale-workin
                 ));
             }
             Err(StaleWorkingCopyError::SiblingOperation) => {
-                locked_wc.discard();
                 return Err(CommandError::InternalError(format!(
                     "The repo was loaded at operation {}, which seems to be a sibling of the \
                      working copy's operation {}",
@@ -1296,7 +1294,6 @@ See https://github.com/martinvonz/jj/blob/main/docs/working-copy.md#stale-workin
                 )));
             }
             Err(StaleWorkingCopyError::UnrelatedOperation) => {
-                locked_wc.discard();
                 return Err(CommandError::InternalError(format!(
                     "The repo was loaded at operation {}, which seems unrelated to the working \
                      copy's operation {}",

@@ -1348,7 +1348,7 @@ fn cmd_untrack(
     let wc_tree = store.get_root_tree(&wc_tree_id)?;
     let added_back = wc_tree.entries_matching(matcher.as_ref()).collect_vec();
     if !added_back.is_empty() {
-        locked_working_copy.discard();
+        drop(locked_working_copy);
         let path = &added_back[0].0;
         let ui_path = workspace_command.format_file_path(path);
         let message = if added_back.len() > 1 {
@@ -3594,7 +3594,6 @@ fn cmd_workspace_update_stale(
         workspace_command.unchecked_start_working_copy_mutation()?;
     match check_stale_working_copy(&locked_wc, &desired_wc_commit, &repo) {
         Ok(_) => {
-            locked_wc.discard();
             ui.write("Nothing to do (the working copy is not stale).\n")?;
         }
         Err(_) => {

@@ -26,7 +26,6 @@ fn test_snapshot_large_file() {
     std::fs::write(repo_path.join("large"), "a lot of text").unwrap();
     let stderr = test_env.jj_cmd_failure(&repo_path, &["files"]);
     insta::assert_snapshot!(stderr, @r###"
-    BUG: Working copy lock was dropped without being closed.
     Error: Failed to snapshot the working copy: New file $TEST_ENV/repo/large of size ~13.0B exceeds snapshot.max-new-file-size (10.0B)
     Hint: Increase the value of the `snapshot.max-new-file-size` config option if you
     want this file to be snapshotted. Otherwise add it to your `.gitignore` file.

@@ -1524,7 +1524,6 @@ impl WorkingCopy {
         // them know.
         if let Some(old_tree_id) = old_tree_id {
             if *old_tree_id != locked_wc.old_tree_id {
-                locked_wc.discard();
                 return Err(CheckoutError::ConcurrentCheckout);
             }
         }
@@ -1626,19 +1625,13 @@ impl LockedWorkingCopy<'_> {
         self.closed = true;
         Ok(())
     }
-
-    pub fn discard(mut self) {
-        // Undo the changes in memory
-        self.wc.tree_state.take();
-        self.tree_state_dirty = false;
-        self.closed = true;
-    }
 }

 impl Drop for LockedWorkingCopy<'_> {
     fn drop(&mut self) {
-        if !self.closed && !std::thread::panicking() {
-            eprintln!("BUG: Working copy lock was dropped without being closed.");
+        if !self.closed {
+            // Undo the changes in memory
+            self.wc.tree_state.take();
         }
     }
 }

@@ -407,7 +407,7 @@ fn test_checkout_discard() {
     let reloaded_wc = WorkingCopy::load(store.clone(), workspace_root.clone(), state_path.clone());
     assert!(reloaded_wc.file_states().unwrap().contains_key(&file1_path));
     assert!(!reloaded_wc.file_states().unwrap().contains_key(&file2_path));
-    locked_wc.discard();
+    drop(locked_wc);
     // The change should remain in the working copy, but not in memory and not saved
     assert!(wc.file_states().unwrap().contains_key(&file1_path));
@@ -447,7 +447,6 @@ fn test_snapshot_racy_timestamps(use_git: bool) {
         let new_tree_id = locked_wc
             .snapshot(SnapshotOptions::empty_for_test())
             .unwrap();
-        locked_wc.discard();
         assert_ne!(new_tree_id, previous_tree_id);
         previous_tree_id = new_tree_id;
     }
@@ -856,7 +855,6 @@ fn test_fsmonitor() {
         let mut locked_wc = wc.start_mutation().unwrap();
         let tree_id = snapshot(&mut locked_wc, &[]);
         assert_eq!(tree_id, repo.store().empty_merged_tree_id());
-        locked_wc.discard();
     }

     {
@@ -866,7 +864,6 @@ fn test_fsmonitor() {
         tree 205f6b799e7d5c2524468ca006a0131aa57ecce7
         file "foo" (257cc5642cb1a054f08cc83f2d943e56fd3ebe99): "foo\n"
         "###);
-        locked_wc.discard();
     }

     {
@@ -895,7 +892,6 @@ fn test_fsmonitor() {
         file "foo" (9d053d7c8a18a286dce9b99a59bb058be173b463): "updated foo\n"
         file "path/to/nested" (79c53955ef856f16f2107446bc721c8879a1bd2e): "nested\n"
         "###);
-        locked_wc.discard();
     }

     {

@@ -128,7 +128,6 @@ fn test_checkout_parallel() {
             let new_tree_id = locked_wc
                 .snapshot(SnapshotOptions::empty_for_test())
                 .unwrap();
-            locked_wc.discard();
             assert!(tree_ids.contains(&new_tree_id));
         });
}