working_copy: return MergedTreeId on snapshot

Martin von Zweigbergk, 2023-08-25 17:24:56 -07:00 (committed by Martin von Zweigbergk)
parent 88e9933462
commit abf3853717
6 changed files with 29 additions and 23 deletions


@@ -1295,7 +1295,7 @@ See https://github.com/martinvonz/jj/blob/main/docs/working-copy.md#stale-workin
max_new_file_size: self.settings.max_new_file_size()?,
})?;
drop(progress);
- if new_tree_id != *wc_commit.tree_id() {
+ if new_tree_id != *wc_commit.merged_tree_id() {
let mut tx = start_repo_transaction(
&self.user_repo.repo,
&self.settings,
@@ -1305,7 +1305,7 @@ See https://github.com/martinvonz/jj/blob/main/docs/working-copy.md#stale-workin
let mut_repo = tx.mut_repo();
let commit = mut_repo
.rewrite_commit(&self.settings, &wc_commit)
- .set_tree(new_tree_id)
+ .set_tree_id(new_tree_id)
.write()?;
mut_repo.set_wc_commit(workspace_id, commit.id().clone())?;
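
The hunks above move the CLI's snapshot path to MergedTreeId end to end. A minimal sketch of the caller-side pattern, assuming the surrounding CLI state (locked_wc, wc_commit, mut_repo, settings) is already in hand, and with jj_lib module paths written from context rather than taken from this commit:

    use jj_lib::backend::MergedTreeId;
    use jj_lib::commit::Commit;
    use jj_lib::repo::MutableRepo;
    use jj_lib::settings::UserSettings;
    use jj_lib::working_copy::{LockedWorkingCopy, SnapshotOptions};

    fn commit_working_copy_if_changed(
        locked_wc: &mut LockedWorkingCopy<'_>,
        wc_commit: &Commit,
        mut_repo: &mut MutableRepo,
        settings: &UserSettings,
        options: SnapshotOptions,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // snapshot() now returns a MergedTreeId rather than a legacy TreeId ...
        let new_tree_id: MergedTreeId = locked_wc.snapshot(options)?;
        // ... so the comparison uses the commit's merged_tree_id() accessor ...
        if new_tree_id != *wc_commit.merged_tree_id() {
            // ... and the rewritten commit receives the id via set_tree_id().
            let _commit = mut_repo
                .rewrite_commit(settings, wc_commit)
                .set_tree_id(new_tree_id)
                .write()?;
            // The real CLI code then points the workspace's working-copy commit at it.
        }
        Ok(())
    }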


@@ -1334,12 +1334,14 @@ fn cmd_untrack(
locked_working_copy.reset(&new_tree)?;
// Commit the working copy again so we can inform the user if paths couldn't be
// untracked because they're not ignored.
- let wc_tree_id = locked_working_copy.snapshot(SnapshotOptions {
- base_ignores,
- fsmonitor_kind: command.settings().fsmonitor_kind()?,
- progress: None,
- max_new_file_size: command.settings().max_new_file_size()?,
- })?;
+ let wc_tree_id = locked_working_copy
+ .snapshot(SnapshotOptions {
+ base_ignores,
+ fsmonitor_kind: command.settings().fsmonitor_kind()?,
+ progress: None,
+ max_new_file_size: command.settings().max_new_file_size()?,
+ })?
+ .to_legacy_tree_id();
if wc_tree_id != new_tree_id {
let wc_tree = store.get_tree(&RepoPath::root(), &wc_tree_id)?;
let added_back = wc_tree.entries_matching(matcher.as_ref()).collect_vec();
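
In cmd_untrack the result is still needed as a plain legacy tree, so the new MergedTreeId is converted right away. A minimal sketch of that bridging pattern, assuming the store and option values are supplied by the caller:

    use std::sync::Arc;
    use jj_lib::repo_path::RepoPath;
    use jj_lib::store::Store;
    use jj_lib::tree::Tree;
    use jj_lib::working_copy::{LockedWorkingCopy, SnapshotOptions};

    fn snapshot_as_legacy_tree(
        locked_wc: &mut LockedWorkingCopy<'_>,
        store: &Arc<Store>,
        options: SnapshotOptions,
    ) -> Result<Tree, Box<dyn std::error::Error>> {
        // snapshot() hands back a MergedTreeId; this code path still compares and
        // loads plain trees, so it unwraps to the single legacy TreeId immediately.
        let wc_tree_id = locked_wc.snapshot(options)?.to_legacy_tree_id();
        Ok(store.get_tree(&RepoPath::root(), &wc_tree_id)?)
    }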


@@ -1583,10 +1583,10 @@ impl LockedWorkingCopy<'_> {
// The base_ignores are passed in here rather than being set on the TreeState
// because the TreeState may be long-lived if the library is used in a
// long-lived process.
- pub fn snapshot(&mut self, options: SnapshotOptions) -> Result<TreeId, SnapshotError> {
+ pub fn snapshot(&mut self, options: SnapshotOptions) -> Result<MergedTreeId, SnapshotError> {
let tree_state = self.wc.tree_state_mut()?;
self.tree_state_dirty |= tree_state.snapshot(options)?;
- Ok(tree_state.current_tree_id().to_legacy_tree_id())
+ Ok(tree_state.current_tree_id().clone())
}
pub fn check_out(&mut self, new_tree: &MergedTree) -> Result<CheckoutStats, CheckoutError> {
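
LockedWorkingCopy::snapshot() now returns the TreeState's MergedTreeId unchanged instead of unwrapping it. For callers that still need the old shape, the two legacy views seen elsewhere in this commit remain available. A small sketch, assuming an id that is still the Legacy variant (both helpers are only meaningful for that variant):

    use jj_lib::backend::{MergedTreeId, TreeId};

    fn legacy_views(id: &MergedTreeId) {
        // Owned legacy id: what snapshot() used to return before this commit.
        let owned: TreeId = id.to_legacy_tree_id();
        // Borrowed legacy id: useful where an API still expects &TreeId.
        let borrowed: &TreeId = id.as_legacy_tree_id();
        let _ = (owned, borrowed);
    }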


@@ -25,7 +25,7 @@ use std::os::unix::net::UnixListener;
use std::sync::Arc;
use itertools::Itertools;
- use jj_lib::backend::{ObjectId, TreeId, TreeValue};
+ use jj_lib::backend::{MergedTreeId, ObjectId, TreeId, TreeValue};
use jj_lib::fsmonitor::FsmonitorKind;
use jj_lib::merge::Merge;
use jj_lib::merged_tree::MergedTree;
@@ -444,7 +444,7 @@ fn test_snapshot_racy_timestamps(use_git: bool) {
let workspace_root = test_workspace.workspace.workspace_root().clone();
let file_path = workspace_root.join("file");
- let mut previous_tree_id = repo.store().empty_tree_id().clone();
+ let mut previous_tree_id = MergedTreeId::Legacy(repo.store().empty_tree_id().clone());
let wc = test_workspace.workspace.working_copy_mut();
for i in 0..100 {
{
@@ -496,7 +496,7 @@ fn test_snapshot_special_file() {
.snapshot(SnapshotOptions::empty_for_test())
.unwrap();
locked_wc.finish(OperationId::from_hex("abc123")).unwrap();
- let tree = store.get_tree(&RepoPath::root(), &tree_id).unwrap();
+ let tree = store.get_root_tree(&tree_id).unwrap();
// Only the regular files should be in the tree
assert_eq!(
tree.entries().map(|(path, _value)| path).collect_vec(),
@@ -883,14 +883,17 @@ fn test_fsmonitor() {
{
let mut locked_wc = wc.start_mutation().unwrap();
let tree_id = snapshot(&mut locked_wc, &[]);
- assert_eq!(tree_id, *repo.store().empty_tree_id());
+ assert_eq!(
+ tree_id,
+ MergedTreeId::Legacy(repo.store().empty_tree_id().clone())
+ );
locked_wc.discard();
}
{
let mut locked_wc = wc.start_mutation().unwrap();
let tree_id = snapshot(&mut locked_wc, &[&foo_path]);
- insta::assert_snapshot!(testutils::dump_tree(repo.store(), &tree_id), @r###"
+ insta::assert_snapshot!(testutils::dump_tree(repo.store(), tree_id.as_legacy_tree_id()), @r###"
tree 205f6b799e7d5c2524468ca006a0131aa57ecce7
file "foo" (257cc5642cb1a054f08cc83f2d943e56fd3ebe99): "foo\n"
"###);
@@ -903,7 +906,7 @@ fn test_fsmonitor() {
&mut locked_wc,
&[&foo_path, &bar_path, &nested_path, &ignored_path],
);
- insta::assert_snapshot!(testutils::dump_tree(repo.store(), &tree_id), @r###"
+ insta::assert_snapshot!(testutils::dump_tree(repo.store(), tree_id.as_legacy_tree_id()), @r###"
tree ab5a0465cc71725a723f28b685844a5bc0f5b599
file "bar" (5716ca5987cbf97d6bb54920bea6adde242d87e6): "bar\n"
file "foo" (257cc5642cb1a054f08cc83f2d943e56fd3ebe99): "foo\n"
@@ -917,7 +920,7 @@ fn test_fsmonitor() {
testutils::write_working_copy_file(&workspace_root, &bar_path, "updated bar\n");
let mut locked_wc = wc.start_mutation().unwrap();
let tree_id = snapshot(&mut locked_wc, &[&foo_path]);
- insta::assert_snapshot!(testutils::dump_tree(repo.store(), &tree_id), @r###"
+ insta::assert_snapshot!(testutils::dump_tree(repo.store(), tree_id.as_legacy_tree_id()), @r###"
tree 2f57ab8f48ae62e3137079f2add9878dfa1d1bcc
file "bar" (5716ca5987cbf97d6bb54920bea6adde242d87e6): "bar\n"
file "foo" (9d053d7c8a18a286dce9b99a59bb058be173b463): "updated foo\n"
@@ -930,7 +933,7 @@ fn test_fsmonitor() {
std::fs::remove_file(foo_path.to_fs_path(&workspace_root)).unwrap();
let mut locked_wc = wc.start_mutation().unwrap();
let tree_id = snapshot(&mut locked_wc, &[&foo_path]);
- insta::assert_snapshot!(testutils::dump_tree(repo.store(), &tree_id), @r###"
+ insta::assert_snapshot!(testutils::dump_tree(repo.store(), tree_id.as_legacy_tree_id()), @r###"
tree 34b83765131477e1a7d72160079daec12c6144e3
file "bar" (5716ca5987cbf97d6bb54920bea6adde242d87e6): "bar\n"
file "path/to/nested" (79c53955ef856f16f2107446bc721c8879a1bd2e): "nested\n"


@@ -16,6 +16,7 @@ use std::cmp::max;
use std::thread;
use assert_matches::assert_matches;
+ use jj_lib::backend::MergedTreeId;
use jj_lib::merged_tree::MergedTree;
use jj_lib::repo::{Repo, StoreFactories};
use jj_lib::repo_path::RepoPath;
@@ -104,7 +105,7 @@ fn test_checkout_parallel() {
for i in 0..num_threads {
let path = RepoPath::from_internal_string(format!("file{i}").as_str());
let tree = testutils::create_tree(repo, &[(&path, "contents")]);
- tree_ids.push(tree.id().clone());
+ tree_ids.push(MergedTreeId::Legacy(tree.id().clone()));
}
// Create another tree just so we can test the update stats reliably from the
@@ -133,12 +134,12 @@ fn test_checkout_parallel() {
let tree = workspace
.repo_loader()
.store()
- .get_tree(&RepoPath::root(), &tree_id)
+ .get_root_tree(&tree_id)
.unwrap();
// The operation ID is not correct, but that doesn't matter for this test
let stats = workspace
.working_copy_mut()
- .check_out(op_id, None, &MergedTree::legacy(tree))
+ .check_out(op_id, None, &tree)
.unwrap();
assert_eq!(stats.updated_files, 0);
assert_eq!(stats.added_files, 1);
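
Here the tree ids handed to the checkout threads become MergedTreeIds, and Store::get_root_tree resolves one directly into a MergedTree, so the explicit MergedTree::legacy(...) wrapping step disappears. A sketch of that checkout step, assuming a Workspace and an OperationId from the surrounding test, with errors collapsed into a boxed error and module paths written from context:

    use std::error::Error;
    use jj_lib::backend::MergedTreeId;
    use jj_lib::op_store::OperationId;
    use jj_lib::working_copy::CheckoutStats;
    use jj_lib::workspace::Workspace;

    fn check_out_merged_tree(
        workspace: &mut Workspace,
        op_id: OperationId,
        tree_id: &MergedTreeId,
    ) -> Result<CheckoutStats, Box<dyn Error>> {
        // get_root_tree() takes the MergedTreeId and returns a MergedTree that can be
        // passed to check_out() as-is.
        let tree = workspace.repo_loader().store().get_root_tree(tree_id)?;
        Ok(workspace.working_copy_mut().check_out(op_id, None, &tree)?)
    }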


@@ -170,13 +170,13 @@ impl TestWorkspace {
let tree_id = locked_wc.snapshot(SnapshotOptions {
max_new_file_size: self.settings.max_new_file_size().unwrap(),
..SnapshotOptions::empty_for_test()
- });
+ })?;
// arbitrary operation id
locked_wc.finish(self.repo.op_id().clone()).unwrap();
Ok(self
.repo
.store()
- .get_tree(&RepoPath::root(), &tree_id?)
+ .get_tree(&RepoPath::root(), &tree_id.to_legacy_tree_id())
.unwrap())
}
}
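
The testutils helper now checks the snapshot Result with ? before finishing the mutation, and converts to a legacy TreeId only at the edge where Store::get_tree still wants one. A sketch of that shape, reconstructed around the lines in the hunk (the signature, the start_mutation line, and the free-function form are assumptions):

    use std::sync::Arc;
    use jj_lib::repo::ReadonlyRepo;
    use jj_lib::repo_path::RepoPath;
    use jj_lib::tree::Tree;
    use jj_lib::working_copy::{SnapshotError, SnapshotOptions, WorkingCopy};

    fn snapshot_to_legacy_tree(
        wc: &mut WorkingCopy,
        repo: &Arc<ReadonlyRepo>,
        max_new_file_size: u64,
    ) -> Result<Tree, SnapshotError> {
        let mut locked_wc = wc.start_mutation().unwrap();
        // The snapshot Result is propagated immediately (note the ?) instead of being
        // carried around and only unwrapped after the lock is finished.
        let tree_id = locked_wc.snapshot(SnapshotOptions {
            max_new_file_size,
            ..SnapshotOptions::empty_for_test()
        })?;
        locked_wc.finish(repo.op_id().clone()).unwrap();
        // get_tree() still takes a legacy TreeId, so convert at the boundary.
        Ok(repo
            .store()
            .get_tree(&RepoPath::root(), &tree_id.to_legacy_tree_id())
            .unwrap())
    }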