mutationstore: copy entries automatically on flush

Summary:
Similar to D7121487 (af8ecd5f80), but for the mutation store. This makes sure
that, at the Rust layer, mutation entries won't get lost after rebasing or
metaediting a set of commits where a subset of the edited commits has mutation
relations.

Unlike the Python layer, the Rust layer also handles mutation chains, so some
of the tests change.
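
For orientation, a minimal sketch of the new behavior (it mirrors the
test_copy_entries Rust test added below; add() is that test's helper, which
records a pred -> succ mutation entry, and the single letters stand for nodes):

    let dir = TempDir::new("mutationstore")?;
    let mut ms = MutationStore::open(dir.path())?;
    add(&mut ms, "P", "E")?;   // existing chain: P -> E
    add(&mut ms, "E", "X")?;   //                 E -> X
    ms.flush()?;
    add(&mut ms, "P", "Q")?;   // P is rewritten again, into Q
    add(&mut ms, "X", "Y")?;   // X, a descendant rewrite of P, becomes Y
    ms.flush()?;               // flush() also records Q -> Y automatically,
                               // so Y stays reachable from Q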

Reviewed By: markbt

Differential Revision: D22174991

fbshipit-source-id: d62f7c1071fc71f939ec8771ac5968b992aa253c
Jun Wu 2020-07-02 13:11:57 -07:00 committed by Facebook GitHub Bot
parent 2f1d35b06e
commit 868c2b0108
7 changed files with 198 additions and 33 deletions


@@ -141,15 +141,19 @@ def createsyntheticentry(repo, preds, succ, op, splitting=None, user=None, date=
)
def recordentries(repo, entries, skipexisting=True):
def recordentries(repo, entries, skipexisting=True, raw=False):
count = 0
with repo.transaction("record-mutation") as tr:
ms = repo._mutationstore
if raw:
add = ms.addraw
else:
add = ms.add
tr.addfinalize("mutation", lambda _tr: ms.flush())
for entry in entries:
if skipexisting and ms.has(entry.succ()):
continue
ms.add(entry)
add(entry)
count += 1
return count
@@ -656,7 +660,7 @@ def toposort(repo, items, nodefn=None):
def unbundle(repo, bundledata):
if enabled(repo):
entries = mutationstore.unbundle(bundledata)
recordentries(repo, entries, skipexisting=True)
recordentries(repo, entries, skipexisting=True, raw=True)
def entriesforbundle(repo, nodes):
@@ -811,7 +815,7 @@ def convertfromobsmarkers(repo):
)
with repo.lock():
count = recordentries(repo, entries, skipexisting=False)
count = recordentries(repo, entries, skipexisting=False, raw=True)
return (len(entries), len(newmut), count)


@@ -186,6 +186,12 @@ py_class!(class mutationstore |py| {
Ok(PyNone)
}
def addraw(&self, entry: &mutationentry) -> PyResult<PyNone> {
let mut ms = self.mut_store(py).borrow_mut();
ms.add_raw(entry.entry(py)).map_pyerr(py)?;
Ok(PyNone)
}
def flush(&self) -> PyResult<PyNone> {
let mut ms = self.mut_store(py).borrow_mut();
ms.flush().map_pyerr(py)?;


@@ -5,6 +5,7 @@ edition = "2018"
[dependencies]
anyhow = "1.0.20"
bitflags = "1"
dag = { path = "../dag" }
indexedlog = { path = "../indexedlog" }
thiserror = "1.0.5"


@@ -28,13 +28,17 @@
//! be ignored.
use anyhow::Result;
use bitflags::bitflags;
use dag::namedag::MemNameDag;
use dag::ops::DagAddHeads;
use dag::DagAlgorithm;
use dag::Set;
use dag::VertexName;
use indexedlog::{
log::{self as ilog, IndexDef, IndexOutput, Log},
DefaultOpenOptions,
};
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Cursor;
use std::path::Path;
@@ -46,6 +50,17 @@ pub use indexedlog::Repair;
pub struct MutationStore {
log: Log,
pending: Vec<MutationEntry>,
}
bitflags! {
pub struct DagFlags: u8 {
/// Include successors.
const SUCCESSORS = 0b1;
/// Include predecessors.
const PREDECESSORS = 0b10;
}
}
const INDEX_PRED: usize = 0;
@@ -105,10 +120,20 @@ impl DefaultOpenOptions<ilog::OpenOptions> for MutationStore {
impl MutationStore {
pub fn open(path: impl AsRef<Path>) -> Result<MutationStore> {
let log = Self::default_open_options().open(path.as_ref())?;
Ok(MutationStore { log })
let pending = Vec::new();
Ok(MutationStore { log, pending })
}
/// Add an entry. Consider adding automatic entries based on this entry.
/// See `flush` for automatic entries.
pub fn add(&mut self, entry: &MutationEntry) -> Result<()> {
self.add_raw(entry)?;
self.pending.push(entry.clone());
Ok(())
}
/// Add an entry. Do not consider adding automatic entries.
pub fn add_raw(&mut self, entry: &MutationEntry) -> Result<()> {
let mut buf = Vec::with_capacity(types::mutation::DEFAULT_ENTRY_SIZE);
entry.serialize(&mut buf)?;
self.log.append(buf.as_slice())?;
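
A sketch (not part of the diff) of how the two entry points differ, and why
the Python callers above pass raw=True for entries that arrive via a bundle or
via obsmarker conversion; remote_entry and local_entry are assumed
MutationEntry values:

    ms.add_raw(&remote_entry)?; // recorded verbatim; never considered for auto-copies
    ms.add(&local_entry)?;      // recorded and kept in `pending`, so that ...
    ms.flush()?;                // ... flush() can derive the automatic copy entries
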
@@ -116,7 +141,72 @@ impl MutationStore {
}
pub fn flush(&mut self) -> Result<()> {
// If P -> Q, X -> Y are being added, and there is an existing chain P
// -> ... -> X, add a Q -> Y marker automatically.
// Note: P must not equal to X or Y.
//
// See also D7121487.
// Prepare for calculation.
let mut pred_map = HashMap::with_capacity(self.pending.len()); // node -> index
let mut pred_nodes = Vec::with_capacity(self.pending.len());
for (i, entry) in self.pending.iter().enumerate() {
let pred = entry.preds[0];
pred_map.insert(pred, i);
pred_nodes.push(pred);
}
let pred_set =
Set::from_static_names(pred_nodes.iter().map(|p| VertexName::copy_from(p.as_ref())));
let dag = self.get_dag_advanced(pred_nodes, DagFlags::SUCCESSORS)?;
let mut new_entries = Vec::new();
// Scan through "X"s.
for entry in &self.pending {
let x = entry.preds[0];
// Find all "P"s, as in P -> ... -> X, and X -> Y.
let x_set = VertexName::copy_from(x.as_ref()).into();
let ps = dag.ancestors(x_set)? & pred_set.clone();
for p in ps.iter()? {
let p = Node::from_slice(p?.as_ref())?;
let y = entry.succ;
if p == x || p == y {
continue;
}
let q = self.pending[pred_map[&p]].succ;
if q == x || q == y || q == p {
continue;
}
// Copy P -> X to Q -> Y.
let copy_entry = match self.get(x)? {
Some(entry) => entry,
_ => continue,
};
let op = if copy_entry.op.ends_with("-copy") {
copy_entry.op.clone()
} else {
format!("{}-copy", &copy_entry.op)
};
// The new entry will be the one returned by `get(y)`.
// It overrides the "x -> y" entry.
let new_entry = MutationEntry {
succ: y,
preds: vec![x, q],
op,
..copy_entry
};
new_entries.push(new_entry);
}
}
let mut buf = Vec::with_capacity(types::mutation::DEFAULT_ENTRY_SIZE);
for entry in new_entries {
buf.clear();
entry.serialize(&mut buf)?;
self.log.append(buf.as_slice())?;
}
self.log.flush()?;
self.pending.clear();
Ok(())
}
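
Concretely, after the second flush() in the P/Q/X/Y scenario from the comment
at the top of flush(), the auto-added entry can be observed like this (a
sketch reusing the n() test helper defined at the bottom of this file, and
assuming the copied entry was originally recorded with op "amend"):

    let copied = ms.get(n("Y"))?.unwrap();          // overrides the plain X -> Y entry
    assert_eq!(copied.preds, vec![n("X"), n("Q")]); // Y now lists both X and Q as predecessors
    assert_eq!(copied.op, "amend-copy");            // "-copy" is appended at most once
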
@@ -167,6 +257,12 @@ impl MutationStore {
/// operations like common ancestors, heads, roots, etc. Parents in the
/// graph are predecessors.
pub fn get_dag(&self, nodes: Vec<Node>) -> Result<MemNameDag> {
self.get_dag_advanced(nodes, DagFlags::SUCCESSORS | DagFlags::PREDECESSORS)
}
/// Advanced version of `get_dag`. Specify whether to include successors or
/// predecessors explicitly.
pub fn get_dag_advanced(&self, nodes: Vec<Node>, flags: DagFlags) -> Result<MemNameDag> {
// Include successors recursively.
let mut to_visit = nodes;
let mut connected = HashSet::new();
@@ -174,10 +270,13 @@ impl MutationStore {
if !connected.insert(node.clone()) {
continue;
}
if flags.contains(DagFlags::SUCCESSORS) {
for entry in self.log.lookup(INDEX_PRED, &node)? {
let entry = MutationEntry::deserialize(&mut Cursor::new(entry?))?;
to_visit.push(entry.succ);
}
}
if flags.contains(DagFlags::PREDECESSORS) {
for entry in self.log.lookup(INDEX_SUCC, &node)? {
let entry = MutationEntry::deserialize(&mut Cursor::new(entry?))?;
for pred in entry.preds {
@@ -185,12 +284,22 @@ impl MutationStore {
}
}
}
let parent_func = |node| -> Result<Vec<VertexName>> {
}
let mut heads = connected
.iter()
.map(|s| VertexName::copy_from(s.as_ref()))
.collect::<Vec<_>>();
let parent_func = move |node| -> Result<Vec<VertexName>> {
let mut result = Vec::new();
for entry in self.log.lookup(INDEX_SUCC, &node)? {
let entry = MutationEntry::deserialize(&mut Cursor::new(entry?))?;
for pred in entry.preds {
result.push(VertexName::copy_from(pred.as_ref()));
if connected.contains(&pred) {
let parent_node = VertexName::copy_from(pred.as_ref());
if parent_node != node && !result.contains(&parent_node) {
result.push(parent_node);
}
}
}
}
Ok(result)
@@ -198,10 +307,6 @@ impl MutationStore {
let parent_func = dag::utils::break_parent_func_cycle(parent_func);
let mut dag = MemNameDag::new();
let mut heads = connected
.into_iter()
.map(|s| VertexName::copy_from(s.as_ref()))
.collect::<Vec<_>>();
heads.sort();
dag.add_heads(parent_func, &heads)?;
Ok(dag)
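
A sketch of the difference between the two lookups (node is an assumed
starting Node; MemNameDag comes from the dag crate added to Cargo.toml above,
and DagFlags is the bitflags type defined earlier in this file):

    // Follow both directions: the full mutation graph around `node`.
    let full = ms.get_dag(vec![node.clone()])?;
    // Successors only: this is what flush() uses to discover existing chains
    // P -> ... -> X without pulling in unrelated predecessor history.
    let succs = ms.get_dag_advanced(vec![node], DagFlags::SUCCESSORS)?;
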
@@ -406,6 +511,54 @@ mod tests {
Ok(())
}
#[test]
fn test_copy_entries() -> Result<()> {
let dir = TempDir::new("mutationstore")?;
let mut ms = MutationStore::open(dir.path())?;
for (pred, succ) in [("P", "E"), ("E", "X")].iter() {
add(&mut ms, pred, succ)?;
}
ms.flush()?;
for (pred, succ) in [("P", "Q"), ("X", "Y")].iter() {
add(&mut ms, pred, succ)?;
}
// Before flush, Q -> Y is not connected.
assert_eq!(
render(&ms, "P")?,
r#"
o 5959595959595959595959595959595959595959 (Y)
o 5858585858585858585858585858585858585858 (X)
o 5151515151515151515151515151515151515151 (Q)
o 4545454545454545454545454545454545454545 (E)
o 5050505050505050505050505050505050505050 (P)"#
);
// After flush, Q -> Y is auto created.
ms.flush()?;
assert_eq!(
render(&ms, "P")?,
r#"
o 5959595959595959595959595959595959595959 (Y)
o 5858585858585858585858585858585858585858 (X)
o 5151515151515151515151515151515151515151 (Q)
o 4545454545454545454545454545454545454545 (E)
o 5050505050505050505050505050505050505050 (P)"#
);
Ok(())
}
/// Create a node from a single-char string.
fn n(s: impl ToString) -> Node {
Node::from_slice(s.to_string().repeat(Node::len()).as_bytes()).unwrap()


@@ -391,14 +391,13 @@ Test copying obsmarkers
* 5577c14fa08d51a4644b9b4b6e001835594cadd2 amend by test at 1970-01-01T00:00:00 from:
26805aba1e600a82e93661149f2313866a221a7b
* 1be7301b35ae8ac3543a07a5d0ce5ca615be709f metaedit-copy by test at 1970-01-01T00:00:00 from:
* 1be7301b35ae8ac3543a07a5d0ce5ca615be709f amend-copy by test at 1970-01-01T00:00:00 from:
|- 5577c14fa08d51a4644b9b4b6e001835594cadd2 amend by test at 1970-01-01T00:00:00 from:
| 26805aba1e600a82e93661149f2313866a221a7b
'- 19437442f9e42aa92f504afb1a352caa3e6040f5 metaedit by test at 1970-01-01T00:00:00 from:
26805aba1e600a82e93661149f2313866a221a7b
Slightly more complex: with double amends
FIXME: This does not work yet.
$ newrepo autorel1
$ setconfig mutation.proxy-obsstore=off experimental.evolution=obsolete
@@ -418,7 +417,7 @@ FIXME: This does not work yet.
|
| o 8:52bc6136aa97@default(draft) D
| |
| o 7:19437442f9e4@default(draft) C
| x 7:19437442f9e4@default(draft) C
|/
o 6:888bb4818188@default(draft) B1
|
@@ -426,6 +425,7 @@ FIXME: This does not work yet.
$ hg log -r 'successors(19437442f9e4)-19437442f9e4' -T '{node}\n'
1be7301b35ae8ac3543a07a5d0ce5ca615be709f
$ hg log -r 'precursors(19437442f9e4)-19437442f9e4' -T '{desc} {node}\n' --hidden
C 26805aba1e600a82e93661149f2313866a221a7b
@@ -435,9 +435,11 @@ FIXME: This does not work yet.
bf080f2103efc214ac3a4638254d4c5370a9294b amend by test at 1970-01-01T00:00:00 from:
26805aba1e600a82e93661149f2313866a221a7b
* 1be7301b35ae8ac3543a07a5d0ce5ca615be709f metaedit by test at 1970-01-01T00:00:00 from:
5577c14fa08d51a4644b9b4b6e001835594cadd2 amend by test at 1970-01-01T00:00:00 from:
bf080f2103efc214ac3a4638254d4c5370a9294b amend by test at 1970-01-01T00:00:00 from:
* 1be7301b35ae8ac3543a07a5d0ce5ca615be709f amend-copy by test at 1970-01-01T00:00:00 from:
|- 5577c14fa08d51a4644b9b4b6e001835594cadd2 amend by test at 1970-01-01T00:00:00 from:
| bf080f2103efc214ac3a4638254d4c5370a9294b amend by test at 1970-01-01T00:00:00 from:
| 26805aba1e600a82e93661149f2313866a221a7b
'- 19437442f9e42aa92f504afb1a352caa3e6040f5 metaedit by test at 1970-01-01T00:00:00 from:
26805aba1e600a82e93661149f2313866a221a7b
@@ -452,7 +454,7 @@ Test empty commit
|
| o 8:52bc6136aa97@default(draft) D
| |
| o 7:19437442f9e4@default(draft) C
| x 7:19437442f9e4@default(draft) C
|/
o 6:888bb4818188@default(draft) B1
|


@@ -1126,7 +1126,6 @@ Many splits and folds:
O
Metaedit with descendant amended commits
FIXME: metaedit-copy records are not written
$ cd ..
$ newrepo
@@ -1148,11 +1147,11 @@ FIXME: metaedit-copy records are not written
|
| o E
| |
| o C2
| x C2 (Rewritten using rewrite into C4)
|/
| o D
| |
| o C
| x C (Rewritten using amend-copy into C4) (Rewritten using amend-copy into C2)
|/
o B
|
@@ -1194,7 +1193,7 @@ Metaedit with descendant folded commits
|
| o D
| |
| x C (Rewritten using metaedit-copy into F)
| x C (Rewritten using fold-copy into F)
|/
o B
|
@@ -1223,7 +1222,7 @@ Metaedit automatic rebase of amended commit
|
| o D
| |
| o C
| x C (Rewritten using amend-copy into C2)
|/
o B1
|


@@ -16,7 +16,7 @@
$ cp -R ../repo ../repob
FIXME: This does not quite work yet with singletransaction.
C -> C2 relation is copied with singletransaction.
$ hg rebase -s $B -d $Z --config rebase.singletransaction=true
rebasing 112478962961 "B"
@@ -28,7 +28,7 @@ FIXME: This does not quite work yet with singletransaction.
|
| o f7f4f5b9173a D
| |
| o e709467ba6ed C
| x e709467ba6ed C
|/
o d74d19e598c8 B
|