mononoke (+ rust/shed/async_unit): update async_unit to expect async fn's

Summary:
This allows code that is being exercised under async_unit to call into code
that expects a Tokio 0.2 environment (e.g. 0.2 timers).
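
For instance, a test body can now use Tokio 0.2 primitives directly. A minimal
sketch (not from this diff; it assumes the Tokio 0.2 timer API,
`tokio::time::delay_for`):

```rust
use fbinit::FacebookInit;

#[fbinit::test]
fn timers_work(_fb: FacebookInit) {
    async_unit::tokio_unit_test(async move {
        // Tokio 0.2 timers need a Tokio 0.2 runtime, which async_unit now provides.
        tokio::time::delay_for(std::time::Duration::from_millis(10)).await;
    })
}
```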

Unfortunately, this requires turning off LSAN for the async_unit tests, since
it looks like LSAN and Tokio 0.2 don't work very well together, resulting in
LSAN reporting leaked memory for some TLS structures that were initialized by
tokio-preview (regardless of whether the Runtime is being dropped):
https://fb.workplace.com/groups/rust.language/permalink/3249964938385432/

Considering that async_unit is effectively only used in Mononoke, and Mononoke
already turns off LSAN in tests for precisely this reason, it's probably
reasonable to do the same here.

The main body of changes here updates the majority of our tests to stop
calling wait() and to use this new async_unit everywhere. This is effectively
a big batch conversion of our tests to async fns instead of the former
approaches. I've also updated a substantial number of utility functions to be
async fns.
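
To illustrate the shape of the conversion, here is a minimal sketch (not taken
from the diff; `do_work` is a hypothetical helper returning a futures 0.1
future, and `.compat()` is the `futures_preview::compat::Future01CompatExt`
adapter):

```rust
use fbinit::FacebookInit;
use futures_preview::compat::Future01CompatExt;

// Before: a sync closure driving 0.1 futures through run_future()/wait():
//
//     async_unit::tokio_unit_test(move || {
//         let res = run_future(do_work(fb)).unwrap();
//         assert!(res);
//     })

// After: the test body is an async block, and 0.1 futures are bridged
// into std futures with .compat() and awaited directly.
#[fbinit::test]
fn my_test(fb: FacebookInit) {
    async_unit::tokio_unit_test(async move {
        let res = do_work(fb).compat().await.unwrap();
        assert!(res);
    })
}
```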

A few notable changes here:

- Some pushrebase tests were pretty flaky: the race they look for isn't
  deterministic. I added some actual waiting (using pushrebase hooks) to make
  it more deterministic. This is somewhat copy-pasted from the globalrev hook
  (where I first introduced this approach), but it will do for now.
- The multiplexblob tests don't work at all with new futures, because they
  call `poll()` all over the place. I've updated them to new futures, which
  required a bit of reworking (see the polling sketch after this list).
- I took out a couple of tests in async_unit that were broken anyway.
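
For reference, the polling pattern those tests now use looks roughly like this
(a sketch, not the exact test code; `make_put_future` stands in for a call
such as `bs.put(ctx, key, value)` that returns a futures 0.1 future):

```rust
use futures::Future as _; // futures 0.1 trait, for map_err()
use futures_preview::{
    compat::Future01CompatExt,
    future::FutureExt as _, // for boxed() and poll_unpin()
    task::{Context, Poll},
};

fn poll_by_hand() {
    // A noop waker gives us a Context, so the test can poll manually.
    let waker = futures_preview::task::noop_waker();
    let mut task_ctx = Context::from_waker(&waker);

    let mut put_fut = make_put_future()
        .map_err(|_| ()) // Result<_, ()> so assert_eq! can compare the output
        .compat()        // adapt futures 0.1 -> std::future::Future
        .boxed();        // pin it, which enables poll_unpin()

    // Nothing has ticked yet, so the future must be pending.
    assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);

    // ... tick the underlying Tickable blobstores, then:
    // assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Ready(Ok(())));
}
```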

Reviewed By: StanislavGlebik

Differential Revision: D19902539

fbshipit-source-id: 352b4a531ef5fa855114c1dd8bb4d70ed967dd55
Thomas Orozco, 2020-02-18 01:52:44 -08:00, committed by Facebook Github Bot
parent f7941272c1
commit 16384599a8
53 changed files with 6543 additions and 5902 deletions


@ -54,17 +54,24 @@ use tokio_compat::runtime::Runtime;
use tracing_blobstore::TracingBlobstore;
use utils::{
create_changeset_no_parents, create_changeset_one_parent, get_empty_eager_repo,
get_empty_lazy_repo, run_future, string_to_nodehash, to_mpath, upload_file_no_parents,
get_empty_lazy_repo, string_to_nodehash, to_mpath, upload_file_no_parents,
upload_file_one_parent, upload_manifest_no_parents, upload_manifest_one_parent,
};
fn get_content(ctx: CoreContext, repo: &BlobRepo, id: HgFileNodeId) -> Result<bytes::Bytes, Error> {
let content_id = run_future(id.load(ctx.clone(), repo.blobstore()))?.content_id();
async fn get_content(
ctx: CoreContext,
repo: &BlobRepo,
id: HgFileNodeId,
) -> Result<bytes::Bytes, Error> {
let content_id = (id.load(ctx.clone(), repo.blobstore()))
.compat()
.await?
.content_id();
let content = filestore::fetch_stream(repo.blobstore(), ctx.clone(), content_id).concat2();
run_future(content)
(content).compat().await
}
fn upload_blob_no_parents(fb: FacebookInit, repo: BlobRepo) {
async fn upload_blob_no_parents(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let expected_hash = HgFileNodeId::new(string_to_nodehash(
"c3127cdbf2eae0f09653f9237d85c8436425b246",
@ -72,14 +79,16 @@ fn upload_blob_no_parents(fb: FacebookInit, repo: BlobRepo) {
let fake_path = RepoPath::file("fake/file").expect("Can't generate fake RepoPath");
// The blob does not exist...
assert!(get_content(ctx.clone(), &repo, expected_hash).is_err());
assert!(get_content(ctx.clone(), &repo, expected_hash)
.await
.is_err());
// We upload it...
let (hash, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path);
assert!(hash == expected_hash);
// The entry we're given is correct...
let (entry, path) = run_future(future).unwrap();
let (entry, path) = future.compat().await.unwrap();
assert!(path == fake_path);
assert!(HgFileNodeId::new(entry.get_hash().into_nodehash()) == expected_hash);
assert!(entry.get_type() == manifest::Type::File(FileType::Regular));
@ -87,16 +96,16 @@ fn upload_blob_no_parents(fb: FacebookInit, repo: BlobRepo) {
entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement"))
);
let content = run_future(entry.get_content(ctx.clone())).unwrap();
let content = entry.get_content(ctx.clone()).compat().await.unwrap();
let stream = match content {
manifest::Content::File(stream) => stream,
_ => panic!(),
};
let bytes = run_future(stream.concat2()).unwrap();
let bytes = stream.concat2().compat().await.unwrap();
assert_eq!(bytes.into_bytes().as_ref(), &b"blob"[..]);
// And the blob now exists
let bytes = get_content(ctx, &repo, expected_hash).unwrap();
let bytes = get_content(ctx, &repo, expected_hash).await.unwrap();
assert!(bytes.as_ref() == &b"blob"[..]);
}
@ -106,7 +115,7 @@ test_both_repotypes!(
upload_blob_no_parents_eager
);
fn upload_blob_one_parent(fb: FacebookInit, repo: BlobRepo) {
async fn upload_blob_one_parent(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let expected_hash = HgFileNodeId::new(string_to_nodehash(
"c2d60b35a8e7e034042a9467783bbdac88a0d219",
@ -116,14 +125,21 @@ fn upload_blob_one_parent(fb: FacebookInit, repo: BlobRepo) {
let (p1, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path);
// The blob does not exist...
let _ = get_content(ctx.clone(), &repo, expected_hash).unwrap_err();
let _ = get_content(ctx.clone(), &repo, expected_hash)
.await
.unwrap_err();
// We upload it...
let (hash, future2) = upload_file_one_parent(ctx.clone(), &repo, "blob", &fake_path, p1);
assert!(hash == expected_hash);
// The entry we're given is correct...
let (entry, path) = run_future(future2.join(future).map(|(item, _)| item)).unwrap();
let (entry, path) = future2
.join(future)
.map(|(item, _)| item)
.compat()
.await
.unwrap();
assert!(path == fake_path);
assert!(HgFileNodeId::new(entry.get_hash().into_nodehash()) == expected_hash);
@ -132,16 +148,18 @@ fn upload_blob_one_parent(fb: FacebookInit, repo: BlobRepo) {
entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement"))
);
let content = run_future(entry.get_content(ctx.clone())).unwrap();
let content = entry.get_content(ctx.clone()).compat().await.unwrap();
let stream = match content {
manifest::Content::File(stream) => stream,
_ => panic!(),
};
let bytes = run_future(stream.concat2()).unwrap();
let bytes = stream.concat2().compat().await.unwrap();
assert_eq!(bytes.into_bytes().as_ref(), &b"blob"[..]);
// And the blob now exists
let bytes = get_content(ctx.clone(), &repo, expected_hash).unwrap();
let bytes = get_content(ctx.clone(), &repo, expected_hash)
.await
.unwrap();
assert!(bytes.as_ref() == &b"blob"[..]);
}
@ -151,7 +169,7 @@ test_both_repotypes!(
upload_blob_one_parent_eager
);
fn create_one_changeset(fb: FacebookInit, repo: BlobRepo) {
async fn create_one_changeset(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
@ -186,7 +204,7 @@ fn create_one_changeset(fb: FacebookInit, repo: BlobRepo) {
vec![file_future, manifest_dir_future],
);
let bonsai_hg = run_future(commit.get_completed_changeset()).unwrap();
let bonsai_hg = commit.get_completed_changeset().compat().await.unwrap();
let cs = &bonsai_hg.1;
assert!(cs.manifestid() == HgManifestId::new(roothash));
assert!(cs.user() == author.as_bytes());
@ -198,7 +216,7 @@ fn create_one_changeset(fb: FacebookInit, repo: BlobRepo) {
);
// And check the file blob is present
let bytes = get_content(ctx.clone(), &repo, filehash).unwrap();
let bytes = get_content(ctx.clone(), &repo, filehash).await.unwrap();
assert!(bytes.as_ref() == &b"blob"[..]);
}
@ -208,7 +226,7 @@ test_both_repotypes!(
create_one_changeset_eager
);
fn create_two_changesets(fb: FacebookInit, repo: BlobRepo) {
async fn create_two_changesets(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
@ -257,11 +275,11 @@ fn create_two_changesets(fb: FacebookInit, repo: BlobRepo) {
commit1.clone(),
);
let (commit1, commit2) = run_future(
commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()),
)
let (commit1, commit2) = (commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()))
.compat()
.await
.unwrap();
let commit1 = &commit1.1;
@ -287,7 +305,7 @@ test_both_repotypes!(
create_two_changesets_eager
);
fn check_bonsai_creation(fb: FacebookInit, repo: BlobRepo) {
async fn check_bonsai_creation(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
@ -316,12 +334,20 @@ fn check_bonsai_creation(fb: FacebookInit, repo: BlobRepo) {
vec![file_future, manifest_dir_future],
);
let commit = run_future(commit.get_completed_changeset()).unwrap();
let commit = commit.get_completed_changeset().compat().await.unwrap();
let commit = &commit.1;
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx.clone(), commit.get_changeset_id())).unwrap();
let bonsai_cs_id = repo
.get_bonsai_from_hg(ctx.clone(), commit.get_changeset_id())
.compat()
.await
.unwrap();
assert!(bonsai_cs_id.is_some());
let bonsai = run_future(bonsai_cs_id.unwrap().load(ctx.clone(), repo.blobstore())).unwrap();
let bonsai = bonsai_cs_id
.unwrap()
.load(ctx.clone(), repo.blobstore())
.compat()
.await
.unwrap();
assert_eq!(
bonsai
.file_changes()
@ -337,7 +363,7 @@ test_both_repotypes!(
check_bonsai_creation_eager
);
fn check_bonsai_creation_with_rename(fb: FacebookInit, repo: BlobRepo) {
async fn check_bonsai_creation_with_rename(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let parent = {
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
@ -387,19 +413,29 @@ fn check_bonsai_creation_with_rename(fb: FacebookInit, repo: BlobRepo) {
)
};
let parent_cs = run_future(parent.get_completed_changeset()).unwrap();
let parent_cs = parent.get_completed_changeset().compat().await.unwrap();
let parent_cs = &parent_cs.1;
let child_cs = run_future(child.get_completed_changeset()).unwrap();
let child_cs = child.get_completed_changeset().compat().await.unwrap();
let child_cs = &child_cs.1;
let parent_bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx.clone(), parent_cs.get_changeset_id()))
.unwrap()
.unwrap();
let parent_bonsai_cs_id = repo
.get_bonsai_from_hg(ctx.clone(), parent_cs.get_changeset_id())
.compat()
.await
.unwrap()
.unwrap();
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx.clone(), child_cs.get_changeset_id())).unwrap();
let bonsai = run_future(bonsai_cs_id.unwrap().load(ctx.clone(), repo.blobstore())).unwrap();
let bonsai_cs_id = repo
.get_bonsai_from_hg(ctx.clone(), child_cs.get_changeset_id())
.compat()
.await
.unwrap();
let bonsai = bonsai_cs_id
.unwrap()
.load(ctx.clone(), repo.blobstore())
.compat()
.await
.unwrap();
let fc = bonsai.file_changes().collect::<BTreeMap<_, _>>();
let file = MPath::new("file").unwrap();
assert!(!fc[&file].is_some());
@ -417,7 +453,7 @@ test_both_repotypes!(
check_bonsai_creation_with_rename_eager
);
fn create_bad_changeset(fb: FacebookInit, repo: BlobRepo) {
async fn create_bad_changeset(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let dirhash = string_to_nodehash("c2d60b35a8e7e034042a9467783bbdac88a0d219");
@ -431,7 +467,7 @@ fn create_bad_changeset(fb: FacebookInit, repo: BlobRepo) {
let commit =
create_changeset_no_parents(fb, &repo, root_manifest_future.map(Some).boxify(), vec![]);
run_future(commit.get_completed_changeset()).unwrap();
commit.get_completed_changeset().compat().await.unwrap();
}
test_both_repotypes!(
@ -441,7 +477,7 @@ test_both_repotypes!(
create_bad_changeset_eager
);
fn upload_entries_finalize_success(fb: FacebookInit, repo: BlobRepo) {
async fn upload_entries_finalize_success(fb: FacebookInit, repo: BlobRepo) {
let ctx = CoreContext::test_mock(fb);
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
@ -456,16 +492,25 @@ fn upload_entries_finalize_success(fb: FacebookInit, repo: BlobRepo) {
&RepoPath::root(),
);
let (file_blob, _) = run_future(file_future).unwrap();
let (root_mf_blob, _) = run_future(root_manifest_future).unwrap();
let (file_blob, _) = file_future.compat().await.unwrap();
let (root_mf_blob, _) = root_manifest_future.compat().await.unwrap();
let entries = UploadEntries::new(repo.get_blobstore(), ScubaSampleBuilder::with_discard());
run_future(entries.process_root_manifest(ctx.clone(), &root_mf_blob)).unwrap();
(entries.process_root_manifest(ctx.clone(), &root_mf_blob))
.compat()
.await
.unwrap();
run_future(entries.process_one_entry(ctx.clone(), &file_blob, fake_file_path)).unwrap();
(entries.process_one_entry(ctx.clone(), &file_blob, fake_file_path))
.compat()
.await
.unwrap();
run_future(entries.finalize(ctx.clone(), HgManifestId::new(roothash), vec![])).unwrap();
(entries.finalize(ctx.clone(), HgManifestId::new(roothash), vec![]))
.compat()
.await
.unwrap();
}
test_both_repotypes!(
@ -474,7 +519,7 @@ test_both_repotypes!(
upload_entries_finalize_success_eager
);
fn upload_entries_finalize_fail(fb: FacebookInit, repo: BlobRepo) {
async fn upload_entries_finalize_fail(fb: FacebookInit, repo: BlobRepo) {
let entries = UploadEntries::new(repo.get_blobstore(), ScubaSampleBuilder::with_discard());
let ctx = CoreContext::test_mock(fb);
@ -486,15 +531,20 @@ fn upload_entries_finalize_fail(fb: FacebookInit, repo: BlobRepo) {
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
let (root_mf_blob, _) = run_future(root_manifest_future).unwrap();
let (root_mf_blob, _) = (root_manifest_future).compat().await.unwrap();
run_future(entries.process_root_manifest(ctx.clone(), &root_mf_blob)).unwrap();
(entries.process_root_manifest(ctx.clone(), &root_mf_blob))
.compat()
.await
.unwrap();
let res = run_future(entries.finalize(
let res = (entries.finalize(
ctx.clone(),
HgManifestId::new(root_mf_blob.get_hash().into_nodehash()),
vec![],
));
))
.compat()
.await;
assert!(res.is_err());
}
@ -507,9 +557,9 @@ test_both_repotypes!(
#[fbinit::test]
fn test_compute_changed_files_no_parents(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
let nodehash = string_to_nodehash("051946ed218061e925fb120dac02634f9ad40ae2");
let expected = vec![
MPath::new(b"1").unwrap(),
@ -518,17 +568,15 @@ fn test_compute_changed_files_no_parents(fb: FacebookInit) {
MPath::new(b"dir2/file_1_in_dir2").unwrap(),
];
let cs =
run_future(HgChangesetId::new(nodehash).load(ctx.clone(), repo.blobstore())).unwrap();
let cs = (HgChangesetId::new(nodehash).load(ctx.clone(), repo.blobstore()))
.compat()
.await
.unwrap();
let diff = run_future(compute_changed_files(
ctx.clone(),
repo.clone(),
cs.manifestid(),
None,
None,
))
.unwrap();
let diff = (compute_changed_files(ctx.clone(), repo.clone(), cs.manifestid(), None, None))
.compat()
.await
.unwrap();
assert!(
diff == expected,
"Got {:?}, expected {:?}\n",
@ -540,12 +588,12 @@ fn test_compute_changed_files_no_parents(fb: FacebookInit) {
#[fbinit::test]
fn test_compute_changed_files_one_parent(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Note that this is a commit and its parent commit, so you can use:
// hg log -T"{node}\n{files % ' MPath::new(b\"{file}\").unwrap(),\\n'}\\n" -r $HASH
// to see how Mercurial would compute the files list and confirm that it's the same
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
let nodehash = string_to_nodehash("051946ed218061e925fb120dac02634f9ad40ae2");
let parenthash = string_to_nodehash("d261bc7900818dea7c86935b3fb17a33b2e3a6b4");
let expected = vec![
@ -558,19 +606,25 @@ fn test_compute_changed_files_one_parent(fb: FacebookInit) {
MPath::new(b"dir1/subdir1/subsubdir2/file_2").unwrap(),
];
let cs =
run_future(HgChangesetId::new(nodehash).load(ctx.clone(), repo.blobstore())).unwrap();
let cs = (HgChangesetId::new(nodehash).load(ctx.clone(), repo.blobstore()))
.compat()
.await
.unwrap();
let parent_cs =
run_future(HgChangesetId::new(parenthash).load(ctx.clone(), repo.blobstore())).unwrap();
let parent_cs = (HgChangesetId::new(parenthash).load(ctx.clone(), repo.blobstore()))
.compat()
.await
.unwrap();
let diff = run_future(compute_changed_files(
let diff = (compute_changed_files(
ctx.clone(),
repo.clone(),
cs.manifestid(),
Some(parent_cs.manifestid()),
None,
))
.compat()
.await
.unwrap();
assert!(
diff == expected,
@ -618,20 +672,9 @@ fn make_file_change(
#[fbinit::test]
fn test_get_manifest_from_bonsai(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = merge_uneven::getrepo(fb);
let get_manifest_for_changeset = {
cloned!(ctx, repo);
move |cs_nodehash: &str| -> HgManifestId {
run_future(
HgChangesetId::new(string_to_nodehash(cs_nodehash))
.load(ctx.clone(), repo.blobstore()),
)
.unwrap()
.manifestid()
}
};
let repo = merge_uneven::getrepo(fb).await;
let get_entries = {
cloned!(ctx, repo);
move |ms_hash: HgManifestId| -> BoxFuture<HashMap<String, Box<dyn HgEntry + Sync>>, Error> {
@ -657,20 +700,36 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
// 5: 5
// base: branch1
// branch: 4
let ms1 = get_manifest_for_changeset("264f01429683b3dd8042cb3979e8bf37007118bc");
let ms1 = HgChangesetId::new(string_to_nodehash(
"264f01429683b3dd8042cb3979e8bf37007118bc",
))
.load(ctx.clone(), repo.blobstore())
.compat()
.await
.unwrap()
.manifestid();
// #CONTENT
// base: base
// branch: 4
let ms2 = get_manifest_for_changeset("16839021e338500b3cf7c9b871c8a07351697d68");
let ms2 = HgChangesetId::new(string_to_nodehash(
"16839021e338500b3cf7c9b871c8a07351697d68",
))
.load(ctx.clone(), repo.blobstore())
.compat()
.await
.unwrap()
.manifestid();
// fails with conflict
{
let ms_hash = run_future(repo.get_manifest_from_bonsai(
let ms_hash = (repo.get_manifest_from_bonsai(
ctx.clone(),
make_bonsai_changeset(None, None, vec![]),
vec![ms1, ms2],
));
))
.compat()
.await;
assert!(ms_hash
.expect_err("should have failed")
.to_string()
@ -679,13 +738,15 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
// resolves same content different parents for `branch` file
{
let ms_hash = run_future(repo.get_manifest_from_bonsai(
let ms_hash = (repo.get_manifest_from_bonsai(
ctx.clone(),
make_bonsai_changeset(None, None, vec![("base", None)]),
vec![ms1, ms2],
))
.compat()
.await
.expect("merge should have succeeded");
let entries = run_future(get_entries(ms_hash)).unwrap();
let entries = (get_entries(ms_hash)).compat().await.unwrap();
assert!(entries.get("1").is_some());
assert!(entries.get("2").is_some());
@ -695,8 +756,10 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
assert!(entries.get("base").is_none());
// check trivial merge parents
let (ms1_entries, ms2_entries) =
run_future(get_entries(ms1).join(get_entries(ms2))).unwrap();
let (ms1_entries, ms2_entries) = (get_entries(ms1).join(get_entries(ms2)))
.compat()
.await
.unwrap();
let mut br_expected_parents = HashSet::new();
br_expected_parents.insert(
ms1_entries
@ -714,7 +777,9 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
);
let br = entries.get("branch").expect("trivial merge should succeed");
let br_parents = run_future(br.get_parents(ctx.clone()))
let br_parents = (br.get_parents(ctx.clone()))
.compat()
.await
.unwrap()
.into_iter()
.collect::<HashSet<_>>();
@ -724,21 +789,25 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
// add file
{
let content_expected = &b"some awesome content"[..];
let fc = run_future(make_file_change(ctx.clone(), content_expected, &repo)).unwrap();
let fc = (make_file_change(ctx.clone(), content_expected, &repo))
.compat()
.await
.unwrap();
let bcs = make_bonsai_changeset(None, None, vec![("base", None), ("new", Some(fc))]);
let ms_hash =
run_future(repo.get_manifest_from_bonsai(ctx.clone(), bcs, vec![ms1, ms2]))
.expect("adding new file should not produce coflict");
let entries = run_future(get_entries(ms_hash)).unwrap();
let ms_hash = (repo.get_manifest_from_bonsai(ctx.clone(), bcs, vec![ms1, ms2]))
.compat()
.await
.expect("adding new file should not produce coflict");
let entries = (get_entries(ms_hash)).compat().await.unwrap();
let new = entries.get("new").expect("new file should be in entries");
let stream = match run_future(new.get_content(ctx.clone())).unwrap() {
let stream = match (new.get_content(ctx.clone())).compat().await.unwrap() {
manifest::Content::File(stream) => stream,
_ => panic!("content type mismatch"),
};
let bytes = run_future(stream.concat2()).unwrap();
let bytes = (stream.concat2()).compat().await.unwrap();
assert_eq!(bytes.into_bytes().as_ref(), content_expected.as_ref());
let new_parents = run_future(new.get_parents(ctx.clone())).unwrap();
let new_parents = (new.get_parents(ctx.clone())).compat().await.unwrap();
assert_eq!(new_parents, HgParents::None);
}
});
@ -746,21 +815,22 @@ fn test_get_manifest_from_bonsai(fb: FacebookInit) {
#[fbinit::test]
fn test_case_conflict_in_manifest(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let get_manifest_for_changeset = |cs_id: HgChangesetId| -> HgManifestId {
run_future(cs_id.load(ctx.clone(), repo.blobstore()))
.unwrap()
.manifestid()
};
let repo = many_files_dirs::getrepo(fb).await;
let hg_cs = HgChangesetId::new(string_to_nodehash(
"2f866e7e549760934e31bf0420a873f65100ad63",
));
let mf = get_manifest_for_changeset(hg_cs);
let bonsai_parent = run_future(repo.get_bonsai_from_hg(ctx.clone(), hg_cs))
let mf = (hg_cs.load(ctx.clone(), repo.blobstore()))
.compat()
.await
.unwrap()
.manifestid();
let bonsai_parent = (repo.get_bonsai_from_hg(ctx.clone(), hg_cs))
.compat()
.await
.unwrap()
.unwrap();
@ -778,19 +848,29 @@ fn test_case_conflict_in_manifest(fb: FacebookInit) {
ctx.clone(),
btreemap! {*path => Some("caseconflicttest")},
repo.clone(),
),
);
)
.await,
)
.await;
let child_hg_cs =
run_future(repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id.clone())).unwrap();
let child_mf = get_manifest_for_changeset(child_hg_cs);
let child_hg_cs = (repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id.clone()))
.compat()
.await
.unwrap();
let child_mf = (child_hg_cs.load(ctx.clone(), repo.blobstore()))
.compat()
.await
.unwrap()
.manifestid();
assert_eq!(
run_future(repo.check_case_conflict_in_manifest(
(repo.check_case_conflict_in_manifest(
ctx.clone(),
mf,
child_mf,
MPath::new(path).unwrap()
))
.compat()
.await
.unwrap(),
*result,
"{} expected to {} cause conflict",
@ -803,7 +883,7 @@ fn test_case_conflict_in_manifest(fb: FacebookInit) {
#[fbinit::test]
fn test_case_conflict_two_changeset(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = get_empty_lazy_repo();
@ -845,18 +925,18 @@ fn test_case_conflict_two_changeset(fb: FacebookInit) {
)
};
assert!(run_future(
commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()),
)
.is_err());
assert!(commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset())
.compat()
.await
.is_err());
});
}
#[fbinit::test]
fn test_case_conflict_inside_one_changeset(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = get_empty_lazy_repo();
let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath");
@ -881,13 +961,13 @@ fn test_case_conflict_inside_one_changeset(fb: FacebookInit) {
vec![file_future_1, file_future_2],
);
assert!(run_future(commit1.get_completed_changeset()).is_err());
assert!((commit1.get_completed_changeset()).compat().await.is_err());
});
}
#[fbinit::test]
fn test_no_case_conflict_removal(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = get_empty_lazy_repo();
@ -929,18 +1009,18 @@ fn test_no_case_conflict_removal(fb: FacebookInit) {
)
};
assert!(run_future(
commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()),
)
assert!((commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()))
.compat()
.await
.is_ok());
});
}
#[fbinit::test]
fn test_no_case_conflict_removal_dir(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = get_empty_lazy_repo();
@ -1001,24 +1081,25 @@ fn test_no_case_conflict_removal_dir(fb: FacebookInit) {
)
};
assert!(run_future(
commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()),
)
assert!((commit1
.get_completed_changeset()
.join(commit2.get_completed_changeset()))
.compat()
.await
.is_ok());
});
}
#[fbinit::test]
fn test_hg_commit_generation_simple(fb: FacebookInit) {
let repo = fixtures::linear::getrepo(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
let repo = runtime.block_on_std(fixtures::linear::getrepo(fb));
let bcs = create_bonsai_changeset(vec![]);
let bcs_id = bcs.get_changeset_id();
let ctx = CoreContext::test_mock(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime
.block_on(blobrepo::save_bonsai_changesets(
vec![bcs],
@ -1034,7 +1115,9 @@ fn test_hg_commit_generation_simple(fb: FacebookInit) {
#[fbinit::test]
fn test_hg_commit_generation_stack(fb: FacebookInit) {
let repo = fixtures::linear::getrepo(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
let repo = runtime.block_on_std(fixtures::linear::getrepo(fb));
let mut changesets = vec![];
let bcs = create_bonsai_changeset(vec![]);
@ -1051,7 +1134,6 @@ fn test_hg_commit_generation_stack(fb: FacebookInit) {
let top_of_stack = changesets.last().unwrap().clone().get_changeset_id();
let ctx = CoreContext::test_mock(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime
.block_on(blobrepo::save_bonsai_changesets(
changesets,
@ -1069,7 +1151,7 @@ fn test_hg_commit_generation_stack(fb: FacebookInit) {
fn test_hg_commit_generation_one_after_another(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
let repo = fixtures::linear::getrepo(fb);
let repo = runtime.block_on_std(fixtures::linear::getrepo(fb));
let first_bcs = create_bonsai_changeset(vec![]);
let first_bcs_id = first_bcs.get_changeset_id();
@ -1100,7 +1182,7 @@ fn test_hg_commit_generation_one_after_another(fb: FacebookInit) {
fn test_hg_commit_generation_diamond(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
let repo = fixtures::linear::getrepo(fb);
let repo = runtime.block_on_std(fixtures::linear::getrepo(fb));
let last_bcs_id = runtime
.block_on(fixtures::save_diamond_commits(
@ -1120,7 +1202,7 @@ fn test_hg_commit_generation_diamond(fb: FacebookInit) {
fn test_hg_commit_generation_many_diamond(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
let repo = fixtures::many_diamonds::getrepo(fb, &mut runtime);
let repo = runtime.block_on_std(fixtures::many_diamonds::getrepo(fb));
let book = bookmarks::BookmarkName::new("master").unwrap();
let bcs_id = runtime
.block_on(repo.get_bonsai_bookmark(ctx.clone(), &book))


@ -11,8 +11,6 @@ use anyhow::Error;
use ascii::AsAsciiStr;
use bytes::Bytes;
use fbinit::FacebookInit;
use futures::executor::spawn;
use futures::future::Future;
use futures::stream::futures_unordered;
use futures_ext::{BoxFuture, StreamExt};
use scuba_ext::ScubaSampleBuilder;
@ -44,15 +42,15 @@ macro_rules! test_both_repotypes {
($impl_name:ident, $lazy_test:ident, $eager_test:ident) => {
#[fbinit::test]
fn $lazy_test(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
$impl_name(fb, get_empty_lazy_repo());
async_unit::tokio_unit_test(async move {
$impl_name(fb, get_empty_lazy_repo()).await;
})
}
#[fbinit::test]
fn $eager_test(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
$impl_name(fb, get_empty_eager_repo());
async_unit::tokio_unit_test(async move {
$impl_name(fb, get_empty_eager_repo()).await;
})
}
};
@ -60,16 +58,16 @@ macro_rules! test_both_repotypes {
#[fbinit::test]
#[should_panic]
fn $lazy_test(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
$impl_name(fb, get_empty_lazy_repo());
async_unit::tokio_unit_test(async move {
$impl_name(fb, get_empty_lazy_repo()).await;
})
}
#[fbinit::test]
#[should_panic]
fn $eager_test(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
$impl_name(fb, get_empty_eager_repo());
async_unit::tokio_unit_test(async move {
$impl_name(fb, get_empty_eager_repo()).await;
})
}
};
@ -253,13 +251,6 @@ pub fn string_to_nodehash(hash: &str) -> HgNodeHash {
HgNodeHash::from_ascii_str(hash.as_ascii_str().unwrap()).unwrap()
}
pub fn run_future<F>(future: F) -> Result<F::Item, F::Error>
where
F: Future,
{
spawn(future).wait_future()
}
pub fn to_mpath(path: RepoPath) -> Result<MPath, Error> {
let bad_mpath = Error::msg("RepoPath did not convert to MPath");
path.into_mpath().ok_or(bad_mpath)


@ -26,10 +26,10 @@ mod test {
#[fbinit::test]
fn test(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = $repo::getrepo(fb);
let repo = $repo::getrepo(fb).await;
let heads = repo.get_heads_maybe_stale(ctx.clone()).collect();
let verify = BonsaiMFVerify {


@ -20,8 +20,12 @@ use context::CoreContext;
use fbinit::FacebookInit;
use futures::future::{Future, IntoFuture};
use futures::sync::oneshot;
use futures::Async;
use futures_ext::{BoxFuture, FutureExt};
use futures_preview::{
compat::Future01CompatExt,
future::FutureExt as _,
task::{Context, Poll},
};
use lock_ext::LockExt;
use metaconfig_types::{BlobstoreId, MultiplexId, ScrubAction};
use mononoke_types::BlobstoreBytes;
@ -145,7 +149,10 @@ fn make_value(value: &str) -> BlobstoreBytes {
#[fbinit::test]
fn base(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let waker = futures_preview::task::noop_waker();
let mut task_ctx = Context::from_waker(&waker);
let bs0 = Arc::new(Tickable::new());
let bs1 = Arc::new(Tickable::new());
let log = Arc::new(LogHandler::new());
@ -166,10 +173,14 @@ fn base(fb: FacebookInit) {
let v0 = make_value("v0");
let k0 = String::from("k0");
let mut put_fut = bs.put(ctx.clone(), k0.clone(), v0.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k0.clone(), v0.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
put_fut.wait().unwrap();
put_fut.await.unwrap();
assert_eq!(bs0.storage.with(|s| s.get(&k0).cloned()), Some(v0.clone()));
assert!(bs1.storage.with(|s| s.is_empty()));
bs1.tick(Some("bs1 failed"));
@ -178,11 +189,11 @@ fn base(fb: FacebookInit) {
.with(|log| log == &vec![(BlobstoreId::new(0), k0.clone())]));
// should succeed as it is stored in bs1
let mut get_fut = bs.get(ctx.clone(), k0);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs.get(ctx.clone(), k0).map_err(|_| ()).compat().boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
bs1.tick(None);
assert_eq!(get_fut.wait().unwrap(), Some(v0));
assert_eq!(get_fut.await.unwrap(), Some(v0));
assert!(bs1.storage.with(|s| s.is_empty()));
log.clear();
@ -193,24 +204,32 @@ fn base(fb: FacebookInit) {
let v1 = make_value("v1");
let k1 = String::from("k1");
let mut put_fut = bs.put(ctx.clone(), k1.clone(), v1.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k1.clone(), v1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("case 2: bs0 failed"));
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
put_fut.wait().unwrap();
put_fut.await.unwrap();
assert!(bs0.storage.with(|s| s.get(&k1).is_none()));
assert_eq!(bs1.storage.with(|s| s.get(&k1).cloned()), Some(v1.clone()));
assert!(log
.log
.with(|log| log == &vec![(BlobstoreId::new(1), k1.clone())]));
let mut get_fut = bs.get(ctx.clone(), k1.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
assert_eq!(get_fut.wait().unwrap(), Some(v1));
assert_eq!(get_fut.await.unwrap(), Some(v1));
assert!(bs0.storage.with(|s| s.get(&k1).is_none()));
log.clear();
@ -221,38 +240,42 @@ fn base(fb: FacebookInit) {
let k2 = String::from("k2");
let v2 = make_value("v2");
let mut put_fut = bs.put(ctx.clone(), k2.clone(), v2.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k2.clone(), v2.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("case 3: bs0 failed"));
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(Some("case 3: bs1 failed"));
assert!(put_fut.wait().is_err());
assert!(put_fut.await.is_err());
}
// get: Error + None -> Error
{
let k3 = String::from("k3");
let mut get_fut = bs.get(ctx.clone(), k3);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs.get(ctx.clone(), k3).map_err(|_| ()).compat().boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("case 4: bs0 failed"));
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
assert!(get_fut.wait().is_err());
assert!(get_fut.await.is_err());
}
// get: None + None -> None
{
let k3 = String::from("k3");
let mut get_fut = bs.get(ctx.clone(), k3);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs.get(ctx.clone(), k3).map_err(|_| ()).compat().boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
assert_eq!(get_fut.wait().unwrap(), None);
assert_eq!(get_fut.await.unwrap(), None);
}
// both put succeed
@ -261,10 +284,14 @@ fn base(fb: FacebookInit) {
let v4 = make_value("v4");
log.clear();
let mut put_fut = bs.put(ctx.clone(), k4.clone(), v4.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k4.clone(), v4.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
put_fut.wait().unwrap();
put_fut.await.unwrap();
assert_eq!(bs0.storage.with(|s| s.get(&k4).cloned()), Some(v4.clone()));
bs1.tick(None);
while log.log.with(|log| log.len() != 2) {}
@ -275,7 +302,10 @@ fn base(fb: FacebookInit) {
#[fbinit::test]
fn multiplexed(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let waker = futures_preview::task::noop_waker();
let mut task_ctx = Context::from_waker(&waker);
let ctx = CoreContext::test_mock(fb);
let queue = Arc::new(SqlBlobstoreSyncQueue::with_sqlite_in_memory().unwrap());
@ -295,14 +325,18 @@ fn multiplexed(fb: FacebookInit) {
{
let k0 = String::from("k0");
let mut get_fut = bs.get(ctx.clone(), k0.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k0.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(Some("case 1: bs1 failed"));
assert_eq!(get_fut.wait().unwrap(), None);
assert_eq!(get_fut.await.unwrap(), None);
}
// only replica containing key failed
@ -310,15 +344,20 @@ fn multiplexed(fb: FacebookInit) {
let k1 = String::from("k1");
let v1 = make_value("v1");
let mut put_fut = bs.put(ctx.clone(), k1.clone(), v1.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k1.clone(), v1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
bs1.tick(Some("case 2: bs1 failed"));
put_fut.wait().expect("case 2 put_fut failed");
put_fut.await.expect("case 2 put_fut failed");
match queue
.get(ctx.clone(), k1.clone())
.wait()
.compat()
.await
.expect("case 2 get failed")
.as_slice()
{
@ -326,29 +365,40 @@ fn multiplexed(fb: FacebookInit) {
_ => panic!("only one entry expected"),
}
let mut get_fut = bs.get(ctx.clone(), k1.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("case 2: bs0 failed"));
bs1.tick(None);
assert!(get_fut.wait().is_err());
assert!(get_fut.await.is_err());
}
// both replicas fail
{
let k2 = String::from("k2");
let mut get_fut = bs.get(ctx.clone(), k2.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k2.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("case 3: bs0 failed"));
bs1.tick(Some("case 3: bs1 failed"));
assert!(get_fut.wait().is_err());
assert!(get_fut.await.is_err());
}
});
}
#[fbinit::test]
fn scrubbed(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let waker = futures_preview::task::noop_waker();
let mut task_ctx = Context::from_waker(&waker);
let ctx = CoreContext::test_mock(fb);
let queue = Arc::new(SqlBlobstoreSyncQueue::with_sqlite_in_memory().unwrap());
let scrub_handler = Arc::new(LoggingScrubHandler::new(false)) as Arc<dyn ScrubHandler>;
@ -370,14 +420,18 @@ fn scrubbed(fb: FacebookInit) {
{
let k0 = String::from("k0");
let mut get_fut = bs.get(ctx.clone(), k0.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k0.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(Some("bs1 failed"));
assert_eq!(get_fut.wait().unwrap(), None, "None/Err no replication");
assert_eq!(get_fut.await.unwrap(), None, "None/Err no replication");
}
// only replica containing key failed
@ -385,16 +439,21 @@ fn scrubbed(fb: FacebookInit) {
let k1 = String::from("k1");
let v1 = make_value("v1");
let mut put_fut = bs.put(ctx.clone(), k1.clone(), v1.clone());
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
let mut put_fut = bs
.put(ctx.clone(), k1.clone(), v1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(put_fut.poll().unwrap(), Async::NotReady);
assert_eq!(put_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(Some("bs1 failed"));
put_fut.wait().unwrap();
put_fut.await.unwrap();
match queue
.get(ctx.clone(), k1.clone())
.wait()
.compat()
.await
.unwrap()
.as_slice()
{
@ -402,25 +461,33 @@ fn scrubbed(fb: FacebookInit) {
_ => panic!("only one entry expected"),
}
let mut get_fut = bs.get(ctx.clone(), k1.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("bs0 failed"));
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
assert!(get_fut.wait().is_err(), "None/Err while replicating");
assert!(get_fut.await.is_err(), "None/Err while replicating");
}
// both replicas fail
{
let k2 = String::from("k2");
let mut get_fut = bs.get(ctx.clone(), k2.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k2.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("bs0 failed"));
bs1.tick(Some("bs1 failed"));
assert!(get_fut.wait().is_err(), "Err/Err");
assert!(get_fut.await.is_err(), "Err/Err");
}
// Now replace bs1 with an empty blobstore, and see the scrub work
@ -440,25 +507,33 @@ fn scrubbed(fb: FacebookInit) {
{
let k0 = String::from("k0");
let mut get_fut = bs.get(ctx.clone(), k0.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k0.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(Some("bs1 failed"));
assert_eq!(get_fut.wait().unwrap(), None, "None/Err after replacement");
assert_eq!(get_fut.await.unwrap(), None, "None/Err after replacement");
}
// only replica containing key replaced after failure - DATA LOST
{
let k1 = String::from("k1");
let mut get_fut = bs.get(ctx.clone(), k1.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("bs0 failed"));
bs1.tick(None);
assert!(get_fut.wait().is_err(), "Empty replacement against error");
assert!(get_fut.await.is_err(), "Empty replacement against error");
}
// One working replica after failure.
@ -468,7 +543,8 @@ fn scrubbed(fb: FacebookInit) {
match queue
.get(ctx.clone(), k1.clone())
.wait()
.compat()
.await
.unwrap()
.as_slice()
{
@ -476,7 +552,8 @@ fn scrubbed(fb: FacebookInit) {
assert_eq!(entry.blobstore_id, bid0, "Queue bad");
queue
.del(ctx.clone(), vec![entry.clone()])
.wait()
.compat()
.await
.expect("Could not delete scrub queue entry");
}
_ => panic!("only one entry expected"),
@ -486,18 +563,22 @@ fn scrubbed(fb: FacebookInit) {
assert_eq!(bs0.storage.with(|s| s.get(&k1).cloned()), Some(v1.clone()));
assert!(bs1.storage.with(|s| s.is_empty()));
let mut get_fut = bs.get(ctx.clone(), k1.clone());
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
let mut get_fut = bs
.get(ctx.clone(), k1.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
// tick the gets
bs0.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs1.tick(None);
assert_eq!(get_fut.poll().unwrap(), Async::NotReady);
assert_eq!(get_fut.poll_unpin(&mut task_ctx), Poll::Pending);
// Tick the repairs
bs1.tick(None);
// Succeeds
assert_eq!(get_fut.wait().unwrap(), Some(v1.clone()));
assert_eq!(get_fut.await.unwrap(), Some(v1.clone()));
// Now both populated.
assert_eq!(bs0.storage.with(|s| s.get(&k1).cloned()), Some(v1.clone()));
assert_eq!(bs1.storage.with(|s| s.get(&k1).cloned()), Some(v1.clone()));
@ -507,7 +588,10 @@ fn scrubbed(fb: FacebookInit) {
#[fbinit::test]
fn queue_waits(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let waker = futures_preview::task::noop_waker();
let mut task_ctx = Context::from_waker(&waker);
let bs0 = Arc::new(Tickable::new());
let bs1 = Arc::new(Tickable::new());
let bs2 = Arc::new(Tickable::new());
@ -540,64 +624,80 @@ fn queue_waits(fb: FacebookInit) {
// Put succeeds once all blobstores have succeeded, even if the queue hasn't.
{
let mut fut = bs.put(ctx.clone(), k.clone(), v.clone());
let mut fut = bs
.put(ctx.clone(), k.clone(), v.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(fut.poll().unwrap(), Async::NotReady);
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
bs1.tick(None);
bs2.tick(None);
assert_eq!(fut.poll().unwrap(), Async::Ready(()));
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Ready(Ok(())));
clear();
}
// Put succeeds after 1 write + a write to the queue
{
let mut fut = bs.put(ctx.clone(), k.clone(), v.clone());
let mut fut = bs
.put(ctx.clone(), k.clone(), v.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(fut.poll().unwrap(), Async::NotReady);
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
assert_eq!(fut.poll().unwrap(), Async::NotReady);
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending);
log.tick(None);
assert_eq!(fut.poll().unwrap(), Async::Ready(()));
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Ready(Ok(())));
clear();
}
// Put succeeds despite errors, if the queue succeeds
{
let mut fut = bs.put(ctx.clone(), k.clone(), v.clone());
let mut fut = bs
.put(ctx.clone(), k.clone(), v.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(fut.poll().unwrap(), Async::NotReady);
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(None);
bs1.tick(Some("oops"));
bs2.tick(Some("oops"));
assert_eq!(fut.poll().unwrap(), Async::NotReady); // Trigger on_put
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending); // Trigger on_put
log.tick(None);
assert_eq!(fut.poll().unwrap(), Async::Ready(()));
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Ready(Ok(())));
clear();
}
// Put succeeds if any blobstore succeeds and writes to the queue
{
let mut fut = bs.put(ctx.clone(), k.clone(), v.clone());
let mut fut = bs
.put(ctx.clone(), k.clone(), v.clone())
.map_err(|_| ())
.compat()
.boxed();
assert_eq!(fut.poll().unwrap(), Async::NotReady);
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending);
bs0.tick(Some("oops"));
bs1.tick(None);
bs2.tick(Some("oops"));
assert_eq!(fut.poll().unwrap(), Async::NotReady); // Trigger on_put
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Pending); // Trigger on_put
log.tick(None);
assert_eq!(fut.poll().unwrap(), Async::Ready(()));
assert_eq!(fut.poll_unpin(&mut task_ctx), Poll::Ready(Ok(())));
clear();
}


@ -10,7 +10,6 @@
#![deny(warnings)]
use anyhow::Error;
use futures::Future;
use assert_matches::assert_matches;
use bonsai_hg_mapping::{
@ -20,6 +19,7 @@ use bonsai_hg_mapping::{
use context::CoreContext;
use fbinit::FacebookInit;
use futures_ext::BoxFuture;
use futures_preview::compat::Future01CompatExt;
use mercurial_types::{HgChangesetIdPrefix, HgChangesetIdsResolvedFromPrefix};
use mercurial_types_mocks::nodehash as hg;
use mononoke_types::RepositoryId;
@ -32,7 +32,7 @@ use std::sync::{
Arc,
};
fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
async fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let entry = BonsaiHgMappingEntry {
repo_id: REPO_ZERO,
@ -43,30 +43,35 @@ fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
true,
mapping
.add(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Adding new entry failed")
);
assert_eq!(
false,
mapping
.add(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Adding same entry failed")
);
let result = mapping
.get(ctx.clone(), REPO_ZERO, hg::ONES_CSID.into())
.wait()
.compat()
.await
.expect("Get failed");
assert_eq!(result, vec![entry.clone()]);
let result = mapping
.get_hg_from_bonsai(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its bonsai counterpart");
assert_eq!(result, Some(hg::ONES_CSID));
let result = mapping
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.compat()
.await
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
@ -77,7 +82,8 @@ fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
};
let result = mapping
.add(ctx.clone(), same_bc_entry.clone())
.wait()
.compat()
.await
.expect_err("Conflicting entries should haved produced an error");
assert_matches!(
result.downcast::<ErrorKind>(),
@ -91,7 +97,8 @@ fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
};
let result = mapping
.add(ctx.clone(), same_hg_entry.clone())
.wait()
.compat()
.await
.expect_err("Conflicting entries should haved produced an error");
assert_matches!(
result.downcast::<ErrorKind>(),
@ -99,16 +106,17 @@ fn add_and_get<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
);
}
fn missing<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
async fn missing<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let result = mapping
.get(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID.into())
.wait()
.compat()
.await
.expect("Failed to fetch missing changeset (should succeed with None instead)");
assert_eq!(result, vec![]);
}
fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
async fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let entry1 = BonsaiHgMappingEntry {
@ -136,28 +144,32 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
true,
mapping
.add(ctx.clone(), entry1.clone())
.wait()
.compat()
.await
.expect("Adding entry1 failed")
);
assert_eq!(
true,
mapping
.add(ctx.clone(), entry2.clone())
.wait()
.compat()
.await
.expect("Adding entry2 failed")
);
assert_eq!(
true,
mapping
.add(ctx.clone(), entry3.clone())
.wait()
.compat()
.await
.expect("Adding entry3 failed")
);
assert_eq!(
true,
mapping
.add(ctx.clone(), entry4.clone())
.wait()
.compat()
.await
.expect("Adding entry4 failed")
);
@ -169,7 +181,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_bytes(&hg::ONES_CSID.as_ref()[0..8]).unwrap(),
10,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(
@ -185,7 +198,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_bytes(&hg::TWOS_CSID.as_ref()[0..10]).unwrap(),
1,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(
@ -201,7 +215,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_bytes(&hg::FS_CSID.as_ref()[0..8]).unwrap(),
10,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(
@ -217,7 +232,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_str(&"fff").unwrap(),
10,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(
@ -233,7 +249,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_bytes(&hg::FS_CSID.as_ref()[0..8]).unwrap(),
1,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(
@ -249,7 +266,8 @@ fn get_many_hg_by_prefix<M: BonsaiHgMapping>(fb: FacebookInit, mapping: M) {
HgChangesetIdPrefix::from_bytes(&hg::THREES_CSID.as_ref()[0..16]).unwrap(),
10,
)
.wait()
.compat()
.await
.expect("Failed to get hg changeset by its prefix");
assert_eq!(result, HgChangesetIdsResolvedFromPrefix::NoMatch);
@ -307,7 +325,7 @@ impl BonsaiHgMapping for CountedBonsaiHgMapping {
}
}
fn caching<M: BonsaiHgMapping + 'static>(fb: FacebookInit, mapping: M) {
async fn caching<M: BonsaiHgMapping + 'static>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let gets = Arc::new(AtomicUsize::new(0));
let adds = Arc::new(AtomicUsize::new(0));
@ -329,27 +347,31 @@ fn caching<M: BonsaiHgMapping + 'static>(fb: FacebookInit, mapping: M) {
true,
mapping
.add(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Adding new entry failed")
);
let result = mapping
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.compat()
.await
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
assert_eq!(gets.load(Ordering::Relaxed), 1);
let result = mapping
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.compat()
.await
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
assert_eq!(gets.load(Ordering::Relaxed), 1);
let result = mapping
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::TWOS_CSID)
.wait()
.compat()
.await
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, None);
assert_eq!(gets.load(Ordering::Relaxed), 2);
@ -357,28 +379,32 @@ fn caching<M: BonsaiHgMapping + 'static>(fb: FacebookInit, mapping: M) {
#[fbinit::test]
fn test_add_and_get(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
add_and_get(fb, SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap());
});
async_unit::tokio_unit_test(add_and_get(
fb,
SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap(),
));
}
#[fbinit::test]
fn test_missing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
missing(fb, SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap());
});
async_unit::tokio_unit_test(missing(
fb,
SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap(),
));
}
#[fbinit::test]
fn test_caching(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
caching(fb, SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap());
});
async_unit::tokio_unit_test(caching(
fb,
SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap(),
));
}
#[fbinit::test]
fn test_get_many_hg_by_prefix(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
get_many_hg_by_prefix(fb, SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap());
});
async_unit::tokio_unit_test(get_many_hg_by_prefix(
fb,
SqlBonsaiHgMapping::with_sqlite_in_memory().unwrap(),
));
}

File diff suppressed because it is too large.


@ -474,7 +474,7 @@ mod test {
// Move bookmark to another changeset
let another_hash = "607314ef579bd2407752361ba1b0c1729d08b281";
set_bookmark(fb, small_repo.clone(), another_hash, master.clone());
set_bookmark(fb, small_repo.clone(), another_hash, master.clone()).await;
let another_bcs_id =
helpers::csid_resolve(ctx.clone(), small_repo.clone(), another_hash.to_string())
.compat()
@ -497,7 +497,7 @@ mod test {
// Create another bookmark
let another_book = BookmarkName::new("newbook")?;
set_bookmark(fb, small_repo.clone(), another_hash, another_book.clone());
set_bookmark(fb, small_repo.clone(), another_hash, another_book.clone()).await;
let actual_diff = find_bookmark_diff(ctx.clone(), &commit_syncer).await?;
@ -575,8 +575,8 @@ mod test {
direction: CommitSyncDirection,
) -> Result<CommitSyncer<SqlSyncedCommitMapping>, Error> {
let ctx = CoreContext::test_mock(fb);
let small_repo = linear::getrepo_with_id(fb, RepositoryId::new(0));
let large_repo = linear::getrepo_with_id(fb, RepositoryId::new(1));
let small_repo = linear::getrepo_with_id(fb, RepositoryId::new(0)).await;
let large_repo = linear::getrepo_with_id(fb, RepositoryId::new(1)).await;
let master = BookmarkName::new("master")?;
let maybe_master_val = small_repo


@ -797,10 +797,11 @@ mod tests {
#[fbinit::test]
fn test_backfill_data_latest(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new()?;
let ctx = CoreContext::test_mock(fb);
let repo = runtime.block_on_std(linear::getrepo(fb));
let hg_cs_id = HgChangesetId::from_str("79a13814c5ce7330173ec04d279bf95ab3f652fb")?;
let maybe_bcs_id = runtime.block_on(repo.get_bonsai_from_hg(ctx.clone(), hg_cs_id))?;
let bcs_id = maybe_bcs_id.unwrap();
@ -813,10 +814,11 @@ mod tests {
#[fbinit::test]
fn test_backfill_data_batch(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new()?;
let ctx = CoreContext::test_mock(fb);
let repo = runtime.block_on_std(linear::getrepo(fb));
let mut batch = vec![];
let hg_cs_ids = vec![
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
@ -846,10 +848,10 @@ mod tests {
// The test exercises that derived data mapping entries are written only after
// all other blobstore writes were successful, i.e. a mapping entry shouldn't exist
// if any of the corresponding blobs weren't successfully saved
let mut runtime = Runtime::new()?;
let ctx = CoreContext::test_mock(fb);
let origrepo = linear::getrepo(fb);
let mut runtime = Runtime::new()?;
let origrepo = runtime.block_on_std(linear::getrepo(fb));
let repo = origrepo.dangerous_override(|blobstore| -> Arc<dyn Blobstore> {
Arc::new(FailingBlobstore::new("manifest".to_string(), blobstore))
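
These backfill hunks show the mixed-runtime idiom the conversion relies on: tokio_compat::runtime::Runtime drives both future flavors, with block_on for legacy futures 0.1 values and block_on_std for std futures such as the now-async fixture constructors. A rough sketch of the shape, inside a test returning Result<(), Error>, assuming getrepo is async while get_bonsai_from_hg still returns a futures 0.1 future (as in the hunks above):

let mut runtime = Runtime::new()?;
// Std (async fn) future: resolve it with block_on_std.
let repo = runtime.block_on_std(linear::getrepo(fb));
// Futures 0.1 future: the legacy block_on still handles it.
let maybe_bcs_id = runtime.block_on(repo.get_bonsai_from_hg(ctx.clone(), hg_cs_id))?;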


@ -711,151 +711,194 @@ mod tests {
#[fbinit::test]
fn linear_test_get_statistics_from_changeset(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
runtime.block_on_std(async move {
let repo = linear::getrepo(fb).await;
// Commit consists of two files (name => content):
// "1" => "1\n"
// "files" => "1\n"
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), root);
let p = runtime.block_on(p).unwrap().unwrap();
let parents = vec![p];
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
let bcs_id = create_commit(
ctx.clone(),
repo.clone(),
parents,
store_files(
// Commit consists of two files (name => content):
// "1" => "1\n"
// "files" => "1\n"
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo
.get_bonsai_from_hg(ctx.clone(), root)
.compat()
.await
.unwrap()
.unwrap();
let parents = vec![p];
let bcs_id = create_commit(
ctx.clone(),
btreemap! {
"dir1/dir2/file1" => Some("first line\nsecond line\n"),
"dir1/dir3/file2" => Some("first line\n"),
},
repo.clone(),
),
);
parents,
store_files(
ctx.clone(),
btreemap! {
"dir1/dir2/file1" => Some("first line\nsecond line\n"),
"dir1/dir3/file2" => Some("first line\n"),
},
repo.clone(),
)
.await,
)
.await;
let hg_cs_id = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id);
let hg_cs_id = runtime.block_on(hg_cs_id).unwrap();
let hg_cs_id = repo
.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.compat()
.await
.unwrap();
let stats = get_statistics_from_changeset(
ctx.clone(),
repo.clone(),
blobstore.clone(),
hg_cs_id.clone(),
);
let stats = runtime.block_on(stats).unwrap();
let stats = get_statistics_from_changeset(
ctx.clone(),
repo.clone(),
blobstore.clone(),
hg_cs_id.clone(),
)
.compat()
.await
.unwrap();
// (num_files, total_file_size, num_lines)
assert_eq!(stats, RepoStatistics::new(4, 38, 5));
// (num_files, total_file_size, num_lines)
assert_eq!(stats, RepoStatistics::new(4, 38, 5));
});
}
#[fbinit::test]
fn linear_test_get_statistics_from_entry_tree(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
runtime.block_on_std(async move {
let repo = linear::getrepo(fb).await;
// Commit consists of two files (name => content):
// "1" => "1\n"
// "files" => "1\n"
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), root);
let p = runtime.block_on(p).unwrap().unwrap();
let parents = vec![p];
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
let bcs_id = create_commit(
ctx.clone(),
repo.clone(),
parents,
store_files(
// Commit consists of two files (name => content):
// "1" => "1\n"
// "files" => "1\n"
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo
.get_bonsai_from_hg(ctx.clone(), root)
.compat()
.await
.unwrap()
.unwrap();
let parents = vec![p];
let bcs_id = create_commit(
ctx.clone(),
btreemap! {
"dir1/dir2/file1" => Some("first line\nsecond line\n"),
"dir1/dir3/file2" => Some("first line\n"),
},
repo.clone(),
),
);
parents,
store_files(
ctx.clone(),
btreemap! {
"dir1/dir2/file1" => Some("first line\nsecond line\n"),
"dir1/dir3/file2" => Some("first line\n"),
},
repo.clone(),
)
.await,
)
.await;
let hg_cs_id = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id);
let hg_cs_id = runtime.block_on(hg_cs_id).unwrap();
let hg_cs_id = repo
.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.compat()
.await
.unwrap();
let tree_entries = get_manifest_from_changeset(ctx.clone(), repo.clone(), hg_cs_id.clone())
.and_then({
cloned!(ctx);
move |manifest| {
manifest
.list_all_entries(ctx.clone(), blobstore.clone())
.filter_map(|(_, entry)| match entry {
Entry::Tree(_) => Some(entry),
_ => None,
})
.collect()
}
});
let mut tree_entries = runtime.block_on(tree_entries).unwrap();
let mut tree_entries =
get_manifest_from_changeset(ctx.clone(), repo.clone(), hg_cs_id.clone())
.and_then({
cloned!(ctx);
move |manifest| {
manifest
.list_all_entries(ctx.clone(), blobstore.clone())
.filter_map(|(_, entry)| match entry {
Entry::Tree(_) => Some(entry),
_ => None,
})
.collect()
}
})
.compat()
.await
.unwrap();
let stats =
get_statistics_from_entry(ctx.clone(), repo.clone(), tree_entries.pop().unwrap());
let stats = runtime.block_on(stats).unwrap();
let stats =
get_statistics_from_entry(ctx.clone(), repo.clone(), tree_entries.pop().unwrap())
.compat()
.await
.unwrap();
// For Entry::Tree we expect repository with all statistics equal 0
// (num_files, total_file_size, num_lines)
assert_eq!(stats, RepoStatistics::default());
// For Entry::Tree we expect repository with all statistics equal 0
// (num_files, total_file_size, num_lines)
assert_eq!(stats, RepoStatistics::default());
});
}
#[fbinit::test]
fn linear_test_update_statistics(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
runtime.block_on_std(async move {
let repo = linear::getrepo(fb).await;
/*
Commit consists of two files (name => content):
"1" => "1\n"
"files" => "1\n"
*/
let prev_hg_cs_id =
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
/*
Commit consists of two files (name => content):
"2" => "2\n"
"files" => "1\n2\n"
*/
let cur_hg_cs_id =
HgChangesetId::from_str("3e0e761030db6e479a7fb58b12881883f9f8c63f").unwrap();
let ctx = CoreContext::test_mock(fb);
let blobstore = repo.get_blobstore();
let stats = get_statistics_from_changeset(
ctx.clone(),
repo.clone(),
blobstore.clone(),
prev_hg_cs_id.clone(),
);
let stats = runtime.block_on(stats).unwrap();
/*
Commit consists of two files (name => content):
"1" => "1\n"
"files" => "1\n"
*/
let prev_hg_cs_id =
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
/*
Commit consists of two files (name => content):
"2" => "2\n"
"files" => "1\n2\n"
*/
let cur_hg_cs_id =
HgChangesetId::from_str("3e0e761030db6e479a7fb58b12881883f9f8c63f").unwrap();
let manifests =
get_manifest_from_changeset(ctx.clone(), repo.clone(), prev_hg_cs_id.clone()).join(
get_manifest_from_changeset(ctx.clone(), repo.clone(), cur_hg_cs_id.clone()),
);
let (prev_manifest, cur_manifest) = runtime.block_on(manifests).unwrap();
let stats = get_statistics_from_changeset(
ctx.clone(),
repo.clone(),
blobstore.clone(),
prev_hg_cs_id.clone(),
)
.compat()
.await
.unwrap();
let new_stats = update_statistics(
ctx.clone(),
repo.clone(),
stats.clone(),
prev_manifest.diff(ctx.clone(), blobstore.clone(), cur_manifest.clone()),
);
let new_stats = runtime.block_on(new_stats).unwrap();
let (prev_manifest, cur_manifest) =
get_manifest_from_changeset(ctx.clone(), repo.clone(), prev_hg_cs_id.clone())
.join(get_manifest_from_changeset(
ctx.clone(),
repo.clone(),
cur_hg_cs_id.clone(),
))
.compat()
.await
.unwrap();
// (num_files, total_file_size, num_lines)
assert_eq!(new_stats, RepoStatistics::new(3, 8, 4));
let new_stats = update_statistics(
ctx.clone(),
repo.clone(),
stats.clone(),
prev_manifest.diff(ctx.clone(), blobstore.clone(), cur_manifest.clone()),
)
.compat()
.await
.unwrap();
// (num_files, total_file_size, num_lines)
assert_eq!(new_stats, RepoStatistics::new(3, 8, 4));
});
}
}
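
The statistics tests above all follow one bridging idiom: the test body moves into runtime.block_on_std(async move { ... }), and each legacy future is converted at its call site with Future01CompatExt::compat before being awaited. A condensed sketch of the two variants that appear, a single future and a pair joined first:

runtime.block_on_std(async move {
    // One futures 0.1 value: convert, then await.
    let hg_cs_id = repo
        .get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
        .compat()
        .await
        .unwrap();

    // Two futures 0.1 values: join them while still in the old world,
    // then convert and await the pair once.
    let (prev_manifest, cur_manifest) =
        get_manifest_from_changeset(ctx.clone(), repo.clone(), prev_hg_cs_id.clone())
            .join(get_manifest_from_changeset(
                ctx.clone(),
                repo.clone(),
                cur_hg_cs_id.clone(),
            ))
            .compat()
            .await
            .unwrap();

    // ... assertions over hg_cs_id and the manifests ...
});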


@ -634,8 +634,8 @@ mod test {
direction: CommitSyncDirection,
) -> Result<CommitSyncer<SqlSyncedCommitMapping>, Error> {
let ctx = CoreContext::test_mock(fb);
let small_repo = linear::getrepo_with_id(fb, RepositoryId::new(0));
let large_repo = linear::getrepo_with_id(fb, RepositoryId::new(1));
let small_repo = linear::getrepo_with_id(fb, RepositoryId::new(0)).await;
let large_repo = linear::getrepo_with_id(fb, RepositoryId::new(1)).await;
let master = BookmarkName::new("master")?;
let maybe_master_val = small_repo


@ -12,7 +12,6 @@
use async_unit;
use bytes::Bytes;
use fbinit::FacebookInit;
use futures::Future;
use maplit::btreemap;
use std::collections::BTreeMap;
use std::str::FromStr;
@ -27,7 +26,7 @@ use context::CoreContext;
use cross_repo_sync_test_utils::rebase_root_on_master;
use fixtures::{linear, many_files_dirs};
use futures_preview::{FutureExt, TryFutureExt};
use futures_preview::compat::Future01CompatExt;
use mercurial_types::HgChangesetId;
use mononoke_types::{
BlobstoreValue, BonsaiChangesetMut, ChangesetId, DateTime, FileChange, FileContents, FileType,
@ -49,14 +48,15 @@ fn mpath(p: &str) -> MPath {
MPath::new(p).unwrap()
}
fn create_initial_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
async fn create_initial_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bookmark = BookmarkName::new("master").unwrap();
let content = FileContents::new_bytes(Bytes::from(b"123" as &[u8]));
let content_id = content
.into_blob()
.store(ctx.clone(), repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, 3, None);
@ -75,7 +75,8 @@ fn create_initial_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx.clone(), repo.clone())
.wait()
.compat()
.await
.unwrap();
let mut txn = repo.update_bookmark_transaction(ctx.clone());
@ -87,15 +88,16 @@ fn create_initial_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
bcs_id
}
fn create_empty_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
async fn create_empty_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bookmark = BookmarkName::new("master").unwrap();
let p1 = repo
.get_bonsai_bookmark(ctx.clone(), &bookmark)
.wait()
.compat()
.await
.unwrap()
.unwrap();
@ -114,7 +116,8 @@ fn create_empty_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx.clone(), repo.clone())
.wait()
.compat()
.await
.unwrap();
let mut txn = repo.update_bookmark_transaction(ctx.clone());
@ -126,11 +129,11 @@ fn create_empty_commit(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
bcs_id
}
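
Once a helper like the two above becomes an async fn, the mechanical rule is that every chain that used to end in .wait() now ends in .compat().await, with the error handling left as it was. For instance, loading a changeset back out of the blobstore:

// Before: block on a futures 0.1 value.
let bcs = bcs_id.load(ctx.clone(), repo.blobstore()).wait().unwrap();

// After: convert to a std future and await it.
let bcs = bcs_id
    .load(ctx.clone(), repo.blobstore())
    .compat()
    .await
    .unwrap();
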
fn sync_to_master<M>(
async fn sync_to_master<M>(
ctx: CoreContext,
config: &CommitSyncer<M>,
source_bcs_id: ChangesetId,
@ -141,19 +144,16 @@ where
let bookmark_name = BookmarkName::new("master").unwrap();
let source_bcs = source_bcs_id
.load(ctx.clone(), config.get_source_repo().blobstore())
.wait()
.compat()
.await
.unwrap();
async move {
config
.unsafe_sync_commit_pushrebase(ctx.clone(), source_bcs, bookmark_name)
.await
}
.boxed()
.compat()
.wait()
config
.unsafe_sync_commit_pushrebase(ctx.clone(), source_bcs, bookmark_name)
.await
}
fn get_bcs_id<M>(
async fn get_bcs_id<M>(
ctx: CoreContext,
config: &CommitSyncer<M>,
source_hg_cs: HgChangesetId,
@ -164,12 +164,13 @@ where
config
.get_source_repo()
.get_bonsai_from_hg(ctx, source_hg_cs)
.wait()
.compat()
.await
.unwrap()
.unwrap()
}
fn check_mapping<M>(
async fn check_mapping<M>(
ctx: CoreContext,
config: &CommitSyncer<M>,
source_bcs_id: ChangesetId,
@ -188,11 +189,13 @@ fn check_mapping<M>(
source_bcs_id,
destination_repoid,
)
.wait()
.compat()
.await
.unwrap(),
expected_bcs_id
);
expected_bcs_id.map(move |expected_bcs_id| {
if let Some(expected_bcs_id) = expected_bcs_id {
assert_eq!(
mapping
.get(
@ -201,11 +204,12 @@ fn check_mapping<M>(
expected_bcs_id,
source_repoid
)
.wait()
.compat()
.await
.unwrap(),
Some(source_bcs_id)
)
});
);
}
}
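
One rework in check_mapping is not mechanical: the old code ran its second assertion inside Option::map, and a combinator body cannot contain .await, so it becomes a plain if let. Sketched in isolation, with hypothetical lookup_blocking/lookup helpers standing in for the mapping call:

// Before: a side-effecting combinator, workable in blocking code.
expected_bcs_id.map(|expected| {
    assert_eq!(lookup_blocking(expected), Some(source_bcs_id));
});

// After: an ordinary conditional whose body is free to await.
if let Some(expected) = expected_bcs_id {
    assert_eq!(lookup(expected).await, Some(source_bcs_id));
}
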
fn prefix_mover(prefix: &str) -> Mover {
@ -218,10 +222,10 @@ fn reverse_prefix_mover(prefix: &str) -> Mover {
Arc::new(move |path: &MPath| Ok(path.remove_prefix_component(&prefix)))
}
fn sync_parentage(fb: FacebookInit) {
async fn sync_parentage(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let (small_repo, megarepo, mapping) = prepare_repos_and_mapping().unwrap();
linear::initrepo(fb, &small_repo);
linear::initrepo(fb, &small_repo).await;
let linear = small_repo;
let repos = CommitSyncRepos::SmallToLarge {
small_repo: linear.clone(),
@ -234,7 +238,7 @@ fn sync_parentage(fb: FacebookInit) {
let config = CommitSyncer::new(mapping, repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
// As this is the first commit from linear, it'll rewrite cleanly
@ -242,35 +246,40 @@ fn sync_parentage(fb: FacebookInit) {
ctx.clone(),
&config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
)
.await;
let expected_bcs_id =
ChangesetId::from_str("8966842d2031e69108028d6f0ce5812bca28cae53679d066368a8c1472a5bb9a")
.ok();
let megarepo_base_bcs_id =
rebase_root_on_master(ctx.clone(), &config, linear_base_bcs_id).unwrap();
let megarepo_base_bcs_id = rebase_root_on_master(ctx.clone(), &config, linear_base_bcs_id)
.await
.unwrap();
// Confirm that we got the expected conversion
assert_eq!(Some(megarepo_base_bcs_id), expected_bcs_id);
check_mapping(ctx.clone(), &config, linear_base_bcs_id, expected_bcs_id);
check_mapping(ctx.clone(), &config, linear_base_bcs_id, expected_bcs_id).await;
// Finally, sync another commit
let linear_second_bcs_id = get_bcs_id(
ctx.clone(),
&config,
HgChangesetId::from_str("3e0e761030db6e479a7fb58b12881883f9f8c63f").unwrap(),
);
)
.await;
let expected_bcs_id =
ChangesetId::from_str("95c03dcd3324e172275ce22a5628d7a501aecb51d9a198b33284887769537acf")
.unwrap();
let megarepo_second_bcs_id =
sync_to_master(ctx.clone(), &config, linear_second_bcs_id).unwrap();
let megarepo_second_bcs_id = sync_to_master(ctx.clone(), &config, linear_second_bcs_id)
.await
.unwrap();
// Confirm that we got the expected conversion
assert_eq!(megarepo_second_bcs_id, Some(expected_bcs_id));
// And check that the synced commit has correct parentage
assert_eq!(
megarepo
.get_changeset_parents_by_bonsai(ctx.clone(), megarepo_second_bcs_id.unwrap())
.wait()
.compat()
.await
.unwrap(),
vec![megarepo_base_bcs_id]
);
@ -278,14 +287,17 @@ fn sync_parentage(fb: FacebookInit) {
#[fbinit::test]
fn test_sync_parentage(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_parentage(fb))
async_unit::tokio_unit_test(async move {
sync_parentage(fb).await;
})
}
fn update_master_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
async fn update_master_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bookmark = BookmarkName::new("master").unwrap();
let p1 = repo
.get_bonsai_bookmark(ctx.clone(), &bookmark)
.wait()
.compat()
.await
.unwrap()
.unwrap();
@ -293,7 +305,8 @@ fn update_master_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let content_id = content
.into_blob()
.store(ctx.clone(), repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, 3, None);
@ -312,7 +325,8 @@ fn update_master_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx.clone(), repo.clone())
.wait()
.compat()
.await
.unwrap();
let mut txn = repo.update_bookmark_transaction(ctx.clone());
@ -324,14 +338,14 @@ fn update_master_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
bcs_id
}
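
The bookmark bookkeeping at the tail of each of these helpers is identical: open a transaction, force-set the bookmark to the new changeset, and commit through the compat layer. Roughly as follows; the force_set arguments are elided by the hunks above, so the update-reason variant shown here is an assumption:

let mut txn = repo.update_bookmark_transaction(ctx.clone());
txn.force_set(
    &bookmark,
    bcs_id,
    // Assumed variant and fields; the hunks only show the closing brace.
    BookmarkUpdateReason::TestMove {
        bundle_replay_data: None,
    },
)
.unwrap();
txn.commit().compat().await.unwrap();
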
fn sync_causes_conflict(fb: FacebookInit) {
async fn sync_causes_conflict(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let megarepo = blobrepo_factory::new_memblob_empty_with_id(None, RepositoryId::new(1)).unwrap();
let linear = linear::getrepo(fb);
let linear = linear::getrepo(fb).await;
let linear_repos = CommitSyncRepos::SmallToLarge {
small_repo: linear.clone(),
large_repo: megarepo.clone(),
@ -355,39 +369,45 @@ fn sync_causes_conflict(fb: FacebookInit) {
let linear_config = CommitSyncer::new(mapping.clone(), linear_repos);
let master_file_config = CommitSyncer::new(mapping, master_file_repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
let linear_base_bcs_id = get_bcs_id(
ctx.clone(),
&linear_config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
rebase_root_on_master(ctx.clone(), &linear_config, linear_base_bcs_id).unwrap();
)
.await;
rebase_root_on_master(ctx.clone(), &linear_config, linear_base_bcs_id)
.await
.unwrap();
// Change master_file
update_master_file(ctx.clone(), &megarepo);
update_master_file(ctx.clone(), &megarepo).await;
// Finally, sync another commit over master_file - this should fail
let linear_second_bcs_id = get_bcs_id(
ctx.clone(),
&master_file_config,
HgChangesetId::from_str("3e0e761030db6e479a7fb58b12881883f9f8c63f").unwrap(),
);
)
.await;
let megarepo_fail_bcs_id =
sync_to_master(ctx.clone(), &master_file_config, linear_second_bcs_id);
sync_to_master(ctx.clone(), &master_file_config, linear_second_bcs_id).await;
// Confirm the syncing failed
assert!(
megarepo_fail_bcs_id.is_err(),
format!("{:?}", megarepo_fail_bcs_id)
);
check_mapping(ctx.clone(), &master_file_config, linear_second_bcs_id, None);
check_mapping(ctx.clone(), &master_file_config, linear_second_bcs_id, None).await;
}
#[fbinit::test]
fn test_sync_causes_conflict(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_causes_conflict(fb))
async_unit::tokio_unit_test(async move {
sync_causes_conflict(fb).await;
})
}
fn prepare_repos_and_mapping() -> Result<(BlobRepo, BlobRepo, SqlSyncedCommitMapping), Error> {
@ -404,10 +424,10 @@ fn prepare_repos_and_mapping() -> Result<(BlobRepo, BlobRepo, SqlSyncedCommitMap
Ok((small_repo, megarepo, mapping))
}
fn sync_empty_commit(fb: FacebookInit) {
async fn sync_empty_commit(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let (small_repo, megarepo, mapping) = prepare_repos_and_mapping().unwrap();
linear::initrepo(fb, &small_repo);
linear::initrepo(fb, &small_repo).await;
let linear = small_repo;
let lts_repos = CommitSyncRepos::LargeToSmall {
small_repo: linear.clone(),
@ -429,7 +449,7 @@ fn sync_empty_commit(fb: FacebookInit) {
let lts_config = CommitSyncer::new(mapping.clone(), lts_repos);
let stl_config = CommitSyncer::new(mapping, stl_repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
// As this is the first commit from linear, it'll rewrite cleanly
@ -437,13 +457,17 @@ fn sync_empty_commit(fb: FacebookInit) {
ctx.clone(),
&stl_config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id).unwrap();
)
.await;
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id)
.await
.unwrap();
// Sync an empty commit back to linear
let megarepo_empty_bcs_id = create_empty_commit(ctx.clone(), &megarepo);
let linear_empty_bcs_id =
sync_to_master(ctx.clone(), &lts_config, megarepo_empty_bcs_id).unwrap();
let megarepo_empty_bcs_id = create_empty_commit(ctx.clone(), &megarepo).await;
let linear_empty_bcs_id = sync_to_master(ctx.clone(), &lts_config, megarepo_empty_bcs_id)
.await
.unwrap();
let expected_bcs_id =
ChangesetId::from_str("dad900d07c885c21d4361a11590c220cc65c287d52fe1e0f4df61242c7c03f07")
@ -454,15 +478,18 @@ fn sync_empty_commit(fb: FacebookInit) {
&lts_config,
megarepo_empty_bcs_id,
linear_empty_bcs_id,
);
)
.await;
}
#[fbinit::test]
fn test_sync_empty_commit(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_empty_commit(fb))
async_unit::tokio_unit_test(async move {
sync_empty_commit(fb).await;
})
}
fn megarepo_copy_file(
async fn megarepo_copy_file(
ctx: CoreContext,
repo: &BlobRepo,
linear_bcs_id: ChangesetId,
@ -470,7 +497,8 @@ fn megarepo_copy_file(
let bookmark = BookmarkName::new("master").unwrap();
let p1 = repo
.get_bonsai_bookmark(ctx.clone(), &bookmark)
.wait()
.compat()
.await
.unwrap()
.unwrap();
@ -478,7 +506,8 @@ fn megarepo_copy_file(
let content_id = content
.into_blob()
.store(ctx.clone(), repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(
content_id,
@ -502,7 +531,8 @@ fn megarepo_copy_file(
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx.clone(), repo.clone())
.wait()
.compat()
.await
.unwrap();
let mut txn = repo.update_bookmark_transaction(ctx.clone());
@ -514,14 +544,14 @@ fn megarepo_copy_file(
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
bcs_id
}
fn sync_copyinfo(fb: FacebookInit) {
async fn sync_copyinfo(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let (small_repo, megarepo, mapping) = prepare_repos_and_mapping().unwrap();
linear::initrepo(fb, &small_repo);
linear::initrepo(fb, &small_repo).await;
let linear = small_repo;
let lts_repos = CommitSyncRepos::LargeToSmall {
small_repo: linear.clone(),
@ -543,7 +573,7 @@ fn sync_copyinfo(fb: FacebookInit) {
let stl_config = CommitSyncer::new(mapping.clone(), stl_repos);
let lts_config = CommitSyncer::new(mapping, lts_repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
// As this is the first commit from linear, it'll rewrite cleanly
@ -551,24 +581,29 @@ fn sync_copyinfo(fb: FacebookInit) {
ctx.clone(),
&stl_config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
)
.await;
let megarepo_linear_base_bcs_id =
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id).unwrap();
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id)
.await
.unwrap();
// Fetch master from linear - the pushrebase in a remap will change copyinfo
let linear_master_bcs_id = {
let bookmark = BookmarkName::new("master").unwrap();
linear
.get_bonsai_bookmark(ctx.clone(), &bookmark)
.wait()
.compat()
.await
.unwrap()
.unwrap()
};
let megarepo_copyinfo_commit =
megarepo_copy_file(ctx.clone(), &megarepo, megarepo_linear_base_bcs_id);
let linear_copyinfo_bcs_id =
sync_to_master(ctx.clone(), &lts_config, megarepo_copyinfo_commit).unwrap();
megarepo_copy_file(ctx.clone(), &megarepo, megarepo_linear_base_bcs_id).await;
let linear_copyinfo_bcs_id = sync_to_master(ctx.clone(), &lts_config, megarepo_copyinfo_commit)
.await
.unwrap();
let expected_bcs_id =
ChangesetId::from_str("68e495f850e16cd4a6b372d27f18f59931139242b5097c137afa1d738769cc60")
@ -579,13 +614,15 @@ fn sync_copyinfo(fb: FacebookInit) {
&lts_config,
megarepo_copyinfo_commit,
linear_copyinfo_bcs_id,
);
)
.await;
// Fetch commit from linear by its new ID, and confirm that it has the correct copyinfo
let linear_bcs = linear_copyinfo_bcs_id
.unwrap()
.load(ctx.clone(), linear.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_changes: Vec<_> = linear_bcs.file_changes().collect();
@ -599,13 +636,15 @@ fn sync_copyinfo(fb: FacebookInit) {
#[fbinit::test]
fn test_sync_copyinfo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_copyinfo(fb))
async_unit::tokio_unit_test(async move {
sync_copyinfo(fb).await;
})
}
fn sync_remap_failure(fb: FacebookInit) {
async fn sync_remap_failure(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let megarepo = blobrepo_factory::new_memblob_empty_with_id(None, RepositoryId::new(1)).unwrap();
let linear = linear::getrepo(fb);
let linear = linear::getrepo(fb).await;
let fail_repos = CommitSyncRepos::LargeToSmall {
small_repo: linear.clone(),
large_repo: megarepo.clone(),
@ -647,7 +686,7 @@ fn sync_remap_failure(fb: FacebookInit) {
let stl_config = CommitSyncer::new(mapping.clone(), stl_repos);
let copyfrom_fail_config = CommitSyncer::new(mapping, copyfrom_fail_repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
// As this is the first commit from linear, it'll rewrite cleanly
@ -655,24 +694,29 @@ fn sync_remap_failure(fb: FacebookInit) {
ctx.clone(),
&stl_config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
)
.await;
let megarepo_linear_base_bcs_id =
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id).unwrap();
rebase_root_on_master(ctx.clone(), &stl_config, linear_base_bcs_id)
.await
.unwrap();
let megarepo_copyinfo_commit =
megarepo_copy_file(ctx.clone(), &megarepo, megarepo_linear_base_bcs_id);
megarepo_copy_file(ctx.clone(), &megarepo, megarepo_linear_base_bcs_id).await;
let always_fail = sync_to_master(ctx.clone(), &fail_config, megarepo_copyinfo_commit);
let always_fail = sync_to_master(ctx.clone(), &fail_config, megarepo_copyinfo_commit).await;
assert!(always_fail.is_err());
let copyfrom_fail =
sync_to_master(ctx.clone(), &copyfrom_fail_config, megarepo_copyinfo_commit);
sync_to_master(ctx.clone(), &copyfrom_fail_config, megarepo_copyinfo_commit).await;
assert!(copyfrom_fail.is_err(), "{:#?}", copyfrom_fail);
}
#[fbinit::test]
fn test_sync_remap_failure(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_remap_failure(fb))
async_unit::tokio_unit_test(async move {
sync_remap_failure(fb).await;
})
}
fn maybe_replace_prefix(
@ -691,10 +735,10 @@ fn maybe_replace_prefix(
}
}
fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
async fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let (small_repo, megarepo, mapping) = prepare_repos_and_mapping().unwrap();
many_files_dirs::initrepo(fb, &small_repo);
many_files_dirs::initrepo(fb, &small_repo).await;
let repo = small_repo;
// Note: this mover relies on non-prefix-free path map, which may
@ -740,21 +784,22 @@ fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
let commit_syncer = CommitSyncer::new(mapping.clone(), commit_sync_repos);
let megarepo_initial_bcs_id = create_initial_commit(ctx.clone(), &megarepo);
let megarepo_initial_bcs_id = create_initial_commit(ctx.clone(), &megarepo).await;
// Insert a fake mapping entry, so that syncs succeed
let repo_initial_bcs_id = get_bcs_id(
ctx.clone(),
&commit_syncer,
HgChangesetId::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(),
);
)
.await;
let entry = SyncedCommitMappingEntry::new(
megarepo.get_repoid(),
megarepo_initial_bcs_id,
repo.get_repoid(),
repo_initial_bcs_id,
);
mapping.add(ctx.clone(), entry).wait()?;
mapping.add(ctx.clone(), entry).compat().await?;
// d261bc7900818dea7c86935b3fb17a33b2e3a6b4 from "many_files_dirs" should sync cleanly
// on top of master. Among others, it introduces the following files:
@ -765,9 +810,11 @@ fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
ctx.clone(),
&commit_syncer,
HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(),
);
)
.await;
sync_to_master(ctx.clone(), &commit_syncer, repo_base_bcs_id)
.await
.expect("Unexpectedly failed to rewrite 1")
.expect("Unexpectedly rewritten into nothingness");
@ -778,15 +825,18 @@ fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
ctx.clone(),
&commit_syncer,
HgChangesetId::from_str("051946ed218061e925fb120dac02634f9ad40ae2").unwrap(),
);
)
.await;
let megarepo_implicit_delete_bcs_id =
sync_to_master(ctx.clone(), &commit_syncer, repo_implicit_delete_bcs_id)
.await
.expect("Unexpectedly failed to rewrite 2")
.expect("Unexpectedly rewritten into nothingness");
let megarepo_implicit_delete_bcs = megarepo_implicit_delete_bcs_id
.load(ctx.clone(), megarepo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_changes: BTreeMap<MPath, _> = megarepo_implicit_delete_bcs
.file_changes()
@ -808,14 +858,17 @@ fn sync_implicit_deletes(fb: FacebookInit) -> Result<(), Error> {
#[fbinit::test]
fn test_sync_implicit_deletes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_implicit_deletes(fb).unwrap())
async_unit::tokio_unit_test(async move {
sync_implicit_deletes(fb).await.unwrap();
})
}
fn update_linear_1_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
async fn update_linear_1_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bookmark = BookmarkName::new("master").unwrap();
let p1 = repo
.get_bonsai_bookmark(ctx.clone(), &bookmark)
.wait()
.compat()
.await
.unwrap()
.unwrap();
@ -823,7 +876,8 @@ fn update_linear_1_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let content_id = content
.into_blob()
.store(ctx.clone(), repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, 3, None);
@ -842,7 +896,8 @@ fn update_linear_1_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx.clone(), repo.clone())
.wait()
.compat()
.await
.unwrap();
let mut txn = repo.update_bookmark_transaction(ctx.clone());
@ -854,15 +909,15 @@ fn update_linear_1_file(ctx: CoreContext, repo: &BlobRepo) -> ChangesetId {
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
bcs_id
}
fn sync_parent_search(fb: FacebookInit) {
async fn sync_parent_search(fb: FacebookInit) {
let ctx = CoreContext::test_mock(fb);
let (small_repo, megarepo, mapping) = prepare_repos_and_mapping().unwrap();
linear::initrepo(fb, &small_repo);
linear::initrepo(fb, &small_repo).await;
let linear = small_repo;
let repos = CommitSyncRepos::SmallToLarge {
small_repo: linear.clone(),
@ -883,24 +938,31 @@ fn sync_parent_search(fb: FacebookInit) {
let config = CommitSyncer::new(mapping.clone(), repos);
let reverse_config = CommitSyncer::new(mapping, reverse_repos);
create_initial_commit(ctx.clone(), &megarepo);
create_initial_commit(ctx.clone(), &megarepo).await;
// Take 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536 from linear, and rewrite it as a child of master
let linear_base_bcs_id = get_bcs_id(
ctx.clone(),
&config,
HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
);
rebase_root_on_master(ctx.clone(), &config, linear_base_bcs_id).unwrap();
)
.await;
rebase_root_on_master(ctx.clone(), &config, linear_base_bcs_id)
.await
.unwrap();
// Change master_file
let master_file_cs_id = update_master_file(ctx.clone(), &megarepo);
sync_to_master(ctx.clone(), &reverse_config, master_file_cs_id).unwrap();
let master_file_cs_id = update_master_file(ctx.clone(), &megarepo).await;
sync_to_master(ctx.clone(), &reverse_config, master_file_cs_id)
.await
.unwrap();
// And change a file in linear
let new_commit = update_linear_1_file(ctx.clone(), &megarepo);
let new_commit = update_linear_1_file(ctx.clone(), &megarepo).await;
// Now sync it back to linear
let sync_success_bcs_id = sync_to_master(ctx.clone(), &reverse_config, new_commit).unwrap();
let sync_success_bcs_id = sync_to_master(ctx.clone(), &reverse_config, new_commit)
.await
.unwrap();
// Confirm the syncing succeeded
let expected_bcs_id =
@ -913,17 +975,21 @@ fn sync_parent_search(fb: FacebookInit) {
&reverse_config,
new_commit,
sync_success_bcs_id,
);
)
.await;
// And validate that the mapping is correct when looked at the other way round
check_mapping(
ctx.clone(),
&config,
sync_success_bcs_id.unwrap(),
Some(new_commit),
);
)
.await;
}
#[fbinit::test]
fn test_sync_parent_search(fb: FacebookInit) {
async_unit::tokio_unit_test(move || sync_parent_search(fb))
async_unit::tokio_unit_test(async move {
sync_parent_search(fb).await;
})
}


@ -17,7 +17,6 @@ use context::CoreContext;
use cross_repo_sync::{
rewrite_commit, update_mapping, upload_commits, CommitSyncRepos, CommitSyncer, Syncers,
};
use futures::Future;
use futures_preview::{compat::Future01CompatExt, FutureExt, TryFutureExt};
use maplit::hashmap;
use megarepolib::{common::ChangesetArgs, perform_move};
@ -37,7 +36,7 @@ use tests_utils::{bookmark, CreateCommitContext};
// Helper function that takes a root commit from source repo and rebases it on master bookmark
// in target repo
pub fn rebase_root_on_master<M>(
pub async fn rebase_root_on_master<M>(
ctx: CoreContext,
commit_syncer: &CommitSyncer<M>,
source_bcs_id: ChangesetId,
@ -48,7 +47,8 @@ where
let bookmark_name = BookmarkName::new("master").unwrap();
let source_bcs = source_bcs_id
.load(ctx.clone(), commit_syncer.get_source_repo().blobstore())
.wait()
.compat()
.await
.unwrap();
if !source_bcs.parents().collect::<Vec<_>>().is_empty() {
return Err(format_err!("not a root commit"));
@ -57,7 +57,8 @@ where
let maybe_bookmark_val = commit_syncer
.get_target_repo()
.get_bonsai_bookmark(ctx.clone(), &bookmark_name)
.wait()?;
.compat()
.await?;
let source_repo = commit_syncer.get_source_repo();
let target_repo = commit_syncer.get_target_repo();
@ -80,7 +81,8 @@ where
}
.boxed()
.compat()
.wait()?;
.compat()
.await?;
let mut target_bcs_mut = maybe_rewritten.unwrap();
target_bcs_mut.parents = vec![bookmark_val];
@ -99,7 +101,8 @@ where
}
.boxed()
.compat()
.wait()?;
.compat()
.await?;
let mut txn = target_repo.update_bookmark_transaction(ctx.clone());
txn.force_set(
@ -110,7 +113,7 @@ where
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
let entry = SyncedCommitMappingEntry::new(
target_repo.get_repoid(),
@ -118,7 +121,11 @@ where
source_repo.get_repoid(),
source_bcs_id,
);
commit_syncer.get_mapping().add(ctx.clone(), entry).wait()?;
commit_syncer
.get_mapping()
.add(ctx.clone(), entry)
.compat()
.await?;
Ok(target_bcs.get_changeset_id())
}
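
Note the double .compat() the conversion leaves behind here: .boxed().compat() had bridged the async block out to futures 0.1 so it could be .wait()ed, and the mechanical rewrite then bolts a second .compat() on to come back to a std future. A fully async version would drop the round trip entirely. A sketch, with a hypothetical do_rewrite async fn standing in for the rewrite call:

// As converted: std -> 0.1 -> std, purely an artifact of the migration.
let maybe_rewritten = async { do_rewrite().await }
    .boxed()
    .compat() // std future -> futures 0.1
    .compat() // futures 0.1 -> std future
    .await?;

// Equivalent once the bridge is no longer needed:
let maybe_rewritten = do_rewrite().await?;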


@ -10,7 +10,7 @@
#![deny(warnings)]
use fbinit::FacebookInit;
use futures::Future;
use futures_preview::compat::Future01CompatExt;
use context::CoreContext;
use mononoke_types_mocks::changesetid as bonsai;
@ -20,7 +20,7 @@ use synced_commit_mapping::{
SyncedCommitMappingEntry, WorkingCopyEquivalence,
};
fn add_and_get<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
async fn add_and_get<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let entry =
SyncedCommitMappingEntry::new(REPO_ZERO, bonsai::ONES_CSID, REPO_ONE, bonsai::TWOS_CSID);
@ -28,20 +28,23 @@ fn add_and_get<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
true,
mapping
.add(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Adding new entry failed")
);
assert_eq!(
false,
mapping
.add(ctx.clone(), entry)
.wait()
.compat()
.await
.expect("Adding same entry failed")
);
let res = mapping
.get_equivalent_working_copy(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID, REPO_ONE)
.wait()
.compat()
.await
.expect("get equivalent wc failed, should succeed");
assert_eq!(
@ -56,13 +59,15 @@ fn add_and_get<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
true,
mapping
.add(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Adding new entry failed")
);
let res = mapping
.get_equivalent_working_copy(ctx.clone(), REPO_ZERO, bonsai::THREES_CSID, REPO_ONE)
.wait()
.compat()
.await
.expect("get equivalent wc failed, should succeed");
assert_eq!(
@ -72,30 +77,34 @@ fn add_and_get<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
let result = mapping
.get(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID, REPO_ONE)
.wait()
.compat()
.await
.expect("Get failed");
assert_eq!(result, Some(bonsai::TWOS_CSID));
let result = mapping
.get(ctx.clone(), REPO_ONE, bonsai::TWOS_CSID, REPO_ZERO)
.wait()
.compat()
.await
.expect("Get failed");
assert_eq!(result, Some(bonsai::ONES_CSID));
}
fn missing<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
async fn missing<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let result = mapping
.get(ctx.clone(), REPO_ONE, bonsai::TWOS_CSID, REPO_ZERO)
.wait()
.compat()
.await
.expect("Failed to fetch missing changeset (should succeed with None instead)");
assert_eq!(result, None);
}
fn equivalent_working_copy<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
async fn equivalent_working_copy<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M) {
let ctx = CoreContext::test_mock(fb);
let result = mapping
.get_equivalent_working_copy(ctx.clone(), REPO_ONE, bonsai::TWOS_CSID, REPO_ZERO)
.wait()
.compat()
.await
.expect("Failed to fetch equivalent working copy (should succeed with None instead)");
assert_eq!(result, None);
@ -107,19 +116,22 @@ fn equivalent_working_copy<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M)
};
let result = mapping
.insert_equivalent_working_copy(ctx.clone(), entry.clone())
.wait()
.compat()
.await
.expect("Failed to insert working copy");
assert_eq!(result, true);
let result = mapping
.insert_equivalent_working_copy(ctx.clone(), entry)
.wait()
.compat()
.await
.expect("Failed to insert working copy");
assert_eq!(result, false);
let res = mapping
.get_equivalent_working_copy(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID, REPO_ONE)
.wait()
.compat()
.await
.expect("get equivalent wc failed, should succeed");
assert_eq!(
@ -136,13 +148,15 @@ fn equivalent_working_copy<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M)
let result = mapping
.insert_equivalent_working_copy(ctx.clone(), null_entry)
.wait()
.compat()
.await
.expect("Failed to insert working copy");
assert_eq!(result, true);
let res = mapping
.get_equivalent_working_copy(ctx.clone(), REPO_ZERO, bonsai::THREES_CSID, REPO_ONE)
.wait()
.compat()
.await
.expect("get equivalent wc failed, should succeed");
assert_eq!(res, Some(WorkingCopyEquivalence::NoWorkingCopy));
@ -155,27 +169,28 @@ fn equivalent_working_copy<M: SyncedCommitMapping>(fb: FacebookInit, mapping: M)
};
assert!(mapping
.insert_equivalent_working_copy(ctx.clone(), should_fail)
.wait()
.compat()
.await
.is_err());
}
#[fbinit::test]
fn test_add_and_get(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
add_and_get(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap())
async_unit::tokio_unit_test(async move {
add_and_get(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap()).await;
});
}
#[fbinit::test]
fn test_missing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
missing(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap())
async_unit::tokio_unit_test(async move {
missing(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap()).await
});
}
#[fbinit::test]
fn test_equivalent_working_copy(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
equivalent_working_copy(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap())
async_unit::tokio_unit_test(async move {
equivalent_working_copy(fb, SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap()).await
});
}
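
A behavioural detail these tests pin down, unchanged by the conversion: add and insert_equivalent_working_copy report whether a row was actually written, so repeating an identical insert must yield true and then false rather than an error. In the new async style that contract reads roughly:

let entry =
    SyncedCommitMappingEntry::new(REPO_ZERO, bonsai::ONES_CSID, REPO_ONE, bonsai::TWOS_CSID);
assert!(mapping.add(ctx.clone(), entry.clone()).compat().await.unwrap());
assert!(!mapping.add(ctx.clone(), entry).compat().await.unwrap());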


@ -6,6 +6,7 @@
*/
#![deny(warnings)]
#![type_length_limit = "1430018"]
mod derived;
pub use derived::{fetch_file_full_content, BlameRoot, BlameRootMapping};


@ -10,7 +10,7 @@ use anyhow::Error;
use bytes::Bytes;
use context::CoreContext;
use fbinit::FacebookInit;
use futures::Future;
use futures_preview::compat::Future01CompatExt;
use maplit::{btreemap, hashmap};
use mononoke_types::{Blame, ChangesetId, MPath};
use std::collections::HashMap;
@ -137,7 +137,7 @@ fn test_blame(fb: FacebookInit) -> Result<(), Error> {
// \ /
// 4
//
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = blobrepo_factory::new_memblob_empty(None)?;
@ -153,27 +153,31 @@ fn test_blame(fb: FacebookInit) -> Result<(), Error> {
"_f2" => Some(F2[0]),
},
repo.clone(),
),
);
)
.await,
)
.await;
let mut c1_changes =
store_files(ctx.clone(), btreemap! {"f0" => Some(F0[1])}, repo.clone());
store_files(ctx.clone(), btreemap! {"f0" => Some(F0[1])}, repo.clone()).await;
let (f2_path, f2_change) = store_rename(
ctx.clone(),
(MPath::new("_f2")?, c0),
"f2",
F2[1],
repo.clone(),
);
)
.await;
c1_changes.insert(f2_path, f2_change);
let c1 = create_commit(ctx.clone(), repo.clone(), vec![c0], c1_changes);
let c1 = create_commit(ctx.clone(), repo.clone(), vec![c0], c1_changes).await;
let c2 = create_commit(
ctx.clone(),
repo.clone(),
vec![c1],
store_files(ctx.clone(), btreemap! {"f0" => Some(F0[2])}, repo.clone()),
);
store_files(ctx.clone(), btreemap! {"f0" => Some(F0[2])}, repo.clone()).await,
)
.await;
let c3 = create_commit(
ctx.clone(),
@ -187,8 +191,10 @@ fn test_blame(fb: FacebookInit) -> Result<(), Error> {
"f2" => Some(F2[2]),
},
repo.clone(),
),
);
)
.await,
)
.await;
let c4 = create_commit(
ctx.clone(),
@ -202,8 +208,10 @@ fn test_blame(fb: FacebookInit) -> Result<(), Error> {
"f2" => Some(F2[3]),
},
repo.clone(),
),
);
)
.await,
)
.await;
let names = hashmap! {
c0 => "c0",
@ -213,16 +221,19 @@ fn test_blame(fb: FacebookInit) -> Result<(), Error> {
c4 => "c4",
};
let (content, blame) =
fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f0")?).wait()?;
let (content, blame) = fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f0")?)
.compat()
.await?;
assert_eq!(annotate(content, blame, &names)?, F0_AT_C4);
let (content, blame) =
fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f1")?).wait()?;
let (content, blame) = fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f1")?)
.compat()
.await?;
assert_eq!(annotate(content, blame, &names)?, F1_AT_C4);
let (content, blame) =
fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f2")?).wait()?;
let (content, blame) = fetch_blame(ctx.clone(), repo.clone(), c4, MPath::new("f2")?)
.compat()
.await?;
assert_eq!(annotate(content, blame, &names)?, F2_AT_C4);
Ok(())


@ -602,8 +602,8 @@ mod tests {
#[fbinit::test]
fn many_file_dirs_test(fb: FacebookInit) {
let repo = many_files_dirs::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(many_files_dirs::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let mf_id_1 = {
@ -699,13 +699,10 @@ mod tests {
.collect::<Vec<_>>();
let parent_mf_ids = parent_ids.into_iter().map(|(_, mf)| mf).collect::<Vec<_>>();
let bcs = create_bonsai_changeset(
ctx.fb,
repo.clone(),
&mut runtime,
store_files(ctx.clone(), file_changes, repo.clone()),
parent_bcs_ids,
);
let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs =
create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, parent_bcs_ids);
derive_manifest(ctx.clone(), repo.clone(), &mut runtime, bcs, parent_mf_ids)
}
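
Where a test otherwise still runs on the compat runtime, the now-async store_files is resolved eagerly with block_on_std and only its output is passed along, which keeps helpers such as create_bonsai_changeset synchronous instead of threading the runtime deeper. The reshaped call site, as above:

let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs = create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, parent_bcs_ids);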


@ -187,7 +187,7 @@ mod tests {
BonsaiChangeset, BonsaiChangesetMut, ChangesetId, DateTime, FileChange, MPath,
};
use std::collections::BTreeMap;
use tokio::runtime::Runtime;
use tokio_compat::runtime::Runtime;
#[fbinit::test]
fn test_find_entries(fb: FacebookInit) {
@ -208,13 +208,8 @@ mod tests {
"dir-2/f-4" => Some("6\n"),
"dir-2/f-5" => Some("7\n"),
};
let bcs = create_bonsai_changeset(
ctx.fb,
repo.clone(),
&mut runtime,
store_files(ctx.clone(), file_changes, repo.clone()),
vec![],
);
let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs = create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, vec![]);
let bcs_id = bcs.get_changeset_id();
let mf_id = derive_manifest(ctx.clone(), repo.clone(), &mut runtime, bcs, vec![]);
@ -231,13 +226,9 @@ mod tests {
"dir-2/sub/f-3" => None,
"dir-2/f-4" => None,
};
let bcs = create_bonsai_changeset(
ctx.fb,
repo.clone(),
&mut runtime,
store_files(ctx.clone(), file_changes, repo.clone()),
vec![bcs_id_1],
);
let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs =
create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, vec![bcs_id_1]);
let _bcs_id = bcs.get_changeset_id();
let mf_id =
@ -335,13 +326,8 @@ mod tests {
"dir/sub/f-3" => Some("3\n"),
"dir/f-2" => Some("4\n"),
};
let bcs = create_bonsai_changeset(
ctx.fb,
repo.clone(),
&mut runtime,
store_files(ctx.clone(), file_changes, repo.clone()),
vec![],
);
let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs = create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, vec![]);
let bcs_id = bcs.get_changeset_id();
let mf_id = derive_manifest(ctx.clone(), repo.clone(), &mut runtime, bcs, vec![]);
@ -354,13 +340,9 @@ mod tests {
"dir/sub/f-1" => None,
"dir/sub/f-3" => None,
};
let bcs = create_bonsai_changeset(
ctx.fb,
repo.clone(),
&mut runtime,
store_files(ctx.clone(), file_changes, repo.clone()),
vec![bcs_id_1],
);
let files = runtime.block_on_std(store_files(ctx.clone(), file_changes, repo.clone()));
let bcs =
create_bonsai_changeset(ctx.fb, repo.clone(), &mut runtime, files, vec![bcs_id_1]);
let _bcs_id = bcs.get_changeset_id();
let mf_id =


@ -242,9 +242,9 @@ mod test {
#[fbinit::test]
fn fetch_flattened_simple(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb);
let mut rt = Runtime::new().unwrap();
let ctx = CoreContext::test_mock(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let mut d = VecDeque::new();
d.push_back((ONES_CSID, vec![]));
let blobstore = Arc::new(repo.get_blobstore());
@ -264,9 +264,9 @@ mod test {
#[fbinit::test]
fn fetch_flattened_prepend(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb);
let mut rt = Runtime::new().unwrap();
let ctx = CoreContext::test_mock(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let mut d = VecDeque::new();
d.push_back((ONES_CSID, vec![]));
let blobstore = Arc::new(repo.get_blobstore());


@ -210,12 +210,12 @@ mod tests {
use revset::AncestorsNodeStream;
use std::collections::{BTreeMap, HashSet, VecDeque};
use std::str::FromStr;
use tokio::runtime::Runtime;
use tokio_compat::runtime::Runtime;
#[fbinit::test]
fn test_derive_single_empty_commit_no_parents(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let bcs = create_bonsai_changeset(vec![]);
let bcs_id = bcs.get_changeset_id();
@ -237,7 +237,7 @@ mod tests {
#[fbinit::test]
fn test_derive_single_commit_no_parents(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
// This is the initial diff with no parents
@ -289,7 +289,7 @@ mod tests {
#[fbinit::test]
fn test_derive_linear(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let hg_cs_id = HgChangesetId::from_str("79a13814c5ce7330173ec04d279bf95ab3f652fb").unwrap();
@ -319,7 +319,7 @@ mod tests {
#[fbinit::test]
fn test_derive_overflow(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let mut bonsais = vec![];
@ -327,11 +327,11 @@ mod tests {
for i in 1..max_entries_in_fastlog_batch() {
let filename = String::from("1");
let content = format!("{}", i);
let stored_files = store_files(
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { filename.as_str() => Some(content.as_str()) },
repo.clone(),
);
));
let bcs = create_bonsai_changeset_with_files(parents, stored_files);
let bcs_id = bcs.get_changeset_id();
@ -349,7 +349,7 @@ mod tests {
#[fbinit::test]
fn test_random_repo(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let mut rng = XorShiftRng::seed_from_u64(0); // reproducible Rng
@ -373,7 +373,7 @@ mod tests {
#[fbinit::test]
fn test_derive_empty_commits(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let mut bonsais = vec![];
@ -395,7 +395,7 @@ mod tests {
#[fbinit::test]
fn test_find_intersection_of_diffs_unodes_linear(fb: FacebookInit) -> Result<(), Error> {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
// This commit creates file "1" and "files"
@ -447,7 +447,7 @@ mod tests {
expected: Vec<String>,
) -> Result<(), Error> {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let mut bonsais = vec![];
@ -455,14 +455,15 @@ mod tests {
for (i, p) in parent_files.into_iter().enumerate() {
println!("parent {}, {:?} ", i, p);
let stored_files = store_files(ctx.clone(), p, repo.clone());
let stored_files = rt.block_on_std(store_files(ctx.clone(), p, repo.clone()));
let bcs = create_bonsai_changeset_with_files(vec![], stored_files);
parents.push(bcs.get_changeset_id());
bonsais.push(bcs);
}
println!("merge {:?} ", merge_files);
let merge_stored_files = store_files(ctx.clone(), merge_files, repo.clone());
let merge_stored_files =
rt.block_on_std(store_files(ctx.clone(), merge_files, repo.clone()));
let bcs = create_bonsai_changeset_with_files(parents.clone(), merge_stored_files);
let merge_bcs_id = bcs.get_changeset_id();
@ -573,7 +574,7 @@ mod tests {
let ctx = CoreContext::test_mock(fb);
{
let repo = merge_uneven::getrepo(fb);
let repo = rt.block_on_std(merge_uneven::getrepo(fb));
let all_commits = rt.block_on(all_commits(ctx.clone(), repo.clone()).collect())?;
for (bcs_id, _hg_cs_id) in all_commits {
@ -582,7 +583,7 @@ mod tests {
}
{
let repo = merge_even::getrepo(fb);
let repo = rt.block_on_std(merge_even::getrepo(fb));
let all_commits = rt.block_on(all_commits(ctx.clone(), repo.clone()).collect())?;
for (bcs_id, _hg_cs_id) in all_commits {
@ -591,7 +592,7 @@ mod tests {
}
{
let repo = unshared_merge_even::getrepo(fb);
let repo = rt.block_on_std(unshared_merge_even::getrepo(fb));
let all_commits = rt.block_on(all_commits(ctx.clone(), repo.clone()).collect())?;
for (bcs_id, _hg_cs_id) in all_commits {
@ -600,7 +601,7 @@ mod tests {
}
{
let repo = unshared_merge_uneven::getrepo(fb);
let repo = rt.block_on_std(unshared_merge_uneven::getrepo(fb));
let all_commits = rt.block_on(all_commits(ctx.clone(), repo.clone()).collect())?;
for (bcs_id, _hg_cs_id) in all_commits {
@ -614,7 +615,7 @@ mod tests {
#[fbinit::test]
fn test_bfs_order(fb: FacebookInit) -> Result<(), Error> {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
// E
@ -644,20 +645,29 @@ mod tests {
println!("g = {}", g.get_changeset_id());
bonsais.push(g.clone());
let stored_files =
store_files(ctx.clone(), btreemap! { "file" => Some("f") }, repo.clone());
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { "file" => Some("f") },
repo.clone(),
));
let f = create_bonsai_changeset_with_files(vec![g.get_changeset_id()], stored_files);
println!("f = {}", f.get_changeset_id());
bonsais.push(f.clone());
let stored_files =
store_files(ctx.clone(), btreemap! { "file" => Some("d") }, repo.clone());
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { "file" => Some("d") },
repo.clone(),
));
let d = create_bonsai_changeset_with_files(vec![f.get_changeset_id()], stored_files);
println!("d = {}", d.get_changeset_id());
bonsais.push(d.clone());
let stored_files =
store_files(ctx.clone(), btreemap! { "file" => Some("e") }, repo.clone());
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { "file" => Some("e") },
repo.clone(),
));
let e = create_bonsai_changeset_with_files(
vec![d.get_changeset_id(), c.get_changeset_id()],
stored_files,


@ -340,7 +340,7 @@ mod test {
use maplit::btreemap;
use mononoke_types::{ChangesetId, FileUnodeId, MPath, ManifestUnodeId};
use std::collections::{HashMap, HashSet, VecDeque};
use tokio::runtime::Runtime;
use tokio_compat::runtime::Runtime;
#[fbinit::test]
fn test_list_linear_history(fb: FacebookInit) {
@ -358,11 +358,11 @@ mod test {
for i in 1..300 {
let file = if i % 2 == 1 { "2" } else { filename };
let content = format!("{}", i);
let stored_files = store_files(
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { file => Some(content.as_str()) },
repo.clone(),
);
));
let bcs = create_bonsai_changeset_with_files(parents, stored_files);
let bcs_id = bcs.get_changeset_id();
@ -435,11 +435,11 @@ mod test {
let mut create_branch = |branch, number, mut parents: Vec<_>| {
for i in 0..number {
let content = format!("{} - {}", branch, i);
let stored_files = store_files(
let stored_files = rt.block_on_std(store_files(
ctx.clone(),
btreemap! { filename => Some(content.as_str()) },
repo.clone(),
);
));
let bcs = create_bonsai_changeset_with_files(parents.clone(), stored_files);
let bcs_id = bcs.get_changeset_id();
@ -520,44 +520,52 @@ mod test {
let filepath = path(filename);
let create_changeset = |content: String, parents: Vec<_>| {
let stored_files = store_files(
ctx.clone(),
btreemap! { filename => Some(content.as_str()) },
repo.clone(),
);
let ctx = &ctx;
let repo = &repo;
async move {
let stored_files = store_files(
ctx.clone(),
btreemap! { filename => Some(content.as_str()) },
repo.clone(),
)
.await;
create_bonsai_changeset_with_files(parents, stored_files)
create_bonsai_changeset_with_files(parents, stored_files)
}
};
let mut bonsais = vec![];
let mut expected = vec![];
let root = create_changeset("root".to_string(), vec![]);
let root = rt.block_on_std(create_changeset("root".to_string(), vec![]));
let root_id = root.get_changeset_id();
bonsais.push(root);
expected.push(root_id.clone());
let mut create_diamond = |number, parents: Vec<_>| {
// bottom
let bcs = create_changeset(format!("B - {}", number), parents.clone());
let bcs = rt.block_on_std(create_changeset(format!("B - {}", number), parents.clone()));
let bottom_id = bcs.get_changeset_id();
bonsais.push(bcs);
expected.push(bottom_id.clone());
// right
let bcs = create_changeset(format!("R - {}", number), vec![bottom_id]);
let bcs = rt.block_on_std(create_changeset(format!("R - {}", number), vec![bottom_id]));
let right_id = bcs.get_changeset_id();
bonsais.push(bcs);
expected.push(right_id.clone());
// left
let bcs = create_changeset(format!("L - {}", number), vec![bottom_id]);
let bcs = rt.block_on_std(create_changeset(format!("L - {}", number), vec![bottom_id]));
let left_id = bcs.get_changeset_id();
bonsais.push(bcs);
expected.push(left_id.clone());
// up
let bcs = create_changeset(format!("U - {}", number), vec![left_id, right_id]);
let bcs = rt.block_on_std(create_changeset(
format!("U - {}", number),
vec![left_id, right_id],
));
let up_id = bcs.get_changeset_id();
bonsais.push(bcs);
expected.push(up_id.clone());
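
The create_changeset closure above is the trickiest shape in this batch: a closure that returns an async block. Re-borrowing ctx and repo as shadowed references before the async move lets the block own the content and parents it was given while the closure stays reusable across calls. The skeleton, as used in the hunk:

let create_changeset = |content: String, parents: Vec<_>| {
    let ctx = &ctx; // shadow by reference so the async block
    let repo = &repo; // borrows these instead of consuming them
    async move {
        let stored_files = store_files(
            ctx.clone(),
            btreemap! { filename => Some(content.as_str()) },
            repo.clone(),
        )
        .await;
        create_bonsai_changeset_with_files(parents, stored_files)
    }
};

// Each call yields a future the compat runtime can resolve:
let root = rt.block_on_std(create_changeset("root".to_string(), vec![]));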


@ -405,8 +405,8 @@ mod test {
#[fbinit::test]
fn flat_linear_test(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
let parent_fsnode_id = {
@ -521,8 +521,8 @@ mod test {
#[fbinit::test]
fn nested_directories_test(fb: FacebookInit) {
let repo = many_files_dirs::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(many_files_dirs::getrepo(fb));
let ctx = CoreContext::test_mock(fb);


@ -154,6 +154,7 @@ mod test {
branch_even, branch_uneven, branch_wide, linear, many_diamonds, many_files_dirs,
merge_even, merge_uneven, unshared_merge_even, unshared_merge_uneven,
};
use futures_preview::future::Future as NewFuture;
use manifest::Entry;
use mercurial_types::{HgChangesetId, HgManifestId};
use revset::AncestorsNodeStream;
@ -230,9 +231,14 @@ mod test {
.flatten_stream()
}
fn verify_repo(fb: FacebookInit, repo: BlobRepo, runtime: &mut Runtime) {
fn verify_repo<F>(fb: FacebookInit, repo: F, runtime: &mut Runtime)
where
F: NewFuture<Output = BlobRepo>,
{
let ctx = CoreContext::test_mock(fb);
let repo = runtime.block_on_std(repo);
runtime
.block_on(
all_commits(ctx.clone(), repo.clone())
@ -251,8 +257,7 @@ mod test {
verify_repo(fb, branch_even::getrepo(fb), &mut runtime);
verify_repo(fb, branch_uneven::getrepo(fb), &mut runtime);
verify_repo(fb, branch_wide::getrepo(fb), &mut runtime);
let repo = many_diamonds::getrepo(fb, &mut runtime);
verify_repo(fb, repo, &mut runtime);
verify_repo(fb, many_diamonds::getrepo(fb), &mut runtime);
verify_repo(fb, many_files_dirs::getrepo(fb), &mut runtime);
verify_repo(fb, merge_even::getrepo(fb), &mut runtime);
verify_repo(fb, merge_uneven::getrepo(fb), &mut runtime);
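
Making verify_repo generic over `F: NewFuture<Output = BlobRepo>` lets every call site pass the getrepo future directly, so async fixtures such as many_diamonds no longer need a pre-built repo. A stripped-down sketch of that signature change, with a stub Repo type standing in for BlobRepo and std's Future in place of the futures_preview re-export:

    use futures_preview::executor::block_on;
    use std::future::Future;

    #[derive(Debug, PartialEq)]
    struct Repo(&'static str);

    fn verify_repo<F>(repo: F)
    where
        F: Future<Output = Repo>,
    {
        // The real helper resolves the future on the shared test runtime
        // (runtime.block_on_std) before running its existing checks.
        let repo = block_on(repo);
        assert_eq!(repo, Repo(repo.0));
    }

    fn main() {
        verify_repo(async { Repo("linear") });
        verify_repo(async { Repo("many_diamonds") });
    }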

View File

@ -523,7 +523,6 @@ mod test {
};
use futures_ext::BoxFuture;
use futures_preview::compat::Future01CompatExt;
use futures_util::future::{FutureExt as NewFutureExt, TryFutureExt};
use lazy_static::lazy_static;
use lock_ext::LockExt;
use maplit::hashmap;
@ -673,13 +672,15 @@ mod test {
.unwrap();
}
fn init_linear(fb: FacebookInit) -> BlobRepo {
linear::getrepo(fb).dangerous_override(|mut derived_data_config: DerivedDataConfig| {
derived_data_config
.derived_data_types
.insert(TestGenNum::NAME.to_string());
derived_data_config
})
async fn init_linear(fb: FacebookInit) -> BlobRepo {
linear::getrepo(fb).await.dangerous_override(
|mut derived_data_config: DerivedDataConfig| {
derived_data_config
.derived_data_types
.insert(TestGenNum::NAME.to_string());
derived_data_config
},
)
}
#[fbinit::test]
@ -687,41 +688,38 @@ mod test {
let ctx = CoreContext::test_mock(fb);
let mut runtime = Runtime::new()?;
let repo = init_linear(fb);
runtime.block_on(
async move {
// This is the parent of the root commit
// ...
// O <- 3e0e761030db6e479a7fb58b12881883f9f8c63f
// |
// O <- 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536
let after_root_cs_id =
resolve_cs_id(&ctx, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f").await?;
let root_cs_id =
resolve_cs_id(&ctx, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").await?;
runtime.block_on_std(async move {
let repo = init_linear(fb).await;
TestGenNum::derive(ctx.clone(), repo.clone(), after_root_cs_id)
.compat()
.await?;
// This is the parent of the root commit
// ...
// O <- 3e0e761030db6e479a7fb58b12881883f9f8c63f
// |
// O <- 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536
let after_root_cs_id =
resolve_cs_id(&ctx, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f").await?;
let root_cs_id =
resolve_cs_id(&ctx, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").await?;
// Delete root entry, and derive descendant of after_root changeset, make sure
// it doesn't fail
TestGenNum::mapping(&ctx, &repo).remove(&root_cs_id);
TestGenNum::derive(ctx.clone(), repo.clone(), after_root_cs_id)
.compat()
.await?;
TestGenNum::derive(ctx.clone(), repo.clone(), after_root_cs_id)
.compat()
.await?;
let third_cs_id =
resolve_cs_id(&ctx, &repo, "607314ef579bd2407752361ba1b0c1729d08b281").await?;
TestGenNum::derive(ctx.clone(), repo.clone(), third_cs_id)
.compat()
.await?;
// Delete root entry, and derive descendant of after_root changeset, make sure
// it doesn't fail
TestGenNum::mapping(&ctx, &repo).remove(&root_cs_id);
TestGenNum::derive(ctx.clone(), repo.clone(), after_root_cs_id)
.compat()
.await?;
Ok(())
}
.boxed()
.compat(),
)
let third_cs_id =
resolve_cs_id(&ctx, &repo, "607314ef579bd2407752361ba1b0c1729d08b281").await?;
TestGenNum::derive(ctx.clone(), repo.clone(), third_cs_id)
.compat()
.await?;
Ok(())
})
}
#[fbinit::test]
@ -729,45 +727,42 @@ mod test {
let ctx = CoreContext::test_mock(fb);
let mut runtime = Runtime::new()?;
let repo = init_linear(fb);
runtime.block_on(
async move {
// This is the parent of the root commit
// ...
// O <- 3e0e761030db6e479a7fb58b12881883f9f8c63f
// |
// O <- 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536
let after_root_cs_id =
resolve_cs_id(&ctx, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f").await?;
let root_cs_id =
resolve_cs_id(&ctx, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").await?;
runtime.block_on_std(async move {
let repo = init_linear(fb).await;
let underived = TestGenNum::count_underived(&ctx, &repo, &after_root_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 2);
// This is the parent of the root commit
// ...
// O <- 3e0e761030db6e479a7fb58b12881883f9f8c63f
// |
// O <- 2d7d4ba9ce0a6ffd222de7785b249ead9c51c536
let after_root_cs_id =
resolve_cs_id(&ctx, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f").await?;
let root_cs_id =
resolve_cs_id(&ctx, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").await?;
let underived = TestGenNum::count_underived(&ctx, &repo, &root_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 1);
let underived = TestGenNum::count_underived(&ctx, &repo, &after_root_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 2);
let underived = TestGenNum::count_underived(&ctx, &repo, &after_root_cs_id, 1)
.compat()
.await?;
assert_eq!(underived, 2);
let underived = TestGenNum::count_underived(&ctx, &repo, &root_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 1);
let master_cs_id = resolve_cs_id(&ctx, &repo, "master").await?;
let underived = TestGenNum::count_underived(&ctx, &repo, &master_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 11);
let underived = TestGenNum::count_underived(&ctx, &repo, &after_root_cs_id, 1)
.compat()
.await?;
assert_eq!(underived, 2);
Ok(())
}
.boxed()
.compat(),
)
let master_cs_id = resolve_cs_id(&ctx, &repo, "master").await?;
let underived = TestGenNum::count_underived(&ctx, &repo, &master_cs_id, 100)
.compat()
.await?;
assert_eq!(underived, 11);
Ok(())
})
}
#[fbinit::test]
@ -775,34 +770,34 @@ mod test {
let ctx = CoreContext::test_mock(fb);
let mut runtime = Runtime::new()?;
let repo = branch_even::getrepo(fb);
let repo = runtime.block_on_std(branch_even::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = branch_uneven::getrepo(fb);
let repo = runtime.block_on_std(branch_uneven::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = branch_wide::getrepo(fb);
let repo = runtime.block_on_std(branch_wide::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = linear::getrepo(fb);
let repo = runtime.block_on_std(linear::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = many_files_dirs::getrepo(fb);
let repo = runtime.block_on_std(many_files_dirs::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = merge_even::getrepo(fb);
let repo = runtime.block_on_std(merge_even::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = merge_uneven::getrepo(fb);
let repo = runtime.block_on_std(merge_uneven::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = unshared_merge_even::getrepo(fb);
let repo = runtime.block_on_std(unshared_merge_even::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = unshared_merge_uneven::getrepo(fb);
let repo = runtime.block_on_std(unshared_merge_uneven::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
let repo = many_diamonds::getrepo(fb, &mut runtime);
let repo = runtime.block_on_std(many_diamonds::getrepo(fb));
derive_for_master(&mut runtime, ctx.clone(), repo.clone());
Ok(())
@ -812,7 +807,7 @@ mod test {
fn test_leases(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let mut runtime = Runtime::new()?;
let repo = init_linear(fb);
let repo = runtime.block_on_std(init_linear(fb));
let mapping = TestGenNum::mapping(&ctx, &repo);
let hg_csid = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536")?;
@ -905,12 +900,14 @@ mod test {
#[fbinit::test]
fn test_always_failing_lease(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo =
init_linear(fb).dangerous_override(|_| Arc::new(FailingLease) as Arc<dyn LeaseOps>);
let mapping = TestGenNum::mapping(&ctx, &repo);
let mut runtime = Runtime::new()?;
let ctx = CoreContext::test_mock(fb);
let repo = runtime
.block_on_std(init_linear(fb))
.dangerous_override(|_| Arc::new(FailingLease) as Arc<dyn LeaseOps>);
let mapping = TestGenNum::mapping(&ctx, &repo);
let hg_csid = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536")?;
let csid = runtime
.block_on(repo.get_bonsai_from_hg(ctx.clone(), hg_csid))?
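
The dominant rewrite in this file replaces `runtime.block_on(fut.boxed().compat())` with `runtime.block_on_std(fut)`: the async test body no longer has to be boxed and adapted back into a 0.1 future just to run. A minimal sketch, assuming tokio-compat 0.1:

    use tokio_compat::runtime::Runtime;

    fn main() -> Result<(), std::io::Error> {
        let mut runtime = Runtime::new()?;
        // Before: runtime.block_on(async { ... }.boxed().compat())
        // After: the std future runs directly, with Tokio 0.2 facilities available.
        let answer = runtime.block_on_std(async { 6 * 7 });
        assert_eq!(answer, 42);
        Ok(())
    }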

View File

@ -293,8 +293,8 @@ mod tests {
#[fbinit::test]
fn linear_test(fb: FacebookInit) -> Result<(), Error> {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
// Derive filenodes because they are going to be used in this test
@ -400,8 +400,8 @@ mod tests {
#[fbinit::test]
fn test_same_content_different_paths(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
fn check_unode_uniqeness(
@ -451,8 +451,8 @@ mod tests {
#[fbinit::test]
fn test_same_content_no_change(fb: FacebookInit) {
let repo = linear::getrepo(fb);
let mut runtime = Runtime::new().unwrap();
let repo = runtime.block_on_std(linear::getrepo(fb));
let ctx = CoreContext::test_mock(fb);
assert!(build_diamond_graph(

View File

@ -155,6 +155,7 @@ mod test {
branch_even, branch_uneven, branch_wide, linear, many_diamonds, many_files_dirs,
merge_even, merge_uneven, unshared_merge_even, unshared_merge_uneven,
};
use futures_preview::future::Future as NewFuture;
use manifest::Entry;
use mercurial_types::{HgChangesetId, HgManifestId};
use revset::AncestorsNodeStream;
@ -231,8 +232,12 @@ mod test {
.flatten_stream()
}
fn verify_repo(fb: FacebookInit, repo: BlobRepo, runtime: &mut Runtime) {
fn verify_repo<F>(fb: FacebookInit, repo: F, runtime: &mut Runtime)
where
F: NewFuture<Output = BlobRepo>,
{
let ctx = CoreContext::test_mock(fb);
let repo = runtime.block_on_std(repo);
runtime
.block_on(
@ -252,8 +257,7 @@ mod test {
verify_repo(fb, branch_even::getrepo(fb), &mut runtime);
verify_repo(fb, branch_uneven::getrepo(fb), &mut runtime);
verify_repo(fb, branch_wide::getrepo(fb), &mut runtime);
let repo = many_diamonds::getrepo(fb, &mut runtime);
verify_repo(fb, repo, &mut runtime);
verify_repo(fb, many_diamonds::getrepo(fb), &mut runtime);
verify_repo(fb, many_files_dirs::getrepo(fb), &mut runtime);
verify_repo(fb, merge_even::getrepo(fb), &mut runtime);
verify_repo(fb, merge_uneven::getrepo(fb), &mut runtime);

View File

@ -12,8 +12,8 @@
use context::CoreContext;
use filenodes::{FilenodeInfo, Filenodes};
use futures::future::Future;
use futures_ext::StreamExt;
use futures_preview::compat::Future01CompatExt;
use mercurial_types::{HgFileNodeId, RepoPath};
use mercurial_types_mocks::nodehash::{
ONES_CSID, ONES_FNID, THREES_CSID, THREES_FNID, TWOS_CSID, TWOS_FNID,
@ -99,7 +99,7 @@ fn copied_filenode() -> FilenodeInfo {
}
}
fn do_add_filenodes(
async fn do_add_filenodes(
ctx: CoreContext,
filenodes: &dyn Filenodes,
to_insert: Vec<FilenodeInfo>,
@ -108,20 +108,21 @@ fn do_add_filenodes(
let stream = futures::stream::iter_ok(to_insert.into_iter()).boxify();
filenodes
.add_filenodes(ctx, stream, repo_id)
.wait()
.compat()
.await
.unwrap();
}
fn do_add_filenode(
async fn do_add_filenode(
ctx: CoreContext,
filenodes: &dyn Filenodes,
node: FilenodeInfo,
repo_id: RepositoryId,
) {
do_add_filenodes(ctx, filenodes, vec![node], repo_id);
do_add_filenodes(ctx, filenodes, vec![node], repo_id).await;
}
fn assert_no_filenode(
async fn assert_no_filenode(
ctx: CoreContext,
filenodes: &dyn Filenodes,
path: &RepoPath,
@ -130,12 +131,13 @@ fn assert_no_filenode(
) {
let res = filenodes
.get_filenode(ctx, path, hash, repo_id)
.wait()
.compat()
.await
.expect("error while fetching filenode");
assert!(res.is_none());
}
fn assert_filenode(
async fn assert_filenode(
ctx: CoreContext,
filenodes: &dyn Filenodes,
path: &RepoPath,
@ -145,13 +147,14 @@ fn assert_filenode(
) {
let res = filenodes
.get_filenode(ctx, path, hash, repo_id)
.wait()
.compat()
.await
.expect("error while fetching filenode")
.expect(&format!("not found: {}", hash));
assert_eq!(res, expected);
}
fn assert_all_filenodes(
async fn assert_all_filenodes(
ctx: CoreContext,
filenodes: &dyn Filenodes,
path: &RepoPath,
@ -160,7 +163,8 @@ fn assert_all_filenodes(
) {
let res = filenodes
.get_all_filenodes_maybe_stale(ctx, path, repo_id)
.wait()
.compat()
.await
.expect("error while fetching filenode");
assert_eq!(&res, expected);
}
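
Each helper above follows the same wait()-to-await rewrite: the futures-0.1 value returned by the Filenodes API is adapted with `Future01CompatExt::compat` and awaited inside an async fn instead of blocking the thread. A self-contained sketch of the adapter in isolation, with a stub 0.1 future standing in for get_filenode:

    use futures::future as future01; // futures 0.1, as in this file
    use futures_preview::compat::Future01CompatExt;
    use futures_preview::executor::block_on;

    async fn fetch_filenode_count() -> Result<u64, &'static str> {
        let fut01 = future01::ok::<u64, &'static str>(1);
        fut01.compat().await // previously: fut01.wait()
    }

    fn main() {
        assert_eq!(block_on(fetch_filenode_count()), Ok(1));
    }
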
@ -181,11 +185,11 @@ macro_rules! filenodes_tests {
#[fbinit::test]
fn test_simple_filenode_insert_and_get(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
assert_filenode(
ctx.clone(),
filenodes,
@ -193,7 +197,8 @@ macro_rules! filenodes_tests {
ONES_FNID,
REPO_ZERO,
root_first_filenode(),
);
)
.await;
assert_no_filenode(
ctx.clone(),
@ -201,22 +206,22 @@ macro_rules! filenodes_tests {
&RepoPath::root(),
TWOS_FNID,
REPO_ZERO,
);
)
.await;
assert_no_filenode(
ctx.clone(),
filenodes,
&RepoPath::root(),
ONES_FNID,
REPO_ONE,
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_insert_identical_in_batch(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenodes(
@ -224,31 +229,29 @@ macro_rules! filenodes_tests {
filenodes,
vec![root_first_filenode(), root_first_filenode()],
REPO_ZERO,
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_filenode_insert_twice(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
Ok(())
})
.expect("test failed");
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
});
}
#[fbinit::test]
fn test_insert_filenode_with_parent(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ZERO)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -256,7 +259,8 @@ macro_rules! filenodes_tests {
ONES_FNID,
REPO_ZERO,
root_first_filenode(),
);
)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -264,20 +268,20 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ZERO,
root_second_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_insert_root_filenode_with_two_parents(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_merge_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ZERO)
.await;
do_add_filenode(ctx.clone(), filenodes, root_merge_filenode(), REPO_ZERO).await;
assert_filenode(
ctx.clone(),
@ -286,19 +290,20 @@ macro_rules! filenodes_tests {
THREES_FNID,
REPO_ZERO,
root_merge_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_insert_file_filenode(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, file_a_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, file_b_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, file_a_first_filenode(), REPO_ZERO)
.await;
do_add_filenode(ctx.clone(), filenodes, file_b_first_filenode(), REPO_ZERO)
.await;
assert_no_filenode(
ctx.clone(),
@ -306,7 +311,8 @@ macro_rules! filenodes_tests {
&RepoPath::file("non-existent").unwrap(),
ONES_FNID,
REPO_ZERO,
);
)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -314,7 +320,8 @@ macro_rules! filenodes_tests {
ONES_FNID,
REPO_ZERO,
file_a_first_filenode(),
);
)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -322,19 +329,18 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ZERO,
file_b_first_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_insert_different_repo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO);
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ONE);
do_add_filenode(ctx.clone(), filenodes, root_first_filenode(), REPO_ZERO).await;
do_add_filenode(ctx.clone(), filenodes, root_second_filenode(), REPO_ONE).await;
assert_filenode(
ctx.clone(),
@ -343,7 +349,8 @@ macro_rules! filenodes_tests {
ONES_FNID,
REPO_ZERO,
root_first_filenode(),
);
)
.await;
assert_no_filenode(
ctx.clone(),
@ -351,7 +358,8 @@ macro_rules! filenodes_tests {
&RepoPath::root(),
ONES_FNID,
REPO_ONE,
);
)
.await;
assert_filenode(
ctx.clone(),
@ -360,15 +368,14 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ONE,
root_second_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn test_insert_parent_and_child_in_same_batch(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
@ -377,7 +384,8 @@ macro_rules! filenodes_tests {
filenodes,
vec![root_first_filenode(), root_second_filenode()],
REPO_ZERO,
);
)
.await;
assert_filenode(
ctx.clone(),
@ -386,7 +394,8 @@ macro_rules! filenodes_tests {
ONES_FNID,
REPO_ZERO,
root_first_filenode(),
);
)
.await;
assert_filenode(
ctx.clone(),
@ -395,15 +404,14 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ZERO,
root_second_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn insert_copied_file(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
@ -412,7 +420,8 @@ macro_rules! filenodes_tests {
filenodes,
vec![copied_from_filenode(), copied_filenode()],
REPO_ZERO,
);
)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -420,15 +429,14 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ZERO,
copied_filenode(),
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn insert_same_copied_file(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
@ -437,21 +445,21 @@ macro_rules! filenodes_tests {
filenodes,
vec![copied_from_filenode()],
REPO_ZERO,
);
)
.await;
do_add_filenodes(
ctx.clone(),
filenodes,
vec![copied_filenode(), copied_filenode()],
REPO_ZERO,
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn insert_copied_file_to_different_repo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
@ -478,8 +486,10 @@ macro_rules! filenodes_tests {
filenodes,
vec![copied_from_filenode(), copied.clone()],
REPO_ZERO,
);
do_add_filenodes(ctx.clone(), filenodes, vec![notcopied.clone()], REPO_ONE);
)
.await;
do_add_filenodes(ctx.clone(), filenodes, vec![notcopied.clone()], REPO_ONE)
.await;
assert_filenode(
ctx.clone(),
filenodes,
@ -487,7 +497,8 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ZERO,
copied,
);
)
.await;
assert_filenode(
ctx.clone(),
@ -496,15 +507,14 @@ macro_rules! filenodes_tests {
TWOS_FNID,
REPO_ONE,
notcopied,
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
#[fbinit::test]
fn get_all_filenodes_maybe_stale(fb: FacebookInit) {
async_unit::tokio_unit_test(move || -> Result<_, !> {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let filenodes = &$create_db();
let root_filenodes = vec![
@ -521,13 +531,15 @@ macro_rules! filenodes_tests {
root_merge_filenode(),
],
REPO_ZERO,
);
)
.await;
do_add_filenodes(
ctx.clone(),
filenodes,
vec![file_a_first_filenode(), file_b_first_filenode()],
REPO_ZERO,
);
)
.await;
assert_all_filenodes(
ctx.clone(),
@ -535,7 +547,8 @@ macro_rules! filenodes_tests {
&RepoPath::RootPath,
REPO_ZERO,
&root_filenodes,
);
)
.await;
assert_all_filenodes(
ctx.clone(),
@ -543,7 +556,8 @@ macro_rules! filenodes_tests {
&RepoPath::file("a").unwrap(),
REPO_ZERO,
&vec![file_a_first_filenode()],
);
)
.await;
assert_all_filenodes(
ctx.clone(),
@ -551,10 +565,9 @@ macro_rules! filenodes_tests {
&RepoPath::file("b").unwrap(),
REPO_ZERO,
&vec![file_b_first_filenode()],
);
Ok(())
})
.expect("test failed");
)
.await;
});
}
}
};
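
Every test in this macro now hands `async_unit::tokio_unit_test` an `async move` block rather than a closure returning `Result<_, !>`, which also removes the trailing `.expect("test failed")`. A hypothetical, minimal version of what the updated entry point implies:

    use std::future::Future;
    use tokio_compat::runtime::Runtime;

    // Accepts a future instead of a closure and drives it on a
    // tokio-compat runtime, so test bodies can use Tokio 0.2 directly.
    pub fn tokio_unit_test<F, T>(fut: F) -> T
    where
        F: Future<Output = T>,
    {
        let mut runtime = Runtime::new().expect("failed to create test runtime");
        runtime.block_on_std(fut)
    }

    fn main() {
        assert_eq!(tokio_unit_test(async { 2 + 2 }), 4);
    }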

View File

@ -187,12 +187,11 @@ mod test {
/// This function creates a new Git tree from the fixture's master Bonsai bookmark,
/// materializes it to disk, then verifies that libgit produces the same Git tree for it.
async fn run_tree_derivation_for_fixture<F>(fb: FacebookInit, fixture: F) -> Result<(), Error>
where
F: FnOnce(FacebookInit) -> BlobRepo,
{
async fn run_tree_derivation_for_fixture(
fb: FacebookInit,
repo: BlobRepo,
) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = fixture(fb);
let bcs_id = repo
.get_bonsai_bookmark(ctx.clone(), &("master".try_into()?))
@ -248,7 +247,8 @@ mod test {
($fixture:ident) => {
#[fbinit::test]
async fn $fixture(fb: FacebookInit) -> Result<(), Error> {
run_tree_derivation_for_fixture(fb, fixtures::$fixture::getrepo).await
let repo = fixtures::$fixture::getrepo(fb).await;
run_tree_derivation_for_fixture(fb, repo).await
}
};
}
@ -262,11 +262,5 @@ mod test {
impl_test!(merge_uneven);
impl_test!(unshared_merge_even);
impl_test!(unshared_merge_uneven);
#[fbinit::test]
fn many_diamonds(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = ::tokio_compat::runtime::Runtime::new().unwrap();
let repo = fixtures::many_diamonds::getrepo(fb, &mut runtime);
runtime.block_on_std(run_tree_derivation_for_fixture(fb, move |_| repo))
}
impl_test!(many_diamonds);
}

View File

@ -18,6 +18,7 @@ use futures::future::ok;
use futures::Future;
use futures::{stream, Stream};
use futures_ext::{BoxFuture, FutureExt};
use futures_preview::compat::Future01CompatExt;
use hooks::{
hook_loader::load_hooks, ErrorKind, FileHookExecutionID, Hook, HookChangeset,
HookChangesetParents, HookContext, HookExecution, HookFile, HookManager, HookRejectionInfo,
@ -398,7 +399,7 @@ fn length_matching_file_hook(length: u64) -> Box<dyn Hook<HookFile>> {
#[fbinit::test]
fn test_changeset_hook_accepted(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook()
@ -410,13 +411,13 @@ fn test_changeset_hook_accepted(fb: FacebookInit) {
let expected = hashmap! {
"hook1".to_string() => HookExecution::Accepted
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_rejected(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_rejecting_changeset_hook()
@ -428,13 +429,13 @@ fn test_changeset_hook_rejected(fb: FacebookInit) {
let expected = hashmap! {
"hook1".to_string() => default_rejection()
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_mix(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook(),
@ -452,13 +453,13 @@ fn test_changeset_hook_mix(fb: FacebookInit) {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => HookExecution::Accepted,
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_context(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let files = vec![
("dir1/subdir1/subsubdir1/file_1".to_string(), ONES_FNID),
@ -506,13 +507,13 @@ fn test_changeset_hook_context(fb: FacebookInit) {
let expected = hashmap! {
"hook1".to_string() => HookExecution::Accepted
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_other_file_text(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => other_file_matching_changeset_hook("dir1/subdir1/subsubdir1/file_1".to_string(), Some("elephants".to_string())),
@ -534,13 +535,13 @@ fn test_changeset_hook_other_file_text(fb: FacebookInit) {
"hook4".to_string() => HookExecution::Accepted,
"hook5".to_string() => default_rejection(),
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_file_text(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hook1_map = hashmap![
"dir1/subdir1/subsubdir1/file_1".to_string() => Some("elephants".to_string()),
@ -573,13 +574,13 @@ fn test_changeset_hook_file_text(fb: FacebookInit) {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => default_rejection(),
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_changeset_hook_lengths(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hook1_map = hashmap![
"dir1/subdir1/subsubdir1/file_1".to_string() => 9,
@ -612,13 +613,13 @@ fn test_changeset_hook_lengths(fb: FacebookInit) {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => default_rejection(),
};
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, regexes, expected).await;
});
}
#[fbinit::test]
fn test_file_hook_accepted(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_accepting_file_hook()
@ -642,13 +643,14 @@ fn test_file_hook_accepted(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hook_rejected(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_rejecting_file_hook()
@ -672,13 +674,14 @@ fn test_file_hook_rejected(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hook_mix(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_rejecting_file_hook(),
@ -710,13 +713,14 @@ fn test_file_hook_mix(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hooks_paths(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let matching_paths = hashset![
"dir1/subdir1/subsubdir2/file_1".to_string(),
@ -744,13 +748,14 @@ fn test_file_hooks_paths(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hooks_paths_mix(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let matching_paths1 = hashset![
"dir1/subdir1/subsubdir2/file_1".to_string(),
@ -787,13 +792,14 @@ fn test_file_hooks_paths_mix(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hook_file_text(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => file_text_matching_file_hook(Some("elephants".to_string())),
@ -831,13 +837,14 @@ fn test_file_hook_file_text(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hook_is_symlink(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => is_symlink_matching_file_hook(true),
@ -869,13 +876,14 @@ fn test_file_hook_is_symlink(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_file_hook_length(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookFile>>> = hashmap! {
"hook1".to_string() => length_matching_file_hook("elephants".len() as u64),
@ -919,14 +927,15 @@ fn test_file_hook_length(fb: FacebookInit) {
regexes,
expected,
ContentStoreType::InMemory,
);
)
.await;
});
}
#[fbinit::test]
fn test_register_changeset_hooks(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
let mut hook_manager = hook_manager_inmem(fb);
async_unit::tokio_unit_test(async move {
let mut hook_manager = hook_manager_inmem(fb).await;
let hook1 = always_accepting_changeset_hook();
hook_manager.register_changeset_hook("hook1", hook1.into(), Default::default());
let hook2 = always_accepting_changeset_hook();
@ -941,7 +950,7 @@ fn test_register_changeset_hooks(fb: FacebookInit) {
#[fbinit::test]
fn test_cs_hooks_with_blob_store(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let hooks: HashMap<String, Box<dyn Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook()
@ -960,14 +969,15 @@ fn test_cs_hooks_with_blob_store(fb: FacebookInit) {
bookmarks,
regexes.clone(),
expected,
ContentStoreType::Blob(many_files_dirs::getrepo(ctx.fb)),
);
ContentStoreType::Blob(many_files_dirs::getrepo(ctx.fb).await),
)
.await;
});
}
#[fbinit::test]
fn test_file_hooks_with_blob_store(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Create and init a repo
let (repo, hg_cs_id) = {
@ -981,8 +991,10 @@ fn test_file_hooks_with_blob_store(fb: FacebookInit) {
ctx.clone(),
btreemap! {"toremove" => Some("content")},
repo.clone(),
),
);
)
.await,
)
.await;
let bcs_id = create_commit(
ctx.clone(),
repo.clone(),
@ -995,8 +1007,10 @@ fn test_file_hooks_with_blob_store(fb: FacebookInit) {
"dir/somefile" => Some("good"),
},
repo.clone(),
),
);
)
.await,
)
.await;
let mut txn = repo.update_bookmark_transaction(ctx.clone());
txn.force_set(
@ -1007,10 +1021,11 @@ fn test_file_hooks_with_blob_store(fb: FacebookInit) {
},
)
.unwrap();
txn.commit().wait().unwrap();
txn.commit().compat().await.unwrap();
let hg_cs_id = repo
.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()
.compat()
.await
.unwrap();
(repo, hg_cs_id)
};
@ -1038,11 +1053,12 @@ fn test_file_hooks_with_blob_store(fb: FacebookInit) {
expected,
ContentStoreType::Blob(repo),
hg_cs_id,
);
)
.await;
})
}
fn run_changeset_hooks(
async fn run_changeset_hooks(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<dyn Hook<HookChangeset>>>,
@ -1059,9 +1075,10 @@ fn run_changeset_hooks(
expected,
ContentStoreType::InMemory,
)
.await
}
fn run_changeset_hooks_with_mgr(
async fn run_changeset_hooks_with_mgr(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<dyn Hook<HookChangeset>>>,
@ -1070,7 +1087,7 @@ fn run_changeset_hooks_with_mgr(
expected: HashMap<String, HookExecution>,
content_store_type: ContentStoreType,
) {
let mut hook_manager = setup_hook_manager(ctx.fb, bookmarks, regexes, content_store_type);
let mut hook_manager = setup_hook_manager(ctx.fb, bookmarks, regexes, content_store_type).await;
for (hook_name, hook) in hooks {
hook_manager.register_changeset_hook(&hook_name, hook.into(), Default::default());
}
@ -1080,7 +1097,7 @@ fn run_changeset_hooks_with_mgr(
&BookmarkName::new(bookmark_name).unwrap(),
None,
);
let res = fut.wait().unwrap();
let res = fut.compat().await.unwrap();
let map: HashMap<String, HookExecution> = res
.into_iter()
.map(|(exec_id, exec)| (exec_id.hook_name, exec))
@ -1093,7 +1110,7 @@ enum ContentStoreType {
Blob(BlobRepo),
}
fn run_file_hooks(
async fn run_file_hooks(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<dyn Hook<HookFile>>>,
@ -1112,9 +1129,10 @@ fn run_file_hooks(
content_store_type,
default_changeset_id(),
)
.await
}
fn run_file_hooks_for_cs(
async fn run_file_hooks_for_cs(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<dyn Hook<HookFile>>>,
@ -1134,9 +1152,10 @@ fn run_file_hooks_for_cs(
content_store_type,
hg_cs_id,
)
.await
}
fn run_file_hooks_with_mgr(
async fn run_file_hooks_with_mgr(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<dyn Hook<HookFile>>>,
@ -1146,7 +1165,7 @@ fn run_file_hooks_with_mgr(
content_store_type: ContentStoreType,
hg_cs_id: HgChangesetId,
) {
let mut hook_manager = setup_hook_manager(ctx.fb, bookmarks, regexes, content_store_type);
let mut hook_manager = setup_hook_manager(ctx.fb, bookmarks, regexes, content_store_type).await;
for (hook_name, hook) in hooks {
hook_manager.register_file_hook(&hook_name, hook.into(), Default::default());
}
@ -1157,7 +1176,7 @@ fn run_file_hooks_with_mgr(
&BookmarkName::new(bookmark_name).unwrap(),
None,
);
let res = fut.wait().unwrap();
let res = fut.compat().await.unwrap();
let map: HashMap<String, HashMap<String, HookExecution>> =
res.into_iter()
.fold(HashMap::new(), |mut m, (exec_id, exec)| {
@ -1170,14 +1189,14 @@ fn run_file_hooks_with_mgr(
assert_eq!(expected, map);
}
fn setup_hook_manager(
async fn setup_hook_manager(
fb: FacebookInit,
bookmarks: HashMap<String, Vec<String>>,
regexes: HashMap<String, Vec<String>>,
content_store_type: ContentStoreType,
) -> HookManager {
let mut hook_manager = match content_store_type {
ContentStoreType::InMemory => hook_manager_inmem(fb),
ContentStoreType::InMemory => hook_manager_inmem(fb).await,
ContentStoreType::Blob(repo) => hook_manager_blobrepo(fb, repo),
};
for (bookmark_name, hook_names) in bookmarks {
@ -1214,8 +1233,8 @@ fn hook_manager_blobrepo(fb: FacebookInit, repo: BlobRepo) -> HookManager {
)
}
fn hook_manager_many_files_dirs_blobrepo(fb: FacebookInit) -> HookManager {
hook_manager_blobrepo(fb, many_files_dirs::getrepo(fb))
async fn hook_manager_many_files_dirs_blobrepo(fb: FacebookInit) -> HookManager {
hook_manager_blobrepo(fb, many_files_dirs::getrepo(fb).await)
}
fn to_mpath(string: &str) -> MPath {
@ -1223,12 +1242,16 @@ fn to_mpath(string: &str) -> MPath {
MPath::new(string.to_string().as_bytes().to_vec()).unwrap()
}
fn hook_manager_inmem(fb: FacebookInit) -> HookManager {
async fn hook_manager_inmem(fb: FacebookInit) -> HookManager {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
// Load up an in memory store with a single commit from the many_files_dirs store
let cs_id = HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
let cs = cs_id.load(ctx.clone(), repo.blobstore()).wait().unwrap();
let cs = cs_id
.load(ctx.clone(), repo.blobstore())
.compat()
.await
.unwrap();
let mut changeset_store = InMemoryChangesetStore::new();
changeset_store.insert_changeset(cs_id, cs);
let files = vec![
@ -1321,7 +1344,7 @@ fn default_repo_config() -> RepoConfig {
#[fbinit::test]
fn test_load_hooks(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![
BookmarkParams {
@ -1370,7 +1393,7 @@ fn test_load_hooks(fb: FacebookInit) {
},
];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
match load_hooks(fb, &mut hm, config, &hashset![]) {
Err(e) => assert!(false, format!("Failed to load hooks {}", e)),
Ok(()) => (),
@ -1380,32 +1403,34 @@ fn test_load_hooks(fb: FacebookInit) {
#[fbinit::test]
fn test_verify_integrity_fast_failure(fb: FacebookInit) {
let mut config = default_repo_config();
config.bookmarks = vec![BookmarkParams {
bookmark: Regex::new("bm2").unwrap().into(),
hooks: vec!["rust:verify_integrity".into()],
only_fast_forward: false,
allowed_users: None,
rewrite_dates: None,
}];
config.hooks = vec![HookParams {
name: "rust:verify_integrity".into(),
code: Some("whateva".into()),
hook_type: HookType::PerChangeset,
config: HookConfig {
strings: hashmap! {String::from("verify_integrity_path") => String::from("bad_nonexisting_filename")},
..Default::default()
},
}];
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![BookmarkParams {
bookmark: Regex::new("bm2").unwrap().into(),
hooks: vec!["rust:verify_integrity".into()],
only_fast_forward: false,
allowed_users: None,
rewrite_dates: None,
}];
config.hooks = vec![HookParams {
name: "rust:verify_integrity".into(),
code: Some("whateva".into()),
hook_type: HookType::PerChangeset,
config: HookConfig {
strings: hashmap! {String::from("verify_integrity_path") => String::from("bad_nonexisting_filename")},
..Default::default()
},
}];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
load_hooks(fb, &mut hm, config, &hashset![])
.expect_err("`verify_integrity` hook loading should have failed");
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
load_hooks(fb, &mut hm, config, &hashset![])
.expect_err("`verify_integrity` hook loading should have failed");
});
}
#[fbinit::test]
fn test_load_hooks_no_such_hook(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let book_or_rex = BookmarkOrRegex::Bookmark(BookmarkName::new("bm1").unwrap());
let mut config = default_repo_config();
config.bookmarks = vec![BookmarkParams {
@ -1423,7 +1448,7 @@ fn test_load_hooks_no_such_hook(fb: FacebookInit) {
config: Default::default(),
}];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
match load_hooks(fb, &mut hm, config, &hashset![])
.unwrap_err()
@ -1439,7 +1464,7 @@ fn test_load_hooks_no_such_hook(fb: FacebookInit) {
#[fbinit::test]
fn test_load_hooks_bad_rust_hook(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![BookmarkParams {
bookmark: BookmarkName::new("bm1").unwrap().into(),
@ -1456,7 +1481,7 @@ fn test_load_hooks_bad_rust_hook(fb: FacebookInit) {
config: Default::default(),
}];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
match load_hooks(fb, &mut hm, config, &hashset![])
.unwrap_err()
@ -1472,64 +1497,70 @@ fn test_load_hooks_bad_rust_hook(fb: FacebookInit) {
#[fbinit::test]
fn test_load_disabled_hooks(fb: FacebookInit) {
let mut config = default_repo_config();
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![];
config.bookmarks = vec![];
config.hooks = vec![HookParams {
name: "hook1".into(),
code: None,
hook_type: HookType::PerChangeset,
config: Default::default(),
}];
config.hooks = vec![HookParams {
name: "hook1".into(),
code: None,
hook_type: HookType::PerChangeset,
config: Default::default(),
}];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.expect("disabling a broken hook should allow loading to succeed");
load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.expect("disabling a broken hook should allow loading to succeed");
});
}
#[fbinit::test]
fn test_load_disabled_hooks_referenced_by_bookmark(fb: FacebookInit) {
let mut config = default_repo_config();
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![BookmarkParams {
bookmark: BookmarkName::new("bm1").unwrap().into(),
hooks: vec!["hook1".into()],
only_fast_forward: false,
allowed_users: None,
rewrite_dates: None,
}];
config.bookmarks = vec![BookmarkParams {
bookmark: BookmarkName::new("bm1").unwrap().into(),
hooks: vec!["hook1".into()],
only_fast_forward: false,
allowed_users: None,
rewrite_dates: None,
}];
config.hooks = vec![HookParams {
name: "hook1".into(),
code: None,
hook_type: HookType::PerChangeset,
config: Default::default(),
}];
config.hooks = vec![HookParams {
name: "hook1".into(),
code: None,
hook_type: HookType::PerChangeset,
config: Default::default(),
}];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.expect("disabling a broken hook should allow loading to succeed");
load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.expect("disabling a broken hook should allow loading to succeed");
});
}
#[fbinit::test]
fn test_load_disabled_hooks_hook_does_not_exist(fb: FacebookInit) {
let mut config = default_repo_config();
async_unit::tokio_unit_test(async move {
let mut config = default_repo_config();
config.bookmarks = vec![];
config.hooks = vec![];
config.bookmarks = vec![];
config.hooks = vec![];
let mut hm = hook_manager_many_files_dirs_blobrepo(fb);
let mut hm = hook_manager_many_files_dirs_blobrepo(fb).await;
match load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.unwrap_err()
.downcast::<ErrorKind>()
{
Ok(ErrorKind::NoSuchHookToDisable(hooks)) => {
assert_eq!(hashset!["hook1".to_string()], hooks);
}
_ => assert!(false, "Unexpected err type"),
};
match load_hooks(fb, &mut hm, config, &hashset!["hook1".to_string()])
.unwrap_err()
.downcast::<ErrorKind>()
{
Ok(ErrorKind::NoSuchHookToDisable(hooks)) => {
assert_eq!(hashset!["hook1".to_string()], hooks);
}
_ => assert!(false, "Unexpected err type"),
};
});
}
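
The same shape runs through all of these hook tests: formerly synchronous helpers (run_changeset_hooks, run_file_hooks_with_mgr, setup_hook_manager, hook_manager_inmem) become async fns that await one another, and each test drives only the outermost future. An illustrative reduction of that chain, with stand-in names:

    use futures_preview::executor::block_on;

    // Stand-in for the async setup_hook_manager helper.
    async fn setup_manager(names: Vec<&'static str>) -> Vec<&'static str> {
        names
    }

    // Stand-in for run_*_hooks_with_mgr: awaits its setup helper.
    async fn run_hooks(names: Vec<&'static str>, expected: usize) {
        let manager = setup_manager(names).await;
        assert_eq!(manager.len(), expected);
    }

    fn main() {
        block_on(run_hooks(vec!["hook1", "hook2"], 2));
    }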

File diff suppressed because it is too large

View File

@ -293,12 +293,13 @@ impl HgEntry for MockEntry {
mod test {
use super::*;
use fbinit::FacebookInit;
use futures::{Future, Stream};
use futures::Stream;
use futures_preview::compat::Future01CompatExt;
use maplit::btreemap;
#[fbinit::test]
fn lookup(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let paths = btreemap! {
"foo/bar1" => (FileType::Regular, "bar1"),
@ -319,7 +320,8 @@ mod test {
.expect("foo should be present");
let foo_content = foo_entry
.get_content(ctx.clone())
.wait()
.compat()
.await
.expect("content fetch should work");
let foo_manifest = match foo_content {
Content::Tree(manifest) => manifest,
@ -331,7 +333,8 @@ mod test {
.expect("bar1 should be present");
let bar1_content = bar1_entry
.get_content(ctx.clone())
.wait()
.compat()
.await
.expect("content fetch should work");
let bar1_stream = match bar1_content {
Content::File(stream) => stream,
@ -339,7 +342,8 @@ mod test {
};
let bar1_bytes = bar1_stream
.concat2()
.wait()
.compat()
.await
.expect("content stream should work");
assert_eq!(bar1_bytes.into_bytes().as_ref(), &b"bar1"[..]);
@ -348,7 +352,8 @@ mod test {
.expect("bar2 should be present");
let bar2_content = bar2_entry
.get_content(ctx.clone())
.wait()
.compat()
.await
.expect("content fetch should work");
let bar2_stream = match bar2_content {
Content::Symlink(stream) => stream,
@ -356,7 +361,8 @@ mod test {
};
let bar2_bytes = bar2_stream
.concat2()
.wait()
.compat()
.await
.expect("content stream should work");
assert_eq!(bar2_bytes.into_bytes().as_ref(), &b"bar2"[..]);
})

View File

@ -862,64 +862,55 @@ impl RepoContext {
mod tests {
use super::*;
use fixtures::{linear, merge_even};
use futures_preview::{FutureExt as NewFutureExt, TryFutureExt};
#[fbinit::test]
fn test_try_find_child(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let repo = Repo::new_test(ctx.clone(), linear::getrepo(fb)).await?;
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Repo::new_test(ctx.clone(), linear::getrepo(fb).await).await?;
let ancestor = ChangesetId::from_str(
"c9f9a2a39195a583d523a4e5f6973443caeb0c66a315d5bf7db1b5775c725310",
)?;
let descendant = ChangesetId::from_str(
"7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6",
)?;
let ancestor = ChangesetId::from_str(
"c9f9a2a39195a583d523a4e5f6973443caeb0c66a315d5bf7db1b5775c725310",
)?;
let descendant = ChangesetId::from_str(
"7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6",
)?;
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 100).await?;
let child = maybe_child.ok_or(format_err!("didn't find child"))?;
assert_eq!(
child,
ChangesetId::from_str(
"98ef3234c2f1acdbb272715e8cfef4a6378e5443120677e0d87d113571280f79"
)?
);
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 100).await?;
let child = maybe_child.ok_or(format_err!("didn't find child"))?;
assert_eq!(
child,
ChangesetId::from_str(
"98ef3234c2f1acdbb272715e8cfef4a6378e5443120677e0d87d113571280f79"
)?
);
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 1).await?;
assert!(maybe_child.is_none());
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 1).await?;
assert!(maybe_child.is_none());
Ok(())
}
.boxed()
.compat(),
)
Ok(())
})
}
#[fbinit::test]
fn test_try_find_child_merge(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let repo = Repo::new_test(ctx.clone(), merge_even::getrepo(fb)).await?;
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Repo::new_test(ctx.clone(), merge_even::getrepo(fb).await).await?;
let ancestor = ChangesetId::from_str(
"35fb4e0fb3747b7ca4d18281d059be0860d12407dc5dce5e02fb99d1f6a79d2a",
)?;
let descendant = ChangesetId::from_str(
"567a25d453cafaef6550de955c52b91bf9295faf38d67b6421d5d2e532e5adef",
)?;
let ancestor = ChangesetId::from_str(
"35fb4e0fb3747b7ca4d18281d059be0860d12407dc5dce5e02fb99d1f6a79d2a",
)?;
let descendant = ChangesetId::from_str(
"567a25d453cafaef6550de955c52b91bf9295faf38d67b6421d5d2e532e5adef",
)?;
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 100).await?;
let child = maybe_child.ok_or(format_err!("didn't find child"))?;
assert_eq!(child, descendant);
Ok(())
}
.boxed()
.compat(),
)
let maybe_child = repo.try_find_child(&ctx, ancestor, descendant, 100).await?;
let child = maybe_child.ok_or(format_err!("didn't find child"))?;
assert_eq!(child, descendant);
Ok(())
})
}
}

File diff suppressed because it is too large

View File

@ -17,7 +17,6 @@ use fixtures::{linear, many_files_dirs};
use futures::stream::Stream;
use futures_preview::compat::Future01CompatExt;
use futures_preview::future::{FutureExt, TryFutureExt};
use crate::{
ChangesetContext, ChangesetId, CoreContext, CreateChange, FileType, Mononoke, MononokeError,
@ -27,19 +26,86 @@ use crate::{
#[fbinit::test]
fn create_commit(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let mononoke =
Mononoke::new_test(ctx.clone(), vec![("test".to_string(), linear::getrepo(fb))])
.await?;
let repo = mononoke
.repo(ctx, "test")?
.expect("repo exists")
.write()
.await?;
let expected_hash = "68c9120f387cf1c3b7e4c2e30cdbd5b953f27a732cfe9f42f335f0091ece3c6c";
let parent_hash = "7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6";
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let mononoke = Mononoke::new_test(
ctx.clone(),
vec![("test".to_string(), linear::getrepo(fb).await)],
)
.await?;
let repo = mononoke
.repo(ctx, "test")?
.expect("repo exists")
.write()
.await?;
let expected_hash = "68c9120f387cf1c3b7e4c2e30cdbd5b953f27a732cfe9f42f335f0091ece3c6c";
let parent_hash = "7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6";
let parents = vec![ChangesetId::from_str(parent_hash)?];
let author = String::from("Test Author <test@example.com>");
let author_date = FixedOffset::east(0).ymd(2000, 2, 1).and_hms(12, 0, 0);
let committer = None;
let committer_date = None;
let message = String::from("Test Created Commit");
let extra = BTreeMap::new();
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(
MononokePath::try_from("TEST_CREATE")?,
CreateChange::NewContent(Bytes::from("TEST CREATE\n"), FileType::Regular, None),
);
let cs = repo
.create_changeset(
parents,
author,
author_date,
committer,
committer_date,
message,
extra,
changes,
)
.await?;
assert_eq!(cs.message().await?, "Test Created Commit");
assert_eq!(cs.id(), ChangesetId::from_str(expected_hash)?);
let content = cs
.path("TEST_CREATE")?
.file()
.await?
.expect("file should exist")
.content()
.await
.collect()
.compat()
.await?;
assert_eq!(content, vec![Bytes::from("TEST CREATE\n")]);
Ok(())
})
}
#[fbinit::test]
fn create_commit_bad_changes(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let mononoke = Mononoke::new_test(
ctx.clone(),
vec![("test".to_string(), many_files_dirs::getrepo(fb).await)],
)
.await?;
let repo = mononoke
.repo(ctx, "test")?
.expect("repo exists")
.write()
.await?;
async fn create_changeset(
repo: &RepoWriteContext,
changes: BTreeMap<MononokePath, CreateChange>,
) -> Result<ChangesetContext, MononokeError> {
let parent_hash = "b0d1bf77898839595ee0f0cba673dd6e3be9dadaaa78bc6dd2dea97ca6bee77e";
let parents = vec![ChangesetId::from_str(parent_hash)?];
let author = String::from("Test Author <test@example.com>");
let author_date = FixedOffset::east(0).ymd(2000, 2, 1).and_hms(12, 0, 0);
@ -47,153 +113,79 @@ fn create_commit(fb: FacebookInit) -> Result<(), Error> {
let committer_date = None;
let message = String::from("Test Created Commit");
let extra = BTreeMap::new();
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(
MononokePath::try_from("TEST_CREATE")?,
CreateChange::NewContent(Bytes::from("TEST CREATE\n"), FileType::Regular, None),
);
let cs = repo
.create_changeset(
parents,
author,
author_date,
committer,
committer_date,
message,
extra,
changes,
)
.await?;
assert_eq!(cs.message().await?, "Test Created Commit");
assert_eq!(cs.id(), ChangesetId::from_str(expected_hash)?);
let content = cs
.path("TEST_CREATE")?
.file()
.await?
.expect("file should exist")
.content()
.await
.collect()
.compat()
.await?;
assert_eq!(content, vec![Bytes::from("TEST CREATE\n")]);
Ok(())
}
.boxed()
.compat(),
)
}
#[fbinit::test]
fn create_commit_bad_changes(fb: FacebookInit) -> Result<(), Error> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let mononoke = Mononoke::new_test(
ctx.clone(),
vec![("test".to_string(), many_files_dirs::getrepo(fb))],
repo.create_changeset(
parents,
author,
author_date,
committer,
committer_date,
message,
extra,
changes,
)
.await?;
let repo = mononoke
.repo(ctx, "test")?
.expect("repo exists")
.write()
.await?;
async fn create_changeset(
repo: &RepoWriteContext,
changes: BTreeMap<MononokePath, CreateChange>,
) -> Result<ChangesetContext, MononokeError> {
let parent_hash =
"b0d1bf77898839595ee0f0cba673dd6e3be9dadaaa78bc6dd2dea97ca6bee77e";
let parents = vec![ChangesetId::from_str(parent_hash)?];
let author = String::from("Test Author <test@example.com>");
let author_date = FixedOffset::east(0).ymd(2000, 2, 1).and_hms(12, 0, 0);
let committer = None;
let committer_date = None;
let message = String::from("Test Created Commit");
let extra = BTreeMap::new();
repo.create_changeset(
parents,
author,
author_date,
committer,
committer_date,
message,
extra,
changes,
)
.await
}
// Cannot delete a file that is not there
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(MononokePath::try_from("TEST_CREATE")?, CreateChange::Delete);
assert_matches!(
create_changeset(&repo, changes).await,
Err(MononokeError::InvalidRequest(_))
);
// Cannot replace a file with a directory without deleting the file
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(
MononokePath::try_from("1/TEST_CREATE")?,
CreateChange::NewContent(Bytes::from("test"), FileType::Regular, None),
);
assert_matches!(
create_changeset(&repo, changes.clone()).await,
Err(MononokeError::InvalidRequest(_))
);
// Deleting the file means we can now replace it with a directory.
changes.insert(MononokePath::try_from("1")?, CreateChange::Delete);
assert!(create_changeset(&repo, changes).await.is_ok());
// Changes cannot introduce path conflicts
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(
MononokePath::try_from("TEST_CREATE")?,
CreateChange::NewContent(Bytes::from("test"), FileType::Regular, None),
);
changes.insert(
MononokePath::try_from("TEST_CREATE/TEST_CREATE")?,
CreateChange::NewContent(Bytes::from("test"), FileType::Regular, None),
);
assert_matches!(
create_changeset(&repo, changes).await,
Err(MononokeError::InvalidRequest(_))
);
// Superfluous changes when a directory is replaced by a file are dropped
let mut changes: BTreeMap<MononokePath, CreateChange> = BTreeMap::new();
changes.insert(
MononokePath::try_from("dir1")?,
CreateChange::NewContent(Bytes::from("test"), FileType::Regular, None),
);
let cs1 = create_changeset(&repo, changes.clone()).await?;
changes.insert(
MononokePath::try_from("dir1/file_1_in_dir1")?,
CreateChange::Delete,
);
changes.insert(
MononokePath::try_from("dir1/subdir1/file_1")?,
CreateChange::Delete,
);
let cs2 = create_changeset(&repo, changes).await?;
// Since the superfluous changes were dropped, the two commits
// have the same bonsai hash.
assert_eq!(cs1.id(), cs2.id());
Ok(())
})
}

View File

@ -251,7 +251,7 @@ mod test {
#[fbinit::test]
fn test_fastlog_batch_empty(fb: FacebookInit) -> Result<()> {
let mut rt = Runtime::new().unwrap();
let blobstore = Arc::new(linear::getrepo(fb).get_blobstore());
let blobstore = Arc::new(rt.block_on_std(linear::getrepo(fb)).get_blobstore());
let ctx = CoreContext::test_mock(fb);
let list = VecDeque::new();
@ -266,7 +266,7 @@ mod test {
#[fbinit::test]
fn test_fastlog_batch_single(fb: FacebookInit) -> Result<()> {
let mut rt = Runtime::new().unwrap();
let blobstore = Arc::new(linear::getrepo(fb).get_blobstore());
let blobstore = Arc::new(rt.block_on_std(linear::getrepo(fb)).get_blobstore());
let ctx = CoreContext::test_mock(fb);
let mut list = VecDeque::new();
@ -283,7 +283,7 @@ mod test {
#[fbinit::test]
fn test_fastlog_batch_large(fb: FacebookInit) -> Result<()> {
let mut rt = Runtime::new().unwrap();
let blobstore = Arc::new(linear::getrepo(fb).get_blobstore());
let blobstore = Arc::new(rt.block_on_std(linear::getrepo(fb)).get_blobstore());
let ctx = CoreContext::test_mock(fb);
let mut list = VecDeque::new();
@ -306,7 +306,7 @@ mod test {
#[fbinit::test]
fn test_fastlog_batch_overflow(fb: FacebookInit) -> Result<()> {
let mut rt = Runtime::new().unwrap();
let blobstore = Arc::new(linear::getrepo(fb).get_blobstore());
let blobstore = Arc::new(rt.block_on_std(linear::getrepo(fb)).get_blobstore());
let ctx = CoreContext::test_mock(fb);
let mut list = VecDeque::new();
@ -331,7 +331,7 @@ mod test {
fn fastlog_roundtrip(fb: FacebookInit, hashes: Vec<(ChangesetId, i32)>) -> TestResult {
let mut rt = Runtime::new().unwrap();
let blobstore = Arc::new(linear::getrepo(fb).get_blobstore());
let blobstore = Arc::new(rt.block_on_std(linear::getrepo(fb)).get_blobstore());
let ctx = CoreContext::test_mock(fb);
let mut raw_list = VecDeque::new();
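
A minimal sketch (not part of this diff) of the pattern in the fastlog hunks above: fixture constructors such as `linear::getrepo(fb)` are now async fns, so tests that still drive a tokio-compat `Runtime` resolve them with `block_on_std`, which accepts a std::future. `make_fixture` below is a hypothetical stand-in for those constructors.

use tokio_compat::runtime::Runtime;

fn main() {
    let mut rt = Runtime::new().unwrap();
    // Stand-in for an async fixture constructor like `linear::getrepo(fb)`.
    async fn make_fixture() -> Vec<u32> {
        vec![1, 2, 3]
    }
    // Same position as `rt.block_on_std(linear::getrepo(fb))` above: drive
    // the std::future to completion from synchronous test code.
    let fixture = rt.block_on_std(make_fixture());
    assert_eq!(fixture.len(), 3);
}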

View File

@ -117,7 +117,7 @@ fn is_public(
fn get_phase_hint_test(fb: FacebookInit) {
let mut rt = Runtime::new().unwrap();
let repo = linear::getrepo(fb);
let repo = rt.block_on_std(linear::getrepo(fb));
// @ 79a13814c5ce7330173ec04d279bf95ab3f652fb
// |
// o a5ffa77602a066db7d5cfb9fb5823a0895717c5a
@ -268,7 +268,7 @@ fn get_phase_hint_test(fb: FacebookInit) {
fn test_mark_reachable_as_public(fb: FacebookInit) -> Result<()> {
let mut rt = Runtime::new()?;
let repo = fixtures::branch_even::getrepo(fb);
let repo = rt.block_on_std(fixtures::branch_even::getrepo(fb));
// @ 4f7f3fd428bec1a48f9314414b063c706d9c1aed (6)
// |
// o b65231269f651cfe784fd1d97ef02a049a37b8a0 (5)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -13,7 +13,7 @@ use blobrepo::BlobRepo;
use context::CoreContext;
use fbinit::FacebookInit;
use fixtures::{branch_wide, linear, merge_uneven};
use futures::future::Future;
use futures_preview::compat::Future01CompatExt;
#[cfg(test)]
use common::fetch_generation_and_join;
@ -25,10 +25,15 @@ pub fn string_to_nodehash(hash: &'static str) -> HgNodeHash {
HgNodeHash::from_static_str(hash).expect("Can't turn string to HgNodeHash")
}
pub fn string_to_bonsai(ctx: CoreContext, repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
pub async fn string_to_bonsai(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
s: &'static str,
) -> ChangesetId {
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(ctx, HgChangesetId::new(node))
.wait()
.compat()
.await
.unwrap()
.unwrap()
}
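
A minimal sketch (assumptions flagged in comments) of the core conversion applied in `string_to_bonsai` above and throughout this commit: a futures 0.1 future that used to be resolved with `.wait()` is bridged into async/await with `Future01CompatExt::compat`, which turns `Future<Item = T, Error = E>` into `Future<Output = Result<T, E>>`.

use futures::future as future01; // futures 0.1
use futures_preview::compat::Future01CompatExt;

async fn resolve() -> Result<u32, ()> {
    let fut01 = future01::ok::<u32, ()>(42);
    // Old style: `fut01.wait()` blocked the calling thread in place.
    // New style: bridge the 0.1 future and await it on the Tokio 0.2
    // runtime provided by the test harness.
    fut01.compat().await
}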
@ -37,51 +42,59 @@ pub fn test_linear_reachability<T: ReachabilityIndex + 'static>(
fb: FacebookInit,
index_creator: fn() -> T,
) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let index = index_creator();
let ordered_hashes = vec![
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"607314ef579bd2407752361ba1b0c1729d08b281",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"3e0e761030db6e479a7fb58b12881883f9f8c63f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
),
)
.await,
];
for i in 0..ordered_hashes.len() {
@ -90,10 +103,10 @@ pub fn test_linear_reachability<T: ReachabilityIndex + 'static>(
let dst = ordered_hashes.get(j).unwrap();
let future_result_src_to_dst =
index.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), *src, *dst);
assert!(future_result_src_to_dst.wait().unwrap());
assert!(future_result_src_to_dst.compat().await.unwrap());
let future_result_dst_to_src =
index.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), *dst, *src);
assert_eq!(future_result_dst_to_src.wait().unwrap(), src == dst);
assert_eq!(future_result_dst_to_src.compat().await.unwrap(), src == dst);
}
}
});
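
A sketch of the async_unit change the test above relies on: `async_unit::tokio_unit_test` now takes an async block (a std::future) rather than a closure, and runs it to completion on a Tokio 0.2 runtime. The body here is illustrative; `async_unit` is the crate from rust/shed/async_unit.

fn example_test() {
    async_unit::tokio_unit_test(async move {
        // Any `.await` in the body runs on the Tokio 0.2 runtime that
        // tokio_unit_test sets up.
        let value = async { 21 * 2 }.await;
        assert_eq!(value, 42);
    });
}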
@ -103,15 +116,16 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
fb: FacebookInit,
index_creator: fn() -> T,
) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let index = index_creator();
let root_node = string_to_bonsai(
ctx.clone(),
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
);
)
.await;
// order is oldest to newest
let branch_1 = vec![
@ -119,17 +133,20 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
)
.await,
];
// order is oldest to newest
@ -138,49 +155,58 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
)
.await,
];
let _merge_node = string_to_bonsai(
ctx.clone(),
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
);
)
.await;
for left_node in branch_1.into_iter() {
for right_node in branch_2.iter() {
@ -191,7 +217,8 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
left_node,
root_node
)
.wait()
.compat()
.await
.unwrap());
assert!(index
.query_reachability(
@ -200,7 +227,8 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
*right_node,
root_node
)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(
@ -209,7 +237,8 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
root_node,
left_node
)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(
@ -218,7 +247,8 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(
root_node,
*right_node
)
.wait()
.compat()
.await
.unwrap());
}
}
@ -229,47 +259,54 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
fb: FacebookInit,
index_creator: fn() -> T,
) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// this repo has no merges but many branches
let repo = Arc::new(branch_wide::getrepo(fb));
let repo = Arc::new(branch_wide::getrepo(fb).await);
let index = index_creator();
let root_node = string_to_bonsai(
ctx.clone(),
&repo,
"ecba698fee57eeeef88ac3dcc3b623ede4af47bd",
);
)
.await;
let b1 = string_to_bonsai(
ctx.clone(),
&repo,
"9e8521affb7f9d10e9551a99c526e69909042b20",
);
)
.await;
let b2 = string_to_bonsai(
ctx.clone(),
&repo,
"4685e9e62e4885d477ead6964a7600c750e39b03",
);
)
.await;
let b1_1 = string_to_bonsai(
ctx.clone(),
&repo,
"b6a8169454af58b4b72b3665f9aa0d25529755ff",
);
)
.await;
let b1_2 = string_to_bonsai(
ctx.clone(),
&repo,
"c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12",
);
)
.await;
let b2_1 = string_to_bonsai(
ctx.clone(),
&repo,
"04decbb0d1a65789728250ddea2fe8d00248e01c",
);
)
.await;
let b2_2 = string_to_bonsai(
ctx.clone(),
&repo,
"49f53ab171171b3180e125b918bd1cf0af7e5449",
);
)
.await;
// all nodes can reach the root
for above_root in vec![b1, b2, b1_1, b1_2, b2_1, b2_2].iter() {
@ -280,7 +317,8 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
*above_root,
root_node
)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(
@ -289,7 +327,8 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
root_node,
*above_root
)
.wait()
.compat()
.await
.unwrap());
}
@ -303,7 +342,8 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
*b1_node,
*b2_node
)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(
@ -312,7 +352,8 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
*b2_node,
*b1_node
)
.wait()
.compat()
.await
.unwrap());
}
}
@ -321,37 +362,45 @@ pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(
// - branch 1
assert!(index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b1_1, b1)
.wait()
.compat()
.await
.unwrap());
assert!(index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b1_2, b1)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b1_1, b1_2)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b1_2, b1_1)
.wait()
.compat()
.await
.unwrap());
// - branch 2
assert!(index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b2_1, b2)
.wait()
.compat()
.await
.unwrap());
assert!(index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b2_2, b2)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b2_1, b2_2)
.wait()
.compat()
.await
.unwrap());
assert!(!index
.query_reachability(ctx.clone(), repo.get_changeset_fetcher(), b2_2, b2_1)
.wait()
.compat()
.await
.unwrap());
});
}
@ -365,62 +414,70 @@ mod test {
use context::CoreContext;
use fbinit::FacebookInit;
use fixtures::linear;
use futures::Future;
use mononoke_types::Generation;
#[fbinit::test]
fn test_helpers(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let mut ordered_hashes_oldest_to_newest = vec![
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"607314ef579bd2407752361ba1b0c1729d08b281",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"3e0e761030db6e479a7fb58b12881883f9f8c63f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
),
)
.await,
];
ordered_hashes_oldest_to_newest.reverse();
for (i, node) in ordered_hashes_oldest_to_newest.into_iter().enumerate() {
assert_eq!(
fetch_generation_and_join(ctx.clone(), repo.get_changeset_fetcher(), node)
.wait()
.compat()
.await
.unwrap(),
(node, Generation::new(i as u64 + 1))
);

View File

@ -202,6 +202,7 @@ impl RepoReadWriteFetcher {
#[cfg(test)]
mod test {
use super::*;
use futures_preview::compat::Future01CompatExt;
use metaconfig_types::RepoReadOnly::*;
static CONFIG_MSG: &str = "Set by config option";
@ -222,166 +223,195 @@ mod test {
#[test]
fn test_readonly_config_no_sqlite() {
let fetcher =
RepoReadWriteFetcher::new(None, ReadOnly(CONFIG_MSG.to_string()), "repo".to_string());
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(CONFIG_MSG.to_string())
);
async_unit::tokio_unit_test(async move {
let fetcher = RepoReadWriteFetcher::new(
None,
ReadOnly(CONFIG_MSG.to_string()),
"repo".to_string(),
);
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(CONFIG_MSG.to_string())
);
})
}
#[test]
fn test_readwrite_config_no_sqlite() {
let fetcher = RepoReadWriteFetcher::new(None, ReadWrite, "repo".to_string());
assert_eq!(fetcher.readonly().wait().unwrap(), ReadWrite);
async_unit::tokio_unit_test(async move {
let fetcher = RepoReadWriteFetcher::new(None, ReadWrite, "repo".to_string());
assert_eq!(fetcher.readonly().compat().await.unwrap(), ReadWrite);
});
}
#[test]
fn test_readonly_config_with_sqlite() {
let sql_repo_read_write_status = SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadOnly(CONFIG_MSG.to_string()),
"repo".to_string(),
);
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(CONFIG_MSG.to_string())
);
async_unit::tokio_unit_test(async move {
let sql_repo_read_write_status =
SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadOnly(CONFIG_MSG.to_string()),
"repo".to_string(),
);
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(CONFIG_MSG.to_string())
);
});
}
#[test]
fn test_readwrite_with_sqlite() {
let sql_repo_read_write_status = SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
async_unit::tokio_unit_test(async move {
let sql_repo_read_write_status =
SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::MononokeWrite)],
)
.wait()
.unwrap();
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::MononokeWrite)],
)
.compat()
.await
.unwrap();
assert_eq!(fetcher.readonly().wait().unwrap(), ReadWrite);
assert_eq!(fetcher.readonly().compat().await.unwrap(), ReadWrite);
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::HgWrite)],
)
.wait()
.unwrap();
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::HgWrite)],
)
.compat()
.await
.unwrap();
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(DB_MSG.to_string())
);
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(DB_MSG.to_string())
);
});
}
#[test]
fn test_readwrite_with_sqlite_and_reason() {
let sql_repo_read_write_status = SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
async_unit::tokio_unit_test(async move {
let sql_repo_read_write_status =
SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
InsertStateWithReason::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::HgWrite, "reason123")],
)
.wait()
.unwrap();
InsertStateWithReason::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::HgWrite, "reason123")],
)
.compat()
.await
.unwrap();
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly("reason123".to_string())
);
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly("reason123".to_string())
);
});
}
#[test]
fn test_readwrite_with_sqlite_other_repo() {
let sql_repo_read_write_status = SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
async_unit::tokio_unit_test(async move {
let sql_repo_read_write_status =
SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("other_repo", &HgMononokeReadWrite::MononokeWrite)],
)
.wait()
.unwrap();
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("other_repo", &HgMononokeReadWrite::MononokeWrite)],
)
.compat()
.await
.unwrap();
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::MononokeWrite)],
)
.wait()
.unwrap();
InsertState::query(
&fetcher
.sql_repo_read_write_status
.clone()
.unwrap()
.write_connection,
&[("repo", &HgMononokeReadWrite::MononokeWrite)],
)
.compat()
.await
.unwrap();
assert_eq!(fetcher.readonly().wait().unwrap(), ReadWrite);
assert_eq!(fetcher.readonly().compat().await.unwrap(), ReadWrite);
})
}
#[test]
fn test_write() {
let sql_repo_read_write_status = SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().wait().unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
async_unit::tokio_unit_test(async move {
let sql_repo_read_write_status =
SqlRepoReadWriteStatus::with_sqlite_in_memory().unwrap();
let fetcher = RepoReadWriteFetcher::new(
Some(sql_repo_read_write_status),
ReadWrite,
"repo".to_string(),
);
// As the DB hasn't been populated for this row, ensure that we mark the repo as locked.
assert_eq!(
fetcher.readonly().compat().await.unwrap(),
ReadOnly(DEFAULT_MSG.to_string())
);
fetcher
.set_mononoke_read_write(&"repo is locked".to_string())
.wait()
.unwrap();
assert_eq!(fetcher.readonly().wait().unwrap(), ReadWrite);
fetcher
.set_mononoke_read_write(&"repo is locked".to_string())
.compat()
.await
.unwrap();
assert_eq!(fetcher.readonly().compat().await.unwrap(), ReadWrite);
});
}
}
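
A sketch of why the previously synchronous #[test]s above gained an async_unit wrapper: `.compat().await` needs an executor to run on, whereas the old `.wait()` calls blocked in place with no runtime. This assumes futures 0.1 and futures-preview as imported elsewhere in this diff; the future below is a hypothetical stand-in for `fetcher.readonly()`.

use futures::future as future01;
use futures_preview::compat::Future01CompatExt;

#[test]
fn readonly_style_test() {
    async_unit::tokio_unit_test(async move {
        // Stand-in for `fetcher.readonly()`, which returns a 0.1 future.
        let status = future01::ok::<&str, ()>("ReadWrite");
        assert_eq!(status.compat().await.unwrap(), "ReadWrite");
    });
}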

View File

@ -205,7 +205,7 @@ fn test_pushredirect_config() {
#[fbinit::test]
async fn get_changed_manifests_stream_test(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
// Commit that has only dir2 directory
let root_mf_id = HgChangesetId::from_str("051946ed218061e925fb120dac02634f9ad40ae2")?
@ -271,7 +271,7 @@ async fn get_changed_manifests_stream_test(fb: FacebookInit) -> Result<(), Error
#[fbinit::test]
async fn get_changed_manifests_stream_test_depth(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
let root_mf_id = HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4")?
.load(ctx.clone(), &repo.get_blobstore())
@ -328,7 +328,7 @@ async fn get_changed_manifests_stream_test_depth(fb: FacebookInit) -> Result<(),
#[fbinit::test]
async fn get_changed_manifests_stream_test_base_path(fb: FacebookInit) -> Result<(), Error> {
let ctx = CoreContext::test_mock(fb);
let repo = many_files_dirs::getrepo(fb);
let repo = many_files_dirs::getrepo(fb).await;
let root_mf_id = HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4")?
.load(ctx.clone(), &repo.get_blobstore())

View File

@ -9,10 +9,13 @@ use anyhow::Error;
use blobrepo::BlobRepo;
use context::CoreContext;
use fbinit::FacebookInit;
use futures::executor::spawn;
use futures::future::Future;
use futures::stream::Stream;
use futures_ext::BoxStream;
use futures_preview::{
compat::{Future01CompatExt, Stream01CompatExt},
stream::StreamExt,
};
use mercurial_types::nodehash::HgChangesetId;
use mercurial_types::HgNodeHash;
use mononoke_types::ChangesetId;
@ -36,16 +39,17 @@ pub fn string_to_nodehash(hash: &str) -> HgNodeHash {
HgNodeHash::from_str(hash).expect("Can't turn string to HgNodeHash")
}
pub fn string_to_bonsai(fb: FacebookInit, repo: &Arc<BlobRepo>, s: &str) -> ChangesetId {
pub async fn string_to_bonsai(fb: FacebookInit, repo: &Arc<BlobRepo>, s: &str) -> ChangesetId {
let ctx = CoreContext::test_mock(fb);
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(ctx, HgChangesetId::new(node))
.wait()
.compat()
.await
.unwrap()
.unwrap()
}
pub fn assert_changesets_sequence<I>(
pub async fn assert_changesets_sequence<I>(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
hashes: I,
@ -53,7 +57,7 @@ pub fn assert_changesets_sequence<I>(
) where
I: IntoIterator<Item = ChangesetId>,
{
let mut nodestream = spawn(stream);
let mut nodestream = stream.compat();
let mut received_hashes = HashSet::new();
for expected in hashes {
// If we pulled it in earlier, we've found it.
@ -64,13 +68,15 @@ pub fn assert_changesets_sequence<I>(
let expected_generation = repo
.clone()
.get_generation_number_by_bonsai(ctx.clone(), expected)
.wait()
.compat()
.await
.expect("Unexpected error");
// Keep pulling in hashes until we either find this one, or move on to a new generation
loop {
let hash = nodestream
.wait_stream()
.next()
.await
.expect("Unexpected end of stream")
.expect("Unexpected error");
@ -81,7 +87,8 @@ pub fn assert_changesets_sequence<I>(
let node_generation = repo
.clone()
.get_generation_number_by_bonsai(ctx.clone(), expected)
.wait()
.compat()
.await
.expect("Unexpected error");
assert!(
@ -102,7 +109,7 @@ pub fn assert_changesets_sequence<I>(
received_hashes
);
let next_node = nodestream.wait_stream();
let next_node = nodestream.next().await;
assert!(
next_node.is_none(),
"Too many nodes received: {:?}",
@ -121,10 +128,12 @@ mod test {
#[fbinit::test]
fn valid_changeset(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let bcs_id = string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let repo = linear::getrepo(fb).await;
let repo = Arc::new(repo);
let bcs_id =
string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a").await;
let changeset_stream = single_changeset_id(ctx.clone(), bcs_id.clone(), &repo);
assert_changesets_sequence(
@ -132,19 +141,22 @@ mod test {
&repo,
vec![bcs_id].into_iter(),
changeset_stream.boxify(),
);
)
.await;
});
}
#[fbinit::test]
fn invalid_changeset(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = linear::getrepo(fb).await;
let repo = Arc::new(repo);
let cs_id = ONES_CSID;
let changeset_stream = single_changeset_id(ctx.clone(), cs_id, &repo.clone());
assert_changesets_sequence(ctx, &repo, vec![].into_iter(), changeset_stream.boxify());
assert_changesets_sequence(ctx, &repo, vec![].into_iter(), changeset_stream.boxify())
.await;
});
}
}
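
A sketch of the stream bridging used in `assert_changesets_sequence` above: `executor::spawn(stream)` plus `wait_stream()` becomes `stream.compat()` plus `next().await`. A futures 0.1 stream compats into a 0.3 stream whose items are Results.

use futures::stream as stream01; // futures 0.1
use futures_preview::{compat::Stream01CompatExt, stream::StreamExt};

async fn drain_in_order() {
    let mut nodestream = stream01::iter_ok::<_, ()>(vec![1u32, 2, 3]).compat();
    while let Some(item) = nodestream.next().await {
        let n = item.expect("Unexpected error");
        assert!(n >= 1 && n <= 3);
    }
    // After exhaustion, `next().await` yields None, mirroring the
    // "Too many nodes received" check above.
    assert!(nodestream.next().await.is_none());
}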

View File

@ -189,16 +189,16 @@ mod test {
#[fbinit::test]
fn linear_ancestors(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
ctx.clone(),
&changeset_fetcher,
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await,
)
.boxify();
@ -206,32 +206,33 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(fb, &repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b"),
string_to_bonsai(fb, &repo, "cb15ca4a43a59acff5388cea9648c162afde8372"),
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(fb, &repo, "607314ef579bd2407752361ba1b0c1729d08b281"),
string_to_bonsai(fb, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f"),
string_to_bonsai(fb, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536"),
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await,
string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await,
string_to_bonsai(fb, &repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b").await,
string_to_bonsai(fb, &repo, "cb15ca4a43a59acff5388cea9648c162afde8372").await,
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await,
string_to_bonsai(fb, &repo, "607314ef579bd2407752361ba1b0c1729d08b281").await,
string_to_bonsai(fb, &repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f").await,
string_to_bonsai(fb, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn merge_ancestors_from_merge(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
ctx.clone(),
&changeset_fetcher,
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b"),
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await,
)
.boxify();
@ -239,37 +240,38 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b"),
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await,
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await,
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c").await,
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca").await,
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33").await,
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f").await,
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5").await,
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0").await,
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn merge_ancestors_one_branch(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
ctx.clone(),
&changeset_fetcher,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
)
.boxify();
@ -277,30 +279,31 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn unshared_merge_all(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// The unshared_merge_uneven fixture has a commit after the merge. Pull in everything
// by starting at the head and working back to the original unshared history commits
let repo = Arc::new(unshared_merge_uneven::getrepo(fb));
let repo = Arc::new(unshared_merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
ctx.clone(),
&changeset_fetcher,
string_to_bonsai(fb, &repo, "dd993aab2bed7276e17c88470286ba8459ba6d94"),
string_to_bonsai(fb, &repo, "dd993aab2bed7276e17c88470286ba8459ba6d94").await,
)
.boxify();
@ -308,36 +311,37 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "dd993aab2bed7276e17c88470286ba8459ba6d94"),
string_to_bonsai(fb, &repo, "9c6dd4e2c2f43c89613b094efb426cc42afdee2a"),
string_to_bonsai(fb, &repo, "64011f64aaf9c2ad2e674f57c033987da4016f51"),
string_to_bonsai(fb, &repo, "c1d5375bf73caab8725d759eaca56037c725c7d1"),
string_to_bonsai(fb, &repo, "e819f2dd9a01d3e63d9a93e298968df275e6ad7c"),
string_to_bonsai(fb, &repo, "5a3e8d5a475ec07895e64ec1e1b2ec09bfa70e4e"),
string_to_bonsai(fb, &repo, "76096af83f52cc9a225ccfd8ddfb05ea18132343"),
string_to_bonsai(fb, &repo, "33fb49d8a47b29290f5163e30b294339c89505a2"),
string_to_bonsai(fb, &repo, "03b0589d9788870817d03ce7b87516648ed5b33a"),
string_to_bonsai(fb, &repo, "2fa8b4ee6803a18db4649a3843a723ef1dfe852b"),
string_to_bonsai(fb, &repo, "f01e186c165a2fbe931fd1bf4454235398c591c9"),
string_to_bonsai(fb, &repo, "163adc0d0f5d2eb0695ca123addcb92bab202096"),
string_to_bonsai(fb, &repo, "0b94a2881dda90f0d64db5fae3ee5695a38e7c8f"),
string_to_bonsai(fb, &repo, "eee492dcdeaae18f91822c4359dd516992e0dbcd"),
string_to_bonsai(fb, &repo, "f61fdc0ddafd63503dcd8eed8994ec685bfc8941"),
string_to_bonsai(fb, &repo, "3775a86c64cceeaf68ffe3f012fc90774c42002b"),
string_to_bonsai(fb, &repo, "36ff88dd69c9966c9fad9d6d0457c52153039dde"),
string_to_bonsai(fb, &repo, "1700524113b1a3b1806560341009684b4378660b"),
string_to_bonsai(fb, &repo, "9d374b7e8180f933e3043ad1ffab0a9f95e2bac6"),
string_to_bonsai(fb, &repo, "dd993aab2bed7276e17c88470286ba8459ba6d94").await,
string_to_bonsai(fb, &repo, "9c6dd4e2c2f43c89613b094efb426cc42afdee2a").await,
string_to_bonsai(fb, &repo, "64011f64aaf9c2ad2e674f57c033987da4016f51").await,
string_to_bonsai(fb, &repo, "c1d5375bf73caab8725d759eaca56037c725c7d1").await,
string_to_bonsai(fb, &repo, "e819f2dd9a01d3e63d9a93e298968df275e6ad7c").await,
string_to_bonsai(fb, &repo, "5a3e8d5a475ec07895e64ec1e1b2ec09bfa70e4e").await,
string_to_bonsai(fb, &repo, "76096af83f52cc9a225ccfd8ddfb05ea18132343").await,
string_to_bonsai(fb, &repo, "33fb49d8a47b29290f5163e30b294339c89505a2").await,
string_to_bonsai(fb, &repo, "03b0589d9788870817d03ce7b87516648ed5b33a").await,
string_to_bonsai(fb, &repo, "2fa8b4ee6803a18db4649a3843a723ef1dfe852b").await,
string_to_bonsai(fb, &repo, "f01e186c165a2fbe931fd1bf4454235398c591c9").await,
string_to_bonsai(fb, &repo, "163adc0d0f5d2eb0695ca123addcb92bab202096").await,
string_to_bonsai(fb, &repo, "0b94a2881dda90f0d64db5fae3ee5695a38e7c8f").await,
string_to_bonsai(fb, &repo, "eee492dcdeaae18f91822c4359dd516992e0dbcd").await,
string_to_bonsai(fb, &repo, "f61fdc0ddafd63503dcd8eed8994ec685bfc8941").await,
string_to_bonsai(fb, &repo, "3775a86c64cceeaf68ffe3f012fc90774c42002b").await,
string_to_bonsai(fb, &repo, "36ff88dd69c9966c9fad9d6d0457c52153039dde").await,
string_to_bonsai(fb, &repo, "1700524113b1a3b1806560341009684b4378660b").await,
string_to_bonsai(fb, &repo, "9d374b7e8180f933e3043ad1ffab0a9f95e2bac6").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn no_common_ancestor(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(unshared_merge_uneven::getrepo(fb));
let repo = Arc::new(unshared_merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -345,19 +349,19 @@ mod test {
ctx.clone(),
changeset_fetcher,
vec![
string_to_bonsai(fb, &repo, "64011f64aaf9c2ad2e674f57c033987da4016f51"),
string_to_bonsai(fb, &repo, "1700524113b1a3b1806560341009684b4378660b"),
string_to_bonsai(fb, &repo, "64011f64aaf9c2ad2e674f57c033987da4016f51").await,
string_to_bonsai(fb, &repo, "1700524113b1a3b1806560341009684b4378660b").await,
],
);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn greatest_common_ancestor_different_branches(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -365,28 +369,25 @@ mod test {
ctx.clone(),
changeset_fetcher,
vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
],
);
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
)],
vec![string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn greatest_common_ancestor_same_branch(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -394,28 +395,25 @@ mod test {
ctx.clone(),
changeset_fetcher,
vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await,
],
);
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
)],
vec![string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn all_common_ancestors_different_branches(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -423,28 +421,25 @@ mod test {
ctx.clone(),
changeset_fetcher,
vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
],
);
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
)],
vec![string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn all_common_ancestors_same_branch(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -452,21 +447,22 @@ mod test {
ctx.clone(),
changeset_fetcher,
vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await,
],
);
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0").await,
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await,
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await,
],
nodestream,
);
)
.await;
});
}
}
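
A hedged sketch of an order-sensitive stream assertion in the new style, analogous to what the ancestor tests above do through `assert_changesets_sequence`: bridge the 0.1 stream, collect it, and compare against the expected sequence.

use futures::stream as stream01; // futures 0.1
use futures_preview::{compat::Stream01CompatExt, stream::StreamExt};

async fn assert_sequence(expected: Vec<u64>) {
    let nodestream = stream01::iter_ok::<_, ()>(expected.clone()).compat();
    // Each item is a Result; unwrap and gather in arrival order.
    let received: Vec<u64> = nodestream
        .map(|r| r.expect("Unexpected error"))
        .collect()
        .await;
    assert_eq!(received, expected);
}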

View File

@ -373,9 +373,9 @@ mod test {
#[fbinit::test]
fn empty_ancestors_combinators(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -387,13 +387,10 @@ mod test {
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], stream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], stream).await;
let excludes = vec![string_to_bonsai(
fb,
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
)];
let excludes =
vec![string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await];
let stream = DifferenceOfUnionsOfAncestorsNodeStream::new_with_excludes(
ctx.clone(),
@ -404,15 +401,15 @@ mod test {
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], stream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], stream).await;
});
}
#[fbinit::test]
fn linear_ancestors_with_excludes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -420,37 +417,26 @@ mod test {
ctx.clone(),
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![string_to_bonsai(
fb,
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
)],
vec![string_to_bonsai(
fb,
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
)],
vec![string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await],
vec![string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await],
)
.boxify();
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
)],
vec![string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn linear_ancestors_with_excludes_empty(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -458,28 +444,20 @@ mod test {
ctx.clone(),
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![string_to_bonsai(
fb,
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
)],
vec![string_to_bonsai(
fb,
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
)],
vec![string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await],
vec![string_to_bonsai(fb, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await],
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn ancestors_union(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -488,8 +466,8 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca").await,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
],
)
.boxify();
@ -497,27 +475,28 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca").await,
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33").await,
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f").await,
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
string_to_bonsai(fb, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5").await,
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0").await,
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
string_to_bonsai(fb, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn merge_ancestors_from_merge_excludes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -525,14 +504,10 @@ mod test {
ctx.clone(),
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![string_to_bonsai(
fb,
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
)],
vec![string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await],
vec![
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca").await,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
],
)
.boxify();
@ -541,20 +516,21 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b"),
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await,
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await,
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn merge_ancestors_from_merge_excludes_union(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -562,16 +538,8 @@ mod test {
ctx.clone(),
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![string_to_bonsai(
fb,
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
)],
vec![string_to_bonsai(
fb,
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
)],
vec![string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await],
vec![string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await],
)
.boxify();
@ -579,18 +547,19 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b"),
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await,
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await,
string_to_bonsai(fb, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c").await,
string_to_bonsai(fb, &repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca").await,
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33").await,
string_to_bonsai(fb, &repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f").await,
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0").await,
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await,
],
nodestream,
);
)
.await;
});
}
}

View File

@ -187,14 +187,14 @@ mod test {
#[fbinit::test]
fn intersect_identical_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let head_csid = string_to_bonsai(fb, &repo, hash);
let head_csid = string_to_bonsai(fb, &repo, hash).await;
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), head_csid.clone(), &repo).boxify(),
@ -205,21 +205,24 @@ mod test {
IntersectNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter())
.boxify();
assert_changesets_sequence(ctx, &repo, vec![head_csid], nodestream);
assert_changesets_sequence(ctx, &repo, vec![head_csid], nodestream).await;
});
}
#[fbinit::test]
fn intersect_three_different_nodes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_a947 = string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157");
let bcs_3c15 = string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759");
let bcs_d0a = string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0");
let bcs_a947 =
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await;
let bcs_3c15 =
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await;
let bcs_d0a =
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await;
// Note that these are *not* in generation order deliberately.
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), bcs_a947, &repo).boxify(),
@ -231,19 +234,20 @@ mod test {
IntersectNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter())
.boxify();
assert_changesets_sequence(ctx, &repo, vec![], nodestream);
assert_changesets_sequence(ctx, &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn intersect_three_identical_nodes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_d0a = string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0");
let bcs_d0a =
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await;
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), bcs_d0a, &repo).boxify(),
single_changeset_id(ctx.clone(), bcs_d0a, &repo).boxify(),
@ -253,19 +257,20 @@ mod test {
IntersectNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter())
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_d0a], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_d0a], nodestream).await;
});
}
#[fbinit::test]
fn intersect_nesting(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_3c15 = string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759");
let bcs_3c15 =
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await;
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), bcs_3c15.clone(), &repo).boxify(),
single_changeset_id(ctx.clone(), bcs_3c15.clone(), &repo).boxify(),
@ -283,15 +288,16 @@ mod test {
IntersectNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter())
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_3c15.clone()], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_3c15.clone()], nodestream)
.await;
});
}
#[fbinit::test]
fn intersection_of_unions(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -299,13 +305,14 @@ mod test {
let hash2 = "3c15267ebf11807f3d772eb891272b911ec68759";
let hash3 = "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157";
let inputs = get_single_bonsai_streams(ctx.clone(), &repo, &vec![hash1, hash2]);
let inputs = get_single_bonsai_streams(ctx.clone(), &repo, &vec![hash1, hash2]).await;
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
// This set has a different node sequence, so that we can demonstrate that we skip nodes
// when they're not going to contribute.
let inputs = get_single_bonsai_streams(ctx.clone(), &repo, &[hash3, hash2, hash1]);
let inputs =
get_single_bonsai_streams(ctx.clone(), &repo, &[hash3, hash2, hash1]).await;
let nodestream2 =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -318,24 +325,25 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759"),
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await,
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn intersect_error_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let changeset = string_to_bonsai(fb, &repo, hash);
let changeset = string_to_bonsai(fb, &repo, hash).await;
let inputs: Vec<BonsaiNodeStream> = vec![
RepoErrorStream { item: changeset }.boxify(),
@ -360,26 +368,26 @@ mod test {
#[fbinit::test]
fn intersect_nothing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let inputs: Vec<BonsaiNodeStream> = vec![];
let nodestream =
IntersectNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter());
assert_changesets_sequence(ctx, &repo, vec![], nodestream.boxify());
assert_changesets_sequence(ctx, &repo, vec![], nodestream.boxify()).await;
});
}
#[fbinit::test]
fn slow_ready_intersect_nothing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let inputs: Vec<BonsaiNodeStream> = vec![NotReadyEmptyStream::new(repeats).boxify()];
@ -404,9 +412,9 @@ mod test {
#[fbinit::test]
fn intersect_unshared_merge_even(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(unshared_merge_even::getrepo(fb));
let repo = Arc::new(unshared_merge_even::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -420,7 +428,8 @@ mod test {
"33fb49d8a47b29290f5163e30b294339c89505a2",
"03b0589d9788870817d03ce7b87516648ed5b33a",
],
);
)
.await;
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -435,7 +444,8 @@ mod test {
"0b94a2881dda90f0d64db5fae3ee5695a38e7c8f",
"f61fdc0ddafd63503dcd8eed8994ec685bfc8941",
],
);
)
.await;
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -446,21 +456,18 @@ mod test {
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"03b0589d9788870817d03ce7b87516648ed5b33a",
)],
vec![string_to_bonsai(fb, &repo, "03b0589d9788870817d03ce7b87516648ed5b33a").await],
nodestream.boxify(),
);
)
.await;
});
}
#[fbinit::test]
fn intersect_unshared_merge_uneven(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(unshared_merge_uneven::getrepo(fb));
let repo = Arc::new(unshared_merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -474,7 +481,8 @@ mod test {
"64011f64aaf9c2ad2e674f57c033987da4016f51",
"03b0589d9788870817d03ce7b87516648ed5b33a",
],
);
)
.await;
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -489,7 +497,8 @@ mod test {
"0b94a2881dda90f0d64db5fae3ee5695a38e7c8f",
"f61fdc0ddafd63503dcd8eed8994ec685bfc8941",
],
);
)
.await;
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -501,13 +510,10 @@ mod test {
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
fb,
&repo,
"03b0589d9788870817d03ce7b87516648ed5b33a",
)],
vec![string_to_bonsai(fb, &repo, "03b0589d9788870817d03ce7b87516648ed5b33a").await],
nodestream,
);
)
.await;
});
}
}
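The conversions above all follow one mechanical pattern: the test body becomes a future handed straight to async_unit::tokio_unit_test, so every helper can be awaited on the test runtime. A minimal sketch of what the new entry point amounts to, assuming tokio-compat's Runtime; the helper below is an illustrative stand-in, not the actual async_unit source:

use std::future::Future;

// Hypothetical model of the updated async_unit entry point: accept a future
// instead of a closure and drive it to completion on a compat runtime.
pub fn tokio_unit_test<F: Future>(fut: F) -> F::Output {
    let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
    // block_on_std drives a std::future::Future (tokio-compat's bridge API).
    runtime.block_on_std(fut)
}

fn usage() {
    tokio_unit_test(async move {
        // Test bodies now .await helpers such as getrepo(), string_to_bonsai()
        // and assert_changesets_sequence() instead of calling .wait().
    });
}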


@ -32,12 +32,12 @@ mod test {
use cloned::cloned;
use context::CoreContext;
use fbinit::FacebookInit;
use futures::executor::spawn;
use futures::{
future::{join_all, ok},
Stream,
};
use futures_ext::{BoxFuture, BoxStream, StreamExt};
use futures_preview::{compat::Stream01CompatExt, stream::StreamExt as _};
use mononoke_types::ChangesetId;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::{seq::SliceRandom, thread_rng, Rng};
@ -60,20 +60,20 @@ mod test {
rp_entries: Vec<RevsetEntry>,
}
fn get_changesets_from_repo(ctx: CoreContext, repo: &BlobRepo) -> Vec<ChangesetId> {
async fn get_changesets_from_repo(ctx: CoreContext, repo: &BlobRepo) -> Vec<ChangesetId> {
let changeset_fetcher = repo.get_changeset_fetcher();
let mut all_changesets_executor = spawn(
repo.get_bonsai_heads_maybe_stale(ctx.clone())
.map({
cloned!(ctx);
move |head| AncestorsNodeStream::new(ctx.clone(), &changeset_fetcher, head)
})
.flatten(),
);
let mut all_changesets_stream = repo
.get_bonsai_heads_maybe_stale(ctx.clone())
.map({
cloned!(ctx);
move |head| AncestorsNodeStream::new(ctx.clone(), &changeset_fetcher, head)
})
.flatten()
.compat();
let mut all_changesets: Vec<ChangesetId> = Vec::new();
loop {
all_changesets.push(match all_changesets_executor.wait_stream() {
all_changesets.push(match all_changesets_stream.next().await {
None => break,
Some(changeset) => changeset.expect("Failed to get changesets from repo"),
});
@ -84,11 +84,11 @@ mod test {
}
impl RevsetSpec {
pub fn add_hashes<G>(&mut self, ctx: CoreContext, repo: &BlobRepo, random: &mut G)
pub async fn add_hashes<G>(&mut self, ctx: CoreContext, repo: &BlobRepo, random: &mut G)
where
G: Rng,
{
let all_changesets = get_changesets_from_repo(ctx, repo);
let all_changesets = get_changesets_from_repo(ctx, repo).await;
for elem in self.rp_entries.iter_mut() {
if let &mut RevsetEntry::SingleNode(None) = elem {
*elem =
@ -254,16 +254,16 @@ mod test {
// is a SetDifference by pure chance.
}
fn match_streams(
async fn match_streams(
expected: BoxStream<ChangesetId, Error>,
actual: BoxStream<ChangesetId, Error>,
) -> bool {
let mut expected = {
let mut nodestream = spawn(expected);
let mut nodestream = expected.compat();
let mut expected = HashSet::new();
loop {
let hash = nodestream.wait_stream();
let hash = nodestream.next().await;
match hash {
Some(hash) => {
let hash = hash.expect("unexpected error");
@ -277,10 +277,10 @@ mod test {
expected
};
let mut nodestream = spawn(actual);
let mut nodestream = actual.compat();
while !expected.is_empty() {
match nodestream.wait_stream() {
match nodestream.next().await {
Some(hash) => {
let hash = hash.expect("unexpected error");
if !expected.remove(&hash) {
@ -292,24 +292,29 @@ mod test {
}
}
}
nodestream.wait_stream().is_none() && expected.is_empty()
nodestream.next().await.is_none() && expected.is_empty()
}
fn match_hashset_to_revset(ctx: CoreContext, repo: Arc<BlobRepo>, mut set: RevsetSpec) -> bool {
set.add_hashes(ctx.clone(), &*repo, &mut thread_rng());
async fn match_hashset_to_revset(
ctx: CoreContext,
repo: Arc<BlobRepo>,
mut set: RevsetSpec,
) -> bool {
set.add_hashes(ctx.clone(), &*repo, &mut thread_rng()).await;
let mut hashes = set.as_hashes();
let mut nodestream = spawn(set.as_revset(ctx, repo));
let mut nodestream = set.as_revset(ctx, repo).compat();
while !hashes.is_empty() {
let hash = nodestream
.wait_stream()
.next()
.await
.expect("Unexpected end of stream")
.expect("Unexpected error");
if !hashes.remove(&hash) {
return false;
}
}
nodestream.wait_stream().is_none() && hashes.is_empty()
nodestream.next().await.is_none() && hashes.is_empty()
}
// This is slightly icky. I would like to construct $test_name as setops_$repo, but concat_idents!
@ -319,10 +324,10 @@ mod test {
#[test]
fn $test_name() {
fn prop(fb: FacebookInit, set: RevsetSpec) -> bool {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new($repo::getrepo(fb));
match_hashset_to_revset(ctx, repo, set)
let repo = Arc::new($repo::getrepo(fb).await);
match_hashset_to_revset(ctx, repo, set).await
})
}
@ -397,14 +402,14 @@ mod test {
($test_name:ident, $repo:ident) => {
#[fbinit::test]
fn $test_name(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new($repo::getrepo(fb));
let repo = Arc::new($repo::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let all_changesets = get_changesets_from_repo(ctx.clone(), &*repo);
let all_changesets = get_changesets_from_repo(ctx.clone(), &*repo).await;
// Limit the number of changesets, otherwise tests take too much time
let max_changesets = 7;
@ -464,7 +469,7 @@ mod test {
.boxify();
assert!(
match_streams(expected, actual.boxify()),
match_streams(expected, actual.boxify()).await,
"streams do not match for {:?} {:?}",
include,
exclude
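The quickcheck helpers in this file drop futures 0.1's executor::spawn / wait_stream polling in favour of the 0.1-to-0.3 stream bridge. A minimal sketch of that draining pattern, assuming a futures 0.1 stream as input and the same futures_preview imports used above:

use futures::stream::Stream as OldStream; // futures 0.1
use futures_preview::{compat::Stream01CompatExt, stream::StreamExt as _};

// .compat() bridges a 0.1 stream into a 0.3 stream of Results, and
// .next().await replaces the old spawn(..) + wait_stream() pair.
async fn drain<S>(stream: S) -> Vec<S::Item>
where
    S: OldStream,
    S::Error: std::fmt::Debug,
{
    let mut stream = stream.compat();
    let mut out = Vec::new();
    while let Some(item) = stream.next().await {
        out.push(item.expect("unexpected error"));
    }
    out
}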


@ -240,23 +240,29 @@ mod test {
use context::CoreContext;
use fbinit::FacebookInit;
use futures_ext::StreamExt;
use futures_preview::compat::Future01CompatExt;
use mercurial_types::HgChangesetId;
use revset_test_helper::assert_changesets_sequence;
use revset_test_helper::string_to_nodehash;
fn string_to_bonsai(ctx: CoreContext, repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
async fn string_to_bonsai(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
s: &'static str,
) -> ChangesetId {
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(ctx, HgChangesetId::new(node))
.wait()
.compat()
.await
.unwrap()
.unwrap()
}
#[fbinit::test]
fn linear_range(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let nodestream = RangeNodeStream::new(
ctx.clone(),
@ -265,12 +271,14 @@ mod test {
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
)
.boxify();
@ -282,34 +290,39 @@ mod test {
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
)
.await,
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await,
],
nodestream,
);
)
.await;
})
}
#[fbinit::test]
fn linear_direct_parent_range(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let nodestream = RangeNodeStream::new(
ctx.clone(),
@ -318,12 +331,14 @@ mod test {
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
)
.boxify();
@ -335,19 +350,21 @@ mod test {
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(ctx, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
)
.await,
string_to_bonsai(ctx, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17").await,
],
nodestream,
);
)
.await;
})
}
#[fbinit::test]
fn linear_single_node_range(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let nodestream = RangeNodeStream::new(
ctx.clone(),
@ -356,33 +373,34 @@ mod test {
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
)
.boxify();
assert_changesets_sequence(
ctx.clone(),
&repo,
vec![string_to_bonsai(
ctx,
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
)],
vec![
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await,
],
nodestream,
);
)
.await;
})
}
#[fbinit::test]
fn linear_empty_range(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
// These are swapped, so won't find anything
let nodestream = RangeNodeStream::new(
@ -392,24 +410,26 @@ mod test {
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
)
.await,
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
})
}
#[fbinit::test]
fn merge_range_from_merge(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let nodestream = RangeNodeStream::new(
ctx.clone(),
@ -418,12 +438,14 @@ mod test {
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
),
)
.await,
)
.boxify();
@ -435,24 +457,27 @@ mod test {
ctx.clone(),
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(ctx, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
)
.await,
string_to_bonsai(ctx, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5").await,
],
nodestream,
);
)
.await;
})
}
#[fbinit::test]
fn merge_range_everything(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let nodestream = RangeNodeStream::new(
ctx.clone(),
@ -461,12 +486,14 @@ mod test {
ctx.clone(),
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
),
)
.await,
)
.boxify();
@ -478,66 +505,79 @@ mod test {
ctx.clone(),
&repo,
"7221fa26c85f147db37c2b5f4dbcd5fe52e7645b",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
)
.await,
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
)
.await,
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c").await,
],
nodestream,
);
)
.await;
})
}
}
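string_to_bonsai above is the single-future version of the same bridge: get_bonsai_from_hg still returns a futures 0.1 future, and .compat().await takes over from .wait(). Sketched with a stand-in value in place of the repo call:

use futures::future::ok; // futures 0.1
use futures_preview::compat::Future01CompatExt;

// .compat() turns a 0.1 Future<Item = T, Error = E> into a 0.3 future of
// Result<T, E>, which the async fn can await instead of blocking on.
async fn bridged_lookup() -> u32 {
    let fut = ok::<u32, ()>(42); // stand-in for get_bonsai_from_hg(...)
    fut.compat().await.unwrap()
}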


@ -137,14 +137,14 @@ mod test {
#[fbinit::test]
fn difference_identical_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let changeset = string_to_bonsai(fb, &repo, hash);
let changeset = string_to_bonsai(fb, &repo, hash).await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&changeset_fetcher,
@ -152,20 +152,20 @@ mod test {
single_changeset_id(ctx.clone(), changeset.clone(), &repo).boxify(),
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn difference_node_and_empty(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let changeset = string_to_bonsai(fb, &repo, hash);
let changeset = string_to_bonsai(fb, &repo, hash).await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&changeset_fetcher,
@ -173,19 +173,20 @@ mod test {
NotReadyEmptyStream::new(0).boxify(),
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![changeset], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![changeset], nodestream).await;
});
}
#[fbinit::test]
fn difference_empty_and_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_id = string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let bcs_id =
string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a").await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
@ -195,15 +196,15 @@ mod test {
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn difference_two_nodes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -211,12 +212,14 @@ mod test {
fb,
&repo.clone(),
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
);
)
.await;
let bcs_id_2 = string_to_bonsai(
fb,
&repo.clone(),
"3c15267ebf11807f3d772eb891272b911ec68759",
);
)
.await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&changeset_fetcher,
@ -225,20 +228,20 @@ mod test {
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_id_1], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![bcs_id_1], nodestream).await;
});
}
#[fbinit::test]
fn difference_error_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let changeset = string_to_bonsai(fb, &repo, hash);
let changeset = string_to_bonsai(fb, &repo, hash).await;
let mut nodestream = spawn(
SetDifferenceNodeStream::new(
ctx.clone(),
@ -266,11 +269,11 @@ mod test {
#[fbinit::test]
fn slow_ready_difference_nothing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let mut nodestream = SetDifferenceNodeStream::new(
@ -298,9 +301,9 @@ mod test {
#[fbinit::test]
fn difference_union_with_single_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -312,7 +315,8 @@ mod test {
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
],
);
)
.await;
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -321,7 +325,8 @@ mod test {
fb,
&repo.clone(),
"3c15267ebf11807f3d772eb891272b911ec68759",
);
)
.await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&changeset_fetcher,
@ -334,19 +339,20 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await,
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn difference_single_node_with_union(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -358,7 +364,8 @@ mod test {
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
],
);
)
.await;
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -366,7 +373,8 @@ mod test {
fb,
&repo.clone(),
"3c15267ebf11807f3d772eb891272b911ec68759",
);
)
.await;
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&changeset_fetcher,
@ -375,15 +383,15 @@ mod test {
)
.boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn difference_merge_even(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_even::getrepo(fb));
let repo = Arc::new(merge_even::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -396,7 +404,8 @@ mod test {
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
"16839021e338500b3cf7c9b871c8a07351697d68",
],
);
)
.await;
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -411,7 +420,8 @@ mod test {
"d7542c9db7f4c77dab4b315edd328edf1514952f",
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
],
);
)
.await;
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -427,19 +437,20 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "4dcf230cd2f20577cb3e88ba52b73b376a2b3f69"),
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(fb, &repo, "4dcf230cd2f20577cb3e88ba52b73b376a2b3f69").await,
string_to_bonsai(fb, &repo, "16839021e338500b3cf7c9b871c8a07351697d68").await,
],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn difference_merge_uneven(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(merge_uneven::getrepo(fb));
let repo = Arc::new(merge_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -452,7 +463,8 @@ mod test {
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
"16839021e338500b3cf7c9b871c8a07351697d68",
],
);
)
.await;
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -466,7 +478,8 @@ mod test {
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
],
);
)
.await;
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -482,11 +495,12 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "7221fa26c85f147db37c2b5f4dbcd5fe52e7645b").await,
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
],
nodestream,
);
)
.await;
});
}
}


@ -54,20 +54,22 @@ impl ChangesetFetcher for TestChangesetFetcher {
HashMap::new()
}
}
pub fn get_single_bonsai_streams(
pub async fn get_single_bonsai_streams(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
hashes: &[&str],
) -> Vec<BonsaiNodeStream> {
hashes
.iter()
.map(|hash| {
single_changeset_id(
ctx.clone(),
string_to_bonsai(ctx.fb, &repo.clone(), hash),
&repo,
)
.boxify()
})
.collect()
let mut ret = vec![];
for hash in hashes {
let stream = single_changeset_id(
ctx.clone(),
string_to_bonsai(ctx.fb, &repo.clone(), hash).await,
&repo,
)
.boxify();
ret.push(stream);
}
ret
}
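The iterator version could not survive the conversion as-is: a plain closure handed to map cannot await, so the helper now collects through a sequential for loop. A hypothetical alternative that keeps the combinator shape, assuming futures_preview's join_all, would build one async block per hash and await them together:

use futures_preview::future::join_all;

// Hypothetical concurrent variant of the loop above; the async block body is
// a stand-in for the string_to_bonsai(..).await + single_changeset_id call.
async fn lookup_all(hashes: &[&str]) -> Vec<usize> {
    join_all(hashes.iter().map(|hash| async move {
        hash.len() // stand-in for the per-hash async lookup
    }))
    .await
}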


@ -173,14 +173,14 @@ mod test {
#[fbinit::test]
fn union_identical_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let head_csid = string_to_bonsai(fb, &repo, hash);
let head_csid = string_to_bonsai(fb, &repo, hash).await;
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), head_csid.clone(), &repo).boxify(),
@ -189,20 +189,21 @@ mod test {
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![head_csid.clone()], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![head_csid.clone()], nodestream)
.await;
});
}
#[fbinit::test]
fn union_error_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let hash = "a5ffa77602a066db7d5cfb9fb5823a0895717c5a";
let expected_csid = string_to_bonsai(fb, &repo, hash);
let expected_csid = string_to_bonsai(fb, &repo, hash).await;
let inputs: Vec<BonsaiNodeStream> = vec![
RepoErrorStream {
@ -229,15 +230,18 @@ mod test {
#[fbinit::test]
fn union_three_nodes(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_d0a = string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0");
let bcs_3c1 = string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759");
let bcs_a947 = string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157");
let bcs_d0a =
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await;
let bcs_3c1 =
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await;
let bcs_a947 =
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await;
// Note that these are *not* in generation order deliberately.
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), bcs_a947, &repo).boxify(),
@ -253,35 +257,38 @@ mod test {
&repo,
vec![bcs_3c1, bcs_a947, bcs_d0a],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn union_nothing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let inputs: Vec<BonsaiNodeStream> = vec![];
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream);
assert_changesets_sequence(ctx.clone(), &repo, vec![], nodestream).await;
});
}
#[fbinit::test]
fn union_nesting(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let bcs_d0a = string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0");
let bcs_3c1 = string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759");
let bcs_d0a =
string_to_bonsai(fb, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0").await;
let bcs_3c1 =
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await;
// Note that these are *not* in generation order deliberately.
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), bcs_d0a, &repo).boxify(),
@ -291,7 +298,8 @@ mod test {
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
let bcs_a947 = string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157");
let bcs_a947 =
string_to_bonsai(fb, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157").await;
let inputs: Vec<BonsaiNodeStream> = vec![
nodestream,
single_changeset_id(ctx.clone(), bcs_a947, &repo).boxify(),
@ -304,17 +312,18 @@ mod test {
&repo,
vec![bcs_3c1, bcs_a947, bcs_d0a],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn slow_ready_union_nothing(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -339,16 +348,16 @@ mod test {
#[fbinit::test]
fn union_branch_even_repo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(branch_even::getrepo(fb));
let repo = Arc::new(branch_even::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodes = vec![
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await,
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await,
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await,
];
// Two nodes should share the same generation number
@ -359,23 +368,28 @@ mod test {
.collect();
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
assert_changesets_sequence(ctx.clone(), &repo, nodes, nodestream);
assert_changesets_sequence(ctx.clone(), &repo, nodes, nodestream).await;
});
}
#[fbinit::test]
fn union_branch_uneven_repo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(branch_uneven::getrepo(fb));
let repo = Arc::new(branch_uneven::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let cs_1 = string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a");
let cs_2 = string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f");
let cs_3 = string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed");
let cs_4 = string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33");
let cs_5 = string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc");
let cs_1 =
string_to_bonsai(fb, &repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a").await;
let cs_2 =
string_to_bonsai(fb, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f").await;
let cs_3 =
string_to_bonsai(fb, &repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed").await;
let cs_4 =
string_to_bonsai(fb, &repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33").await;
let cs_5 =
string_to_bonsai(fb, &repo, "264f01429683b3dd8042cb3979e8bf37007118bc").await;
// Two nodes should share the same generation number
let inputs: Vec<BonsaiNodeStream> = vec![
single_changeset_id(ctx.clone(), cs_1.clone(), &repo).boxify(),
@ -392,15 +406,16 @@ mod test {
&repo,
vec![cs_5, cs_4, cs_3, cs_1, cs_2],
nodestream,
);
)
.await;
});
}
#[fbinit::test]
fn union_branch_wide_repo(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(branch_wide::getrepo(fb));
let repo = Arc::new(branch_wide::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -414,7 +429,8 @@ mod test {
"c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12",
"9e8521affb7f9d10e9551a99c526e69909042b20",
],
);
)
.await;
let nodestream =
UnionNodeStream::new(ctx.clone(), &changeset_fetcher, inputs.into_iter()).boxify();
@ -422,13 +438,14 @@ mod test {
ctx.clone(),
&repo,
vec![
string_to_bonsai(fb, &repo, "49f53ab171171b3180e125b918bd1cf0af7e5449"),
string_to_bonsai(fb, &repo, "c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12"),
string_to_bonsai(fb, &repo, "4685e9e62e4885d477ead6964a7600c750e39b03"),
string_to_bonsai(fb, &repo, "9e8521affb7f9d10e9551a99c526e69909042b20"),
string_to_bonsai(fb, &repo, "49f53ab171171b3180e125b918bd1cf0af7e5449").await,
string_to_bonsai(fb, &repo, "c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12").await,
string_to_bonsai(fb, &repo, "4685e9e62e4885d477ead6964a7600c750e39b03").await,
string_to_bonsai(fb, &repo, "9e8521affb7f9d10e9551a99c526e69909042b20").await,
],
nodestream,
);
)
.await;
});
}
}
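assert_changesets_sequence is awaited at every call site above. A simplified stand-in for such an async assertion helper, assuming a futures_preview 0.3 stream as input:

use futures_preview::stream::{Stream, StreamExt};

// Drain the stream and compare against the expected order; a minimal model
// of an async assertion helper in the spirit of assert_changesets_sequence.
async fn assert_sequence<S: Stream<Item = u32> + Unpin>(expected: Vec<u32>, actual: S) {
    let got: Vec<u32> = actual.collect().await;
    assert_eq!(expected, got);
}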


@ -85,29 +85,30 @@ mod test {
#[fbinit::test]
fn validate_accepts_single_node(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let head_csid = string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let head_csid =
string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a").await;
let nodestream = single_changeset_id(ctx.clone(), head_csid.clone(), &repo).boxify();
let nodestream =
ValidateNodeStream::new(ctx.clone(), nodestream, &changeset_fetcher).boxify();
assert_changesets_sequence(ctx, &repo, vec![head_csid], nodestream);
assert_changesets_sequence(ctx, &repo, vec![head_csid], nodestream).await;
});
}
#[fbinit::test]
fn slow_ready_validates(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
// Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -136,11 +137,12 @@ mod test {
#[fbinit::test]
#[should_panic]
fn repeat_hash_panics(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let head_csid = string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let head_csid =
string_to_bonsai(fb, &repo, "a5ffa77602a066db7d5cfb9fb5823a0895717c5a").await;
let nodestream = single_changeset_id(ctx.clone(), head_csid.clone(), &repo)
.chain(single_changeset_id(ctx.clone(), head_csid.clone(), &repo));
@ -161,18 +163,20 @@ mod test {
#[fbinit::test]
#[should_panic]
fn wrong_order_panics(fb: FacebookInit) {
async_unit::tokio_unit_test(move || {
async_unit::tokio_unit_test(async move {
let ctx = CoreContext::test_mock(fb);
let repo = Arc::new(linear::getrepo(fb));
let repo = Arc::new(linear::getrepo(fb).await);
let nodestream = single_changeset_id(
ctx.clone(),
string_to_bonsai(fb, &repo, "cb15ca4a43a59acff5388cea9648c162afde8372").clone(),
string_to_bonsai(fb, &repo, "cb15ca4a43a59acff5388cea9648c162afde8372")
.await
.clone(),
&repo,
)
.chain(single_changeset_id(
ctx.clone(),
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759"),
string_to_bonsai(fb, &repo, "3c15267ebf11807f3d772eb891272b911ec68759").await,
&repo,
));
let changeset_fetcher: Arc<dyn ChangesetFetcher> =
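The #[should_panic] tests above keep working after the conversion because a panic raised inside the future propagates out of the runtime's block_on and into the test harness. A minimal check of that assumption:

#[test]
#[should_panic]
fn panics_propagate_through_block_on() {
    let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
    runtime.block_on_std(async {
        panic!("raised inside the future, observed by the test harness");
    });
}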


@ -94,7 +94,6 @@ mod tests {
use fbinit::FacebookInit;
use futures::stream::{self, Stream};
use futures_preview::StreamExt;
use futures_preview::{FutureExt as NewFutureExt, TryFutureExt};
use futures_util::compat::Stream01CompatExt;
use revset::AncestorsNodeStream;
@ -129,63 +128,51 @@ mod tests {
#[fbinit::test]
fn test_build_idmap_linear(fb: FacebookInit) -> Result<()> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb);
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let repo = linear::getrepo(fb).await;
let head = ChangesetId::from_str(
"7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
let head = ChangesetId::from_str(
"7785606eb1f26ff5722c831de402350cf97052dc44bc175da6ac0d715a3dbbf6",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
Ok(())
}
.boxed()
.compat(),
)
Ok(())
})
}
#[fbinit::test]
fn test_build_idmap_merge_even(fb: FacebookInit) -> Result<()> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let repo = merge_even::getrepo(fb);
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let repo = merge_even::getrepo(fb).await;
let head = ChangesetId::from_str(
"567a25d453cafaef6550de955c52b91bf9295faf38d67b6421d5d2e532e5adef",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
let head = ChangesetId::from_str(
"567a25d453cafaef6550de955c52b91bf9295faf38d67b6421d5d2e532e5adef",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
Ok(())
}
.boxed()
.compat(),
)
Ok(())
})
}
#[fbinit::test]
fn test_build_idmap_merge_uneven(fb: FacebookInit) -> Result<()> {
let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
runtime.block_on(
async move {
let ctx = CoreContext::test_mock(fb);
let repo = merge_uneven::getrepo(fb);
runtime.block_on_std(async move {
let ctx = CoreContext::test_mock(fb);
let repo = merge_uneven::getrepo(fb).await;
let head = ChangesetId::from_str(
"288d72de7fd26ebcd19f5e4f1b41542f22f4a9f7e2f6845fa04e8fd70064973d",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
let head = ChangesetId::from_str(
"288d72de7fd26ebcd19f5e4f1b41542f22f4a9f7e2f6845fa04e8fd70064973d",
)?;
let idmap = build_idmap(&ctx, &repo, head).await?;
assert_topologic_sorted(&ctx, &repo, head, idmap).await?;
Ok(())
}
.boxed()
.compat(),
)
Ok(())
})
}
}
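These tests show the runtime-level version of the same cleanup: block_on() expects a futures 0.1 future, so the old code wrapped the async block in .boxed().compat(), while block_on_std() accepts a std future directly. Side by side with a trivial body:

use futures_preview::{FutureExt as NewFutureExt, TryFutureExt};

fn old_style() -> Result<(), ()> {
    let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
    // boxed() pins the future; compat() bridges it back to futures 0.1 so the
    // 0.1-flavoured block_on can drive it.
    runtime.block_on(async move { Ok::<_, ()>(()) }.boxed().compat())
}

fn new_style() -> Result<(), ()> {
    let mut runtime = tokio_compat::runtime::Runtime::new().unwrap();
    // block_on_std drives the std future directly; no bridging needed.
    runtime.block_on_std(async move { Ok::<_, ()>(()) })
}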

File diff suppressed because it is too large


@ -13,7 +13,6 @@ use blobstore::Storable;
use bookmarks::{BookmarkName, BookmarkUpdateReason};
use bytes::Bytes;
use context::CoreContext;
use futures::future::Future;
use futures_util::{compat::Future01CompatExt, future};
use maplit::btreemap;
use mercurial_types::HgChangesetId;
@ -360,7 +359,7 @@ impl From<BookmarkName> for BookmarkIdentifier {
}
}
pub fn store_files<T: AsRef<str>>(
pub async fn store_files<T: AsRef<str>>(
ctx: CoreContext,
files: BTreeMap<&str, Option<T>>,
repo: BlobRepo,
@ -377,7 +376,8 @@ pub fn store_files<T: AsRef<str>>(
let content_id = content
.into_blob()
.store(ctx.clone(), repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, size as u64, None);
@ -391,7 +391,7 @@ pub fn store_files<T: AsRef<str>>(
res
}
pub fn store_rename(
pub async fn store_rename(
ctx: CoreContext,
copy_src: (MPath, ChangesetId),
path: &str,
@ -404,7 +404,8 @@ pub fn store_rename(
let content_id = content
.into_blob()
.store(ctx, repo.blobstore())
.wait()
.compat()
.await
.unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, size as u64, Some(copy_src));
@ -462,7 +463,7 @@ pub async fn resolve_cs_id(
}
}
pub fn create_commit(
pub async fn create_commit(
ctx: CoreContext,
repo: BlobRepo,
parents: Vec<ChangesetId>,
@ -483,12 +484,13 @@ pub fn create_commit(
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx, repo.clone())
.wait()
.compat()
.await
.unwrap();
bcs_id
}
pub fn create_commit_with_date(
pub async fn create_commit_with_date(
ctx: CoreContext,
repo: BlobRepo,
parents: Vec<ChangesetId>,
@ -510,7 +512,8 @@ pub fn create_commit_with_date(
let bcs_id = bcs.get_changeset_id();
save_bonsai_changesets(vec![bcs], ctx, repo.clone())
.wait()
.compat()
.await
.unwrap();
bcs_id
}
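Making store_files, store_rename, create_commit, and create_commit_with_date async fns ripples out to every caller, which now awaits them. Where two helpers are genuinely independent, they no longer have to block the thread one after the other; a hypothetical composition, assuming futures_preview's try_join and stand-in helper bodies:

use futures_preview::future::try_join;

// Stand-ins for two independent async setup helpers.
async fn make_blob() -> Result<u32, ()> {
    Ok(1)
}

async fn make_bookmark() -> Result<u32, ()> {
    Ok(2)
}

async fn setup() -> Result<(u32, u32), ()> {
    // Both futures make progress concurrently; the old .wait()-based helpers
    // could only run back to back.
    try_join(make_blob(), make_bookmark()).await
}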