mononoke: pass CoreContext down to bonsai-hg-mapping

Reviewed By: aslpavel

Differential Revision: D13277450

fbshipit-source-id: 97cfbd917b321727bb4d960c91a784787660eb5b
Lukas Piatkowski, 2018-11-30 10:11:37 -08:00 (committed by Facebook Github Bot)
parent 8ca5b452a2
commit 08db0a35eb
57 changed files with 2547 additions and 766 deletions
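
The gist of the diff below: every operation on the bonsai-hg mapping, and the BlobRepo helpers layered on top of it (get_bonsai_from_hg, get_hg_from_bonsai_changeset, get_bookmark, and so on), now takes a CoreContext as its first argument, and call sites thread it through, cloning it wherever a future takes ownership. As a minimal sketch, this is the trait shape after the change, lifted from the bonsai-hg-mapping hunks further down (imports as in that crate; BonsaiHgMappingEntry and BonsaiOrHgChangesetIds are assumed in scope, since they are defined in the same file):

use context::CoreContext;
use failure::Error;
use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{HgChangesetId, RepositoryId};
use mononoke_types::ChangesetId;

pub trait BonsaiHgMapping: Send + Sync {
    // Writes carry the caller's context.
    fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error>;

    // Reads do too; the provided helpers simply forward it.
    fn get(
        &self,
        ctx: CoreContext,
        repo_id: RepositoryId,
        cs_id: BonsaiOrHgChangesetIds,
    ) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error>;

    fn get_bonsai_from_hg(
        &self,
        ctx: CoreContext,
        repo_id: RepositoryId,
        cs_id: HgChangesetId,
    ) -> BoxFuture<Option<ChangesetId>, Error> {
        self.get(ctx, repo_id, cs_id.into())
            .map(|result| result.into_iter().next().map(|entry| entry.bcs_id))
            .boxify()
    }
}

The SqlBonsaiHgMapping implementation accepts the context but does not use it yet (the parameter is named _ctxt), and callers with no real context to hand, such as the API server and the config reader, fall back to CoreContext::test_mock() behind a TODO, so this diff is plumbing only.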


@ -12,6 +12,7 @@ mod lfs;
use std::collections::HashMap;
use context::CoreContext;
use failure::Error;
use futures::{Future, IntoFuture, future::join_all};
use futures_ext::{BoxFuture, FutureExt};
@ -59,8 +60,11 @@ impl Mononoke {
&self,
MononokeQuery { repo, kind, .. }: MononokeQuery,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
// TODO(T37478150, luk): This is not good, APIServer is not a test case, but I want to handle this
// later
let ctx = CoreContext::test_mock();
match self.repos.get(&repo) {
Some(repo) => repo.send_query(kind),
Some(repo) => repo.send_query(ctx, kind),
None => match kind {
MononokeRepoQuery::LfsBatch { .. } => {
// LFS batch request require error in the different format:


@ -20,6 +20,7 @@ use tokio::runtime::TaskExecutor;
use api;
use blobrepo::{BlobRepo, get_sha256_alias, get_sha256_alias_key};
use context::CoreContext;
use mercurial_types::{HgManifestId, RepositoryId};
use mercurial_types::manifest::Content;
use metaconfig::repoconfig::RepoConfig;
@ -115,6 +116,7 @@ impl MononokeRepo {
fn is_ancestor(
&self,
ctx: CoreContext,
proposed_ancestor: String,
proposed_descendent: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
@ -122,14 +124,14 @@ impl MononokeRepo {
let src_hash_maybe = FS::get_changeset_id(proposed_descendent.clone());
let dst_hash_maybe = FS::get_changeset_id(proposed_ancestor.clone());
let src_hash_future = src_hash_maybe.into_future().or_else({
cloned!(self.repo, proposed_descendent);
move |_| FS::string_to_bookmark_changeset_id(proposed_descendent, repo)
cloned!(ctx, self.repo, proposed_descendent);
move |_| FS::string_to_bookmark_changeset_id(ctx, proposed_descendent, repo)
});
let src_hash_future = src_hash_future
.and_then({
cloned!(self.repo);
move |hg_cs_id| repo.get_bonsai_from_hg(&hg_cs_id).from_err()
cloned!(ctx, self.repo);
move |hg_cs_id| repo.get_bonsai_from_hg(ctx, &hg_cs_id).from_err()
})
.and_then(move |maybenode| {
maybenode.ok_or(ErrorKind::NotFound(
@ -139,14 +141,14 @@ impl MononokeRepo {
});
let dst_hash_future = dst_hash_maybe.into_future().or_else({
cloned!(self.repo, proposed_ancestor);
move |_| FS::string_to_bookmark_changeset_id(proposed_ancestor, repo)
cloned!(ctx, self.repo, proposed_ancestor);
move |_| FS::string_to_bookmark_changeset_id(ctx, proposed_ancestor, repo)
});
let dst_hash_future = dst_hash_future
.and_then({
cloned!(self.repo);
move |hg_cs_id| repo.get_bonsai_from_hg(&hg_cs_id).from_err()
move |hg_cs_id| repo.get_bonsai_from_hg(ctx, &hg_cs_id).from_err()
})
.and_then(move |maybenode| {
maybenode.ok_or(ErrorKind::NotFound(format!("{}", proposed_ancestor), None))
@ -159,7 +161,11 @@ impl MononokeRepo {
.and_then(|src| dst_hash_future.map(move |dst| (src, dst)))
.and_then({
cloned!(self.repo);
move |(src, dst)| genbfs.query_reachability(repo.get_changeset_fetcher(), src, dst).from_err()
move |(src, dst)| {
genbfs
.query_reachability(repo.get_changeset_fetcher(), src, dst)
.from_err()
}
})
.then(|r| tx.send(r).map_err(|_| ())),
);
@ -298,7 +304,11 @@ impl MononokeRepo {
.boxify()
}
pub fn send_query(&self, msg: MononokeRepoQuery) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
pub fn send_query(
&self,
ctx: CoreContext,
msg: MononokeRepoQuery,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
use MononokeRepoQuery::*;
match msg {
@ -310,7 +320,7 @@ impl MononokeRepo {
IsAncestor {
proposed_ancestor,
proposed_descendent,
} => self.is_ancestor(proposed_ancestor, proposed_descendent),
} => self.is_ancestor(ctx, proposed_ancestor, proposed_descendent),
DownloadLargeFile { oid } => self.download_large_file(oid),
LfsBatch {


@ -16,6 +16,7 @@ use futures_ext::{BoxFuture, FutureExt};
use api;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
use mercurial_types::{HgChangesetId, HgNodeHash};
use mononoke_types::{MPath, hash::Sha256};
@ -42,12 +43,13 @@ pub fn get_nodehash(hash: &str) -> Result<HgNodeHash, ErrorKind> {
// this method doesn't consider that the string could be a node hash, so any caller
// should do that check themselves, and if it fails, then attempt to use this method.
pub fn string_to_bookmark_changeset_id(
ctx: CoreContext,
node_string: String,
repo: Arc<BlobRepo>,
) -> BoxFuture<HgChangesetId, ErrorKind> {
get_bookmark(node_string.clone())
.into_future()
.and_then({ move |bookmark| api::get_changeset_by_bookmark(repo, bookmark).from_err() })
.and_then(move |bookmark| api::get_changeset_by_bookmark(ctx, repo, bookmark).from_err())
.map_err(move |e| ErrorKind::InvalidInput(node_string.to_string(), Some(e.into())))
.boxify()
}


@ -21,6 +21,7 @@ extern crate clap;
#[macro_use]
extern crate cloned;
extern crate cmdlib;
extern crate context;
extern crate env_logger;
#[macro_use]
extern crate failure_ext as failure;
@ -73,6 +74,7 @@ use actix_web::{server, App, HttpRequest, HttpResponse, Json, State, http::heade
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use clap::Arg;
use context::CoreContext;
use failure::{err_msg, Result};
use futures::Future;
use http::uri::{Authority, Parts, PathAndQuery, Scheme, Uri};
@ -293,6 +295,8 @@ fn create_config<P: AsRef<Path>>(
bookmark: Option<&str>,
hash: Option<&str>,
) -> Result<RepoConfigs> {
// TODO(T37478150, luk) This is not a test case, will be fixed in later diffs
let ctx = CoreContext::test_mock();
let config_repo = BlobRepo::new_rocksdb(
logger.new(o!["repo" => "Config repo"]),
path.as_ref(),
@ -303,7 +307,7 @@ fn create_config<P: AsRef<Path>>(
.ok_or_else(|| err_msg(""))
.and_then(|bookmark| {
let bookmark = Bookmark::new(bookmark)?;
runtime.block_on(config_repo.get_bookmark(&bookmark))
runtime.block_on(config_repo.get_bookmark(ctx, &bookmark))
})
.and_then(|bookmark| bookmark.ok_or_else(|| err_msg("bookmark not found")))
.or_else(|e| {


@ -39,6 +39,7 @@ extern crate bonsai_hg_mapping;
extern crate bookmarks;
extern crate cachelib;
extern crate changesets;
extern crate context;
extern crate crypto;
extern crate dbbookmarks;
extern crate delayblob;


@ -41,6 +41,7 @@ use bonsai_hg_mapping::{BonsaiHgMapping, BonsaiHgMappingEntry, BonsaiOrHgChanges
use bookmarks::{self, Bookmark, BookmarkPrefix, Bookmarks};
use cachelib;
use changesets::{CachingChangests, ChangesetEntry, ChangesetInsert, Changesets, SqlChangesets};
use context::CoreContext;
use dbbookmarks::SqlBookmarks;
use delayblob::DelayBlob;
use file::fetch_file_envelope;
@ -636,25 +637,28 @@ impl BlobRepo {
.boxify()
}
pub fn get_changesets(&self) -> BoxStream<HgNodeHash, Error> {
pub fn get_changesets(&self, ctx: CoreContext) -> BoxStream<HgNodeHash, Error> {
STATS::get_changesets.add_value(1);
HgBlobChangesetStream {
repo: self.clone(),
state: BCState::Idle,
heads: self.get_heads_maybe_stale().boxify(),
heads: self.get_heads_maybe_stale(ctx).boxify(),
seen: HashSet::new(),
}.boxify()
}
/// Heads maybe read from replica, so they may be out of date
pub fn get_heads_maybe_stale(&self) -> impl Stream<Item = HgNodeHash, Error = Error> {
pub fn get_heads_maybe_stale(
&self,
ctx: CoreContext,
) -> impl Stream<Item = HgNodeHash, Error = Error> {
STATS::get_heads_maybe_stale.add_value(1);
self.bookmarks
.list_by_prefix_maybe_stale(&BookmarkPrefix::empty(), &self.repoid)
.and_then({
let repo = self.clone();
move |(_, cs)| {
repo.get_hg_from_bonsai_changeset(cs)
repo.get_hg_from_bonsai_changeset(ctx.clone(), cs)
.map(|cs| cs.into_nodehash())
}
})
@ -668,13 +672,17 @@ impl BlobRepo {
}
// TODO(stash): make it accept ChangesetId
pub fn changeset_exists(&self, changesetid: &HgChangesetId) -> BoxFuture<bool, Error> {
pub fn changeset_exists(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<bool, Error> {
STATS::changeset_exists.add_value(1);
let changesetid = changesetid.clone();
let repo = self.clone();
let repoid = self.repoid.clone();
self.get_bonsai_from_hg(&changesetid)
self.get_bonsai_from_hg(ctx, &changesetid)
.and_then(move |maybebonsai| match maybebonsai {
Some(bonsai) => repo.changesets
.get(repoid, bonsai)
@ -687,12 +695,13 @@ impl BlobRepo {
pub fn many_changesets_exists(
&self,
ctx: CoreContext,
changesetids: &[&HgChangesetId],
) -> BoxFuture<Vec<HgChangesetId>, Error> {
STATS::many_changesets_exists.add_value(1);
let param = BonsaiOrHgChangesetIds::Hg(Vec::from_iter(changesetids.iter().map(|cs| **cs)));
self.bonsai_hg_mapping.get(self.repoid, param)
self.bonsai_hg_mapping.get(ctx, self.repoid, param)
.map(|entries| entries.into_iter().map(|entry| entry.hg_cs_id).collect())
// TODO(stash, luk): T37303879 also need to check that entries exist in changeset table
.boxify()
@ -713,22 +722,21 @@ impl BlobRepo {
// TODO(stash): make it accept ChangesetId
pub fn get_changeset_parents(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<Vec<HgChangesetId>, Error> {
STATS::get_changeset_parents.add_value(1);
let changesetid = *changesetid;
let repo = self.clone();
self.get_bonsai_cs_entry_or_fail(changesetid)
self.get_bonsai_cs_entry_or_fail(ctx.clone(), changesetid)
.map(|bonsai| bonsai.parents)
.and_then({
cloned!(repo);
move |bonsai_parents| {
future::join_all(
bonsai_parents.into_iter().map(move |bonsai_parent| {
repo.get_hg_from_bonsai_changeset(bonsai_parent)
}),
)
future::join_all(bonsai_parents.into_iter().map(move |bonsai_parent| {
repo.get_hg_from_bonsai_changeset(ctx.clone(), bonsai_parent)
}))
}
})
.boxify()
@ -753,12 +761,13 @@ impl BlobRepo {
fn get_bonsai_cs_entry_or_fail(
&self,
ctx: CoreContext,
changesetid: HgChangesetId,
) -> impl Future<Item = ChangesetEntry, Error = Error> {
let repoid = self.repoid.clone();
let changesets = self.changesets.clone();
self.get_bonsai_from_hg(&changesetid)
self.get_bonsai_from_hg(ctx, &changesetid)
.and_then(move |maybebonsai| {
maybebonsai.ok_or(ErrorKind::BonsaiMappingNotFound(changesetid).into())
})
@ -801,7 +810,11 @@ impl BlobRepo {
Box::new(HgBlobEntry::new_root(self.blobstore.clone(), *manifestid))
}
pub fn get_bookmark(&self, name: &Bookmark) -> BoxFuture<Option<HgChangesetId>, Error> {
pub fn get_bookmark(
&self,
ctx: CoreContext,
name: &Bookmark,
) -> BoxFuture<Option<HgChangesetId>, Error> {
STATS::get_bookmark.add_value(1);
self.bookmarks
.get(name, &self.repoid)
@ -809,7 +822,7 @@ impl BlobRepo {
let repo = self.clone();
move |cs_opt| match cs_opt {
None => future::ok(None).left_future(),
Some(cs) => repo.get_hg_from_bonsai_changeset(cs)
Some(cs) => repo.get_hg_from_bonsai_changeset(ctx, cs)
.map(|cs| Some(cs))
.right_future(),
}
@ -824,14 +837,17 @@ impl BlobRepo {
/// Heads maybe read from replica, so they may be out of date. Prefer to use this method
/// over `get_bookmarks` unless you need the most up-to-date bookmarks
pub fn get_bookmarks_maybe_stale(&self) -> BoxStream<(Bookmark, HgChangesetId), Error> {
pub fn get_bookmarks_maybe_stale(
&self,
ctx: CoreContext,
) -> BoxStream<(Bookmark, HgChangesetId), Error> {
STATS::get_bookmarks_maybe_stale.add_value(1);
self.bookmarks
.list_by_prefix_maybe_stale(&BookmarkPrefix::empty(), &self.repoid)
.and_then({
let repo = self.clone();
move |(bm, cs)| {
repo.get_hg_from_bonsai_changeset(cs)
repo.get_hg_from_bonsai_changeset(ctx.clone(), cs)
.map(move |cs| (bm, cs))
}
})
@ -876,11 +892,12 @@ impl BlobRepo {
pub fn get_bonsai_from_hg(
&self,
ctx: CoreContext,
hg_cs_id: &HgChangesetId,
) -> BoxFuture<Option<ChangesetId>, Error> {
STATS::get_bonsai_from_hg.add_value(1);
self.bonsai_hg_mapping
.get_bonsai_from_hg(self.repoid, *hg_cs_id)
.get_bonsai_from_hg(ctx, self.repoid, *hg_cs_id)
}
pub fn get_bonsai_changeset(
@ -894,13 +911,14 @@ impl BlobRepo {
// TODO(stash): make it accept ChangesetId
pub fn get_generation_number(
&self,
ctx: CoreContext,
cs: &HgChangesetId,
) -> impl Future<Item = Option<Generation>, Error = Error> {
STATS::get_generation_number.add_value(1);
let repo = self.clone();
let repoid = self.repoid.clone();
self.get_bonsai_from_hg(&cs)
self.get_bonsai_from_hg(ctx, &cs)
.and_then(move |maybebonsai| match maybebonsai {
Some(bonsai) => repo.changesets
.get(repoid, bonsai)
@ -1016,6 +1034,7 @@ impl BlobRepo {
pub fn store_file_change_or_reuse(
&self,
ctx: CoreContext,
p1: Option<HgFileNodeId>,
p2: Option<HgFileNodeId>,
path: &MPath,
@ -1056,7 +1075,7 @@ impl BlobRepo {
let path = path.clone();
move |maybe_entry| match maybe_entry {
None => repo.store_file_change(p1, p2, &path, change.as_ref())
None => repo.store_file_change(ctx, p1, p2, &path, change.as_ref())
.right_future(),
_ => future::ok(maybe_entry).left_future(),
}
@ -1065,6 +1084,7 @@ impl BlobRepo {
pub fn store_file_change(
&self,
ctx: CoreContext,
p1: Option<HgFileNodeId>,
p2: Option<HgFileNodeId>,
path: &MPath,
@ -1077,7 +1097,7 @@ impl BlobRepo {
Some(change) => {
let copy_from_fut = match change.copy_from() {
None => future::ok(None).left_future(),
Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(*bcs_id)
Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx, *bcs_id)
.and_then({
cloned!(repo);
move |cs_id| repo.get_changeset_by_changesetid(&cs_id)
@ -1277,6 +1297,7 @@ impl BlobRepo {
pub fn get_manifest_from_bonsai(
&self,
ctx: CoreContext,
bcs: BonsaiChangeset,
manifest_p1: Option<&HgManifestId>,
manifest_p2: Option<&HgManifestId>,
@ -1315,9 +1336,10 @@ impl BlobRepo {
.into_future()
.and_then({
let entry = entry.cloned();
cloned!(repo, path);
cloned!(ctx, repo, path);
move |(p1, p2)| {
repo.store_file_change_or_reuse(
ctx,
p1.and_then(|x| x),
p2.and_then(|x| x),
&path,
@ -1359,29 +1381,32 @@ impl BlobRepo {
pub fn get_hg_from_bonsai_changeset(
&self,
ctx: CoreContext,
bcs_id: ChangesetId,
) -> impl Future<Item = HgChangesetId, Error = Error> + Send {
STATS::get_hg_from_bonsai_changeset.add_value(1);
fn create_hg_from_bonsai_changeset(
ctx: CoreContext,
repo: &BlobRepo,
bcs_id: ChangesetId,
) -> BoxFuture<HgChangesetId, Error> {
repo.fetch(&bcs_id)
.and_then({
cloned!(repo);
cloned!(ctx, repo);
move |bcs| {
let parents_futs = bcs.parents()
.map(|p_bcs_id| {
repo.get_hg_from_bonsai_changeset(*p_bcs_id).and_then({
cloned!(repo);
move |p_cs_id| repo.get_changeset_by_changesetid(&p_cs_id)
})
repo.get_hg_from_bonsai_changeset(ctx.clone(), *p_bcs_id)
.and_then({
cloned!(repo);
move |p_cs_id| repo.get_changeset_by_changesetid(&p_cs_id)
})
})
.collect::<Vec<_>>();
future::join_all(parents_futs)
// fetch parents
.and_then({
cloned!(bcs, repo);
cloned!(ctx, bcs, repo);
move |parents| {
let mut parents = parents.into_iter();
let p0 = parents.next();
@ -1401,7 +1426,7 @@ impl BlobRepo {
p0_hash.map(|h| h.into_nodehash()).as_ref(),
p1_hash.map(|h| h.into_nodehash()).as_ref(),
);
repo.get_manifest_from_bonsai(bcs, mf_p0.as_ref(), mf_p1.as_ref())
repo.get_manifest_from_bonsai(ctx, bcs, mf_p0.as_ref(), mf_p1.as_ref())
.map(move |(manifest_id, incomplete_filenodes)| {
(manifest_id, incomplete_filenodes, hg_parents)
})
@ -1409,7 +1434,7 @@ impl BlobRepo {
})
// create changeset
.and_then({
cloned!(repo, bcs);
cloned!(ctx, repo, bcs);
move |(manifest_id, incomplete_filenodes, parents)| {
let metadata = ChangesetMetadata {
user: bcs.author().to_string(),
@ -1443,12 +1468,15 @@ impl BlobRepo {
move |_| incomplete_filenodes.upload(cs_id, &repo)
})
.and_then({
cloned!(repo);
move |_| repo.bonsai_hg_mapping.add(BonsaiHgMappingEntry {
repo_id: repo.get_repoid(),
hg_cs_id: cs_id,
bcs_id,
})
cloned!(ctx, repo);
move |_| repo.bonsai_hg_mapping.add(
ctx,
BonsaiHgMappingEntry {
repo_id: repo.get_repoid(),
hg_cs_id: cs_id,
bcs_id,
},
)
})
.map(move |_| cs_id)
.boxify()
@ -1460,12 +1488,12 @@ impl BlobRepo {
}
self.bonsai_hg_mapping
.get_hg_from_bonsai(self.repoid, bcs_id)
.get_hg_from_bonsai(ctx.clone(), self.repoid, bcs_id)
.and_then({
let repo = self.clone();
move |cs_id| match cs_id {
Some(cs_id) => future::ok(cs_id).left_future(),
None => create_hg_from_bonsai_changeset(&repo, bcs_id).right_future(),
None => create_hg_from_bonsai_changeset(ctx, &repo, bcs_id).right_future(),
}
})
}
@ -1942,7 +1970,12 @@ pub struct CreateChangeset {
}
impl CreateChangeset {
pub fn create(self, repo: &BlobRepo, mut scuba_logger: ScubaSampleBuilder) -> ChangesetHandle {
pub fn create(
self,
ctx: CoreContext,
repo: &BlobRepo,
mut scuba_logger: ScubaSampleBuilder,
) -> ChangesetHandle {
STATS::create_changeset.add_value(1);
// This is used for logging, so that we can tie up all our pieces without knowing about
// the final commit hash
@ -2133,7 +2166,7 @@ impl CreateChangeset {
};
bonsai_hg_mapping
.add(bonsai_hg_entry)
.add(ctx, bonsai_hg_entry)
.map(move |_| (hg_cs, bonsai_cs))
.context("While inserting mapping")
}


@ -20,6 +20,7 @@ use scuba_ext::{ScubaSampleBuilder, ScubaSampleBuilderExt};
use stats::Timeseries;
use blobstore::Blobstore;
use context::CoreContext;
use filenodes::{FilenodeInfo, Filenodes};
use mercurial::file;
use mercurial_types::{Changeset, Entry, HgChangesetId, HgEntryId, HgNodeHash, HgNodeKey,
@ -77,8 +78,8 @@ impl ChangesetHandle {
}
}
pub fn ready_cs_handle(repo: Arc<BlobRepo>, hg_cs: HgChangesetId) -> Self {
let bonsai_cs = repo.get_bonsai_from_hg(&hg_cs)
pub fn ready_cs_handle(ctx: CoreContext, repo: Arc<BlobRepo>, hg_cs: HgChangesetId) -> Self {
let bonsai_cs = repo.get_bonsai_from_hg(ctx, &hg_cs)
.and_then(move |bonsai_id| {
bonsai_id.ok_or(ErrorKind::BonsaiMappingNotFound(hg_cs).into())
})


@ -18,6 +18,7 @@ extern crate scuba_ext;
extern crate blobrepo;
extern crate blobstore;
extern crate changesets;
extern crate context;
extern crate dbbookmarks;
extern crate fixtures;
#[macro_use]
@ -39,6 +40,7 @@ use std::sync::Arc;
use blobrepo::{compute_changed_files, BlobRepo, ErrorKind};
use blobstore::{Blobstore, LazyMemblob, PrefixBlobstore};
use context::CoreContext;
use mercurial_types::{manifest, Changeset, Entry, FileType, HgChangesetId, HgEntryId,
HgManifestId, HgParents, MPath, MPathElement, RepoPath, RepositoryId};
use mononoke_types::{BonsaiChangeset, ChangesetId, ContentId, DateTime, FileChange, FileContents,
@ -289,6 +291,7 @@ test_both_repotypes!(
);
fn check_bonsai_creation(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
@ -308,7 +311,8 @@ fn check_bonsai_creation(repo: BlobRepo) {
let commit = run_future(commit.get_completed_changeset()).unwrap();
let commit = &commit.1;
let bonsai_cs_id = run_future(repo.get_bonsai_from_hg(&commit.get_changeset_id())).unwrap();
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx, &commit.get_changeset_id())).unwrap();
assert!(bonsai_cs_id.is_some());
let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap();
assert_eq!(
@ -327,6 +331,7 @@ test_both_repotypes!(
);
fn check_bonsai_creation_with_rename(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let parent = {
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
@ -371,11 +376,13 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) {
let child_cs = run_future(child.get_completed_changeset()).unwrap();
let child_cs = &child_cs.1;
let parent_bonsai_cs_id = run_future(repo.get_bonsai_from_hg(&parent_cs.get_changeset_id()))
.unwrap()
.unwrap();
let parent_bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx.clone(), &parent_cs.get_changeset_id()))
.unwrap()
.unwrap();
let bonsai_cs_id = run_future(repo.get_bonsai_from_hg(&child_cs.get_changeset_id())).unwrap();
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx, &child_cs.get_changeset_id())).unwrap();
let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap();
let fc = bonsai.file_changes().collect::<BTreeMap<_, _>>();
let file = MPath::new("file").unwrap();
@ -677,6 +684,7 @@ fn make_file_change(
#[test]
fn test_get_manifest_from_bonsai() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = merge_uneven::getrepo(None);
let get_manifest_for_changeset = |cs_nodehash: &str| -> HgManifestId {
*run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(
@ -716,6 +724,7 @@ fn test_get_manifest_from_bonsai() {
// fails with conflict
{
let ms_hash = run_future(repo.get_manifest_from_bonsai(
ctx.clone(),
make_bonsai_changeset(None, None, vec![]),
Some(&ms1),
Some(&ms2),
@ -733,6 +742,7 @@ fn test_get_manifest_from_bonsai() {
// resolves same content different parents for `branch` file
{
let (ms_hash, _) = run_future(repo.get_manifest_from_bonsai(
ctx.clone(),
make_bonsai_changeset(None, None, vec![("base", None)]),
Some(&ms1),
Some(&ms2),
@ -779,7 +789,7 @@ fn test_get_manifest_from_bonsai() {
let fc = run_future(make_file_change(content_expected, &repo)).unwrap();
let bcs = make_bonsai_changeset(None, None, vec![("base", None), ("new", Some(fc))]);
let (ms_hash, _) =
run_future(repo.get_manifest_from_bonsai(bcs, Some(&ms1), Some(&ms2)))
run_future(repo.get_manifest_from_bonsai(ctx, bcs, Some(&ms1), Some(&ms2)))
.expect("adding new file should not produce coflict");
let entries = run_future(get_entries(&ms_hash)).unwrap();
let new = entries.get("new").expect("new file should be in entries");
@ -798,6 +808,7 @@ fn test_get_manifest_from_bonsai() {
#[test]
fn test_case_conflict_in_manifest() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let get_manifest_for_changeset = |cs_id: &HgChangesetId| -> HgManifestId {
*run_future(repo.get_changeset_by_changesetid(cs_id))
@ -810,7 +821,7 @@ fn test_case_conflict_in_manifest() {
));
let mf = get_manifest_for_changeset(&hg_cs);
let bonsai_parent = run_future(repo.get_bonsai_from_hg(&hg_cs))
let bonsai_parent = run_future(repo.get_bonsai_from_hg(ctx.clone(), &hg_cs))
.unwrap()
.unwrap();
@ -827,7 +838,7 @@ fn test_case_conflict_in_manifest() {
);
let child_hg_cs =
run_future(repo.get_hg_from_bonsai_changeset(bcs_id.clone())).unwrap();
run_future(repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id.clone())).unwrap();
let child_mf = get_manifest_for_changeset(&child_hg_cs);
assert_eq!(
run_future(repo.check_case_conflict_in_manifest(


@ -18,6 +18,7 @@ use scuba_ext::ScubaSampleBuilder;
use blobrepo::{BlobRepo, ChangesetHandle, ChangesetMetadata, CreateChangeset, HgBlobEntry,
UploadHgFileContents, UploadHgFileEntry, UploadHgNodeHash, UploadHgTreeEntry};
use blobstore::{EagerMemblob, LazyMemblob};
use context::CoreContext;
use mercurial_types::{FileType, HgBlobNode, HgNodeHash, RepoPath};
use mononoke_types::DateTime;
use std::sync::Arc;
@ -190,7 +191,11 @@ pub fn create_changeset_no_parents(
cs_metadata,
must_check_case_conflicts: true,
};
create_changeset.create(repo, ScubaSampleBuilder::with_discard())
create_changeset.create(
CoreContext::test_mock(),
repo,
ScubaSampleBuilder::with_discard(),
)
}
pub fn create_changeset_one_parent(
@ -215,7 +220,11 @@ pub fn create_changeset_one_parent(
cs_metadata,
must_check_case_conflicts: true,
};
create_changeset.create(repo, ScubaSampleBuilder::with_discard())
create_changeset.create(
CoreContext::test_mock(),
repo,
ScubaSampleBuilder::with_discard(),
)
}
pub fn string_to_nodehash(hash: &str) -> HgNodeHash {


@ -19,6 +19,7 @@ use futures_ext::{BoxFuture, FutureExt, StreamExt};
use blobrepo::{BlobManifest, BlobRepo, HgBlobChangeset, HgBlobEntry};
use blobrepo::internal::{IncompleteFilenodes, MemoryRootManifest};
use bonsai_utils::{bonsai_diff, BonsaiDiffResult};
use context::CoreContext;
use mercurial_types::{Changeset, Entry, HgChangesetId, HgManifestId, HgNodeHash, Type};
use mercurial_types::manifest_utils::{changed_entry_stream, ChangedEntry};
use mononoke_types::DateTime;
@ -110,6 +111,7 @@ impl fmt::Debug for BonsaiMFVerifyDifference {
}
pub struct BonsaiMFVerify {
pub ctx: CoreContext,
pub logger: Logger,
pub repo: BlobRepo,
pub follow_limit: usize,
@ -128,6 +130,7 @@ impl BonsaiMFVerify {
let repo = self.repo.in_memory_writes_READ_DOC_COMMENT();
visit_changesets(
self.ctx,
self.logger,
repo,
BonsaiMFVerifyVisitor {
@ -153,6 +156,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
fn visit(
self,
ctx: CoreContext,
logger: Logger,
repo: BlobRepo,
changeset: HgBlobChangeset,
@ -180,7 +184,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
debug!(logger, "Starting bonsai diff computation");
let parents_fut = repo.get_changeset_parents(&changeset_id).and_then({
let parents_fut = repo.get_changeset_parents(ctx, &changeset_id).and_then({
let repo = repo.clone();
move |parent_hashes| {
let changesets = parent_hashes


@ -7,6 +7,7 @@
use std::sync::Arc;
use chashmap::CHashMap;
use context::CoreContext;
use failure::{Error, Result};
use futures::{Future, Stream, sync::mpsc::{self, Sender}};
use slog::Logger;
@ -29,6 +30,7 @@ pub trait ChangesetVisitor: Clone + Send + Sync + 'static {
/// unavoidable due to the way tokio works. To share state between instances, use an Arc.
fn visit(
self,
ctx: CoreContext,
logger: Logger,
repo: BlobRepo,
changeset: HgBlobChangeset,
@ -49,6 +51,7 @@ pub struct ChangesetVisitMeta {
/// this is typically highly parallel). Dropping the returned stream will cause further visiting to
/// be canceled.
pub fn visit_changesets<V, I>(
ctx: CoreContext,
logger: Logger,
repo: BlobRepo,
visitor: V,
@ -80,7 +83,7 @@ where
for changeset_id in start_points {
// Start off with follow_limit + 1 because that's logically the previous follow_remaining.
let visit_one = VisitOne::new(&inner, changeset_id, follow_limit, &mut sender);
let visit_one = VisitOne::new(ctx.clone(), &inner, changeset_id, follow_limit, &mut sender);
if let Some(visit_one) = visit_one {
tokio::spawn(visit_one.visit());
}
@ -114,6 +117,7 @@ struct VisitOne<V>
where
V: ChangesetVisitor,
{
ctx: CoreContext,
shared: Arc<VisitOneShared<V>>,
logger: Logger,
changeset_id: HgChangesetId,
@ -126,6 +130,7 @@ where
V: ChangesetVisitor,
{
fn new(
ctx: CoreContext,
shared: &Arc<VisitOneShared<V>>,
changeset_id: HgChangesetId,
prev_follow_remaining: usize,
@ -148,6 +153,7 @@ where
.new(o!["changeset_id" => format!("{}", changeset_id)]);
Some(Self {
ctx,
shared: shared.clone(),
logger,
changeset_id,
@ -158,6 +164,7 @@ where
fn visit(self) -> impl Future<Item = (), Error = ()> + Send {
let Self {
ctx,
shared,
logger,
changeset_id,
@ -167,29 +174,35 @@ where
shared.mark_visit_started(changeset_id);
let parents_fut = shared.repo.get_changeset_parents(&changeset_id).map({
let shared = shared.clone();
let mut sender = sender.clone();
move |parent_hashes| {
for parent_id in parent_hashes {
let visit_one =
VisitOne::new(&shared, parent_id, follow_remaining, &mut sender);
if let Some(visit_one) = visit_one {
// Avoid unbounded recursion by spawning separate futures for each parent
// directly on the executor.
tokio::spawn(visit_one.visit());
let parents_fut = shared
.repo
.get_changeset_parents(ctx.clone(), &changeset_id)
.map({
cloned!(ctx, shared, mut sender);
move |parent_hashes| {
for parent_id in parent_hashes {
let visit_one = VisitOne::new(
ctx.clone(),
&shared,
parent_id,
follow_remaining,
&mut sender,
);
if let Some(visit_one) = visit_one {
// Avoid unbounded recursion by spawning separate futures for each parent
// directly on the executor.
tokio::spawn(visit_one.visit());
}
}
}
}
});
});
let visit_fut = shared
.repo
.get_changeset_by_changesetid(&changeset_id)
.and_then({
let visitor = shared.visitor.clone();
let repo = shared.repo.clone();
move |changeset| visitor.visit(logger, repo, changeset, follow_remaining)
cloned!(ctx, shared.visitor, shared.repo);
move |changeset| visitor.visit(ctx, logger, repo, changeset, follow_remaining)
})
.and_then({
let sender = sender.clone();


@ -10,6 +10,8 @@
extern crate chashmap;
#[macro_use]
extern crate cloned;
#[macro_use]
extern crate failure_ext as failure;
extern crate futures;
#[macro_use]
@ -20,6 +22,7 @@ extern crate futures_ext;
extern crate blobrepo;
extern crate bonsai_utils;
extern crate context;
extern crate mercurial_types;
extern crate mononoke_types;


@ -14,6 +14,7 @@ extern crate async_unit;
extern crate slog_glog_fmt;
extern crate blobrepo_utils;
extern crate context;
extern crate mercurial_types;
extern crate fixtures;
@ -35,6 +36,7 @@ mod test {
use slog_glog_fmt::default_drain as glog_drain;
use blobrepo_utils::{BonsaiMFVerify, BonsaiMFVerifyResult};
use context::CoreContext;
use mercurial_types::HgChangesetId;
use $repo;
@ -42,15 +44,17 @@ mod test {
#[test]
fn test() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let drain = glog_drain().filter_level(Level::Debug).fuse();
let logger = Logger::root(drain, o![]);
let repo = $repo::getrepo(Some(logger.clone()));
let heads = repo.get_heads_maybe_stale()
let heads = repo.get_heads_maybe_stale(ctx.clone())
.collect()
.map(|heads| heads.into_iter().map(HgChangesetId::new));
let verify = BonsaiMFVerify {
ctx,
logger,
repo,
follow_limit: 1024,


@ -10,6 +10,7 @@ use bytes::Bytes;
use cachelib::LruCachePool;
use caching_ext::{CachelibHandler, GetOrFillMultipleFromCacheLayers, McErrorKind, McResult,
MemcacheHandler};
use context::CoreContext;
use errors::Error;
use futures::{Future, future::ok};
use futures_ext::{BoxFuture, FutureExt};
@ -98,12 +99,13 @@ fn memcache_serialize(entry: &BonsaiHgMappingEntry) -> Bytes {
}
impl BonsaiHgMapping for CachingBonsaiHgMapping {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
self.mapping.add(entry)
fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
self.mapping.add(ctx, entry)
}
fn get(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error> {
@ -157,7 +159,7 @@ impl BonsaiHgMapping for CachingBonsaiHgMapping {
};
mapping
.get(repo_id, bonsai_or_hg_csids)
.get(ctx.clone(), repo_id, bonsai_or_hg_csids)
.map(move |mapping_entries| {
mapping_entries
.into_iter()


@ -25,6 +25,7 @@ extern crate tokio;
#[macro_use]
extern crate cloned;
extern crate context;
extern crate futures_ext;
#[macro_use]
extern crate lazy_static;
@ -43,6 +44,7 @@ use std::sync::Arc;
use sql::Connection;
pub use sql_ext::SqlConstructors;
use context::CoreContext;
use futures::{Future, IntoFuture};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{HgChangesetId, HgNodeHash, RepositoryId};
@ -125,46 +127,50 @@ impl From<HgChangesetId> for BonsaiOrHgChangesetIds {
}
pub trait BonsaiHgMapping: Send + Sync {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error>;
fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error>;
fn get(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs_id: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error>;
fn get_hg_from_bonsai(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs_id: ChangesetId,
) -> BoxFuture<Option<HgChangesetId>, Error> {
self.get(repo_id, cs_id.into())
self.get(ctx, repo_id, cs_id.into())
.map(|result| result.into_iter().next().map(|entry| entry.hg_cs_id))
.boxify()
}
fn get_bonsai_from_hg(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs_id: HgChangesetId,
) -> BoxFuture<Option<ChangesetId>, Error> {
self.get(repo_id, cs_id.into())
self.get(ctx, repo_id, cs_id.into())
.map(|result| result.into_iter().next().map(|entry| entry.bcs_id))
.boxify()
}
}
impl BonsaiHgMapping for Arc<BonsaiHgMapping> {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
(**self).add(entry)
fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
(**self).add(ctx, entry)
}
fn get(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs_id: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error> {
(**self).get(repo_id, cs_id)
(**self).get(ctx, repo_id, cs_id)
}
}
@ -237,7 +243,7 @@ impl SqlConstructors for SqlBonsaiHgMapping {
}
impl BonsaiHgMapping for SqlBonsaiHgMapping {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
fn add(&self, _ctxt: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
STATS::adds.add_value(1);
cloned!(self.read_master_connection);
@ -273,6 +279,7 @@ impl BonsaiHgMapping for SqlBonsaiHgMapping {
fn get(
&self,
_ctxt: CoreContext,
repo_id: RepositoryId,
ids: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error> {


@ -4,6 +4,7 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
use context::CoreContext;
use futures::{Future, future::ok};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{HgChangesetId, RepositoryId};
@ -60,7 +61,7 @@ impl MemWritesBonsaiHgMapping {
}
impl BonsaiHgMapping for MemWritesBonsaiHgMapping {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
let repo_id = entry.repo_id;
{
let mappings = self.mappings.lock().expect("lock poisoned");
@ -73,7 +74,11 @@ impl BonsaiHgMapping for MemWritesBonsaiHgMapping {
}
self.inner
.get(repo_id, BonsaiOrHgChangesetIds::Bonsai(vec![entry.bcs_id]))
.get(
ctx,
repo_id,
BonsaiOrHgChangesetIds::Bonsai(vec![entry.bcs_id]),
)
.and_then({
cloned!(self.mappings);
move |maybe_mapping| {
@ -103,6 +108,7 @@ impl BonsaiHgMapping for MemWritesBonsaiHgMapping {
fn get(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
ids: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error> {
@ -144,7 +150,7 @@ impl BonsaiHgMapping for MemWritesBonsaiHgMapping {
ok(mappings).boxify()
} else {
self.inner
.get(repo_id, left_to_fetch)
.get(ctx, repo_id, left_to_fetch)
.map(move |mut mappings_from_inner| {
mappings.append(&mut mappings_from_inner);
mappings


@ -16,6 +16,7 @@ extern crate futures;
extern crate futures_ext;
extern crate bonsai_hg_mapping;
extern crate context;
extern crate mercurial_types;
extern crate mercurial_types_mocks;
extern crate mononoke_types;
@ -27,6 +28,7 @@ use futures::Future;
use bonsai_hg_mapping::{BonsaiHgMapping, BonsaiHgMappingEntry, BonsaiOrHgChangesetIds,
CachingBonsaiHgMapping, ErrorKind, MemWritesBonsaiHgMapping,
SqlBonsaiHgMapping, SqlConstructors};
use context::CoreContext;
use futures_ext::BoxFuture;
use mercurial_types::RepositoryId;
use mercurial_types_mocks::nodehash as hg;
@ -36,6 +38,7 @@ use mononoke_types_mocks::changesetid as bonsai;
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
fn add_and_get<M: BonsaiHgMapping>(mapping: M) {
let ctx = CoreContext::test_mock();
let entry = BonsaiHgMappingEntry {
repo_id: REPO_ZERO,
hg_cs_id: hg::ONES_CSID,
@ -44,30 +47,30 @@ fn add_and_get<M: BonsaiHgMapping>(mapping: M) {
assert_eq!(
true,
mapping
.add(entry.clone())
.add(ctx.clone(), entry.clone())
.wait()
.expect("Adding new entry failed")
);
assert_eq!(
false,
mapping
.add(entry.clone())
.add(ctx.clone(), entry.clone())
.wait()
.expect("Adding same entry failed")
);
let result = mapping
.get(REPO_ZERO, hg::ONES_CSID.into())
.get(ctx.clone(), REPO_ZERO, hg::ONES_CSID.into())
.wait()
.expect("Get failed");
assert_eq!(result, vec![entry.clone()]);
let result = mapping
.get_hg_from_bonsai(REPO_ZERO, bonsai::ONES_CSID)
.get_hg_from_bonsai(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID)
.wait()
.expect("Failed to get hg changeset by its bonsai counterpart");
assert_eq!(result, Some(hg::ONES_CSID));
let result = mapping
.get_bonsai_from_hg(REPO_ZERO, hg::ONES_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
@ -78,7 +81,7 @@ fn add_and_get<M: BonsaiHgMapping>(mapping: M) {
bcs_id: bonsai::ONES_CSID,
};
let result = mapping
.add(same_bc_entry.clone())
.add(ctx.clone(), same_bc_entry.clone())
.wait()
.expect_err("Conflicting entries should haved produced an error");
assert_matches!(
@ -92,7 +95,7 @@ fn add_and_get<M: BonsaiHgMapping>(mapping: M) {
bcs_id: bonsai::TWOS_CSID, // differ from entry.bcs_id
};
let result = mapping
.add(same_hg_entry.clone())
.add(ctx.clone(), same_hg_entry.clone())
.wait()
.expect_err("Conflicting entries should haved produced an error");
assert_matches!(
@ -102,14 +105,16 @@ fn add_and_get<M: BonsaiHgMapping>(mapping: M) {
}
fn missing<M: BonsaiHgMapping>(mapping: M) {
let ctx = CoreContext::test_mock();
let result = mapping
.get(REPO_ZERO, bonsai::ONES_CSID.into())
.get(ctx.clone(), REPO_ZERO, bonsai::ONES_CSID.into())
.wait()
.expect("Failed to fetch missing changeset (should succeed with None instead)");
assert_eq!(result, vec![]);
}
fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
let ctx = CoreContext::test_mock();
let entry = BonsaiHgMappingEntry {
repo_id: REPO_ZERO,
hg_cs_id: hg::ONES_CSID,
@ -118,7 +123,7 @@ fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
assert_eq!(
true,
mapping
.add(entry.clone())
.add(ctx.clone(), entry.clone())
.wait()
.expect("Adding new entry failed")
);
@ -129,7 +134,7 @@ fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
assert_eq!(
false,
mem_mapping
.add(entry.clone())
.add(ctx.clone(), entry.clone())
.wait()
.expect("Adding same entry failed")
);
@ -142,19 +147,19 @@ fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
assert_eq!(
true,
mem_mapping
.add(first_entry.clone())
.add(ctx.clone(), first_entry.clone())
.wait()
.expect("Adding new entry failed")
);
let result = mem_mapping
.get_bonsai_from_hg(REPO_ZERO, hg::ONES_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
let result = mem_mapping
.get_bonsai_from_hg(REPO_ZERO, hg::TWOS_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::TWOS_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::TWOS_CSID));
@ -170,7 +175,7 @@ fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
assert_eq!(
true,
mem_mapping
.add(second_entry.clone())
.add(ctx.clone(), second_entry.clone())
.wait()
.expect("Adding new entry failed")
);
@ -179,7 +184,7 @@ fn mem_writes<M: BonsaiHgMapping + 'static>(mapping: M) {
let inner = mem_mapping.get_inner();
let result = inner
.get_bonsai_from_hg(REPO_ZERO, hg::TWOS_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::TWOS_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, None);
@ -202,22 +207,24 @@ impl CountedBonsaiHgMapping {
}
impl BonsaiHgMapping for CountedBonsaiHgMapping {
fn add(&self, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
fn add(&self, ctx: CoreContext, entry: BonsaiHgMappingEntry) -> BoxFuture<bool, Error> {
self.adds.fetch_add(1, Ordering::Relaxed);
self.mapping.add(entry)
self.mapping.add(ctx, entry)
}
fn get(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
cs_id: BonsaiOrHgChangesetIds,
) -> BoxFuture<Vec<BonsaiHgMappingEntry>, Error> {
self.gets.fetch_add(1, Ordering::Relaxed);
self.mapping.get(repo_id, cs_id)
self.mapping.get(ctx, repo_id, cs_id)
}
}
fn caching<M: BonsaiHgMapping + 'static>(mapping: M) {
let ctx = CoreContext::test_mock();
let gets = Arc::new(AtomicUsize::new(0));
let adds = Arc::new(AtomicUsize::new(0));
let mapping = CountedBonsaiHgMapping::new(Arc::new(mapping), gets.clone(), adds.clone());
@ -231,27 +238,27 @@ fn caching<M: BonsaiHgMapping + 'static>(mapping: M) {
assert_eq!(
true,
mapping
.add(entry.clone())
.add(ctx.clone(), entry.clone())
.wait()
.expect("Adding new entry failed")
);
let result = mapping
.get_bonsai_from_hg(REPO_ZERO, hg::ONES_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
assert_eq!(gets.load(Ordering::Relaxed), 1);
let result = mapping
.get_bonsai_from_hg(REPO_ZERO, hg::ONES_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::ONES_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, Some(bonsai::ONES_CSID));
assert_eq!(gets.load(Ordering::Relaxed), 1);
let result = mapping
.get_bonsai_from_hg(REPO_ZERO, hg::TWOS_CSID)
.get_bonsai_from_hg(ctx.clone(), REPO_ZERO, hg::TWOS_CSID)
.wait()
.expect("Failed to get bonsai changeset by its hg counterpart");
assert_eq!(result, None);


@ -12,6 +12,7 @@ use std::iter::FromIterator;
use std::sync::Arc;
use blobrepo::BlobRepo;
use context::CoreContext;
use futures::{stream, Future, Stream};
use mercurial::{self, RevlogChangeset};
use mercurial_bundles::{parts, part_encode::PartEncodeBuilder};
@ -22,6 +23,7 @@ use revset::DifferenceOfUnionsOfAncestorsNodeStream;
use mononoke_types::ChangesetId;
pub fn create_getbundle_response(
ctx: CoreContext,
blobrepo: BlobRepo,
common: Vec<HgChangesetId>,
heads: Vec<HgChangesetId>,
@ -36,6 +38,7 @@ pub fn create_getbundle_response(
let common_heads: HashSet<_> = HashSet::from_iter(common.iter());
let heads = hg_to_bonsai_stream(
ctx.clone(),
&blobrepo,
heads
.iter()
@ -45,6 +48,7 @@ pub fn create_getbundle_response(
);
let excludes = hg_to_bonsai_stream(
ctx.clone(),
&blobrepo,
common
.iter()
@ -80,7 +84,7 @@ pub fn create_getbundle_response(
cloned!(blobrepo);
move |bonsai| {
blobrepo
.get_hg_from_bonsai_changeset(bonsai)
.get_hg_from_bonsai_changeset(ctx.clone(), bonsai)
.map(|cs| cs.into_nodehash())
.and_then({
cloned!(blobrepo);
@ -116,6 +120,7 @@ pub fn create_getbundle_response(
}
fn hg_to_bonsai_stream(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
nodes: Vec<HgChangesetId>,
) -> impl Future<Item = Vec<ChangesetId>, Error = Error> {
@ -123,7 +128,7 @@ fn hg_to_bonsai_stream(
.map({
cloned!(repo);
move |node| {
repo.get_bonsai_from_hg(&node)
repo.get_bonsai_from_hg(ctx.clone(), &node)
.and_then(move |maybe_bonsai| {
maybe_bonsai.ok_or(ErrorKind::BonsaiNotFoundForHgChangeset(node).into())
})


@ -48,6 +48,7 @@ extern crate tokio_io;
extern crate blobrepo;
extern crate bonsai_utils;
extern crate bookmarks;
extern crate context;
extern crate hooks;
extern crate mercurial;
extern crate mercurial_bundles;


@ -42,6 +42,7 @@
use blobrepo::{save_bonsai_changesets, BlobRepo};
use bonsai_utils::{bonsai_diff, BonsaiDiffResult};
use bookmarks::Bookmark;
use context::CoreContext;
use errors::*;
use futures::{Future, IntoFuture, Stream};
use futures::future::{err, join_all, loop_fn, ok, Loop};
@ -92,12 +93,13 @@ pub struct PushrebaseSuccessResult {
/// The commits from the pushed set should already be committed to the blobrepo
/// Returns updated bookmark value.
pub fn do_pushrebase(
ctx: CoreContext,
repo: Arc<BlobRepo>,
config: PushrebaseParams,
onto_bookmark: Bookmark,
pushed_set: Vec<HgChangesetId>,
) -> impl Future<Item = PushrebaseSuccessResult, Error = PushrebaseError> {
fetch_bonsai_changesets(repo.clone(), pushed_set)
fetch_bonsai_changesets(ctx.clone(), repo.clone(), pushed_set)
.and_then(|pushed| {
let head = find_only_head_or_fail(&pushed)?;
let roots = find_roots(&pushed)?;
@ -115,11 +117,18 @@ pub fn do_pushrebase(
move |(head, root)| {
// Calculate client changed files only once, since they won't change
(
find_changed_files(&repo, root, head, /* reject_merges */ false),
find_changed_files(
ctx.clone(),
&repo,
root,
head,
/* reject_merges */ false,
),
fetch_bonsai_range(&repo, root, head),
).into_future()
.and_then(move |(client_cf, client_bcs)| {
rebase_in_loop(
ctx,
repo,
config,
onto_bookmark,
@ -134,6 +143,7 @@ pub fn do_pushrebase(
}
fn rebase_in_loop(
ctx: CoreContext,
repo: Arc<BlobRepo>,
config: PushrebaseParams,
onto_bookmark: Bookmark,
@ -146,7 +156,7 @@ fn rebase_in_loop(
(root.clone(), 0),
move |(latest_rebase_attempt, retry_num)| {
get_bookmark_value(&repo, &onto_bookmark).and_then({
cloned!(client_cf, client_bcs, onto_bookmark, repo, config);
cloned!(ctx, client_cf, client_bcs, onto_bookmark, repo, config);
move |bookmark_val| {
fetch_bonsai_range(&repo, latest_rebase_attempt, bookmark_val)
.and_then(move |server_bcs| {
@ -161,6 +171,7 @@ fn rebase_in_loop(
cloned!(repo);
move |()| {
find_changed_files(
ctx.clone(),
&repo,
latest_rebase_attempt.clone(),
bookmark_val,
@ -205,11 +216,12 @@ fn do_rebase(
}
fn fetch_bonsai_changesets(
ctx: CoreContext,
repo: Arc<BlobRepo>,
commit_ids: Vec<HgChangesetId>,
) -> impl Future<Item = Vec<BonsaiChangeset>, Error = PushrebaseError> {
join_all(commit_ids.into_iter().map(move |hg_cs| {
repo.get_bonsai_from_hg(&hg_cs)
repo.get_bonsai_from_hg(ctx.clone(), &hg_cs)
.and_then({
cloned!(hg_cs);
move |bcs_cs| bcs_cs.ok_or(ErrorKind::BonsaiNotFoundForHgChangeset(hg_cs).into())
@ -303,6 +315,7 @@ fn find_closest_root(
/// find changed files by comparing manifests of `ancestor` and `descendant`
fn find_changed_files_between_manfiests(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
ancestor: ChangesetId,
descendant: ChangesetId,
@ -310,7 +323,7 @@ fn find_changed_files_between_manfiests(
let id_to_manifest = {
cloned!(repo);
move |bcs_id| {
repo.get_hg_from_bonsai_changeset(bcs_id)
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.and_then({
cloned!(repo);
move |cs_id| repo.get_changeset_by_changesetid(&cs_id)
@ -351,6 +364,7 @@ fn fetch_bonsai_range(
}
fn find_changed_files(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
ancestor: ChangesetId,
descendant: ChangesetId,
@ -397,8 +411,12 @@ fn find_changed_files(
// one of the parents is not in the rebase set, to calculate
// changed files in this case we will compute manifest diff
// between elements that are in rebase set.
find_changed_files_between_manfiests(&repo, id, *p_id)
.right_future()
find_changed_files_between_manfiests(
ctx.clone(),
&repo,
id,
*p_id,
).right_future()
}
(None, None) => panic!(
"`RangeNodeStream` produced invalid result for: ({}, {})",
@ -615,9 +633,9 @@ mod tests {
use std::str::FromStr;
use tests_utils::{create_commit, store_files, store_rename};
fn set_bookmark(repo: BlobRepo, book: &Bookmark, cs_id: &str) {
fn set_bookmark(ctx: CoreContext, repo: BlobRepo, book: &Bookmark, cs_id: &str) {
let head = HgChangesetId::from_str(cs_id).unwrap();
let head = repo.get_bonsai_from_hg(&head).wait().unwrap().unwrap();
let head = repo.get_bonsai_from_hg(ctx, &head).wait().unwrap().unwrap();
let mut txn = repo.update_bookmark_transaction();
txn.force_set(&book, &head).unwrap();
txn.commit().wait().unwrap();
@ -631,10 +649,14 @@ mod tests {
#[test]
fn pushrebase_one_commit() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
// Bottom commit of the repo
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(&root).wait().unwrap().unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), &root)
.wait()
.unwrap()
.unwrap();
let parents = vec![p];
let bcs_id = create_commit(
@ -642,16 +664,19 @@ mod tests {
parents,
store_files(btreemap!{"file" => Some("content")}, repo.clone()),
);
let hg_cs = repo.get_hg_from_bonsai_changeset(bcs_id).wait().unwrap();
let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()
.unwrap();
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
do_pushrebase(Arc::new(repo), Default::default(), book, vec![hg_cs])
do_pushrebase(ctx, Arc::new(repo), Default::default(), book, vec![hg_cs])
.wait()
.expect("pushrebase failed");
});
@ -660,10 +685,14 @@ mod tests {
#[test]
fn pushrebase_stack() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
// Bottom commit of the repo
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(&root).wait().unwrap().unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), &root)
.wait()
.unwrap()
.unwrap();
let bcs_id_1 = create_commit(
repo.clone(),
vec![p],
@ -676,7 +705,7 @@ mod tests {
);
assert_eq!(
find_changed_files(&Arc::new(repo.clone()), p, bcs_id_2, false)
find_changed_files(ctx.clone(), &Arc::new(repo.clone()), p, bcs_id_2, false)
.wait()
.unwrap(),
make_paths(&["file", "file2"]),
@ -684,14 +713,20 @@ mod tests {
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap();
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap();
do_pushrebase(
ctx,
Arc::new(repo),
Default::default(),
book,
@ -704,10 +739,14 @@ mod tests {
#[test]
fn pushrebase_stack_with_renames() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
// Bottom commit of the repo
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(&root).wait().unwrap().unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), &root)
.wait()
.unwrap()
.unwrap();
let bcs_id_1 = create_commit(
repo.clone(),
vec![p],
@ -725,7 +764,7 @@ mod tests {
let bcs_id_2 = create_commit(repo.clone(), vec![bcs_id_1], file_changes);
assert_eq!(
find_changed_files(&Arc::new(repo.clone()), p, bcs_id_2, false)
find_changed_files(ctx.clone(), &Arc::new(repo.clone()), p, bcs_id_2, false)
.wait()
.unwrap(),
make_paths(&["file", "file_renamed"]),
@ -733,14 +772,20 @@ mod tests {
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap();
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap();
do_pushrebase(
ctx,
Arc::new(repo),
Default::default(),
book,
@ -767,21 +812,22 @@ mod tests {
// root0 -> o
//
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let repo_arc = Arc::new(repo.clone());
let config = PushrebaseParams::default();
let root0 = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root0 = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
let root1 = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"607314ef579bd2407752361ba1b0c1729d08b281",
).unwrap())
.wait()
let root1 = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("607314ef579bd2407752361ba1b0c1729d08b281").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -803,14 +849,15 @@ mod tests {
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let bcs_id_master = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
).unwrap())
.wait()
let bcs_id_master = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("a5ffa77602a066db7d5cfb9fb5823a0895717c5a").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -823,16 +870,23 @@ mod tests {
);
assert_eq!(
find_changed_files(&repo_arc, root, bcs_id_3, false)
find_changed_files(ctx.clone(), &repo_arc, root, bcs_id_3, false)
.wait()
.unwrap(),
make_paths(&["f0", "f1", "f2"]),
);
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap();
let hg_cs_3 = repo.get_hg_from_bonsai_changeset(bcs_id_3).wait().unwrap();
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap();
let hg_cs_3 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_3)
.wait()
.unwrap();
let bcs_id_rebased = do_pushrebase(
ctx,
repo_arc.clone(),
config,
book,
@ -865,11 +919,12 @@ mod tests {
#[test]
fn pushrebase_conflict() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -891,15 +946,23 @@ mod tests {
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap();
let hg_cs_3 = repo.get_hg_from_bonsai_changeset(bcs_id_3).wait().unwrap();
let hg_cs_1 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap();
let hg_cs_2 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap();
let hg_cs_3 = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_3)
.wait()
.unwrap();
let result = do_pushrebase(
ctx,
Arc::new(repo),
Default::default(),
book,
@ -925,11 +988,12 @@ mod tests {
#[test]
fn pushrebase_caseconflicting_rename() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -949,19 +1013,26 @@ mod tests {
store_files(btreemap!{"file" => Some("file")}, repo.clone()),
);
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap(),
repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap(),
repo.get_hg_from_bonsai_changeset(bcs_id_3).wait().unwrap(),
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap(),
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap(),
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_3)
.wait()
.unwrap(),
];
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
do_pushrebase(Arc::new(repo), Default::default(), book, hgcss)
do_pushrebase(ctx, Arc::new(repo), Default::default(), book, hgcss)
.wait()
.expect("push-rebase failed");
})
@ -970,11 +1041,12 @@ mod tests {
#[test]
fn pushrebase_caseconflicting_dirs() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -995,18 +1067,23 @@ mod tests {
),
);
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(bcs_id_1).wait().unwrap(),
repo.get_hg_from_bonsai_changeset(bcs_id_2).wait().unwrap(),
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
.wait()
.unwrap(),
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_2)
.wait()
.unwrap(),
];
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
do_pushrebase(Arc::new(repo), Default::default(), book, hgcss)
do_pushrebase(ctx, Arc::new(repo), Default::default(), book, hgcss)
.wait()
.expect("push-rebase failed");
})
@ -1015,11 +1092,12 @@ mod tests {
#[test]
fn pushrebase_recursion_limit() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -1042,19 +1120,25 @@ mod tests {
let hgcss = join_all(
bcss.iter()
.map(|bcs| repo.get_hg_from_bonsai_changeset(*bcs))
.map(|bcs| repo.get_hg_from_bonsai_changeset(ctx.clone(), *bcs))
.collect::<Vec<_>>(),
).wait()
.unwrap();
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let repo_arc = Arc::new(repo.clone());
do_pushrebase(repo_arc.clone(), Default::default(), book.clone(), hgcss)
.wait()
do_pushrebase(
ctx.clone(),
repo_arc.clone(),
Default::default(),
book.clone(),
hgcss,
).wait()
.expect("pushrebase failed");
let bcs = create_commit(
@ -1062,15 +1146,25 @@ mod tests {
vec![root],
store_files(btreemap!{"file" => Some("data")}, repo.clone()),
);
let hgcss = vec![repo_arc.get_hg_from_bonsai_changeset(bcs).wait().unwrap()];
let hgcss = vec![
repo_arc
.get_hg_from_bonsai_changeset(ctx.clone(), bcs)
.wait()
.unwrap(),
];
// try rebase with small recursion limit
let config = PushrebaseParams {
recursion_limit: 128,
..Default::default()
};
let result =
do_pushrebase(repo_arc.clone(), config, book.clone(), hgcss.clone()).wait();
let result = do_pushrebase(
ctx.clone(),
repo_arc.clone(),
config,
book.clone(),
hgcss.clone(),
).wait();
match result {
Err(PushrebaseError::RootTooFarBehind) => (),
_ => panic!("push-rebase should have failed because root too far behind"),
@ -1080,7 +1174,7 @@ mod tests {
recursion_limit: 256,
..Default::default()
};
do_pushrebase(repo_arc, config, book, hgcss)
do_pushrebase(ctx, repo_arc, config, book, hgcss)
.wait()
.expect("push-rebase failed");
})
@ -1089,11 +1183,12 @@ mod tests {
#[test]
fn pushrebase_rewritedates() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(),
).wait()
.unwrap()
.unwrap();
let book = Bookmark::new("master").unwrap();
@ -1102,9 +1197,14 @@ mod tests {
vec![root],
store_files(btreemap!{"file" => Some("data")}, repo.clone()),
);
let hgcss = vec![repo.get_hg_from_bonsai_changeset(bcs).wait().unwrap()];
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs)
.wait()
.unwrap(),
];
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
@ -1113,12 +1213,17 @@ mod tests {
rewritedates: false,
..Default::default()
};
let bcs_keep_date =
do_pushrebase(Arc::new(repo.clone()), config, book.clone(), hgcss.clone())
.wait()
.expect("push-rebase failed");
let bcs_keep_date = do_pushrebase(
ctx.clone(),
Arc::new(repo.clone()),
config,
book.clone(),
hgcss.clone(),
).wait()
.expect("push-rebase failed");
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
@ -1127,7 +1232,7 @@ mod tests {
rewritedates: true,
..Default::default()
};
let bcs_rewrite_date = do_pushrebase(Arc::new(repo.clone()), config, book, hgcss)
let bcs_rewrite_date = do_pushrebase(ctx, Arc::new(repo.clone()), config, book, hgcss)
.wait()
.expect("push-rebase failed");
@ -1147,11 +1252,12 @@ mod tests {
#[test]
fn pushrebase_case_conflict() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let root = repo.get_bonsai_from_hg(&HgChangesetId::from_str(
"5a28e25f924a5d209b82ce0713d8d83e68982bc8",
).unwrap())
.wait()
let root = repo.get_bonsai_from_hg(
ctx.clone(),
&HgChangesetId::from_str("5a28e25f924a5d209b82ce0713d8d83e68982bc8").unwrap(),
).wait()
.unwrap()
.unwrap();
@ -1163,16 +1269,21 @@ mod tests {
repo.clone(),
),
);
let hgcss = vec![repo.get_hg_from_bonsai_changeset(bcs).wait().unwrap()];
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs)
.wait()
.unwrap(),
];
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"2f866e7e549760934e31bf0420a873f65100ad63",
);
let result = do_pushrebase(Arc::new(repo), Default::default(), book, hgcss).wait();
let result = do_pushrebase(ctx, Arc::new(repo), Default::default(), book, hgcss).wait();
match result {
Err(PushrebaseError::PotentialCaseConflict(conflict)) => {
assert_eq!(conflict, MPath::new("Dir1/file_1_in_dir1").unwrap())
@ -1214,6 +1325,7 @@ mod tests {
use mononoke_types::FileType;
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
let path_1 = MPath::new("1").unwrap();
@ -1226,7 +1338,10 @@ mod tests {
.unwrap();
// create filechange with the same content as "1" but set executable bit
let root = repo.get_bonsai_from_hg(root_hg).wait().unwrap().unwrap();
let root = repo.get_bonsai_from_hg(ctx.clone(), root_hg)
.wait()
.unwrap()
.unwrap();
let root_bcs = repo.get_bonsai_changeset(root).wait().unwrap();
let file_1 = root_bcs
.file_changes()
@ -1248,17 +1363,27 @@ mod tests {
vec![root],
btreemap!{path_1.clone() => Some(file_1_exec.clone())},
);
let hgcss = vec![repo.get_hg_from_bonsai_changeset(bcs).wait().unwrap()];
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs)
.wait()
.unwrap(),
];
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
);
let result = do_pushrebase(Arc::new(repo.clone()), Default::default(), book, hgcss)
.wait()
let result = do_pushrebase(
ctx.clone(),
Arc::new(repo.clone()),
Default::default(),
book,
hgcss,
).wait()
.expect("pushrebase failed");
let result_bcs = repo.get_bonsai_changeset(result.head).wait().unwrap();
let file_1_result = result_bcs
@ -1269,7 +1394,7 @@ mod tests {
.unwrap();
assert_eq!(file_1_result, &file_1_exec);
let result_hg = repo.get_hg_from_bonsai_changeset(result.head)
let result_hg = repo.get_hg_from_bonsai_changeset(ctx, result.head)
.wait()
.unwrap();
let result_cs = repo.get_changeset_by_changesetid(&result_hg)
@ -1289,14 +1414,19 @@ mod tests {
#[test]
fn pushrebase_simultaneously() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = linear::getrepo(None);
// Bottom commit of the repo
let root = HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let p = repo.get_bonsai_from_hg(&root).wait().unwrap().unwrap();
let p = repo.get_bonsai_from_hg(ctx.clone(), &root)
.wait()
.unwrap()
.unwrap();
let parents = vec![p];
let book = Bookmark::new("master").unwrap();
set_bookmark(
ctx.clone(),
repo.clone(),
&book,
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
@ -1311,10 +1441,13 @@ mod tests {
parents.clone(),
store_files(btreemap!{ f.as_ref() => Some("content")}, repo.clone()),
);
let hg_cs = repo.get_hg_from_bonsai_changeset(bcs_id).wait().unwrap();
let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()
.unwrap();
let fut = spawn_future(
do_pushrebase(
ctx.clone(),
Arc::new(repo.clone()),
Default::default(),
book.clone(),
@ -1335,13 +1468,16 @@ mod tests {
let previous_master =
HgChangesetId::from_str("a5ffa77602a066db7d5cfb9fb5823a0895717c5a").unwrap();
let previous_master = repo.get_bonsai_from_hg(&previous_master)
let previous_master = repo.get_bonsai_from_hg(ctx.clone(), &previous_master)
.wait()
.unwrap()
.unwrap();
let new_master = repo.get_bookmark(&book).wait().unwrap().unwrap();
let new_master = repo.get_bonsai_from_hg(&new_master)
let new_master = repo.get_bookmark(ctx.clone(), &book)
.wait()
.unwrap()
.unwrap();
let new_master = repo.get_bonsai_from_hg(ctx, &new_master)
.wait()
.unwrap()
.unwrap();

View File

@ -14,6 +14,7 @@ use blobrepo::{BlobRepo, ChangesetHandle, ChangesetMetadata, ContentBlobInfo, Cr
HgBlobEntry};
use bookmarks::{Bookmark, Transaction};
use bytes::{Bytes, BytesMut};
use context::CoreContext;
use failure::{err_msg, Compat, FutureFailureErrorExt, StreamFailureErrorExt};
use futures::{Future, IntoFuture, Stream};
use futures::future::{self, err, ok, Shared};
@ -51,6 +52,7 @@ type UploadedChangesets = HashMap<HgNodeHash, ChangesetHandle>;
/// Manifests and uploads all of them to the provided BlobRepo in the correct order.
/// It returns a Future that contains the response that should be sent back to the requester.
pub fn resolve(
ctx: CoreContext,
repo: Arc<BlobRepo>,
logger: Logger,
scuba_logger: ScubaSampleBuilder,
@ -67,20 +69,21 @@ pub fn resolve(
resolver
.maybe_resolve_commonheads(bundle2)
.and_then(move |(commonheads, bundle2)| match commonheads {
Some(commonheads) => resolve_pushrebase(commonheads, resolver, bundle2, lca_hint),
None => resolve_push(resolver, bundle2),
Some(commonheads) => resolve_pushrebase(ctx, commonheads, resolver, bundle2, lca_hint),
None => resolve_push(ctx, resolver, bundle2),
})
.boxify()
}
fn resolve_push(
ctx: CoreContext,
resolver: Bundle2Resolver,
bundle2: BoxStream<Bundle2Item, Error>,
) -> BoxFuture<Bytes, Error> {
resolver
.maybe_resolve_changegroup(bundle2)
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(cg_push, bundle2)| {
resolver
.resolve_multiple_parts(bundle2, Bundle2Resolver::maybe_resolve_pushkey)
@ -100,7 +103,7 @@ fn resolve_push(
}
})
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(cg_push, bookmark_push, bundle2)| {
if let Some(cg_push) = cg_push {
resolver
@ -115,12 +118,12 @@ fn resolve_push(
}
})
.and_then({
let resolver = resolver.clone();
cloned!(ctx, resolver);
move |(cg_and_manifests, bookmark_push, bundle2)| {
if let Some((cg_push, manifests)) = cg_and_manifests {
let changegroup_id = Some(cg_push.part_id);
resolver
.upload_changesets(cg_push, manifests)
.upload_changesets(ctx, cg_push, manifests)
.map(move |()| (changegroup_id, bookmark_push, bundle2))
.boxify()
} else {
@ -129,7 +132,7 @@ fn resolve_push(
}
})
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(changegroup_id, bookmark_push, bundle2)| {
resolver
.maybe_resolve_infinitepush_bookmarks(bundle2)
@ -137,7 +140,7 @@ fn resolve_push(
}
})
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(changegroup_id, bookmark_push, bundle2)| {
resolver
.ensure_stream_finished(bundle2)
@ -145,18 +148,18 @@ fn resolve_push(
}
})
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(changegroup_id, bookmarks_push)| {
let bookmarks_push_fut = bookmarks_push
.into_iter()
.map(|bp| BonsaiBookmarkPush::new(&resolver.repo, bp))
.map(|bp| BonsaiBookmarkPush::new(ctx.clone(), &resolver.repo, bp))
.collect::<Vec<_>>();
future::join_all(bookmarks_push_fut)
.map(move |bookmarks_push| (changegroup_id, bookmarks_push))
}
})
.and_then({
let resolver = resolver.clone();
cloned!(resolver);
move |(changegroup_id, bookmark_push)| {
(move || {
let bookmark_ids: Vec<_> = bookmark_push.iter().map(|bp| bp.part_id).collect();
@ -189,6 +192,7 @@ fn resolve_push(
}
fn resolve_pushrebase(
ctx: CoreContext,
commonheads: CommonHeads,
resolver: Bundle2Resolver,
bundle2: BoxStream<Bundle2Item, Error>,
@ -231,11 +235,11 @@ fn resolve_pushrebase(
}
})
.and_then({
cloned!(resolver);
cloned!(ctx, resolver);
move |(onto, cg_push, manifests, maybe_pushvars, bundle2)| {
let changesets = cg_push.changesets.clone();
resolver
.upload_changesets(cg_push, manifests)
.upload_changesets(ctx, cg_push, manifests)
.map(move |()| (changesets, onto, maybe_pushvars, bundle2))
}
})
@ -263,7 +267,7 @@ fn resolve_pushrebase(
}
})
.and_then({
cloned!(resolver);
cloned!(ctx, resolver);
move |(changesets, bookmark_pushes, maybe_pushvars, onto)| {
resolver
.run_hooks(changesets.clone(), maybe_pushvars, &onto)
@ -292,16 +296,13 @@ fn resolve_pushrebase(
})
.and_then(move |()| {
resolver
.pushrebase(changesets.clone(), bookmark_pushes, &onto)
.pushrebase(ctx, changesets.clone(), bookmark_pushes, &onto)
.map(|pushrebased_rev| (pushrebased_rev, onto))
})
}
})
.and_then({
cloned!(resolver);
move |(pushrebased_rev, onto)| {
resolver.prepare_pushrebase_response(commonheads, pushrebased_rev, onto, lca_hint)
}
.and_then(move |(pushrebased_rev, onto)| {
resolver.prepare_pushrebase_response(ctx, commonheads, pushrebased_rev, onto, lca_hint)
})
.boxify()
}
@ -346,16 +347,18 @@ struct BonsaiBookmarkPush {
impl BonsaiBookmarkPush {
fn new(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
bookmark_push: BookmarkPush,
) -> impl Future<Item = BonsaiBookmarkPush, Error = Error> + Send {
fn bonsai_from_hg_opt(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
cs_id: Option<HgChangesetId>,
) -> impl Future<Item = Option<ChangesetId>, Error = Error> {
match cs_id {
None => future::ok(None).left_future(),
Some(cs_id) => repo.get_bonsai_from_hg(&cs_id).right_future(),
Some(cs_id) => repo.get_bonsai_from_hg(ctx, &cs_id).right_future(),
}
}
@ -366,8 +369,10 @@ impl BonsaiBookmarkPush {
new,
} = bookmark_push;
(bonsai_from_hg_opt(repo, old), bonsai_from_hg_opt(repo, new))
.into_future()
(
bonsai_from_hg_opt(ctx.clone(), repo, old),
bonsai_from_hg_opt(ctx, repo, new),
).into_future()
.map(move |(old, new)| BonsaiBookmarkPush {
part_id,
name,
@ -640,6 +645,7 @@ impl Bundle2Resolver {
/// that the changesets were uploaded
fn upload_changesets(
&self,
ctx: CoreContext,
cg_push: ChangegroupPush,
manifests: Manifests,
) -> BoxFuture<(), Error> {
@ -660,6 +666,7 @@ impl Bundle2Resolver {
STATS::content_blobs_count.add_value(content_blobs.len() as i64);
fn upload_changeset(
ctx: CoreContext,
repo: Arc<BlobRepo>,
scuba_logger: ScubaSampleBuilder,
node: HgNodeHash,
@ -671,8 +678,8 @@ impl Bundle2Resolver {
) -> BoxFuture<UploadedChangesets, Error> {
let (p1, p2) = {
(
get_parent(&repo, &uploaded_changesets, revlog_cs.p1),
get_parent(&repo, &uploaded_changesets, revlog_cs.p2),
get_parent(ctx.clone(), &repo, &uploaded_changesets, revlog_cs.p1),
get_parent(ctx.clone(), &repo, &uploaded_changesets, revlog_cs.p2),
)
};
let NewBlobs {
@ -708,7 +715,7 @@ impl Bundle2Resolver {
cs_metadata,
must_check_case_conflicts: true,
};
let scheduled_uploading = create_changeset.create(&repo, scuba_logger);
let scheduled_uploading = create_changeset.create(ctx, &repo, scuba_logger);
uploaded_changesets.insert(node, scheduled_uploading);
Ok(uploaded_changesets)
@ -731,6 +738,7 @@ impl Bundle2Resolver {
HashMap::new(),
move |uploaded_changesets, (node, revlog_cs)| {
upload_changeset(
ctx.clone(),
repo.clone(),
scuba_logger.clone(),
node.clone(),
@ -800,6 +808,7 @@ impl Bundle2Resolver {
fn prepare_pushrebase_response(
&self,
ctx: CoreContext,
commonheads: CommonHeads,
pushrebased_rev: ChangesetId,
onto: Bookmark,
@ -810,9 +819,9 @@ impl Bundle2Resolver {
// suddenly moved before current pushrebase finished.
let repo: BlobRepo = (*self.repo).clone();
let common = commonheads.heads;
let maybe_onto_head = repo.get_bookmark(&onto);
let maybe_onto_head = repo.get_bookmark(ctx.clone(), &onto);
let pushrebased_rev = repo.get_hg_from_bonsai_changeset(pushrebased_rev);
let pushrebased_rev = repo.get_hg_from_bonsai_changeset(ctx.clone(), pushrebased_rev);
let mut scuba_logger = self.scuba_logger.clone();
maybe_onto_head
@ -823,7 +832,7 @@ impl Bundle2Resolver {
heads.push(onto_head);
}
heads.push(pushrebased_rev);
getbundle_response::create_getbundle_response(repo, common, heads, lca_hint)
getbundle_response::create_getbundle_response(ctx, repo, common, heads, lca_hint)
})
.and_then(|cg_part_builder| {
let compression = None;
@ -887,6 +896,7 @@ impl Bundle2Resolver {
fn pushrebase(
&self,
ctx: CoreContext,
changesets: Changesets,
bookmark_pushes: Vec<BookmarkPush>,
onto_bookmark: &Bookmark,
@ -909,6 +919,7 @@ impl Bundle2Resolver {
}
pushrebase::do_pushrebase(
ctx,
self.repo.clone(),
self.pushrebase.clone(),
onto_bookmark.clone(),
@ -1021,6 +1032,7 @@ fn add_bookmark_to_transaction(
/// Retrieves the parent from uploaded changesets; if it is missing, fetches it from BlobRepo
fn get_parent(
ctx: CoreContext,
repo: &BlobRepo,
map: &UploadedChangesets,
p: Option<HgNodeHash>,
@ -1029,6 +1041,7 @@ fn get_parent(
None => None,
Some(p) => match map.get(&p) {
None => Some(ChangesetHandle::ready_cs_handle(
ctx,
Arc::new(repo.clone()),
HgChangesetId::new(p),
)),

View File

@ -13,6 +13,7 @@ extern crate futures_ext;
extern crate slog;
extern crate blobrepo;
extern crate context;
extern crate mercurial_types;
extern crate metaconfig;
extern crate revset;
@ -21,6 +22,7 @@ use std::sync::Arc;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
use futures::{Future, IntoFuture, Stream};
use futures_ext::{spawn_future, BoxFuture, FutureExt};
use mercurial_types::{Changeset, HgChangesetId, MPath, RepoPath};
@ -94,6 +96,7 @@ fn blobstore_and_filenodes_warmup(
// Iterate over first parents, and fetch them
fn changesets_warmup(
ctx: CoreContext,
start_rev: HgChangesetId,
repo: Arc<BlobRepo>,
cs_limit: usize,
@ -101,7 +104,7 @@ fn changesets_warmup(
) -> impl Future<Item = (), Error = Error> {
info!(logger, "about to start warming up changesets cache");
repo.get_bonsai_from_hg(&start_rev)
repo.get_bonsai_from_hg(ctx, &start_rev)
.and_then({
let start_rev = start_rev.clone();
move |maybe_node| {
@ -119,12 +122,13 @@ fn changesets_warmup(
}
fn do_cache_warmup(
ctx: CoreContext,
repo: Arc<BlobRepo>,
bookmark: Bookmark,
commit_limit: usize,
logger: Logger,
) -> BoxFuture<(), Error> {
repo.get_bookmark(&bookmark)
repo.get_bookmark(ctx.clone(), &bookmark)
.and_then({
let logger = logger.clone();
let repo = repo.clone();
@ -135,8 +139,13 @@ fn do_cache_warmup(
bookmark_rev,
logger.clone(),
));
let cs_warmup =
spawn_future(changesets_warmup(bookmark_rev, repo, commit_limit, logger));
let cs_warmup = spawn_future(changesets_warmup(
ctx,
bookmark_rev,
repo,
commit_limit,
logger,
));
blobstore_warmup.join(cs_warmup).map(|_| ()).boxify()
}
None => {
@ -157,12 +166,14 @@ fn do_cache_warmup(
/// Fetches all manifest entries for a bookmark, and up to `commit_warmup_limit`
/// ancestors of the bookmark.
pub fn cache_warmup(
ctx: CoreContext,
repo: Arc<BlobRepo>,
cache_warmup: Option<CacheWarmupParams>,
logger: Logger,
) -> BoxFuture<(), Error> {
match cache_warmup {
Some(cache_warmup) => do_cache_warmup(
ctx,
repo,
cache_warmup.bookmark,
cache_warmup.commit_limit,

View File

@ -16,6 +16,7 @@ use slog::Logger;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
use mercurial::RevlogRepo;
use mercurial_types::HgChangesetId;
@ -39,6 +40,7 @@ pub fn read_bookmarks(revlogrepo: RevlogRepo) -> BoxFuture<Vec<(Vec<u8>, HgChang
}
pub fn upload_bookmarks(
ctx: CoreContext,
logger: &Logger,
revlogrepo: RevlogRepo,
blobrepo: Arc<BlobRepo>,
@ -53,9 +55,9 @@ pub fn upload_bookmarks(
move |bookmarks| {
stream::futures_unordered(bookmarks.into_iter().map(|(key, cs_id)| {
blobrepo
.changeset_exists(&cs_id)
.changeset_exists(ctx.clone(), &cs_id)
.and_then({
cloned!(logger, key, blobrepo, stale_bookmarks);
cloned!(ctx, logger, key, blobrepo, stale_bookmarks);
move |exists| {
match (exists, stale_bookmarks.get(&key).cloned()) {
(false, Some(stale_cs_id)) => {
@ -70,7 +72,7 @@ pub fn upload_bookmarks(
);
blobrepo
.changeset_exists(&stale_cs_id)
.changeset_exists(ctx, &stale_cs_id)
.map(move |exists| (key, stale_cs_id, exists))
.boxify()
}
@ -78,10 +80,10 @@ pub fn upload_bookmarks(
}
}})
.and_then({
cloned!(blobrepo, logger);
cloned!(ctx, blobrepo, logger);
move |(key, cs_id, exists)| {
if exists {
blobrepo.get_bonsai_from_hg(&cs_id)
blobrepo.get_bonsai_from_hg(ctx, &cs_id)
.and_then(move |bcs_id| bcs_id.ok_or(err_msg(
format!("failed to resolve hg to bonsai: {}", cs_id),
)))

View File

@ -8,6 +8,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use bytes::Bytes;
use context::CoreContext;
use failure::err_msg;
use failure::prelude::*;
use futures::{Future, IntoFuture};
@ -203,6 +204,7 @@ fn upload_entry(
}
pub struct UploadChangesets {
pub ctx: CoreContext,
pub blobrepo: Arc<BlobRepo>,
pub revlogrepo: RevlogRepo,
pub changeset: Option<HgNodeHash>,
@ -213,6 +215,7 @@ pub struct UploadChangesets {
impl UploadChangesets {
pub fn upload(self) -> BoxStream<SharedItem<(BonsaiChangeset, HgBlobChangeset)>, Error> {
let Self {
ctx,
blobrepo,
revlogrepo,
changeset,
@ -302,8 +305,8 @@ impl UploadChangesets {
let hg_cs_id = HgChangesetId::new(p);
maybe_handle.unwrap_or_else({
cloned!(blobrepo);
move || ChangesetHandle::ready_cs_handle(blobrepo, hg_cs_id)
cloned!(ctx, blobrepo);
move || ChangesetHandle::ready_cs_handle(ctx, blobrepo, hg_cs_id)
})
}
});
@ -331,7 +334,7 @@ impl UploadChangesets {
must_check_case_conflicts: false,
};
let cshandle =
create_changeset.create(&blobrepo, ScubaSampleBuilder::with_discard());
create_changeset.create(ctx.clone(), &blobrepo, ScubaSampleBuilder::with_discard());
parent_changeset_handles.insert(csid, cshandle.clone());
oneshot::spawn(cshandle
.get_completed_changeset()

View File

@ -18,12 +18,14 @@ use futures_ext::{BoxFuture, FutureExt, StreamExt};
use slog::Logger;
use blobrepo::BlobRepo;
use context::CoreContext;
use mercurial::RevlogRepo;
use mercurial_types::HgNodeHash;
use self::changeset::UploadChangesets;
pub struct Blobimport {
pub ctx: CoreContext,
pub logger: Logger,
pub blobrepo: Arc<BlobRepo>,
pub revlogrepo_path: PathBuf,
@ -36,6 +38,7 @@ pub struct Blobimport {
impl Blobimport {
pub fn import(self) -> BoxFuture<(), Error> {
let Self {
ctx,
logger,
blobrepo,
revlogrepo_path,
@ -53,6 +56,7 @@ impl Blobimport {
let revlogrepo = RevlogRepo::open(revlogrepo_path).expect("cannot open revlogrepo");
let upload_changesets = UploadChangesets {
ctx: ctx.clone(),
blobrepo: blobrepo.clone(),
revlogrepo: revlogrepo.clone(),
changeset,
@ -102,7 +106,7 @@ impl Blobimport {
);
future::ok(()).boxify()
} else {
bookmark::upload_bookmarks(&logger, revlogrepo, blobrepo, stale_bookmarks)
bookmark::upload_bookmarks(ctx, &logger, revlogrepo, blobrepo, stale_bookmarks)
}
})
.boxify()

View File

@ -30,6 +30,7 @@ extern crate slog_glog_fmt;
extern crate blobrepo;
extern crate bookmarks;
extern crate changesets;
extern crate context;
extern crate hooks;
extern crate mercurial;
extern crate mercurial_types;

View File

@ -13,6 +13,7 @@ use slog::Logger;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
const SET_CMD: &'static str = "set";
const GET_CMD: &'static str = "get";
@ -52,13 +53,14 @@ pub fn prepare_command<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
}
pub fn handle_command<'a>(
ctx: CoreContext,
repo: &BlobRepo,
matches: &ArgMatches<'a>,
logger: Logger,
) -> BoxFuture<(), Error> {
match matches.subcommand() {
(GET_CMD, Some(sub_m)) => handle_get(sub_m, logger, repo.clone()),
(SET_CMD, Some(sub_m)) => handle_set(sub_m, logger, repo.clone()),
(GET_CMD, Some(sub_m)) => handle_get(sub_m, ctx, logger, repo.clone()),
(SET_CMD, Some(sub_m)) => handle_set(sub_m, ctx, logger, repo.clone()),
_ => {
println!("{}", matches.usage());
::std::process::exit(1);
@ -78,14 +80,19 @@ fn format_output(json_flag: bool, changeset_id: String, changeset_type: &str) ->
}
}
fn handle_get<'a>(args: &ArgMatches<'a>, _logger: Logger, repo: BlobRepo) -> BoxFuture<(), Error> {
fn handle_get<'a>(
args: &ArgMatches<'a>,
ctx: CoreContext,
_logger: Logger,
repo: BlobRepo,
) -> BoxFuture<(), Error> {
let bookmark_name = args.value_of("BOOKMARK_NAME").unwrap().to_string();
let bookmark = Bookmark::new(bookmark_name).unwrap();
let changeset_type = args.value_of("changeset-type").unwrap_or("hg");
let json_flag: bool = args.is_present("json");
match changeset_type {
"hg" => repo.get_bookmark(&bookmark)
"hg" => repo.get_bookmark(ctx, &bookmark)
.and_then(move |cs| {
let changeset_id_str = cs.expect("bookmark could not be found").to_string();
let output = format_output(json_flag, changeset_id_str, "hg");
@ -96,7 +103,7 @@ fn handle_get<'a>(args: &ArgMatches<'a>, _logger: Logger, repo: BlobRepo) -> Box
"bonsai" => {
cloned!(bookmark);
::fetch_bonsai_changeset(bookmark.to_string().as_str(), &repo)
::fetch_bonsai_changeset(ctx, bookmark.to_string().as_str(), &repo)
.and_then(move |bonsai_cs| {
let changeset_id_str = bonsai_cs.get_changeset_id().to_string();
let output = format_output(json_flag, changeset_id_str, "bonsai");
@ -110,12 +117,17 @@ fn handle_get<'a>(args: &ArgMatches<'a>, _logger: Logger, repo: BlobRepo) -> Box
}
}
fn handle_set<'a>(args: &ArgMatches<'a>, _logger: Logger, repo: BlobRepo) -> BoxFuture<(), Error> {
fn handle_set<'a>(
args: &ArgMatches<'a>,
ctx: CoreContext,
_logger: Logger,
repo: BlobRepo,
) -> BoxFuture<(), Error> {
let bookmark_name = args.value_of("BOOKMARK_NAME").unwrap().to_string();
let rev = args.value_of("HG_CHANGESET_ID").unwrap();
let bookmark = Bookmark::new(bookmark_name).unwrap();
::fetch_bonsai_changeset(rev, &repo)
::fetch_bonsai_changeset(ctx, rev, &repo)
.and_then(move |bonsai_cs| {
let mut transaction = repo.update_bookmark_transaction();
try_boxfuture!(transaction.force_set(&bookmark, &bonsai_cs.get_changeset_id()));

View File

@ -12,6 +12,7 @@ use std::process::{Command, ExitStatus};
use std::sync::Arc;
use clap::{App, Arg, ArgMatches, SubCommand};
use context::CoreContext;
use failure::{Error, Result};
use futures::prelude::*;
use futures_ext::{BoxFuture, FutureExt};
@ -134,11 +135,15 @@ impl<'a, 'b> AppExt for App<'a, 'b> {
}
}
pub fn handle_command<'a>(matches: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Error> {
pub fn handle_command<'a>(
matches: &ArgMatches<'a>,
ctx: CoreContext,
logger: Logger,
) -> BoxFuture<(), Error> {
match matches.subcommand() {
(CLONE_CMD, Some(sub_m)) => handle_clone(sub_m, logger),
(IMPORT_CMD, Some(sub_m)) => handle_import(sub_m, logger),
(FBPKG_CMD, Some(sub_m)) => handle_fbpkg(sub_m, logger),
(IMPORT_CMD, Some(sub_m)) => handle_import(sub_m, ctx, logger),
(FBPKG_CMD, Some(sub_m)) => handle_fbpkg(sub_m, ctx, logger),
_ => {
println!("{}", matches.usage());
::std::process::exit(1);
@ -171,7 +176,11 @@ fn handle_clone<'a>(args: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Erro
clone(dest)
}
fn handle_import<'a>(args: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Error> {
fn handle_import<'a>(
args: &ArgMatches<'a>,
ctx: CoreContext,
logger: Logger,
) -> BoxFuture<(), Error> {
let interactive = args.is_present("interactive");
let dest = {
let default = try_boxfuture!(data_dir()).join(IMPORT_DFLT_DIR);
@ -208,10 +217,14 @@ fn handle_import<'a>(args: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Err
info!(logger, "Using {} as source for importing", src.display());
try_boxfuture!(fs::create_dir_all(&dest));
import(logger, src, dest)
import(ctx, logger, src, dest)
}
fn handle_fbpkg<'a>(args: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Error> {
fn handle_fbpkg<'a>(
args: &ArgMatches<'a>,
ctx: CoreContext,
logger: Logger,
) -> BoxFuture<(), Error> {
let interactive = args.is_present("interactive");
let ephemeral = args.is_present("ephemeral");
let non_forward = args.is_present("non-forward");
@ -240,7 +253,7 @@ fn handle_fbpkg<'a>(args: &ArgMatches<'a>, logger: Logger) -> BoxFuture<(), Erro
})
.and_then({
cloned!(src, import_dir);
move |()| import(logger, src.clone(), import_dir)
move |()| import(ctx, logger, src.clone(), import_dir)
})
.and_then(move |()| {
let mut fbpkg = Command::new("fbpkg");
@ -326,7 +339,7 @@ fn clone(dest: PathBuf) -> BoxFuture<(), Error> {
.boxify()
}
fn import(logger: Logger, src: PathBuf, dest: PathBuf) -> BoxFuture<(), Error> {
fn import(ctx: CoreContext, logger: Logger, src: PathBuf, dest: PathBuf) -> BoxFuture<(), Error> {
try_boxfuture!(setup_repo_dir(&dest, true));
let blobrepo = Arc::new(try_boxfuture!(BlobRepo::new_rocksdb(
logger.new(o!["BlobRepo:Rocksdb" => dest.to_string_lossy().into_owned()]),
@ -337,6 +350,7 @@ fn import(logger: Logger, src: PathBuf, dest: PathBuf) -> BoxFuture<(), Error> {
check_hg_config_repo(src.clone())
.and_then(move |()| {
Blobimport {
ctx,
logger,
blobrepo,
revlogrepo_path: src.join(".hg"),

View File

@ -24,6 +24,7 @@ extern crate blobstore;
extern crate bonsai_utils;
extern crate bookmarks;
extern crate cmdlib;
extern crate context;
#[macro_use]
extern crate futures_ext;
extern crate manifoldblob;
@ -59,6 +60,7 @@ use blobstore::{new_memcache_blobstore, Blobstore, CacheBlobstoreExt, PrefixBlob
use bonsai_utils::{bonsai_diff, BonsaiDiffResult};
use bookmarks::Bookmark;
use cmdlib::args;
use context::CoreContext;
use futures_ext::{BoxFuture, FutureExt};
use manifoldblob::ManifoldBlob;
use mercurial_types::{Changeset, HgChangesetEnvelope, HgChangesetId, HgFileEnvelope,
@ -202,11 +204,15 @@ fn fetch_content_from_manifest(
}
}
fn resolve_hg_rev(repo: &BlobRepo, rev: &str) -> impl Future<Item = HgChangesetId, Error = Error> {
fn resolve_hg_rev(
ctx: CoreContext,
repo: &BlobRepo,
rev: &str,
) -> impl Future<Item = HgChangesetId, Error = Error> {
let book = Bookmark::new(&rev).unwrap();
let hash = HgChangesetId::from_str(rev);
repo.get_bookmark(&book).and_then({
repo.get_bookmark(ctx, &book).and_then({
move |r| match r {
Some(cs) => Ok(cs),
None => hash,
@ -215,13 +221,14 @@ fn resolve_hg_rev(repo: &BlobRepo, rev: &str) -> impl Future<Item = HgChangesetI
}
fn fetch_content(
ctx: CoreContext,
logger: Logger,
repo: &BlobRepo,
rev: &str,
path: &str,
) -> BoxFuture<Content, Error> {
let path = try_boxfuture!(MPath::new(path));
let resolved_cs_id = resolve_hg_rev(repo, rev);
let resolved_cs_id = resolve_hg_rev(ctx, repo, rev);
let mf = resolved_cs_id
.and_then({
@ -257,15 +264,16 @@ fn fetch_content(
}
pub fn fetch_bonsai_changeset(
ctx: CoreContext,
rev: &str,
repo: &BlobRepo,
) -> impl Future<Item = BonsaiChangeset, Error = Error> {
let hg_changeset_id = resolve_hg_rev(repo, rev);
let hg_changeset_id = resolve_hg_rev(ctx.clone(), repo, rev);
hg_changeset_id
.and_then({
let repo = repo.clone();
move |hg_cs| repo.get_bonsai_from_hg(&hg_cs)
move |hg_cs| repo.get_bonsai_from_hg(ctx, &hg_cs)
})
.and_then({
let rev = rev.to_string();
@ -571,8 +579,11 @@ fn main() -> Result<()> {
args::init_cachelib(&matches);
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let repo = args::open_repo(&logger, &matches)?;
fetch_bonsai_changeset(rev, repo.blobrepo())
fetch_bonsai_changeset(ctx, rev, repo.blobrepo())
.map(|bcs| {
println!("{:?}", bcs);
})
@ -584,8 +595,11 @@ fn main() -> Result<()> {
args::init_cachelib(&matches);
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let repo = args::open_repo(&logger, &matches)?;
fetch_content(logger.clone(), repo.blobrepo(), rev, path)
fetch_content(ctx, logger.clone(), repo.blobrepo(), rev, path)
.and_then(|content| {
match content {
Content::Executable(_) => {
@ -628,12 +642,18 @@ fn main() -> Result<()> {
})
.boxify()
}
(CONFIG_REPO, Some(sub_m)) => config_repo::handle_command(sub_m, logger),
(CONFIG_REPO, Some(sub_m)) => {
let ctx = CoreContext::test_mock();
config_repo::handle_command(sub_m, ctx, logger)
}
(BOOKMARKS, Some(sub_m)) => {
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?;
bookmarks_manager::handle_command(&repo.blobrepo(), sub_m, logger)
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
bookmarks_manager::handle_command(ctx, &repo.blobrepo(), sub_m, logger)
}
(HG_CHANGESET, Some(sub_m)) => match sub_m.subcommand() {
(HG_CHANGESET_DIFF, Some(sub_m)) => {
@ -673,15 +693,16 @@ fn main() -> Result<()> {
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?.blobrepo().clone();
let ctx = CoreContext::test_mock();
(start_cs, stop_cs)
.into_future()
.and_then({
cloned!(repo);
cloned!(ctx, repo);
move |(start_cs, stop_cs)| {
(
repo.get_bonsai_from_hg(&start_cs),
repo.get_bonsai_from_hg(&stop_cs),
repo.get_bonsai_from_hg(ctx.clone(), &start_cs),
repo.get_bonsai_from_hg(ctx, &stop_cs),
)
}
})
@ -695,7 +716,7 @@ fn main() -> Result<()> {
cloned!(repo);
move |(start_cs, stop_cs)| {
RangeNodeStream::new(&Arc::new(repo.clone()), start_cs, stop_cs)
.map(move |cs| repo.get_hg_from_bonsai_changeset(cs))
.map(move |cs| repo.get_hg_from_bonsai_changeset(ctx.clone(), cs))
.buffered(100)
.map(|cs| cs.to_hex().to_string())
.collect()

View File

@ -8,6 +8,7 @@
extern crate clap;
extern crate cmdlib;
extern crate context;
extern crate failure_ext as failure;
extern crate futures;
extern crate mercurial_types;
@ -19,6 +20,7 @@ use std::str::FromStr;
use std::sync::Arc;
use clap::{App, Arg};
use context::CoreContext;
use failure::{Result, SlogKVError};
use futures::Future;
@ -87,6 +89,8 @@ fn main() -> Result<()> {
let no_bookmark = matches.is_present("no-bookmark");
let blobimport = Blobimport {
// TODO(T37478150, luk) this is not a test use case, but I want to fix it properly later
ctx: CoreContext::test_mock(),
logger: logger.clone(),
blobrepo,
revlogrepo_path,

View File

@ -20,6 +20,7 @@ extern crate futures_ext;
extern crate blobrepo_utils;
extern crate cmdlib;
extern crate context;
extern crate failure_ext;
extern crate mercurial_types;
extern crate mononoke_types;
@ -40,6 +41,7 @@ use futures_ext::FutureExt;
use blobrepo_utils::{BonsaiMFVerify, BonsaiMFVerifyResult};
use cmdlib::args;
use context::CoreContext;
use mercurial_types::HgChangesetId;
use failure_ext::Result;
@ -100,6 +102,9 @@ fn main() -> Result<()> {
// matter much.
let (end_sender, end_receiver) = ::std::sync::mpsc::channel();
// TODO(luk): This is not a test use case, fix it in next diffs
let ctx = CoreContext::test_mock();
// The future::lazy is to ensure that bonsai_verify (which calls tokio::spawn) is called after
// tokio::run, not before.
let verify_fut = future::lazy({
@ -110,6 +115,7 @@ fn main() -> Result<()> {
let ignored = ignored.clone();
move || {
let bonsai_verify = BonsaiMFVerify {
ctx,
logger: logger.clone(),
repo: repo.blobrepo().clone(),
follow_limit,

View File

@ -17,6 +17,7 @@ extern crate clap;
#[macro_use]
extern crate cloned;
extern crate cmdlib;
extern crate context;
#[macro_use]
extern crate failure_ext as failure;
extern crate futures;
@ -45,6 +46,7 @@ pub mod tailer;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use clap::{App, ArgMatches};
use context::CoreContext;
use failure::Error;
use failure::Result;
use futures::Stream;
@ -114,7 +116,11 @@ fn main() -> Result<()> {
let manifold_client = ManifoldHttpClient::new(id, rc)?;
// TODO(T37478150, luk) This is not a test case, will be fixed in later diffs
let ctx = CoreContext::test_mock();
let tailer = Tailer::new(
ctx,
repo_name.to_string(),
// TODO (T32873881): Arc<BlobRepo> should become BlobRepo
Arc::new(blobrepo),
@ -305,6 +311,8 @@ fn setup_logger<'a>(matches: &ArgMatches<'a>, repo_name: String) -> Logger {
fn get_config<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result<RepoConfigs> {
let crpath = PathBuf::from(matches.value_of("crpath").unwrap());
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let config_repo = BlobRepo::new_rocksdb(
logger.new(o!["repo" => "Config repo"]),
&crpath,
@ -317,7 +325,7 @@ fn get_config<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result<RepoConfi
Some(book) => {
let book = bookmarks::Bookmark::new(book).expect("book must be ascii");
tokio_runtime
.block_on(config_repo.get_bookmark(&book))?
.block_on(config_repo.get_bookmark(ctx, &book))?
.expect("bookmark not found")
}
None => mercurial_types::nodehash::HgChangesetId::from_str(

View File

@ -9,6 +9,7 @@
use super::HookResults;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
use failure::Error;
use failure::Result;
use futures::{Future, Stream};
@ -23,6 +24,7 @@ use slog::Logger;
use std::sync::Arc;
pub struct Tailer {
ctx: CoreContext,
repo: Arc<BlobRepo>,
hook_manager: Arc<HookManager>,
bookmark: Bookmark,
@ -33,6 +35,7 @@ pub struct Tailer {
impl Tailer {
pub fn new(
ctx: CoreContext,
repo_name: String,
repo: Arc<BlobRepo>,
config: RepoConfig,
@ -57,6 +60,7 @@ impl Tailer {
let last_rev_key = format!("{}{}", "__mononoke_hook_tailer_last_rev.", repo_id).to_string();
Ok(Tailer {
ctx,
repo,
hook_manager: Arc::new(hook_manager),
bookmark,
@ -71,6 +75,7 @@ impl Tailer {
}
fn run_in_range0(
ctx: CoreContext,
repo: Arc<BlobRepo>,
hm: Arc<HookManager>,
last_rev: HgNodeHash,
@ -80,14 +85,14 @@ impl Tailer {
) -> BoxFuture<Vec<HookResults>, Error> {
debug!(logger, "Running in range {} to {}", last_rev, end_rev);
cloned!(logger);
nodehash_to_bonsai(&repo, end_rev)
nodehash_to_bonsai(ctx.clone(), &repo, end_rev)
.and_then(move |end_rev| {
AncestorsNodeStream::new(&repo.get_changeset_fetcher(), end_rev)
.take(1000) // Limit number so we don't process too many
.map({
move |cs| {
cloned!(bm, hm, logger, repo);
run_hooks_for_changeset(repo, hm, bm, cs, logger)
cloned!(ctx, bm, hm, logger, repo);
run_hooks_for_changeset(ctx, repo, hm, bm, cs, logger)
}
})
.map(spawn_future)
@ -106,36 +111,43 @@ impl Tailer {
last_rev: HgNodeHash,
end_rev: HgNodeHash,
) -> BoxFuture<Vec<HookResults>, Error> {
let repo = self.repo.clone();
let hm = self.hook_manager.clone();
let bm = self.bookmark.clone();
Tailer::run_in_range0(repo, hm, last_rev, end_rev, bm, self.logger.clone())
cloned!(self.ctx, self.repo, self.hook_manager, self.bookmark);
Tailer::run_in_range0(
ctx,
repo,
hook_manager,
last_rev,
end_rev,
bookmark,
self.logger.clone(),
)
}
pub fn run_with_limit(&self, limit: u64) -> BoxFuture<Vec<HookResults>, Error> {
let ctx = self.ctx.clone();
let bm = self.bookmark.clone();
let hm = self.hook_manager.clone();
let bm_rev = self.repo
.get_bookmark(&bm)
.get_bookmark(ctx.clone(), &bm)
.and_then({
cloned!(bm);
|opt| opt.ok_or(ErrorKind::NoSuchBookmark(bm).into())
})
.and_then({
cloned!(self.repo);
move |bm_rev| nodehash_to_bonsai(&repo, bm_rev.into_nodehash())
move |bm_rev| nodehash_to_bonsai(ctx, &repo, bm_rev.into_nodehash())
});
cloned!(self.repo, self.logger);
cloned!(self.ctx, self.repo, self.logger);
bm_rev
.and_then(move |bm_rev| {
AncestorsNodeStream::new(&repo.get_changeset_fetcher(), bm_rev)
.take(limit)
.map({
move |cs| {
cloned!(bm, hm, logger, repo);
run_hooks_for_changeset(repo, hm, bm, cs, logger)
cloned!(ctx, bm, hm, logger, repo);
run_hooks_for_changeset(ctx, repo, hm, bm, cs, logger)
}
})
.map(spawn_future)
@ -147,6 +159,7 @@ impl Tailer {
}
pub fn run(&self) -> BoxFuture<Vec<HookResults>, Error> {
let ctx = self.ctx.clone();
let bm = self.bookmark.clone();
let bm2 = bm.clone();
let repo = self.repo.clone();
@ -163,7 +176,7 @@ impl Tailer {
let logger3 = logger.clone();
self.repo
.get_bookmark(&bm)
.get_bookmark(ctx.clone(), &bm)
.and_then(|opt| opt.ok_or(ErrorKind::NoSuchBookmark(bm).into()))
.and_then(move |current_bm_cs| {
manifold_client
@ -187,7 +200,7 @@ impl Tailer {
if last_rev == end_rev {
info!(logger, "Nothing to do");
}
Tailer::run_in_range0(repo, hm, last_rev, end_rev, bm2, logger3)
Tailer::run_in_range0(ctx, repo, hm, last_rev, end_rev, bm2, logger3)
.map(move |res| (end_rev, res))
})
.and_then(move |(end_rev, res)| {
@ -200,22 +213,24 @@ impl Tailer {
}
fn nodehash_to_bonsai(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
node: HgNodeHash,
) -> impl Future<Item = ChangesetId, Error = Error> {
let hg_cs = HgChangesetId::new(node);
repo.get_bonsai_from_hg(&hg_cs)
repo.get_bonsai_from_hg(ctx, &hg_cs)
.and_then(move |maybe_node| maybe_node.ok_or(ErrorKind::BonsaiNotFound(hg_cs).into()))
}
fn run_hooks_for_changeset(
ctx: CoreContext,
repo: Arc<BlobRepo>,
hm: Arc<HookManager>,
bm: Bookmark,
cs: ChangesetId,
logger: Logger,
) -> impl Future<Item = (HgChangesetId, HookResults), Error = Error> {
repo.get_hg_from_bonsai_changeset(cs)
repo.get_hg_from_bonsai_changeset(ctx, cs)
.and_then(move |hg_cs| {
debug!(logger, "Running file hooks for changeset {:?}", hg_cs);
hm.run_file_hooks_for_bookmark(hg_cs.clone(), &bm, None)

View File

@ -11,6 +11,7 @@ extern crate cloned;
extern crate blobrepo;
extern crate bookmarks;
extern crate context;
#[macro_use]
extern crate failure_ext as failure;
extern crate futures;
@ -27,6 +28,7 @@ use futures::Future;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use context::CoreContext;
use mercurial_types::{Changeset, HgChangesetId};
use mercurial_types::manifest::Content;
use mononoke_types::MPath;
@ -52,10 +54,11 @@ pub fn get_content_by_path(
}
pub fn get_changeset_by_bookmark(
ctx: CoreContext,
repo: Arc<BlobRepo>,
bookmark: Bookmark,
) -> impl Future<Item = HgChangesetId, Error = Error> {
repo.get_bookmark(&bookmark)
repo.get_bookmark(ctx, &bookmark)
.map_err({
cloned!(bookmark);
move |_| ErrorKind::InvalidInput(bookmark.to_string()).into()

View File

@ -111,6 +111,7 @@ mod test {
use std::sync::Arc;
use async_unit;
use context::CoreContext;
use fixtures::linear;
use futures::Future;
use mononoke_types::Generation;
@ -121,16 +122,49 @@ mod test {
#[test]
fn test_helpers() {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let mut ordered_hashes_oldest_to_newest = vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(&repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b"),
string_to_bonsai(&repo, "cb15ca4a43a59acff5388cea9648c162afde8372"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(&repo, "607314ef579bd2407752361ba1b0c1729d08b281"),
string_to_bonsai(&repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f"),
string_to_bonsai(&repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"607314ef579bd2407752361ba1b0c1729d08b281",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3e0e761030db6e479a7fb58b12881883f9f8c63f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
),
];
ordered_hashes_oldest_to_newest.reverse();

View File

@ -16,6 +16,8 @@ extern crate futures_ext;
extern crate blobrepo;
#[cfg(test)]
extern crate context;
#[cfg(test)]
#[macro_use]
extern crate maplit;
extern crate mercurial_types;

File diff suppressed because it is too large

View File

@ -9,6 +9,7 @@ use blobrepo::BlobRepo;
use futures::future::Future;
use std::sync::Arc;
use context::CoreContext;
use mercurial_types::{HgChangesetId, HgNodeHash};
use mononoke_types::ChangesetId;
@ -22,9 +23,9 @@ pub fn string_to_nodehash(hash: &'static str) -> HgNodeHash {
use index::ReachabilityIndex;
pub fn string_to_bonsai(repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
pub fn string_to_bonsai(ctx: CoreContext, repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(&HgChangesetId::new(node))
repo.get_bonsai_from_hg(ctx, &HgChangesetId::new(node))
.wait()
.unwrap()
.unwrap()
@ -32,26 +33,61 @@ pub fn string_to_bonsai(repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
pub fn test_linear_reachability<T: ReachabilityIndex + 'static>(index_creator: fn() -> T) {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let index = index_creator();
let ordered_hashes = vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(&repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b"),
string_to_bonsai(&repo, "cb15ca4a43a59acff5388cea9648c162afde8372"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(&repo, "607314ef579bd2407752361ba1b0c1729d08b281"),
string_to_bonsai(&repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f"),
string_to_bonsai(&repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"607314ef579bd2407752361ba1b0c1729d08b281",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3e0e761030db6e479a7fb58b12881883f9f8c63f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"2d7d4ba9ce0a6ffd222de7785b249ead9c51c536",
),
];
for i in 0..ordered_hashes.len() {
for j in i..ordered_hashes.len() {
let src = ordered_hashes.get(i).unwrap();
let dst = ordered_hashes.get(j).unwrap();
let future_result_src_to_dst = index.query_reachability(repo.get_changeset_fetcher(), *src, *dst);
let future_result_src_to_dst =
index.query_reachability(repo.get_changeset_fetcher(), *src, *dst);
assert!(future_result_src_to_dst.wait().unwrap());
let future_result_dst_to_src = index.query_reachability(repo.get_changeset_fetcher(), *dst, *src);
let future_result_dst_to_src =
index.query_reachability(repo.get_changeset_fetcher(), *dst, *src);
assert_eq!(future_result_dst_to_src.wait().unwrap(), src == dst);
}
}
@ -60,30 +96,83 @@ pub fn test_linear_reachability<T: ReachabilityIndex + 'static>(index_creator: f
pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(index_creator: fn() -> T) {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let index = index_creator();
let root_node = string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c");
let root_node = string_to_bonsai(
ctx.clone(),
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
);
// order is oldest to newest
let branch_1 = vec![
string_to_bonsai(&repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
];
// order is oldest to newest
let branch_2 = vec![
string_to_bonsai(&repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(&repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(&repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(&repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(&repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(&repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
];
let _merge_node = string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055");
let _merge_node = string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
);
for left_node in branch_1.into_iter() {
for right_node in branch_2.iter() {
@ -114,17 +203,46 @@ pub fn test_merge_uneven_reachability<T: ReachabilityIndex + 'static>(index_crea
pub fn test_branch_wide_reachability<T: ReachabilityIndex + 'static>(index_creator: fn() -> T) {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
// this repo has no merges but many branches
let repo = Arc::new(branch_wide::getrepo(None));
let index = index_creator();
let root_node = string_to_bonsai(&repo, "ecba698fee57eeeef88ac3dcc3b623ede4af47bd");
let root_node = string_to_bonsai(
ctx.clone(),
&repo,
"ecba698fee57eeeef88ac3dcc3b623ede4af47bd",
);
let b1 = string_to_bonsai(&repo, "9e8521affb7f9d10e9551a99c526e69909042b20");
let b2 = string_to_bonsai(&repo, "4685e9e62e4885d477ead6964a7600c750e39b03");
let b1_1 = string_to_bonsai(&repo, "b6a8169454af58b4b72b3665f9aa0d25529755ff");
let b1_2 = string_to_bonsai(&repo, "c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12");
let b2_1 = string_to_bonsai(&repo, "04decbb0d1a65789728250ddea2fe8d00248e01c");
let b2_2 = string_to_bonsai(&repo, "49f53ab171171b3180e125b918bd1cf0af7e5449");
let b1 = string_to_bonsai(
ctx.clone(),
&repo,
"9e8521affb7f9d10e9551a99c526e69909042b20",
);
let b2 = string_to_bonsai(
ctx.clone(),
&repo,
"4685e9e62e4885d477ead6964a7600c750e39b03",
);
let b1_1 = string_to_bonsai(
ctx.clone(),
&repo,
"b6a8169454af58b4b72b3665f9aa0d25529755ff",
);
let b1_2 = string_to_bonsai(
ctx.clone(),
&repo,
"c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12",
);
let b2_1 = string_to_bonsai(
ctx.clone(),
&repo,
"04decbb0d1a65789728250ddea2fe8d00248e01c",
);
let b2_2 = string_to_bonsai(
ctx.clone(),
&repo,
"49f53ab171171b3180e125b918bd1cf0af7e5449",
);
// all nodes can reach the root
for above_root in vec![b1, b2, b1_1, b1_2, b2_1, b2_2].iter() {

View File

@ -22,7 +22,6 @@ use itertools::Itertools;
use slog::Logger;
use stats::Histogram;
use time_ext::DurationExt;
use uuid::Uuid;
use blobrepo::HgBlobChangeset;
use bookmarks::Bookmark;
@ -163,7 +162,7 @@ fn bundle2caps() -> String {
#[derive(Clone)]
pub struct RepoClient {
repo: MononokeRepo,
ctxt: CoreContext<Uuid>,
ctx: CoreContext,
// Percent of returned entries (filelogs, manifests, changesets) whose content
// will be hash validated
hash_validation_percentage: usize,
@ -252,28 +251,28 @@ impl WireprotoLogger {
impl RepoClient {
pub fn new(
repo: MononokeRepo,
ctxt: CoreContext<Uuid>,
ctx: CoreContext,
hash_validation_percentage: usize,
lca_hint: Arc<LeastCommonAncestorsHint + Send + Sync>,
) -> Self {
RepoClient {
repo,
ctxt,
ctx,
hash_validation_percentage,
lca_hint,
}
}
fn logger(&self) -> &Logger {
self.ctxt.logger()
self.ctx.logger()
}
fn trace(&self) -> &TraceContext {
self.ctxt.trace()
self.ctx.trace()
}
fn scuba_logger(&self, op: &str, args: Option<String>) -> ScubaSampleBuilder {
let mut scuba_logger = self.ctxt.scuba().clone();
let mut scuba_logger = self.ctx.scuba().clone();
scuba_logger.add("command", op);
@ -289,6 +288,7 @@ impl RepoClient {
let blobrepo = self.repo.blobrepo();
let mut bundle2_parts = vec![];
let cg_part_builder = bundle2_resolver::create_getbundle_response(
self.ctx.clone(),
blobrepo.clone(),
args.common
.into_iter()
@ -308,10 +308,12 @@ impl RepoClient {
// TODO: generalize this to other listkey types
// (note: just calling &b"bookmarks"[..] doesn't work because https://fburl.com/0p0sq6kp)
if args.listkeys.contains(&b"bookmarks".to_vec()) {
let items = blobrepo.get_bookmarks_maybe_stale().map(|(name, cs)| {
let hash: Vec<u8> = cs.into_nodehash().to_hex().into();
(name.to_string(), hash)
});
let items = blobrepo
.get_bookmarks_maybe_stale(self.ctx.clone())
.map(|(name, cs)| {
let hash: Vec<u8> = cs.into_nodehash().to_hex().into();
(name.to_string(), hash)
});
bundle2_parts.push(parts::listkey_part("bookmarks", items)?);
}
// TODO(stash): handle includepattern= and excludepattern=
@ -410,10 +412,10 @@ impl RepoClient {
args: Option<serde_json::Value>,
) -> WireprotoLogger {
WireprotoLogger::new(
self.ctxt.scuba().clone(),
self.ctx.scuba().clone(),
wireproto_command,
args,
self.ctxt.wireproto_scribe_category().clone(),
self.ctx.wireproto_scribe_category().clone(),
self.repo.reponame().clone(),
)
}
@ -509,7 +511,7 @@ impl HgCommands for RepoClient {
self.repo
.blobrepo()
.get_heads_maybe_stale()
.get_heads_maybe_stale(self.ctx.clone())
.collect()
.map(|v| v.into_iter().collect())
.from_err()
@ -543,8 +545,12 @@ impl HgCommands for RepoClient {
buf.freeze()
}
fn check_bookmark_exists(repo: BlobRepo, bookmark: Bookmark) -> HgCommandRes<Bytes> {
repo.get_bookmark(&bookmark)
fn check_bookmark_exists(
ctx: CoreContext,
repo: BlobRepo,
bookmark: Bookmark,
) -> HgCommandRes<Bytes> {
repo.get_bookmark(ctx, &bookmark)
.map(move |csid| match csid {
Some(csid) => generate_resp_buf(true, csid.to_hex().as_bytes()),
None => generate_resp_buf(false, format!("{} not found", bookmark).as_bytes()),
@ -558,19 +564,22 @@ impl HgCommands for RepoClient {
let lookup_fut = match (node, bookmark) {
(Some(node), Some(bookmark)) => {
let csid = HgChangesetId::new(node);
repo.changeset_exists(&csid)
.and_then(move |exists| {
if exists {
Ok(generate_resp_buf(true, node.to_hex().as_bytes()))
.into_future()
.boxify()
} else {
check_bookmark_exists(repo, bookmark)
repo.changeset_exists(self.ctx.clone(), &csid)
.and_then({
cloned!(self.ctx);
move |exists| {
if exists {
Ok(generate_resp_buf(true, node.to_hex().as_bytes()))
.into_future()
.boxify()
} else {
check_bookmark_exists(ctx, repo, bookmark)
}
}
})
.boxify()
}
(None, Some(bookmark)) => check_bookmark_exists(repo, bookmark),
(None, Some(bookmark)) => check_bookmark_exists(self.ctx.clone(), repo, bookmark),
// Failed to parse as a hash or bookmark.
_ => Ok(generate_resp_buf(false, "invalid input".as_bytes()))
.into_future()
@ -604,7 +613,7 @@ impl HgCommands for RepoClient {
({
let ref_nodes: Vec<_> = nodes.iter().collect();
blobrepo.many_changesets_exists(&ref_nodes[..])
blobrepo.many_changesets_exists(self.ctx.clone(), &ref_nodes[..])
}).map(move |cs| {
let cs: HashSet<_> = cs.into_iter().collect();
let known_nodes: Vec<_> = nodes
@ -687,7 +696,7 @@ impl HgCommands for RepoClient {
self.repo
.blobrepo()
.get_bookmarks_maybe_stale()
.get_bookmarks_maybe_stale(self.ctx.clone())
.map(|(name, cs)| {
let hash: Vec<u8> = cs.into_nodehash().to_hex().into();
(name, hash)
@ -731,6 +740,7 @@ impl HgCommands for RepoClient {
let mut scuba_logger = self.scuba_logger(ops::UNBUNDLE, None);
let res = bundle2_resolver::resolve(
self.ctx.clone(),
Arc::new(self.repo.blobrepo().clone()),
self.logger().new(o!("command" => "unbundle")),
scuba_logger.clone(),

View File

@ -37,7 +37,6 @@ extern crate stats;
extern crate time_ext;
#[macro_use]
extern crate tracing;
extern crate uuid;
extern crate blobrepo;
extern crate blobstore;

View File

@ -21,6 +21,7 @@ use futures_ext::StreamExt;
use UniqueHeap;
use blobrepo::{BlobRepo, ChangesetFetcher};
use context::CoreContext;
use mercurial_types::HgNodeHash;
use mercurial_types::nodehash::HgChangesetId;
use mononoke_types::{ChangesetId, Generation};
@ -139,6 +140,7 @@ impl Stream for AncestorsNodeStream {
}
pub fn common_ancestors<I>(
ctx: CoreContext,
repo: &BlobRepo,
changeset_fetcher: Arc<ChangesetFetcher>,
nodes: I,
@ -147,10 +149,10 @@ where
I: IntoIterator<Item = HgNodeHash>,
{
let nodes_iter = nodes.into_iter().map({
let repo = repo.clone();
cloned!(ctx, repo);
move |node| {
let node = HgChangesetId::new(node);
repo.get_bonsai_from_hg(&node)
repo.get_bonsai_from_hg(ctx.clone(), &node)
.and_then({
cloned!(node);
move |maybe_node| {
@ -158,13 +160,13 @@ where
}
})
.map({
cloned!(changeset_fetcher, repo);
cloned!(ctx, changeset_fetcher, repo);
move |node| {
AncestorsNodeStream::new(&changeset_fetcher, node)
.map({
let repo = repo.clone();
cloned!(repo);
move |node| {
repo.get_hg_from_bonsai_changeset(node)
repo.get_hg_from_bonsai_changeset(ctx.clone(), node)
.map(|cs| cs.into_nodehash())
}
})
@ -175,10 +177,11 @@ where
.boxify()
}
});
IntersectNodeStream::new(&Arc::new(repo.clone()), nodes_iter).boxed()
IntersectNodeStream::new(ctx, &Arc::new(repo.clone()), nodes_iter).boxed()
}
pub fn greatest_common_ancestor<I>(
ctx: CoreContext,
repo: &BlobRepo,
changeset_fetcher: Arc<ChangesetFetcher>,
nodes: I,
@ -186,7 +189,7 @@ pub fn greatest_common_ancestor<I>(
where
I: IntoIterator<Item = HgNodeHash>,
{
Box::new(common_ancestors(repo, changeset_fetcher, nodes).take(1))
Box::new(common_ancestors(ctx, repo, changeset_fetcher, nodes).take(1))
}
#[cfg(test)]
@ -202,26 +205,59 @@ mod test {
#[test]
fn linear_ancestors() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
&changeset_fetcher,
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
).boxed();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(&repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b"),
string_to_bonsai(&repo, "cb15ca4a43a59acff5388cea9648c162afde8372"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(&repo, "607314ef579bd2407752361ba1b0c1729d08b281"),
string_to_bonsai(&repo, "3e0e761030db6e479a7fb58b12881883f9f8c63f"),
string_to_bonsai(&repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"607314ef579bd2407752361ba1b0c1729d08b281",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3e0e761030db6e479a7fb58b12881883f9f8c63f",
),
string_to_bonsai(ctx, &repo, "2d7d4ba9ce0a6ffd222de7785b249ead9c51c536"),
],
nodestream,
);
@ -231,31 +267,84 @@ mod test {
#[test]
fn merge_ancestors_from_merge() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
&changeset_fetcher,
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
).boxed();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(&repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(&repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(&repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(&repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(&repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(&repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
],
nodestream,
);
@ -265,22 +354,39 @@ mod test {
#[test]
fn merge_ancestors_one_branch() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = AncestorsNodeStream::new(
&changeset_fetcher,
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
).boxed();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
],
nodestream,
);
@ -290,6 +396,7 @@ mod test {
#[test]
fn unshared_merge_all() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
// The unshared_merge_uneven fixture has a commit after the merge. Pull in everything
// by starting at the head and working back to the original unshared history commits
let repo = Arc::new(unshared_merge_uneven::getrepo(None));
@ -298,31 +405,107 @@ mod test {
let nodestream = AncestorsNodeStream::new(
&changeset_fetcher,
string_to_bonsai(&repo, "c10443fa4198c6abad76dc6c69c1417b2e821508)"),
string_to_bonsai(
ctx.clone(),
&repo,
"c10443fa4198c6abad76dc6c69c1417b2e821508)",
),
).boxed();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "c10443fa4198c6abad76dc6c69c1417b2e821508"),
string_to_bonsai(&repo, "a5ab070634ab9cbdfc92404b3ec648f7e29547bc"),
string_to_bonsai(&repo, "64011f64aaf9c2ad2e674f57c033987da4016f51"),
string_to_bonsai(&repo, "c1d5375bf73caab8725d759eaca56037c725c7d1"),
string_to_bonsai(&repo, "e819f2dd9a01d3e63d9a93e298968df275e6ad7c"),
string_to_bonsai(&repo, "5a3e8d5a475ec07895e64ec1e1b2ec09bfa70e4e"),
string_to_bonsai(&repo, "76096af83f52cc9a225ccfd8ddfb05ea18132343"),
string_to_bonsai(&repo, "33fb49d8a47b29290f5163e30b294339c89505a2"),
string_to_bonsai(&repo, "03b0589d9788870817d03ce7b87516648ed5b33a"),
string_to_bonsai(&repo, "2fa8b4ee6803a18db4649a3843a723ef1dfe852b"),
string_to_bonsai(&repo, "f01e186c165a2fbe931fd1bf4454235398c591c9"),
string_to_bonsai(&repo, "163adc0d0f5d2eb0695ca123addcb92bab202096"),
string_to_bonsai(&repo, "0b94a2881dda90f0d64db5fae3ee5695a38e7c8f"),
string_to_bonsai(&repo, "eee492dcdeaae18f91822c4359dd516992e0dbcd"),
string_to_bonsai(&repo, "f61fdc0ddafd63503dcd8eed8994ec685bfc8941"),
string_to_bonsai(&repo, "3775a86c64cceeaf68ffe3f012fc90774c42002b"),
string_to_bonsai(&repo, "36ff88dd69c9966c9fad9d6d0457c52153039dde"),
string_to_bonsai(&repo, "1700524113b1a3b1806560341009684b4378660b"),
string_to_bonsai(&repo, "9d374b7e8180f933e3043ad1ffab0a9f95e2bac6"),
string_to_bonsai(
ctx.clone(),
&repo,
"c10443fa4198c6abad76dc6c69c1417b2e821508",
),
string_to_bonsai(
ctx.clone(),
&repo,
"a5ab070634ab9cbdfc92404b3ec648f7e29547bc",
),
string_to_bonsai(
ctx.clone(),
&repo,
"64011f64aaf9c2ad2e674f57c033987da4016f51",
),
string_to_bonsai(
ctx.clone(),
&repo,
"c1d5375bf73caab8725d759eaca56037c725c7d1",
),
string_to_bonsai(
ctx.clone(),
&repo,
"e819f2dd9a01d3e63d9a93e298968df275e6ad7c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"5a3e8d5a475ec07895e64ec1e1b2ec09bfa70e4e",
),
string_to_bonsai(
ctx.clone(),
&repo,
"76096af83f52cc9a225ccfd8ddfb05ea18132343",
),
string_to_bonsai(
ctx.clone(),
&repo,
"33fb49d8a47b29290f5163e30b294339c89505a2",
),
string_to_bonsai(
ctx.clone(),
&repo,
"03b0589d9788870817d03ce7b87516648ed5b33a",
),
string_to_bonsai(
ctx.clone(),
&repo,
"2fa8b4ee6803a18db4649a3843a723ef1dfe852b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"f01e186c165a2fbe931fd1bf4454235398c591c9",
),
string_to_bonsai(
ctx.clone(),
&repo,
"163adc0d0f5d2eb0695ca123addcb92bab202096",
),
string_to_bonsai(
ctx.clone(),
&repo,
"0b94a2881dda90f0d64db5fae3ee5695a38e7c8f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"eee492dcdeaae18f91822c4359dd516992e0dbcd",
),
string_to_bonsai(
ctx.clone(),
&repo,
"f61fdc0ddafd63503dcd8eed8994ec685bfc8941",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3775a86c64cceeaf68ffe3f012fc90774c42002b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"36ff88dd69c9966c9fad9d6d0457c52153039dde",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1700524113b1a3b1806560341009684b4378660b",
),
string_to_bonsai(ctx, &repo, "9d374b7e8180f933e3043ad1ffab0a9f95e2bac6"),
],
nodestream,
);
@ -332,11 +515,13 @@ mod test {
#[test]
fn no_common_ancestor() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(unshared_merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = greatest_common_ancestor(
ctx.clone(),
&repo,
changeset_fetcher,
vec![
@ -344,18 +529,20 @@ mod test {
string_to_nodehash("1700524113b1a3b1806560341009684b4378660b"),
],
);
assert_node_sequence(&repo, vec![], nodestream);
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn greatest_common_ancestor_different_branches() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = greatest_common_ancestor(
ctx.clone(),
&repo,
changeset_fetcher,
vec![
@ -364,6 +551,7 @@ mod test {
],
);
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
@ -376,11 +564,13 @@ mod test {
#[test]
fn greatest_common_ancestor_same_branch() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = greatest_common_ancestor(
ctx.clone(),
&repo,
changeset_fetcher,
vec![
@ -389,6 +579,7 @@ mod test {
],
);
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
@ -401,11 +592,13 @@ mod test {
#[test]
fn all_common_ancestors_different_branches() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = common_ancestors(
ctx.clone(),
&repo,
changeset_fetcher,
vec![
@ -414,6 +607,7 @@ mod test {
],
);
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
@ -426,11 +620,13 @@ mod test {
#[test]
fn all_common_ancestors_same_branch() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let nodestream = common_ancestors(
ctx.clone(),
&repo,
changeset_fetcher,
vec![
@ -439,6 +635,7 @@ mod test {
],
);
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),

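Note on the file above: the only API change here is that common_ancestors and greatest_common_ancestor now take the CoreContext as their first argument and clone it into every bonsai/hg translation they drive. A minimal sketch of the new call shape, written as if it sat in the same test module so the helpers shown above (string_to_nodehash, TestChangesetFetcher, assert_node_sequence) resolve; the hash literals are reused from the merge_uneven fixture, and no result is asserted because the expected ancestors are not restated here.

    #[test]
    fn gca_call_shape_sketch() {
        async_unit::tokio_unit_test(|| {
            let ctx = CoreContext::test_mock();
            let repo = Arc::new(merge_uneven::getrepo(None));
            let changeset_fetcher: Arc<ChangesetFetcher> =
                Arc::new(TestChangesetFetcher::new(repo.clone()));

            // The context goes first and is cloned internally for each lookup.
            let gca = greatest_common_ancestor(
                ctx.clone(),
                &repo,
                changeset_fetcher,
                vec![
                    string_to_nodehash("d7542c9db7f4c77dab4b315edd328edf1514952f"),
                    string_to_nodehash("16839021e338500b3cf7c9b871c8a07351697d68"),
                ],
            );
            // A real test finishes with assert_node_sequence(ctx, &repo, expected, gca);
            // the expected hashes depend on the fixture and are left out of this sketch.
            let _ = (ctx, gca);
        })
    }
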
View File

@ -242,8 +242,7 @@ impl DifferenceOfUnionsOfAncestorsNodeStream {
} else if exclude_gen == current_generation {
let mut should_exclude: Option<bool> = None;
{
if let Some(ref nodes) =
curr_exclude_ancestors.get(&current_generation)
if let Some(ref nodes) = curr_exclude_ancestors.get(&current_generation)
{
should_exclude = Some(nodes.contains(&node));
}
@ -342,6 +341,7 @@ impl Stream for DifferenceOfUnionsOfAncestorsNodeStream {
mod test {
use super::*;
use async_unit;
use context::CoreContext;
use fixtures::linear;
use fixtures::merge_uneven;
use reachabilityindex::SkiplistIndex;
@ -350,6 +350,7 @@ mod test {
#[test]
fn empty_ancestors_combinators() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -363,7 +364,7 @@ mod test {
assert_changesets_sequence(&repo, vec![], stream);
let excludes = vec![
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(ctx, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
];
let stream = DifferenceOfUnionsOfAncestorsNodeStream::new_with_excludes(
@ -380,6 +381,7 @@ mod test {
#[test]
fn linear_ancestors_with_excludes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -388,17 +390,25 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
],
vec![
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
],
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(ctx, &repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
],
nodestream,
);
@ -408,6 +418,7 @@ mod test {
#[test]
fn linear_ancestors_with_excludes_empty() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -416,10 +427,14 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
],
vec![
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(ctx, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
],
).boxify();
@ -430,6 +445,7 @@ mod test {
#[test]
fn ancestors_union() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -438,23 +454,67 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
],
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(&repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(&repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(&repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(&repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
],
nodestream,
);
@ -464,6 +524,7 @@ mod test {
#[test]
fn merge_ancestors_from_merge_excludes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -472,20 +533,40 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
],
vec![
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
],
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(&repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(&repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
string_to_bonsai(ctx, &repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
],
nodestream,
);
@ -495,6 +576,7 @@ mod test {
#[test]
fn merge_ancestors_from_merge_excludes_union() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
@ -503,25 +585,65 @@ mod test {
&changeset_fetcher,
Arc::new(SkiplistIndex::new()),
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
],
vec![
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
],
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(&repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(&repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(&repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(&repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(&repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(&repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
string_to_bonsai(ctx, &repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
],
nodestream,
);

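Because the hunks above split the updated tests into many fragments, here is linear_ancestors_with_excludes reassembled into a single post-change snippet. It is a restatement of the test shown above (same fixture, hashes, and expected output), included only so the full call shape of new_with_excludes together with the ctx-taking string_to_bonsai can be read in one place; it assumes the same test module as the diff.

    #[test]
    fn linear_ancestors_with_excludes() {
        async_unit::tokio_unit_test(|| {
            let ctx = CoreContext::test_mock();
            let repo = Arc::new(linear::getrepo(None));
            let changeset_fetcher: Arc<ChangesetFetcher> =
                Arc::new(TestChangesetFetcher::new(repo.clone()));

            // The stream constructor itself is unchanged; only the bonsai lookups
            // feeding it now need the context.
            let nodestream = DifferenceOfUnionsOfAncestorsNodeStream::new_with_excludes(
                &changeset_fetcher,
                Arc::new(SkiplistIndex::new()),
                vec![string_to_bonsai(
                    ctx.clone(),
                    &repo,
                    "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
                )],
                vec![string_to_bonsai(
                    ctx.clone(),
                    &repo,
                    "0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
                )],
            ).boxify();

            assert_changesets_sequence(
                &repo,
                vec![string_to_bonsai(
                    ctx,
                    &repo,
                    "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
                )],
                nodestream,
            );
        })
    }
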
View File

@ -5,6 +5,7 @@
// GNU General Public License version 2 or any later version.
use blobrepo::BlobRepo;
use context::CoreContext;
use futures::Async;
use futures::Poll;
use futures::stream::Stream;
@ -29,13 +30,16 @@ pub struct IntersectNodeStream {
}
impl IntersectNodeStream {
pub fn new<I>(repo: &Arc<BlobRepo>, inputs: I) -> Self
pub fn new<I>(ctx: CoreContext, repo: &Arc<BlobRepo>, inputs: I) -> Self
where
I: IntoIterator<Item = Box<NodeStream>>,
{
let hash_and_gen = inputs
.into_iter()
.map({ move |i| (add_generations(i, repo.clone()), Ok(Async::NotReady)) });
let hash_and_gen = inputs.into_iter().map(move |i| {
(
add_generations(ctx.clone(), i, repo.clone()),
Ok(Async::NotReady),
)
});
IntersectNodeStream {
inputs: hash_and_gen.collect(),
current_generation: None,
@ -157,6 +161,7 @@ mod test {
use super::*;
use {NodeStream, SingleNodeHash, UnionNodeStream};
use async_unit;
use context::CoreContext;
use fixtures::linear;
use fixtures::unshared_merge_even;
use fixtures::unshared_merge_uneven;
@ -169,67 +174,80 @@ mod test {
#[test]
fn intersect_identical_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(&repo, vec![head_hash.clone()], nodestream);
assert_node_sequence(ctx, &repo, vec![head_hash.clone()], nodestream);
});
}
#[test]
fn intersect_three_different_nodes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
// Note that these are *not* in generation order deliberately.
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(&repo, vec![], nodestream);
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn intersect_three_identical_nodes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
@ -242,31 +260,38 @@ mod test {
#[test]
fn intersect_nesting() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let inputs: Vec<Box<NodeStream>> = vec![
nodestream,
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
@ -279,44 +304,52 @@ mod test {
#[test]
fn intersection_of_unions() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// This set has a different node sequence, so that we can demonstrate that we skip nodes
// when they're not going to contribute.
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream2 = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream2 = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let inputs: Vec<Box<NodeStream>> = vec![nodestream, nodestream2];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
@ -330,14 +363,16 @@ mod test {
#[test]
fn intersect_error_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodehash = string_to_nodehash("0000000000000000000000000000000000000000");
let inputs: Vec<Box<NodeStream>> = vec![
Box::new(RepoErrorStream { hash: nodehash }),
SingleNodeHash::new(nodehash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), nodehash.clone(), &repo).boxed(),
];
let mut nodestream = spawn(IntersectNodeStream::new(&repo, inputs.into_iter()).boxed());
let mut nodestream =
spawn(IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed());
match nodestream.wait_stream() {
Some(Err(err)) => match err_downcast!(err, err: ErrorKind => err) {
@ -354,17 +389,20 @@ mod test {
#[test]
fn intersect_nothing() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
assert_node_sequence(&repo, vec![], nodestream);
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn slow_ready_intersect_nothing() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
// Tests that we handle an input staying at NotReady for a while without panicking

let repeats = 10;
let repo = Arc::new(linear::getrepo(None));
@ -373,7 +411,8 @@ mod test {
poll_count: repeats,
}),
];
let mut nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let mut nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Keep polling until we should be done.
for _ in 0..repeats + 1 {
@ -393,54 +432,67 @@ mod test {
#[test]
fn intersect_unshared_merge_even() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(unshared_merge_even::getrepo(None));
// Post-merge, merge, and both unshared branches
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("7fe9947f101acb4acf7d945e69f0d6ce76a81113"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d592490c4386cdb3373dd93af04d563de199b2fb"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("33fb49d8a47b29290f5163e30b294339c89505a2"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),
&repo,
).boxed(),
];
let left_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Four commits from one branch
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("2fa8b4ee6803a18db4649a3843a723ef1dfe852b"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("0b94a2881dda90f0d64db5fae3ee5695a38e7c8f"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("f61fdc0ddafd63503dcd8eed8994ec685bfc8941"),
&repo,
).boxed(),
];
let right_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let inputs: Vec<Box<NodeStream>> = vec![left_nodestream, right_nodestream];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),
@ -453,54 +505,67 @@ mod test {
#[test]
fn intersect_unshared_merge_uneven() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(unshared_merge_uneven::getrepo(None));
// Post-merge, merge, and both unshared branches
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("c10443fa4198c6abad76dc6c69c1417b2e821508)"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a5ab070634ab9cbdfc92404b3ec648f7e29547bc)"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("64011f64aaf9c2ad2e674f57c033987da4016f51"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),
&repo,
).boxed(),
];
let left_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Four commits from one branch
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("2fa8b4ee6803a18db4649a3843a723ef1dfe852b"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("0b94a2881dda90f0d64db5fae3ee5695a38e7c8f"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("f61fdc0ddafd63503dcd8eed8994ec685bfc8941"),
&repo,
).boxed(),
];
let right_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let inputs: Vec<Box<NodeStream>> = vec![left_nodestream, right_nodestream];
let nodestream = IntersectNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream =
IntersectNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("03b0589d9788870817d03ce7b87516648ed5b33a"),

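For callers of the file above the migration is mechanical: every constructor gains a leading CoreContext, which it clones into add_generations for its generation lookups. A reduced sketch of the change at a construction site; the wrapper function is illustrative only, and the inputs and repo are assumed to come from the surrounding code exactly as in the tests above.

    // Previously: IntersectNodeStream::new(&repo, inputs.into_iter()).boxed()
    fn build_intersection(
        ctx: CoreContext,
        repo: &Arc<BlobRepo>,
        inputs: Vec<Box<NodeStream>>,
    ) -> Box<NodeStream> {
        // The context is passed by value and cloned once per input stream inside new().
        IntersectNodeStream::new(ctx, repo, inputs.into_iter()).boxed()
    }
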
View File

@ -8,6 +8,7 @@ extern crate blobrepo;
extern crate changesets;
#[macro_use]
extern crate cloned;
extern crate context;
#[macro_use]
extern crate failure_ext as failure;
#[macro_use]

View File

@ -18,6 +18,7 @@ use std::iter::Iterator;
use std::sync::Arc;
use blobrepo::{BlobRepo, ChangesetFetcher};
use context::CoreContext;
use mercurial_types::{HgChangesetId, HgNodeHash};
use mononoke_types::ChangesetId;
use reachabilityindex::SkiplistIndex;
@ -54,8 +55,8 @@ pub struct RevsetSpec {
rp_entries: Vec<RevsetEntry>,
}
fn get_changesets_from_repo(repo: &BlobRepo) -> Vec<HgNodeHash> {
let mut all_changesets_executor = spawn(repo.get_changesets());
fn get_changesets_from_repo(ctx: CoreContext, repo: &BlobRepo) -> Vec<HgNodeHash> {
let mut all_changesets_executor = spawn(repo.get_changesets(ctx));
let mut all_changesets: Vec<HgNodeHash> = Vec::new();
loop {
all_changesets.push(match all_changesets_executor.wait_stream() {
@ -69,11 +70,11 @@ fn get_changesets_from_repo(repo: &BlobRepo) -> Vec<HgNodeHash> {
}
impl RevsetSpec {
pub fn add_hashes<G>(&mut self, repo: &BlobRepo, random: &mut G)
pub fn add_hashes<G>(&mut self, ctx: CoreContext, repo: &BlobRepo, random: &mut G)
where
G: Rng,
{
let all_changesets = get_changesets_from_repo(repo);
let all_changesets = get_changesets_from_repo(ctx, repo);
for elem in self.rp_entries.iter_mut() {
if let &mut RevsetEntry::SingleNode(None) = elem {
*elem = RevsetEntry::SingleNode(random.choose(all_changesets.as_slice()).cloned());
@ -118,29 +119,31 @@ impl RevsetSpec {
output.pop().expect("No revisions").into_iter().collect()
}
pub fn as_revset(&self, repo: Arc<BlobRepo>) -> Box<NodeStream> {
pub fn as_revset(&self, ctx: CoreContext, repo: Arc<BlobRepo>) -> Box<NodeStream> {
let mut output: Vec<Box<NodeStream>> = Vec::with_capacity(self.rp_entries.len());
for entry in self.rp_entries.iter() {
let next_node = ValidateNodeStream::new(
ctx.clone(),
match entry {
&RevsetEntry::SingleNode(None) => panic!("You need to add_hashes first!"),
&RevsetEntry::SingleNode(Some(hash)) => {
SingleNodeHash::new(hash, &*repo.clone()).boxed()
SingleNodeHash::new(ctx.clone(), hash, &*repo.clone()).boxed()
}
&RevsetEntry::SetDifference => {
let keep = output.pop().expect("No keep for setdifference");
let remove = output.pop().expect("No remove for setdifference");
SetDifferenceNodeStream::new(&repo.clone(), keep, remove).boxed()
SetDifferenceNodeStream::new(ctx.clone(), &repo.clone(), keep, remove)
.boxed()
}
&RevsetEntry::Union(size) => {
let idx = output.len() - size;
let inputs = output.split_off(idx);
UnionNodeStream::new(&repo.clone(), inputs).boxed()
UnionNodeStream::new(ctx.clone(), &repo.clone(), inputs).boxed()
}
&RevsetEntry::Intersect(size) => {
let idx = output.len() - size;
let inputs = output.split_off(idx);
IntersectNodeStream::new(&repo.clone(), inputs).boxed()
IntersectNodeStream::new(ctx.clone(), &repo.clone(), inputs).boxed()
}
},
&repo.clone(),
@ -255,10 +258,10 @@ fn match_streams(
nodestream.wait_stream().is_none() && expected.is_empty()
}
fn match_hashset_to_revset(repo: Arc<BlobRepo>, mut set: RevsetSpec) -> bool {
set.add_hashes(&*repo, &mut thread_rng());
fn match_hashset_to_revset(ctx: CoreContext, repo: Arc<BlobRepo>, mut set: RevsetSpec) -> bool {
set.add_hashes(ctx.clone(), &*repo, &mut thread_rng());
let mut hashes = set.as_hashes();
let mut nodestream = spawn(set.as_revset(repo));
let mut nodestream = spawn(set.as_revset(ctx, repo));
while !hashes.is_empty() {
let hash = nodestream
@ -280,8 +283,9 @@ macro_rules! quickcheck_setops {
fn $test_name() {
fn prop(set: RevsetSpec) -> bool {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new($repo::getrepo(None));
match_hashset_to_revset(repo, set)
match_hashset_to_revset(ctx, repo, set)
})
}
@ -352,13 +356,17 @@ impl Iterator for IncludeExcludeDiscardCombinationsIterator {
}
}
fn hg_to_bonsai_changesetid(repo: &Arc<BlobRepo>, nodes: Vec<HgNodeHash>) -> Vec<ChangesetId> {
fn hg_to_bonsai_changesetid(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
nodes: Vec<HgNodeHash>,
) -> Vec<ChangesetId> {
stream::iter_ok(nodes.into_iter())
.boxify()
.and_then({
let repo = repo.clone();
move |hash| {
repo.get_bonsai_from_hg(&HgChangesetId::new(hash.clone()))
repo.get_bonsai_from_hg(ctx.clone(), &HgChangesetId::new(hash.clone()))
.map(move |maybe_bonsai| {
maybe_bonsai.expect("Failed to get Bonsai Changeset from HgNodeHash")
})
@ -370,6 +378,7 @@ fn hg_to_bonsai_changesetid(repo: &Arc<BlobRepo>, nodes: Vec<HgNodeHash>) -> Vec
}
fn bonsai_nodestream_to_nodestream(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
stream: Box<BonsaiNodeStream>,
) -> Box<NodeStream> {
@ -377,7 +386,7 @@ fn bonsai_nodestream_to_nodestream(
.and_then({
let repo = repo.clone();
move |bonsai| {
repo.get_hg_from_bonsai_changeset(bonsai)
repo.get_hg_from_bonsai_changeset(ctx.clone(), bonsai)
.map(|cs| cs.into_nodehash())
}
})
@ -389,12 +398,13 @@ macro_rules! ancestors_check {
#[test]
fn $test_name() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new($repo::getrepo(None));
let changeset_fetcher: Arc<ChangesetFetcher> =
Arc::new(TestChangesetFetcher::new(repo.clone()));
let all_changesets = get_changesets_from_repo(&*repo);
let all_changesets = get_changesets_from_repo(ctx.clone(), &*repo);
// Limit the number of changesets, otherwise tests take too much time
let max_changesets = 7;
@ -407,13 +417,13 @@ macro_rules! ancestors_check {
let difference_stream =
create_skiplist(&repo)
.map({
cloned!(changeset_fetcher, exclude, include, repo);
cloned!(ctx, changeset_fetcher, exclude, include, repo);
move |skiplist| {
DifferenceOfUnionsOfAncestorsNodeStream::new_with_excludes(
&changeset_fetcher,
skiplist,
hg_to_bonsai_changesetid(&repo, include.clone()),
hg_to_bonsai_changesetid(&repo, exclude.clone()),
hg_to_bonsai_changesetid(ctx.clone(), &repo, include.clone()),
hg_to_bonsai_changesetid(ctx, &repo, exclude.clone()),
)
}
})
@ -421,14 +431,16 @@ macro_rules! ancestors_check {
.boxify();
let actual = ValidateNodeStream::new(
bonsai_nodestream_to_nodestream(&repo, difference_stream),
ctx.clone(),
bonsai_nodestream_to_nodestream(ctx.clone(), &repo, difference_stream),
&repo.clone(),
);
let mut includes = vec![];
for i in hg_to_bonsai_changesetid(&repo, include.clone()) {
for i in hg_to_bonsai_changesetid(ctx.clone(), &repo, include.clone()) {
includes.push(
bonsai_nodestream_to_nodestream(
ctx.clone(),
&repo,
AncestorsNodeStream::new(&changeset_fetcher, i).boxify()
)
@ -436,18 +448,19 @@ macro_rules! ancestors_check {
}
let mut excludes = vec![];
for i in hg_to_bonsai_changesetid(&repo, exclude.clone()) {
for i in hg_to_bonsai_changesetid(ctx.clone(), &repo, exclude.clone()) {
excludes.push(
bonsai_nodestream_to_nodestream(
ctx.clone(),
&repo,
AncestorsNodeStream::new(&changeset_fetcher, i).boxify()
)
);
}
let includes = UnionNodeStream::new(&repo, includes).boxify();
let excludes = UnionNodeStream::new(&repo, excludes).boxify();
let expected = SetDifferenceNodeStream::new(&repo, includes, excludes);
let includes = UnionNodeStream::new(ctx.clone(), &repo, includes).boxify();
let excludes = UnionNodeStream::new(ctx.clone(), &repo, excludes).boxify();
let expected = SetDifferenceNodeStream::new(ctx.clone(), &repo, includes, excludes);
assert!(
match_streams(expected.boxify(), actual.boxify()),

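To summarize the flow in the test file above after this change: picking concrete hashes for a RevsetSpec walks the repo, so it needs the context, while evaluating the spec as a plain hash set stays context-free. A sketch of the round trip, assuming the same module so RevsetSpec, thread_rng, and the fixture repos are in scope as shown above; the function itself is illustrative, not part of the commit.

    fn spec_round_trip_sketch(ctx: CoreContext, repo: Arc<BlobRepo>, mut set: RevsetSpec) {
        // Hash selection reads repo.get_changesets(ctx) under the hood.
        set.add_hashes(ctx.clone(), &*repo, &mut thread_rng());
        // Pure hash evaluation needs no context...
        let expected = set.as_hashes();
        // ...but building node streams threads it into every SingleNodeHash,
        // Union, Intersect, and SetDifference the spec contains.
        let actual = set.as_revset(ctx, repo);
        // match_hashset_to_revset (above) then drains `actual` and compares it
        // against `expected`.
        let _ = (expected, actual);
    }
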
View File

@ -228,6 +228,7 @@ impl Stream for RangeNodeStream {
mod test {
use super::*;
use async_unit;
use context::CoreContext;
use fixtures::linear;
use fixtures::merge_uneven;
use futures_ext::StreamExt;
@ -235,9 +236,9 @@ mod test {
use tests::assert_changesets_sequence;
use tests::string_to_nodehash;
fn string_to_bonsai(repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
fn string_to_bonsai(ctx: CoreContext, repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(&HgChangesetId::new(node))
repo.get_bonsai_from_hg(ctx, &HgChangesetId::new(node))
.wait()
.unwrap()
.unwrap()
@ -246,22 +247,47 @@ mod test {
#[test]
fn linear_range() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(&repo, "eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b"),
string_to_bonsai(&repo, "cb15ca4a43a59acff5388cea9648c162afde8372"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
string_to_bonsai(
ctx.clone(),
&repo,
"eed3a8c0ec67b6a6fe2eb3543334df3f0b4f202b",
),
string_to_bonsai(
ctx.clone(),
&repo,
"cb15ca4a43a59acff5388cea9648c162afde8372",
),
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
],
nodestream,
);
@ -271,19 +297,32 @@ mod test {
#[test]
fn linear_direct_parent_range() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(
ctx.clone(),
&repo,
"0ed509bf086fadcb8a8a5384dc3b550729b0fc17",
),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(ctx, &repo, "0ed509bf086fadcb8a8a5384dc3b550729b0fc17"),
],
nodestream,
);
@ -293,18 +332,27 @@ mod test {
#[test]
fn linear_single_node_range() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d0a361e9022d226ae52f689667bd7d212a19cfe0",
),
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
],
nodestream,
);
@ -314,13 +362,18 @@ mod test {
#[test]
fn linear_empty_range() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
// These are swapped, so won't find anything
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
string_to_bonsai(&repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
string_to_bonsai(
ctx.clone(),
&repo,
"a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157",
),
string_to_bonsai(ctx, &repo, "d0a361e9022d226ae52f689667bd7d212a19cfe0"),
).boxify();
assert_changesets_sequence(&repo, vec![], nodestream);
@ -330,20 +383,37 @@ mod test {
#[test]
fn merge_range_from_merge() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(ctx, &repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
],
nodestream,
);
@ -353,30 +423,87 @@ mod test {
#[test]
fn merge_range_everything() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
let nodestream = RangeNodeStream::new(
&repo,
string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(
ctx.clone(),
&repo,
"15c40d0abc36d47fb51c8eaec51ac7aad31f669c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
).boxify();
assert_changesets_sequence(
&repo,
vec![
string_to_bonsai(&repo, "6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
string_to_bonsai(&repo, "264f01429683b3dd8042cb3979e8bf37007118bc"),
string_to_bonsai(&repo, "5d43888a3c972fe68c224f93d41b30e9f888df7c"),
string_to_bonsai(&repo, "fc2cef43395ff3a7b28159007f63d6529d2f41ca"),
string_to_bonsai(&repo, "bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
string_to_bonsai(&repo, "795b8133cf375f6d68d27c6c23db24cd5d0cd00f"),
string_to_bonsai(&repo, "4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
string_to_bonsai(&repo, "16839021e338500b3cf7c9b871c8a07351697d68"),
string_to_bonsai(&repo, "1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
string_to_bonsai(&repo, "b65231269f651cfe784fd1d97ef02a049a37b8a0"),
string_to_bonsai(&repo, "d7542c9db7f4c77dab4b315edd328edf1514952f"),
string_to_bonsai(&repo, "3cda5c78aa35f0f5b09780d971197b51cad4613a"),
string_to_bonsai(&repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
string_to_bonsai(
ctx.clone(),
&repo,
"6d0c1c30df4acb4e64cb4c4868d4c974097da055",
),
string_to_bonsai(
ctx.clone(),
&repo,
"264f01429683b3dd8042cb3979e8bf37007118bc",
),
string_to_bonsai(
ctx.clone(),
&repo,
"5d43888a3c972fe68c224f93d41b30e9f888df7c",
),
string_to_bonsai(
ctx.clone(),
&repo,
"fc2cef43395ff3a7b28159007f63d6529d2f41ca",
),
string_to_bonsai(
ctx.clone(),
&repo,
"bc7b4d0f858c19e2474b03e442b8495fd7aeef33",
),
string_to_bonsai(
ctx.clone(),
&repo,
"795b8133cf375f6d68d27c6c23db24cd5d0cd00f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"4f7f3fd428bec1a48f9314414b063c706d9c1aed",
),
string_to_bonsai(
ctx.clone(),
&repo,
"16839021e338500b3cf7c9b871c8a07351697d68",
),
string_to_bonsai(
ctx.clone(),
&repo,
"1d8a907f7b4bf50c6a09c16361e2205047ecc5e5",
),
string_to_bonsai(
ctx.clone(),
&repo,
"b65231269f651cfe784fd1d97ef02a049a37b8a0",
),
string_to_bonsai(
ctx.clone(),
&repo,
"d7542c9db7f4c77dab4b315edd328edf1514952f",
),
string_to_bonsai(
ctx.clone(),
&repo,
"3cda5c78aa35f0f5b09780d971197b51cad4613a",
),
string_to_bonsai(ctx, &repo, "15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
],
nodestream,
);

View File

@ -5,6 +5,7 @@
// GNU General Public License version 2 or any later version.
use blobrepo::{BlobRepo, ChangesetFetcher};
use context::CoreContext;
use failure::prelude::*;
use futures::future::Future;
use futures::stream::Stream;
@ -25,9 +26,13 @@ pub type InputStream = Box<Stream<Item = (HgNodeHash, Generation), Error = Error
pub type BonsaiInputStream =
Box<Stream<Item = (ChangesetId, Generation), Error = Error> + 'static + Send>;
pub fn add_generations(stream: Box<NodeStream>, repo: Arc<BlobRepo>) -> InputStream {
pub fn add_generations(
ctx: CoreContext,
stream: Box<NodeStream>,
repo: Arc<BlobRepo>,
) -> InputStream {
let stream = stream.and_then(move |node_hash| {
repo.get_generation_number(&HgChangesetId::new(node_hash))
repo.get_generation_number(ctx.clone(), &HgChangesetId::new(node_hash))
.and_then(move |genopt| {
genopt.ok_or_else(|| err_msg(format!("{} not found", node_hash)))
})

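The helper above also shows the clone discipline used throughout this commit: the context is moved into the stream combinator once, and each element then gets its own clone, because the closure runs for every node. A reduced sketch of that idiom using only the lookup call shown above; the wrapper function and its return type are illustrative, not part of the commit.

    fn generations_of(
        ctx: CoreContext,
        stream: Box<NodeStream>,
        repo: Arc<BlobRepo>,
    ) -> Box<Stream<Item = Option<Generation>, Error = Error> + Send> {
        Box::new(stream.and_then(move |node_hash| {
            // One clone per element: the closure keeps its captured `ctx` alive
            // for the next node and hands a fresh copy to each lookup.
            repo.get_generation_number(ctx.clone(), &HgChangesetId::new(node_hash))
        }))
    }
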
View File

@ -5,6 +5,7 @@
// GNU General Public License version 2 or any later version.
use blobrepo::BlobRepo;
use context::CoreContext;
use futures::{Async, Poll};
use futures::stream::Stream;
use mercurial_types::HgNodeHash;
@ -30,14 +31,15 @@ pub struct SetDifferenceNodeStream {
impl SetDifferenceNodeStream {
pub fn new(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
keep_input: Box<NodeStream>,
remove_input: Box<NodeStream>,
) -> SetDifferenceNodeStream {
SetDifferenceNodeStream {
keep_input: add_generations(keep_input, repo.clone()),
keep_input: add_generations(ctx.clone(), keep_input, repo.clone()),
next_keep: Async::NotReady,
remove_input: add_generations(remove_input, repo.clone()),
remove_input: add_generations(ctx, remove_input, repo.clone()),
next_remove: Async::NotReady,
remove_nodes: HashSet::new(),
@ -115,6 +117,7 @@ mod test {
use SingleNodeHash;
use UnionNodeStream;
use async_unit;
use context::CoreContext;
use fixtures::linear;
use fixtures::merge_even;
use fixtures::merge_uneven;
@ -127,69 +130,80 @@ mod test {
#[test]
fn difference_identical_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
).boxed();
assert_node_sequence(&repo, vec![], nodestream);
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn difference_node_and_empty() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
Box::new(NotReadyEmptyStream { poll_count: 0 }),
).boxed();
assert_node_sequence(&repo, vec![head_hash], nodestream);
assert_node_sequence(ctx, &repo, vec![head_hash], nodestream);
});
}
#[test]
fn difference_empty_and_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
Box::new(NotReadyEmptyStream { poll_count: 0 }),
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
).boxed();
assert_node_sequence(&repo, vec![], nodestream);
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn difference_two_nodes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
@ -202,14 +216,16 @@ mod test {
#[test]
fn difference_error_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodehash = string_to_nodehash("0000000000000000000000000000000000000000");
let mut nodestream = spawn(
SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
Box::new(RepoErrorStream { hash: nodehash }),
SingleNodeHash::new(nodehash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), nodehash.clone(), &repo).boxed(),
).boxed(),
);
@ -228,10 +244,12 @@ mod test {
#[test]
fn slow_ready_difference_nothing() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
// Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(None));
let mut nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
Box::new(NotReadyEmptyStream {
poll_count: repeats,
@ -259,34 +277,41 @@ mod test {
#[test]
fn difference_union_with_single_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
nodestream,
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
@ -300,84 +325,102 @@ mod test {
#[test]
fn difference_single_node_with_union() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let nodestream = SetDifferenceNodeStream::new(
ctx.clone(),
&repo,
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
nodestream,
).boxed();
assert_node_sequence(&repo, vec![], nodestream);
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn difference_merge_even() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_even::getrepo(None));
// Top three commits in my hg log -G -r 'all()' output
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("6120679e1fedb0b2f3717bbf042e5fd718763042"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("16839021e338500b3cf7c9b871c8a07351697d68"),
&repo,
).boxed(),
];
let left_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Everything from base to just before merge on one side
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("b65231269f651cfe784fd1d97ef02a049a37b8a0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d7542c9db7f4c77dab4b315edd328edf1514952f"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
&repo,
).boxed(),
];
let right_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let nodestream =
SetDifferenceNodeStream::new(&repo, left_nodestream, right_nodestream).boxed();
SetDifferenceNodeStream::new(ctx.clone(), &repo, left_nodestream, right_nodestream)
.boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("6120679e1fedb0b2f3717bbf042e5fd718763042"),
@ -391,50 +434,62 @@ mod test {
#[test]
fn difference_merge_uneven() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(merge_uneven::getrepo(None));
// Merge commit, and one from each branch
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("6d0c1c30df4acb4e64cb4c4868d4c974097da055"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("16839021e338500b3cf7c9b871c8a07351697d68"),
&repo,
).boxed(),
];
let left_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let left_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Everything from base to just before merge on one side
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("16839021e338500b3cf7c9b871c8a07351697d68"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("1d8a907f7b4bf50c6a09c16361e2205047ecc5e5"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3cda5c78aa35f0f5b09780d971197b51cad4613a"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("15c40d0abc36d47fb51c8eaec51ac7aad31f669c"),
&repo,
).boxed(),
];
let right_nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let right_nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let nodestream =
SetDifferenceNodeStream::new(&repo, left_nodestream, right_nodestream).boxed();
SetDifferenceNodeStream::new(ctx.clone(), &repo, left_nodestream, right_nodestream)
.boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("6d0c1c30df4acb4e64cb4c4868d4c974097da055"),

View File

@ -7,6 +7,7 @@
use std::boxed::Box;
use blobrepo::BlobRepo;
use context::CoreContext;
use failure::Error;
use futures::{Async, Poll};
use futures::future::Future;
@ -22,9 +23,9 @@ pub struct SingleNodeHash {
}
impl SingleNodeHash {
pub fn new(nodehash: HgNodeHash, repo: &BlobRepo) -> Self {
pub fn new(ctx: CoreContext, nodehash: HgNodeHash, repo: &BlobRepo) -> Self {
let changesetid = HgChangesetId::new(nodehash);
let exists = Box::new(repo.changeset_exists(&changesetid));
let exists = Box::new(repo.changeset_exists(ctx, &changesetid));
let nodehash = Some(nodehash);
SingleNodeHash { nodehash, exists }
}
@ -58,6 +59,7 @@ impl Stream for SingleNodeHash {
mod test {
use super::*;
use async_unit;
use context::CoreContext;
use fixtures::linear;
use std::sync::Arc;
use tests::assert_node_sequence;
@ -66,13 +68,16 @@ mod test {
#[test]
fn valid_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a"),
&repo,
);
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a"),
@ -85,11 +90,12 @@ mod test {
#[test]
fn invalid_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodehash = string_to_nodehash("1000000000000000000000000000000000000000");
let nodestream = SingleNodeHash::new(nodehash, &repo).boxed();
let nodestream = SingleNodeHash::new(ctx.clone(), nodehash, &repo).boxed();
assert_node_sequence(&repo, vec![].into_iter(), nodestream);
assert_node_sequence(ctx, &repo, vec![].into_iter(), nodestream);
});
}
}

View File

@ -6,6 +6,7 @@
use NodeStream;
use blobrepo::{BlobRepo, ChangesetFetcher};
use context::CoreContext;
use failure::{err_msg, Error};
use futures::{Future, Stream};
use futures::executor::spawn;
@ -22,9 +23,9 @@ pub fn string_to_nodehash(hash: &'static str) -> HgNodeHash {
HgNodeHash::from_static_str(hash).expect("Can't turn string to HgNodeHash")
}
pub fn string_to_bonsai(repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
pub fn string_to_bonsai(ctx: CoreContext, repo: &Arc<BlobRepo>, s: &'static str) -> ChangesetId {
let node = string_to_nodehash(s);
repo.get_bonsai_from_hg(&HgChangesetId::new(node))
repo.get_bonsai_from_hg(ctx, &HgChangesetId::new(node))
.wait()
.unwrap()
.unwrap()
@ -60,8 +61,12 @@ impl ChangesetFetcher for TestChangesetFetcher {
// TODO(stash): remove assert_node_sequence, use assert_changesets_sequence instead
/// Accounting for reordering within generations, ensure that a NodeStream gives the expected
/// NodeHashes for testing.
pub fn assert_node_sequence<I>(repo: &Arc<BlobRepo>, hashes: I, stream: Box<NodeStream>)
where
pub fn assert_node_sequence<I>(
ctx: CoreContext,
repo: &Arc<BlobRepo>,
hashes: I,
stream: Box<NodeStream>,
) where
I: IntoIterator<Item = HgNodeHash>,
{
let mut nodestream = spawn(stream);
@ -74,7 +79,7 @@ where
}
let expected_generation = repo.clone()
.get_generation_number(&HgChangesetId::new(expected))
.get_generation_number(ctx.clone(), &HgChangesetId::new(expected))
.wait()
.expect("Unexpected error");
@ -90,7 +95,7 @@ where
}
let node_generation = repo.clone()
.get_generation_number(&HgChangesetId::new(expected))
.get_generation_number(ctx.clone(), &HgChangesetId::new(expected))
.wait()
.expect("Unexpected error");
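The same rule reaches the bonsai helpers: BlobRepo::get_bonsai_from_hg and get_generation_number now take the context, so string_to_bonsai and assert_node_sequence accept one and forward it. A minimal caller sketch, assuming the linear fixture and the helper signature as updated above; the wrapper fn name is illustrative.

use std::sync::Arc;

use context::CoreContext;
use fixtures::linear;

fn sketch_bonsai_lookup() {
    let ctx = CoreContext::test_mock();
    let repo = Arc::new(linear::getrepo(None));

    // string_to_bonsai forwards ctx into repo.get_bonsai_from_hg(...) and
    // blocks on the lookup, so callers thread a clone through just like the
    // stream constructors do.
    let _bcs_id = string_to_bonsai(
        ctx.clone(),
        &repo,
        "a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
    );
}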

View File

@ -5,6 +5,7 @@
// GNU General Public License version 2 or any later version.
use blobrepo::BlobRepo;
use context::CoreContext;
use futures::Async;
use futures::Poll;
use futures::stream::Stream;
@ -30,13 +31,16 @@ pub struct UnionNodeStream {
}
impl UnionNodeStream {
pub fn new<I>(repo: &Arc<BlobRepo>, inputs: I) -> Self
pub fn new<I>(ctx: CoreContext, repo: &Arc<BlobRepo>, inputs: I) -> Self
where
I: IntoIterator<Item = Box<NodeStream>>,
{
let hash_and_gen = inputs
.into_iter()
.map({ move |i| (add_generations(i, repo.clone()), Ok(Async::NotReady)) });
let hash_and_gen = inputs.into_iter().map(move |i| {
(
add_generations(ctx.clone(), i, repo.clone()),
Ok(Async::NotReady),
)
});
UnionNodeStream {
inputs: hash_and_gen.collect(),
current_generation: None,
@ -149,6 +153,7 @@ mod test {
use super::*;
use {NodeStream, SingleNodeHash};
use async_unit;
use context::CoreContext;
use errors::ErrorKind;
use fixtures::{branch_even, branch_uneven, branch_wide, linear};
use futures::executor::spawn;
@ -160,30 +165,33 @@ mod test {
#[test]
fn union_identical_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), head_hash.clone(), &repo).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(&repo, vec![head_hash.clone()], nodestream);
assert_node_sequence(ctx, &repo, vec![head_hash.clone()], nodestream);
});
}
#[test]
fn union_error_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodehash = string_to_nodehash("0000000000000000000000000000000000000000");
let inputs: Vec<Box<NodeStream>> = vec![
Box::new(RepoErrorStream { hash: nodehash }),
SingleNodeHash::new(nodehash.clone(), &repo).boxed(),
SingleNodeHash::new(ctx.clone(), nodehash.clone(), &repo).boxed(),
];
let mut nodestream = spawn(UnionNodeStream::new(&repo, inputs.into_iter()).boxed());
let mut nodestream =
spawn(UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed());
match nodestream.wait_stream() {
Some(Err(err)) => match err_downcast!(err, err: ErrorKind => err) {
@ -200,27 +208,32 @@ mod test {
#[test]
fn union_three_nodes() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
// Note that these are *not* in generation order deliberately.
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// But, once I hit the asserts, I expect them in generation order.
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
@ -235,43 +248,49 @@ mod test {
#[test]
fn union_nothing() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let inputs: Vec<Box<NodeStream>> = vec![];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
assert_node_sequence(&repo, vec![], nodestream);
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(ctx, &repo, vec![], nodestream);
});
}
#[test]
fn union_nesting() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
// Note that these are *not* in generation order deliberately.
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d0a361e9022d226ae52f689667bd7d212a19cfe0"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
let inputs: Vec<Box<NodeStream>> = vec![
nodestream,
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("a9473beb2eb03ddb1cccc3fbaeb8a4820f9cd157"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
@ -286,6 +305,7 @@ mod test {
#[test]
fn slow_ready_union_nothing() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
            // Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(None));
@ -294,7 +314,8 @@ mod test {
poll_count: repeats,
}),
];
let mut nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let mut nodestream =
UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
// Keep polling until we should be done.
for _ in 0..repeats + 1 {
@ -314,26 +335,31 @@ mod test {
#[test]
fn union_branch_even_repo() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(branch_even::getrepo(None));
// Two nodes should share the same generation number
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3cda5c78aa35f0f5b09780d971197b51cad4613a"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d7542c9db7f4c77dab4b315edd328edf1514952f"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
@ -348,34 +374,41 @@ mod test {
#[test]
fn union_branch_uneven_repo() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(branch_uneven::getrepo(None));
// Two nodes should share the same generation number
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3cda5c78aa35f0f5b09780d971197b51cad4613a"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("d7542c9db7f4c77dab4b315edd328edf1514952f"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4f7f3fd428bec1a48f9314414b063c706d9c1aed"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("bc7b4d0f858c19e2474b03e442b8495fd7aeef33"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("264f01429683b3dd8042cb3979e8bf37007118bc"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("264f01429683b3dd8042cb3979e8bf37007118bc"),
@ -392,30 +425,36 @@ mod test {
#[test]
fn union_branch_wide_repo() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(branch_wide::getrepo(None));
// Two nodes should share the same generation number
let inputs: Vec<Box<NodeStream>> = vec![
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("49f53ab171171b3180e125b918bd1cf0af7e5449"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("4685e9e62e4885d477ead6964a7600c750e39b03"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("c27ef5b7f15e9930e5b93b1f32cc2108a2aabe12"),
&repo,
).boxed(),
SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("9e8521affb7f9d10e9551a99c526e69909042b20"),
&repo,
).boxed(),
];
let nodestream = UnionNodeStream::new(&repo, inputs.into_iter()).boxed();
let nodestream = UnionNodeStream::new(ctx.clone(), &repo, inputs.into_iter()).boxed();
assert_node_sequence(
ctx,
&repo,
vec![
string_to_nodehash("49f53ab171171b3180e125b918bd1cf0af7e5449"),

View File

@ -8,6 +8,7 @@ use std::collections::HashSet;
use std::sync::Arc;
use blobrepo::BlobRepo;
use context::CoreContext;
use failure::Error;
use futures::{Async, Poll};
use futures::stream::Stream;
@ -28,9 +29,13 @@ pub struct ValidateNodeStream {
}
impl ValidateNodeStream {
pub fn new(wrapped: Box<NodeStream>, repo: &Arc<BlobRepo>) -> ValidateNodeStream {
pub fn new(
ctx: CoreContext,
wrapped: Box<NodeStream>,
repo: &Arc<BlobRepo>,
) -> ValidateNodeStream {
ValidateNodeStream {
wrapped: add_generations(wrapped, repo.clone()),
wrapped: add_generations(ctx, wrapped, repo.clone()),
last_generation: None,
seen_hashes: HashSet::new(),
}
@ -85,24 +90,28 @@ mod test {
#[test]
fn validate_accepts_single_node() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let nodestream = SingleNodeHash::new(head_hash, &repo);
let nodestream = SingleNodeHash::new(ctx.clone(), head_hash, &repo);
let nodestream = ValidateNodeStream::new(Box::new(nodestream), &repo).boxed();
assert_node_sequence(&repo, vec![head_hash], nodestream);
let nodestream =
ValidateNodeStream::new(ctx.clone(), Box::new(nodestream), &repo).boxed();
assert_node_sequence(ctx, &repo, vec![head_hash], nodestream);
});
}
#[test]
fn slow_ready_validates() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
            // Tests that we handle an input staying at NotReady for a while without panicking
let repeats = 10;
let repo = Arc::new(linear::getrepo(None));
let mut nodestream = ValidateNodeStream::new(
ctx,
Box::new(NotReadyEmptyStream {
poll_count: repeats,
}),
@ -128,13 +137,14 @@ mod test {
#[should_panic]
fn repeat_hash_panics() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let head_hash = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
let nodestream =
SingleNodeHash::new(head_hash, &repo).chain(SingleNodeHash::new(head_hash, &repo));
let nodestream = SingleNodeHash::new(ctx.clone(), head_hash, &repo)
.chain(SingleNodeHash::new(ctx.clone(), head_hash, &repo));
let mut nodestream = ValidateNodeStream::new(Box::new(nodestream), &repo).boxed();
let mut nodestream = ValidateNodeStream::new(ctx, Box::new(nodestream), &repo).boxed();
loop {
match nodestream.poll() {
@ -149,17 +159,20 @@ mod test {
#[should_panic]
fn wrong_order_panics() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let nodestream = SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("cb15ca4a43a59acff5388cea9648c162afde8372"),
&repo,
).chain(SingleNodeHash::new(
ctx.clone(),
string_to_nodehash("3c15267ebf11807f3d772eb891272b911ec68759"),
&repo,
));
let mut nodestream = ValidateNodeStream::new(Box::new(nodestream), &repo).boxed();
let mut nodestream = ValidateNodeStream::new(ctx, Box::new(nodestream), &repo).boxed();
loop {
match nodestream.poll() {

View File

@ -5,16 +5,19 @@
// GNU General Public License version 2 or any later version.
extern crate scuba_ext;
#[macro_use]
extern crate slog;
extern crate tracing;
extern crate uuid;
use scuba_ext::ScubaSampleBuilder;
use slog::Logger;
use tracing::TraceContext;
use uuid::Uuid;
#[derive(Debug, Clone)]
pub struct CoreContext<T> {
pub session: T,
pub struct CoreContext {
pub session: Uuid,
pub logger: Logger,
pub scuba: ScubaSampleBuilder,
// Logging some prod wireproto requests to scribe so that they can be later replayed on
@ -23,8 +26,18 @@ pub struct CoreContext<T> {
pub trace: TraceContext,
}
impl<T> CoreContext<T> {
pub fn session(&self) -> &T {
impl CoreContext {
pub fn test_mock() -> Self {
Self {
session: Uuid::new_v4(),
logger: Logger::root(::slog::Discard, o!()),
scuba: ScubaSampleBuilder::with_discard(),
wireproto_scribe_category: None,
trace: TraceContext::default(),
}
}
pub fn session(&self) -> &Uuid {
&self.session
}
pub fn logger(&self) -> &Logger {
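With the generic parameter gone, CoreContext is now a concrete bundle: a Uuid session id, a Logger, a ScubaSampleBuilder, the optional wireproto scribe category, and a TraceContext. Tests get one from test_mock(); server code keeps building it as a struct literal, as the request_handler change further down in this diff does. A minimal usage sketch, assuming only the signatures shown here; the wrapper fn name is illustrative.

use context::CoreContext;

fn sketch_context_usage() {
    // A throwaway session: random Uuid, discard logger, discard Scuba builder,
    // no scribe category, default trace.
    let ctx = CoreContext::test_mock();

    // Accessors expose the concrete pieces; cloning the whole context is how
    // it gets captured into futures and passed down the call stack.
    let _session = ctx.session();
    let _logger = ctx.logger();
    let _child = ctx.clone();
}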

View File

@ -15,6 +15,7 @@ use sql::myrouter;
use blobstore::Blobstore;
use cache_warmup::cache_warmup;
use context::CoreContext;
use hooks::{HookManager, hook_loader::load_hooks};
use mercurial_types::RepositoryId;
use metaconfig::repoconfig::{RepoConfig, RepoType};
@ -136,9 +137,11 @@ pub fn repo_handlers(
// TODO (T32873881): Arc<BlobRepo> should become BlobRepo
let initial_warmup = ensure_myrouter_ready.and_then({
cloned!(reponame, listen_log);
// TODO(T37478150, luk): this is not a test use case, need to address this later
let ctx = CoreContext::test_mock();
let blobrepo = repo.blobrepo().clone();
move |()| {
cache_warmup(Arc::new(blobrepo), config.cache_warmup, listen_log)
cache_warmup(ctx, Arc::new(blobrepo), config.cache_warmup, listen_log)
.chain_err(format!("while warming up cache for repo: {}", reponame))
.from_err()
}

View File

@ -121,7 +121,7 @@ pub fn request_handler(
scuba_logger.log_with_msg("Connection established", None);
let ctxt = CoreContext {
let ctx = CoreContext {
session: session_uuid,
logger: conn_log.clone(),
scuba: scuba_logger.clone(),
@ -132,7 +132,7 @@ pub fn request_handler(
// Construct a hg protocol handler
let proto_handler = HgProtoHandler::new(
stdin,
RepoClient::new(repo.clone(), ctxt, hash_validation_percentage, lca_hint),
RepoClient::new(repo.clone(), ctx, hash_validation_percentage, lca_hint),
sshproto::HgSshCommandDecode,
sshproto::HgSshCommandEncode,
&conn_log,

View File

@ -29,6 +29,7 @@ extern crate blobrepo;
extern crate bookmarks;
extern crate cachelib;
extern crate cmdlib;
extern crate context;
extern crate mercurial_types;
extern crate metaconfig;
extern crate panichandler;
@ -43,6 +44,7 @@ use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use clap::{App, ArgMatches};
use context::CoreContext;
use failure::SlogKVError;
use futures::Future;
use slog::{Drain, Level, Logger};
@ -125,6 +127,8 @@ fn get_config<'a>(
) -> Result<RepoConfigs> {
// TODO: This needs to cope with blob repos, too
let crpath = PathBuf::from(matches.value_of("crpath").unwrap());
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let config_repo = BlobRepo::new_rocksdb(
logger.new(o!["repo" => "Config repo"]),
&crpath,
@ -136,7 +140,7 @@ fn get_config<'a>(
let book = bookmarks::Bookmark::new(book).expect("book must be ascii");
println!("Looking for bookmark {:?}", book);
runtime
.block_on(config_repo.get_bookmark(&book))?
.block_on(config_repo.get_bookmark(ctx, &book))?
.expect("bookmark not found")
}
None => mercurial_types::nodehash::HgChangesetId::from_str(

View File

@ -12,6 +12,7 @@ extern crate blobrepo;
extern crate blobstore;
extern crate bookmarks;
extern crate bytes;
extern crate context;
extern crate futures;
#[macro_use]
extern crate maplit;
@ -25,6 +26,7 @@ use std::str::FromStr;
use blobrepo::{save_bonsai_changesets, BlobRepo};
use bookmarks::Bookmark;
use bytes::Bytes;
use context::CoreContext;
use futures::future::{join_all, Future};
use mercurial_types::{HgChangesetId, MPath};
use mononoke_types::{BonsaiChangesetMut, DateTime, FileChange, FileContents, FileType};
@ -60,6 +62,7 @@ fn create_bonsai_changeset_from_test_data(
files: BTreeMap<&str, Option<&str>>,
commit_metadata: BTreeMap<&str, &str>,
) {
let ctx = CoreContext::test_mock();
let file_changes = store_files(files, blobrepo.clone());
let date: Vec<_> = commit_metadata
.get("author_date")
@ -76,7 +79,7 @@ fn create_bonsai_changeset_from_test_data(
.map(|s| HgChangesetId::from_str(s).unwrap())
.map(|p| {
blobrepo
.get_bonsai_from_hg(&p)
.get_bonsai_from_hg(ctx.clone(), &p)
.map(|maybe_cs| maybe_cs.unwrap())
});
@ -101,7 +104,7 @@ fn create_bonsai_changeset_from_test_data(
.unwrap();
let hg_cs = blobrepo
.get_hg_from_bonsai_changeset(bcs_id)
.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()
.unwrap();