diff --git a/apiserver/src/actor/model.rs b/apiserver/src/actor/model.rs index 6c9d551245..a9ce43ecf5 100644 --- a/apiserver/src/actor/model.rs +++ b/apiserver/src/actor/model.rs @@ -13,6 +13,7 @@ use chrono::{DateTime, FixedOffset}; use failure::{err_msg, Error}; use blobrepo::HgBlobChangeset; +use context::CoreContext; use futures::prelude::*; use futures_ext::{spawn_future, BoxFuture, FutureExt}; use mercurial_types::{Changeset as HgChangeset, Entry as HgEntry, Type}; @@ -77,7 +78,10 @@ pub struct EntryWithSizeAndContentHash { } impl EntryWithSizeAndContentHash { - pub fn materialize_future(entry: Box) -> BoxFuture { + pub fn materialize_future( + ctx: CoreContext, + entry: Box, + ) -> BoxFuture { let name = try_boxfuture!( entry .get_name() @@ -89,7 +93,7 @@ impl EntryWithSizeAndContentHash { let ttype = entry.get_type().into(); let hash = entry.get_hash().to_string(); - spawn_future(entry.get_content().and_then(move |content| { + spawn_future(entry.get_content(ctx).and_then(move |content| { let size = match &content { Content::File(contents) | Content::Executable(contents) diff --git a/apiserver/src/actor/repo.rs b/apiserver/src/actor/repo.rs index 2069ad96ea..32e9554a8d 100644 --- a/apiserver/src/actor/repo.rs +++ b/apiserver/src/actor/repo.rs @@ -89,6 +89,7 @@ impl MononokeRepo { fn get_raw_file( &self, + ctx: CoreContext, changeset: String, path: String, ) -> BoxFuture { @@ -101,7 +102,7 @@ impl MononokeRepo { let changesetid = try_boxfuture!(FS::get_changeset_id(changeset)); let repo = self.repo.clone(); - api::get_content_by_path(repo, changesetid, Some(mpath)) + api::get_content_by_path(ctx, repo, changesetid, Some(mpath)) .and_then(move |content| match content { Content::File(content) | Content::Executable(content) @@ -175,11 +176,15 @@ impl MononokeRepo { .boxify() } - fn get_blob_content(&self, hash: String) -> BoxFuture { + fn get_blob_content( + &self, + ctx: CoreContext, + hash: String, + ) -> BoxFuture { let blobhash = try_boxfuture!(FS::get_nodehash(&hash)); self.repo - .get_file_content(&blobhash) + .get_file_content(ctx, &blobhash) .and_then(move |content| match content { FileContents::Bytes(content) => { Ok(MononokeRepoResponse::GetBlobContent { content }) @@ -191,6 +196,7 @@ impl MononokeRepo { fn list_directory( &self, + ctx: CoreContext, changeset: String, path: String, ) -> BoxFuture { @@ -202,7 +208,7 @@ impl MononokeRepo { let changesetid = try_boxfuture!(FS::get_changeset_id(changeset)); let repo = self.repo.clone(); - api::get_content_by_path(repo, changesetid, mpath) + api::get_content_by_path(ctx, repo, changesetid, mpath) .and_then(move |content| match content { Content::Tree(tree) => Ok(tree), _ => Err(ErrorKind::InvalidInput(path.to_string(), None).into()), @@ -218,16 +224,19 @@ impl MononokeRepo { .boxify() } - fn get_tree(&self, hash: String) -> BoxFuture { + fn get_tree( + &self, + ctx: CoreContext, + hash: String, + ) -> BoxFuture { let treehash = try_boxfuture!(FS::get_nodehash(&hash)); let treemanifestid = HgManifestId::new(treehash); self.repo - .get_manifest_by_nodeid(&treemanifestid) - .map(|tree| { - join_all( - tree.list() - .map(|entry| EntryWithSizeAndContentHash::materialize_future(entry)), - ) + .get_manifest_by_nodeid(ctx.clone(), &treemanifestid) + .map(move |tree| { + join_all(tree.list().map(move |entry| { + EntryWithSizeAndContentHash::materialize_future(ctx.clone(), entry) + })) }) .flatten() .map(|files| MononokeRepoResponse::GetTree { files }) @@ -235,22 +244,30 @@ impl MononokeRepo { .boxify() } - fn get_changeset(&self, hash: 
String) -> BoxFuture { + fn get_changeset( + &self, + ctx: CoreContext, + hash: String, + ) -> BoxFuture { let changesetid = try_boxfuture!(FS::get_changeset_id(hash)); self.repo - .get_changeset_by_changesetid(&changesetid) + .get_changeset_by_changesetid(ctx, &changesetid) .and_then(|changeset| changeset.try_into().map_err(From::from)) .map(|changeset| MononokeRepoResponse::GetChangeset { changeset }) .from_err() .boxify() } - fn download_large_file(&self, oid: String) -> BoxFuture { + fn download_large_file( + &self, + ctx: CoreContext, + oid: String, + ) -> BoxFuture { let sha256_oid = try_boxfuture!(FS::get_sha256_oid(oid)); self.repo - .get_file_content_by_alias(sha256_oid) + .get_file_content_by_alias(ctx, sha256_oid) .and_then(move |content| match content { FileContents::Bytes(content) => { Ok(MononokeRepoResponse::DownloadLargeFile { content }) @@ -262,6 +279,7 @@ impl MononokeRepo { fn upload_large_file( &self, + ctx: CoreContext, oid: String, body: Bytes, ) -> BoxFuture { @@ -278,7 +296,7 @@ impl MononokeRepo { } self.repo - .upload_file_content_by_alias(sha256_oid, body) + .upload_file_content_by_alias(ctx, sha256_oid, body) .and_then(|_| Ok(MononokeRepoResponse::UploadLargeFile {})) .from_err() .boxify() @@ -312,23 +330,23 @@ impl MononokeRepo { use MononokeRepoQuery::*; match msg { - GetRawFile { changeset, path } => self.get_raw_file(changeset, path), - GetBlobContent { hash } => self.get_blob_content(hash), - ListDirectory { changeset, path } => self.list_directory(changeset, path), - GetTree { hash } => self.get_tree(hash), - GetChangeset { hash } => self.get_changeset(hash), + GetRawFile { changeset, path } => self.get_raw_file(ctx, changeset, path), + GetBlobContent { hash } => self.get_blob_content(ctx, hash), + ListDirectory { changeset, path } => self.list_directory(ctx, changeset, path), + GetTree { hash } => self.get_tree(ctx, hash), + GetChangeset { hash } => self.get_changeset(ctx, hash), IsAncestor { proposed_ancestor, proposed_descendent, } => self.is_ancestor(ctx, proposed_ancestor, proposed_descendent), - DownloadLargeFile { oid } => self.download_large_file(oid), + DownloadLargeFile { oid } => self.download_large_file(ctx, oid), LfsBatch { repo_name, req, lfs_url, } => self.lfs_batch(repo_name, req, lfs_url), - UploadLargeFile { oid, body } => self.upload_large_file(oid, body), + UploadLargeFile { oid, body } => self.upload_large_file(ctx, oid, body), } } } diff --git a/blobrepo/src/bonsai_generation.rs b/blobrepo/src/bonsai_generation.rs index 2713138b74..62ec426cbc 100644 --- a/blobrepo/src/bonsai_generation.rs +++ b/blobrepo/src/bonsai_generation.rs @@ -13,6 +13,7 @@ use futures::future::{join_all, Future}; use futures_ext::FutureExt; use bonsai_utils; +use context::CoreContext; use mercurial_types::{Changeset, HgFileNodeId, HgManifestId, HgNodeHash, MPath}; use mononoke_types::{BlobstoreValue, BonsaiChangeset, BonsaiChangesetMut, ChangesetId, FileChange, MononokeId}; @@ -24,12 +25,14 @@ use errors::*; /// Creates bonsai changeset from already created HgBlobChangeset. 
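
Every hunk in this series makes the same mechanical change: a `CoreContext` becomes the first parameter of each function that can reach the blobstore, and it is `clone()`d wherever the call graph fans out to more than one consumer. A minimal std-only sketch of that shape, using a hypothetical `Ctx` type and hypothetical `fetch_blob`/`fetch_envelope` helpers rather than the real `CoreContext` and blobstore API:

```rust
use std::sync::Arc;

#[derive(Clone)]
struct Ctx {
    session: Arc<String>, // stands in for CoreContext's shared session/tracing data
}

fn fetch_blob(ctx: Ctx, key: &str) -> Vec<u8> {
    println!("[{}] get {}", ctx.session, key);
    Vec::new()
}

fn fetch_envelope(ctx: Ctx, key: &str) -> (Vec<u8>, Vec<u8>) {
    // Two downstream calls, so the context is cloned for the first one,
    // exactly like the `ctx.clone()` sprinkled through the hunks here.
    let meta = fetch_blob(ctx.clone(), key);
    let data = fetch_blob(ctx, key);
    (meta, data)
}

fn main() {
    let ctx = Ctx { session: Arc::new("req-1".to_string()) };
    let _ = fetch_envelope(ctx, "manifest/abc");
}
```

Cloning is cheap by design: the context is a handle around reference-counted state, which is why the diff can add `ctx.clone()` freely. The real `create_bonsai_changeset_object`, introduced below, follows this same shape.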
pub fn create_bonsai_changeset_object( + ctx: CoreContext, cs: HgBlobChangeset, parent_manifests: Vec, bonsai_parents: Vec, repo: BlobRepo, ) -> impl Future { let file_changes = find_file_changes( + ctx, cs.clone(), parent_manifests, repo.clone(), @@ -67,6 +70,7 @@ pub fn create_bonsai_changeset_object( } pub fn save_bonsai_changeset_object( + ctx: CoreContext, blobstore: RepoBlobstore, bonsai_cs: BonsaiChangeset, ) -> impl Future { @@ -74,11 +78,14 @@ pub fn save_bonsai_changeset_object( let bcs_id = bonsai_blob.id().clone(); let blobstore_key = bcs_id.blobstore_key(); - blobstore.put(blobstore_key, bonsai_blob.into()).map(|_| ()) + blobstore + .put(ctx, blobstore_key, bonsai_blob.into()) + .map(|_| ()) } // Finds files that were changed in the commit and returns it in the format suitable for BonsaiChangeset fn find_file_changes( + ctx: CoreContext, cs: HgBlobChangeset, parent_manifests: Vec, repo: BlobRepo, @@ -93,17 +100,18 @@ fn find_file_changes( .get(1) .map(|root_mf| repo.get_root_entry(root_mf)); - bonsai_utils::bonsai_diff(root_entry, p1_root_entry, p2_root_entry) + bonsai_utils::bonsai_diff(ctx.clone(), root_entry, p1_root_entry, p2_root_entry) .map(move |changed_file| match changed_file { bonsai_utils::BonsaiDiffResult::Changed(path, ty, entry_id) => { let file_node_id = entry_id.into_nodehash(); - cloned!(bonsai_parents, repo, parent_manifests); - repo.get_file_content(&file_node_id) + cloned!(ctx, bonsai_parents, repo, parent_manifests); + repo.get_file_content(ctx.clone(), &file_node_id) .and_then(move |file_contents| { let size = file_contents.size(); let content_id = file_contents.into_blob().id().clone(); get_copy_info( + ctx, repo, bonsai_parents, path.clone(), @@ -122,8 +130,8 @@ fn find_file_changes( } bonsai_utils::BonsaiDiffResult::ChangedReusedId(path, ty, entry_id) => { let file_node_id = entry_id.into_nodehash(); - cloned!(repo); - repo.get_file_content(&file_node_id).and_then(move |file_contents| { + cloned!(ctx, repo); + repo.get_file_content(ctx, &file_node_id).and_then(move |file_contents| { let size = file_contents.size(); let content_id = file_contents.into_blob().id().clone(); @@ -150,61 +158,64 @@ fn find_file_changes( // In hg copy information is (path, filenode), in bonsai it's (path, parent cs id). That means that // we need to find a parent from which this filenode was copied. 
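
The comment above is the crux of `get_copy_info`, which follows: hg copy metadata names a filenode, bonsai needs the parent changeset it came from, so each parent manifest is probed for the copy-from path and a matching filenode. A std-only model of that lookup, with hypothetical `Filenode`/`ChangesetId`/`Manifest` aliases standing in for the real `HgFileNodeId`, `ChangesetId`, and manifest futures:

```rust
use std::collections::HashMap;

type Filenode = u64;
type ChangesetId = u64;
type Manifest = HashMap<&'static str, Filenode>;

// Keep the parent whose manifest maps the copy-from path to the named filenode.
fn copy_from_parent(
    copy_from: (&str, Filenode),
    parents: &[(ChangesetId, Manifest)],
) -> Option<ChangesetId> {
    let (path, node) = copy_from;
    parents
        .iter()
        .find(|(_, mf)| mf.get(path) == Some(&node))
        .map(|(cs, _)| *cs)
}

fn main() {
    let p1: Manifest = [("a.txt", 7)].into_iter().collect();
    let p2: Manifest = [("a.txt", 9)].into_iter().collect();
    // The copy metadata names filenode 9, which only p2 has.
    assert_eq!(copy_from_parent(("a.txt", 9), &[(1, p1), (2, p2)]), Some(2));
}
```

When no parent matches, the real function fails with `ErrorKind::IncorrectCopyInfo`, visible further down in this hunk.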
fn get_copy_info( + ctx: CoreContext, repo: BlobRepo, bonsai_parents: Vec, copy_from_path: MPath, nodehash: HgNodeHash, parent_manifests: Vec, ) -> impl Future, Error = Error> { - repo.get_hg_file_copy_from_blobstore(&nodehash).and_then({ - cloned!(repo); - move |maybecopy| match maybecopy { - Some((repopath, copyfromnode)) => { - let repopath: Result = repopath - .mpath() - .cloned() - .ok_or(ErrorKind::UnexpectedRootPath.into()); + repo.get_hg_file_copy_from_blobstore(ctx.clone(), &nodehash) + .and_then({ + cloned!(repo); + move |maybecopy| match maybecopy { + Some((repopath, copyfromnode)) => { + let repopath: Result = repopath + .mpath() + .cloned() + .ok_or(ErrorKind::UnexpectedRootPath.into()); - let parents_bonsai_and_mfs = - bonsai_parents.into_iter().zip(parent_manifests.into_iter()); + let parents_bonsai_and_mfs = + bonsai_parents.into_iter().zip(parent_manifests.into_iter()); - repopath - .into_future() - .and_then(move |repopath| { - join_all(parents_bonsai_and_mfs.map({ - cloned!(repopath); - move |(bonsai_parent, parent_mf)| { - repo.find_file_in_manifest(&repopath, parent_mf).map( - move |res| match res { - Some((_, node)) if node == HgFileNodeId::new(copyfromnode) => { - Some(bonsai_parent) - } - _ => None, - }, - ) + repopath + .into_future() + .and_then(move |repopath| { + join_all(parents_bonsai_and_mfs.map({ + cloned!(ctx, repopath); + move |(bonsai_parent, parent_mf)| { + repo.find_file_in_manifest(ctx.clone(), &repopath, parent_mf) + .map(move |res| match res { + Some((_, node)) + if node == HgFileNodeId::new(copyfromnode) => + { + Some(bonsai_parent) + } + _ => None, + }) + } + })).map(move |res| (res, repopath)) + }) + .and_then(move |(copied_from_bonsai_commits, repopath)| { + let copied_from: Vec<_> = copied_from_bonsai_commits + .into_iter() + .filter_map(|x| x) + .collect(); + match copied_from.get(0) { + Some(bonsai_cs_copied_from) => { + Ok(Some((repopath, bonsai_cs_copied_from.clone()))) + } + None => Err(ErrorKind::IncorrectCopyInfo { + from_path: copy_from_path, + from_node: nodehash, + to_path: repopath.clone(), + to_node: copyfromnode, + }.into()), } - })).map(move |res| (res, repopath)) - }) - .and_then(move |(copied_from_bonsai_commits, repopath)| { - let copied_from: Vec<_> = copied_from_bonsai_commits - .into_iter() - .filter_map(|x| x) - .collect(); - match copied_from.get(0) { - Some(bonsai_cs_copied_from) => { - Ok(Some((repopath, bonsai_cs_copied_from.clone()))) - } - None => Err(ErrorKind::IncorrectCopyInfo { - from_path: copy_from_path, - from_node: nodehash, - to_path: repopath.clone(), - to_node: copyfromnode, - }.into()), - } - }) - .boxify() + }) + .boxify() + } + None => Ok(None).into_future().boxify(), } - None => Ok(None).into_future().boxify(), - } - }) + }) } diff --git a/blobrepo/src/changeset.rs b/blobrepo/src/changeset.rs index 87993ccc71..73933cc650 100644 --- a/blobrepo/src/changeset.rs +++ b/blobrepo/src/changeset.rs @@ -13,6 +13,7 @@ use futures::future::{Either, Future, IntoFuture}; use blobstore::Blobstore; +use context::CoreContext; use mercurial; use mercurial::changeset::Extra; use mercurial::revlogrepo::RevlogChangeset; @@ -142,6 +143,7 @@ impl HgBlobChangeset { } pub fn load( + ctx: CoreContext, blobstore: &RepoBlobstore, changesetid: &HgChangesetId, ) -> impl Future, Error = Error> + Send + 'static { @@ -157,7 +159,7 @@ impl HgBlobChangeset { let key = changesetid.blobstore_key(); let fut = blobstore - .get(key.clone()) + .get(ctx, key.clone()) .and_then(move |got| match got { None => Ok(None), Some(bytes) => { @@ -185,6 
+187,7 @@ impl HgBlobChangeset { pub fn save( &self, + ctx: CoreContext, blobstore: RepoBlobstore, ) -> impl Future + Send + 'static { let key = self.changesetid.blobstore_key(); @@ -207,7 +210,7 @@ impl HgBlobChangeset { Ok(envelope.into_blob()) }) .into_future() - .and_then(move |blob| blobstore.put(key, blob.into())) + .and_then(move |blob| blobstore.put(ctx, key, blob.into())) } #[inline] diff --git a/blobrepo/src/changeset_fetcher.rs b/blobrepo/src/changeset_fetcher.rs index 083512777b..436e86393b 100644 --- a/blobrepo/src/changeset_fetcher.rs +++ b/blobrepo/src/changeset_fetcher.rs @@ -179,7 +179,7 @@ impl CachingChangesetFetcher { format!("changesetscache_{}", bucket * self.cache_bucket_size) } - fn fill_cache(&self, gen_num: u64) -> impl Future { + fn fill_cache(&self, ctx: CoreContext, gen_num: u64) -> impl Future { let blobstore_cache_key = self.get_blobstore_cache_key(gen_num); if !self.already_fetched_blobs .lock() @@ -188,7 +188,7 @@ impl CachingChangesetFetcher { { cloned!(self.fetches_from_blobstore); self.blobstore - .get(blobstore_cache_key.clone()) + .get(ctx, blobstore_cache_key.clone()) .map({ let cs_fetcher = self.clone(); move |val| { @@ -228,15 +228,21 @@ impl CachingChangesetFetcher { cloned!(self.repo_id, self.max_request_latency); self.cache_requests.fetch_add(1, Ordering::Relaxed); - cachelib::get_cached_or_fill(&self.cache_pool, cache_key, move || { - self.cache_misses.fetch_add(1, Ordering::Relaxed); - self.changesets.get(ctx, repo_id, cs_id) + cachelib::get_cached_or_fill(&self.cache_pool, cache_key, { + cloned!(ctx); + move || { + self.cache_misses.fetch_add(1, Ordering::Relaxed); + self.changesets.get(ctx.clone(), repo_id, cs_id) + } }).and_then(move |maybe_cs| maybe_cs.ok_or_else(|| err_msg(format!("{} not found", cs_id)))) .and_then({ let cs_fetcher = self.clone(); move |cs| { if cs_fetcher.too_many_cache_misses() { - cs_fetcher.fill_cache(cs.gen).map(|()| cs).left_future() + cs_fetcher + .fill_cache(ctx, cs.gen) + .map(|()| cs) + .left_future() } else { future::ok(cs).right_future() } @@ -393,13 +399,18 @@ mod tests { } impl Blobstore for TestBlobstore { - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture, Error> { let blobstore = self.blobstore.lock().unwrap(); self.get_counter.fetch_add(1, Ordering::Relaxed); Ok(blobstore.get(&key).cloned()).into_future().boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put( + &self, + _ctx: CoreContext, + key: String, + value: BlobstoreBytes, + ) -> BoxFuture<(), Error> { let mut blobstore = self.blobstore.lock().unwrap(); blobstore.insert(key, value); Ok(()).into_future().boxify() @@ -552,6 +563,7 @@ mod tests { // Blob cache entries with gen number 0 up to 4 blobstore.put( + ctx.clone(), "changesetscache_4".to_string(), BlobstoreBytes::from_bytes(serialize_cs_entries(vec![ cs.get(ctx.clone(), REPO_ZERO, FIVES_CSID) diff --git a/blobrepo/src/file.rs b/blobrepo/src/file.rs index 51da4a2358..75a22dd8ce 100644 --- a/blobrepo/src/file.rs +++ b/blobrepo/src/file.rs @@ -20,6 +20,7 @@ use mercurial_types::nodehash::HgEntryId; use mononoke_types::{ContentId, FileContents, MononokeId, hash::Sha256}; use blobstore::Blobstore; +use context::CoreContext; use errors::*; @@ -44,15 +45,16 @@ impl PartialEq for HgBlobEntry { impl Eq for HgBlobEntry {} pub fn fetch_raw_filenode_bytes( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> BoxFuture { - fetch_file_envelope(blobstore, node_id) + 
fetch_file_envelope(ctx.clone(), blobstore, node_id) .and_then({ let blobstore = blobstore.clone(); move |envelope| { let envelope = envelope.into_mut(); - let file_contents_fut = fetch_file_contents(&blobstore, envelope.content_id); + let file_contents_fut = fetch_file_contents(ctx, &blobstore, envelope.content_id); let mut metadata = envelope.metadata; if metadata.is_empty() { @@ -76,47 +78,52 @@ pub fn fetch_raw_filenode_bytes( } pub fn fetch_file_content_from_blobstore( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> impl Future { - fetch_file_envelope(blobstore, node_id).and_then({ + fetch_file_envelope(ctx.clone(), blobstore, node_id).and_then({ let blobstore = blobstore.clone(); move |envelope| { let content_id = envelope.content_id(); - fetch_file_contents(&blobstore, content_id.clone()) + fetch_file_contents(ctx, &blobstore, content_id.clone()) } }) } pub fn fetch_file_size_from_blobstore( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgFileNodeId, ) -> impl Future { - fetch_file_envelope(blobstore, node_id.into_nodehash()) + fetch_file_envelope(ctx, blobstore, node_id.into_nodehash()) .map({ |envelope| envelope.content_size() }) } pub fn fetch_file_content_id_from_blobstore( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgFileNodeId, ) -> impl Future { - fetch_file_envelope(blobstore, node_id.into_nodehash()) + fetch_file_envelope(ctx, blobstore, node_id.into_nodehash()) .map({ |envelope| *envelope.content_id() }) } pub fn fetch_file_content_sha256_from_blobstore( + ctx: CoreContext, blobstore: &RepoBlobstore, content_id: ContentId, ) -> impl Future { - fetch_file_contents(blobstore, content_id) + fetch_file_contents(ctx, blobstore, content_id) .map(|file_content| get_sha256(&file_content.into_bytes())) } pub fn fetch_rename_from_blobstore( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> impl Future, Error = Error> { - fetch_file_envelope(blobstore, node_id).and_then(|envelope| { + fetch_file_envelope(ctx, blobstore, node_id).and_then(|envelope| { let envelope = envelope.into_mut(); // This is a bit of a hack because metadata is not the complete file. 
However, it's @@ -130,10 +137,11 @@ pub fn fetch_rename_from_blobstore( } pub fn fetch_file_envelope( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> impl Future { - fetch_file_envelope_opt(blobstore, node_id) + fetch_file_envelope_opt(ctx, blobstore, node_id) .and_then(move |envelope| { let envelope = envelope.ok_or(ErrorKind::HgContentMissing( node_id, @@ -145,12 +153,13 @@ pub fn fetch_file_envelope( } pub fn fetch_file_envelope_opt( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> impl Future, Error = Error> { let blobstore_key = HgFileNodeId::new(node_id).blobstore_key(); blobstore - .get(blobstore_key.clone()) + .get(ctx, blobstore_key.clone()) .context("While fetching manifest envelope blob") .map_err(Error::from) .and_then(move |bytes| { @@ -173,12 +182,13 @@ pub fn fetch_file_envelope_opt( } pub fn fetch_file_contents( + ctx: CoreContext, blobstore: &RepoBlobstore, content_id: ContentId, ) -> impl Future { let blobstore_key = content_id.blobstore_key(); blobstore - .get(blobstore_key.clone()) + .get(ctx, blobstore_key.clone()) .context("While fetching content blob") .map_err(Error::from) .and_then(move |bytes| { @@ -212,10 +222,12 @@ impl HgBlobEntry { } } - fn get_raw_content_inner(&self) -> BoxFuture { + fn get_raw_content_inner(&self, ctx: CoreContext) -> BoxFuture { match self.ty { - Type::Tree => fetch_raw_manifest_bytes(&self.blobstore, self.id.into_nodehash()), - Type::File(_) => fetch_raw_filenode_bytes(&self.blobstore, self.id.into_nodehash()), + Type::Tree => fetch_raw_manifest_bytes(ctx, &self.blobstore, self.id.into_nodehash()), + Type::File(_) => { + fetch_raw_filenode_bytes(ctx, &self.blobstore, self.id.into_nodehash()) + } } } } @@ -225,32 +237,36 @@ impl Entry for HgBlobEntry { self.ty } - fn get_parents(&self) -> BoxFuture { + fn get_parents(&self, ctx: CoreContext) -> BoxFuture { match self.ty { - Type::Tree => fetch_manifest_envelope(&self.blobstore, self.id.into_nodehash()) - .map(move |envelope| { - let (p1, p2) = envelope.parents(); - HgParents::new(p1, p2) - }) - .boxify(), - Type::File(_) => fetch_file_envelope(&self.blobstore, self.id.into_nodehash()) - .map(move |envelope| { - let (p1, p2) = envelope.parents(); - HgParents::new(p1, p2) - }) - .boxify(), + Type::Tree => { + fetch_manifest_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash()) + .map(move |envelope| { + let (p1, p2) = envelope.parents(); + HgParents::new(p1, p2) + }) + .boxify() + } + Type::File(_) => { + fetch_file_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash()) + .map(move |envelope| { + let (p1, p2) = envelope.parents(); + HgParents::new(p1, p2) + }) + .boxify() + } } } - fn get_raw_content(&self) -> BoxFuture { - self.get_raw_content_inner() + fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture { + self.get_raw_content_inner(ctx) } - fn get_content(&self) -> BoxFuture { + fn get_content(&self, ctx: CoreContext) -> BoxFuture { let blobstore = self.blobstore.clone(); match self.ty { Type::Tree => { - BlobManifest::load(&blobstore, &HgManifestId::new(self.id.into_nodehash())) + BlobManifest::load(ctx, &blobstore, &HgManifestId::new(self.id.into_nodehash())) .and_then({ let node_id = self.id.into_nodehash(); move |blob_manifest| { @@ -266,10 +282,11 @@ impl Entry for HgBlobEntry { .from_err() .boxify() } - Type::File(ft) => fetch_file_envelope(&blobstore, self.id.into_nodehash()) + Type::File(ft) => fetch_file_envelope(ctx.clone(), &blobstore, self.id.into_nodehash()) .and_then(move |envelope| { let envelope = 
envelope.into_mut();
-                let file_contents_fut = fetch_file_contents(&blobstore, envelope.content_id);
+                let file_contents_fut =
+                    fetch_file_contents(ctx, &blobstore, envelope.content_id);
                 file_contents_fut.map(move |contents| match ft {
                     FileType::Regular => Content::File(contents),
                     FileType::Executable => Content::Executable(contents),
@@ -286,12 +303,14 @@ impl Entry for HgBlobEntry {
     }

     // XXX get_size should probably return a u64, not a usize
-    fn get_size(&self) -> BoxFuture<Option<usize>, Error> {
+    fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error> {
         match self.ty {
             Type::Tree => future::ok(None).boxify(),
-            Type::File(_) => fetch_file_envelope(&self.blobstore, self.id.into_nodehash())
-                .map(|envelope| Some(envelope.content_size() as usize))
-                .boxify(),
+            Type::File(_) => {
+                fetch_file_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash())
+                    .map(|envelope| Some(envelope.content_size() as usize))
+                    .boxify()
+            }
         }
     }

diff --git a/blobrepo/src/manifest.rs b/blobrepo/src/manifest.rs
index 5e81846d96..567b43aeaa 100644
--- a/blobrepo/src/manifest.rs
+++ b/blobrepo/src/manifest.rs
@@ -13,6 +13,7 @@ use failure::{Error, FutureFailureErrorExt, Result, ResultExt};
 use futures::future::{Future, IntoFuture};
 use futures_ext::{BoxFuture, FutureExt};

+use context::CoreContext;
 use mercurial_types::{Entry, FileType, HgBlob, HgManifestEnvelope, MPathElement, Manifest, Type};
 use mercurial_types::nodehash::{HgEntryId, HgManifestId, HgNodeHash, NULL_HASH};

@@ -84,10 +85,11 @@ impl ManifestContent {
 }

 pub fn fetch_raw_manifest_bytes(
+    ctx: CoreContext,
     blobstore: &RepoBlobstore,
     node_id: HgNodeHash,
 ) -> BoxFuture<HgBlob, Error> {
-    fetch_manifest_envelope(blobstore, node_id)
+    fetch_manifest_envelope(ctx, blobstore, node_id)
         .map(move |envelope| {
             let envelope = envelope.into_mut();
             HgBlob::from(envelope.contents)
@@ -97,10 +99,11 @@ pub fn fetch_raw_manifest_bytes(
 }

 pub fn fetch_manifest_envelope(
+    ctx: CoreContext,
     blobstore: &RepoBlobstore,
     node_id: HgNodeHash,
 ) -> impl Future<Item = HgManifestEnvelope, Error = Error> {
-    fetch_manifest_envelope_opt(blobstore, node_id)
+    fetch_manifest_envelope_opt(ctx, blobstore, node_id)
         .and_then(move |envelope| {
             let envelope = envelope.ok_or(ErrorKind::HgContentMissing(node_id, Type::Tree))?;
             Ok(envelope)
@@ -110,12 +113,13 @@ pub fn fetch_manifest_envelope(

 /// Like `fetch_manifest_envelope`, but returns None if the manifest wasn't found.
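
The `_opt`/strict pairing used throughout this file (see `fetch_manifest_envelope` above and `fetch_manifest_envelope_opt` next) deserves a note: only the `_opt` variant touches the blobstore and maps a missing blob to `Ok(None)`, while the strict variant layers the "content missing" error on top, so callers don't repeat the `ok_or`. A compact std-only sketch of that layering, with a hypothetical `FetchError` standing in for the real `ErrorKind`:

```rust
#[derive(Debug)]
enum FetchError {
    ContentMissing(String),
}

// The `_opt` variant: absence is not an error.
fn fetch_opt(key: &str) -> Result<Option<Vec<u8>>, FetchError> {
    // A real implementation would consult the blobstore with `ctx`.
    if key == "present" { Ok(Some(vec![1, 2, 3])) } else { Ok(None) }
}

// The strict variant: absence becomes a typed error exactly once.
fn fetch(key: &str) -> Result<Vec<u8>, FetchError> {
    fetch_opt(key)?.ok_or_else(|| FetchError::ContentMissing(key.to_string()))
}

fn main() {
    assert!(fetch("present").is_ok());
    assert!(matches!(fetch("absent"), Err(FetchError::ContentMissing(_))));
}
```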
pub fn fetch_manifest_envelope_opt( + ctx: CoreContext, blobstore: &RepoBlobstore, node_id: HgNodeHash, ) -> impl Future, Error = Error> { let blobstore_key = HgManifestId::new(node_id).blobstore_key(); blobstore - .get(blobstore_key.clone()) + .get(ctx, blobstore_key.clone()) .context("While fetching manifest envelope blob") .map_err(Error::from) .and_then(move |bytes| { @@ -149,6 +153,7 @@ pub struct BlobManifest { impl BlobManifest { pub fn load( + ctx: CoreContext, blobstore: &RepoBlobstore, manifestid: &HgManifestId, ) -> BoxFuture, Error> { @@ -164,7 +169,7 @@ impl BlobManifest { })).into_future() .boxify() } else { - fetch_manifest_envelope_opt(&blobstore, manifestid.into_nodehash()) + fetch_manifest_envelope_opt(ctx, &blobstore, manifestid.into_nodehash()) .and_then({ let blobstore = blobstore.clone(); move |envelope| match envelope { diff --git a/blobrepo/src/memory_manifest.rs b/blobrepo/src/memory_manifest.rs index 2c24d673ff..54f7616e3b 100644 --- a/blobrepo/src/memory_manifest.rs +++ b/blobrepo/src/memory_manifest.rs @@ -18,6 +18,7 @@ use futures_ext::{BoxFuture, FutureExt}; use slog::Logger; +use context::CoreContext; use mercurial_types::{Entry, HgFileNodeId, HgManifestId, HgNodeHash, MPath, MPathElement, Manifest, RepoPath, Type}; use mercurial_types::manifest::Content; @@ -95,6 +96,7 @@ impl MemoryManifestEntry { /// True iff this entry is a tree with no children pub fn is_empty( &self, + ctx: CoreContext, blobstore: &RepoBlobstore, ) -> impl Future + Send { match self { @@ -110,14 +112,14 @@ impl MemoryManifestEntry { if changes_are_empty { Either::B(future::ok(base_manifest_id.is_none())) } else { - let is_empty_rec = self.get_new_children(blobstore) + let is_empty_rec = self.get_new_children(ctx.clone(), blobstore) .and_then({ - let blobstore = blobstore.clone(); + cloned!(ctx, blobstore); move |children| { future::join_all( children .into_iter() - .map(move |(_, child)| child.is_empty(&blobstore)), + .map(move |(_, child)| child.is_empty(ctx.clone(), &blobstore)), ) } }) @@ -171,6 +173,7 @@ impl MemoryManifestEntry { /// Save all manifests represented here to the blobstore pub fn save( &self, + ctx: CoreContext, blobstore: &RepoBlobstore, logger: &Logger, incomplete_filenodes: &IncompleteFilenodes, @@ -192,21 +195,25 @@ impl MemoryManifestEntry { let p1 = *p1; let p2 = *p2; if self.is_modified() { - self.get_new_children(blobstore) + self.get_new_children(ctx.clone(), blobstore) .and_then({ cloned!(logger, blobstore, incomplete_filenodes); move |new_children| { // First save only the non-empty children let entries = stream::iter_ok(new_children.into_iter()) .and_then({ - cloned!(blobstore); + cloned!(ctx, blobstore); move |(path_elem, entry)| { - (entry.is_empty(&blobstore), Ok(path_elem), Ok(entry)) + ( + entry.is_empty(ctx.clone(), &blobstore), + Ok(path_elem), + Ok(entry), + ) } }) .filter(|(empty, ..)| !empty) .and_then({ - cloned!(logger, blobstore, path, incomplete_filenodes); + cloned!(ctx, logger, blobstore, path, incomplete_filenodes); move |(_, path_elem, entry)| { let path_elem = path_elem.clone(); // This is safe, because we only save trees @@ -214,6 +221,7 @@ impl MemoryManifestEntry { extend_repopath_with_dir(&path, &path_elem); entry .save( + ctx.clone(), &blobstore, &logger, &incomplete_filenodes, @@ -247,7 +255,7 @@ impl MemoryManifestEntry { path, }; upload_manifest - .upload_to_blobstore(&blobstore, &logger) + .upload_to_blobstore(ctx, &blobstore, &logger) .map(|(_hash, future)| future) .into_future() .flatten() @@ -319,6 +327,7 @@ impl 
MemoryManifestEntry { // The list of this node's children, or empty if it's not a tree with children. fn get_new_children( &self, + ctx: CoreContext, blobstore: &RepoBlobstore, ) -> impl Future, Error = Error> + Send { match self { @@ -328,13 +337,13 @@ impl MemoryManifestEntry { .. } => match base_manifest_id { Some(manifest_id) => Either::B( - BlobManifest::load(blobstore, &HgManifestId::new(*manifest_id)) + BlobManifest::load(ctx.clone(), blobstore, &HgManifestId::new(*manifest_id)) .and_then({ let manifest_id = HgManifestId::new(*manifest_id); move |m| m.ok_or(ErrorKind::ManifestMissing(manifest_id).into()) }) .and_then({ - let blobstore = blobstore.clone(); + cloned!(blobstore); move |m| { let mut children = BTreeMap::new(); for entry in m.list() { @@ -383,6 +392,7 @@ impl MemoryManifestEntry { } fn merge_trees( + ctx: CoreContext, mut children: BTreeMap, other_children: BTreeMap, blobstore: RepoBlobstore, @@ -408,6 +418,7 @@ impl MemoryManifestEntry { conflicts.push( conflict_entry .merge_with_conflicts( + ctx.clone(), other_entry, blobstore.clone(), logger.clone(), @@ -442,6 +453,7 @@ impl MemoryManifestEntry { /// structure in strict order, so that first entry is p1, second is p2 etc. pub fn merge_with_conflicts( self, + ctx: CoreContext, other: Self, blobstore: RepoBlobstore, logger: Logger, @@ -451,6 +463,7 @@ impl MemoryManifestEntry { use self::MemoryManifestEntry::*; if self.is_modified() { return self.save( + ctx.clone(), &blobstore, &logger, &incomplete_filenodes, @@ -458,6 +471,7 @@ impl MemoryManifestEntry { ).map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash())) .and_then(move |saved| { saved.merge_with_conflicts( + ctx, other, blobstore, logger, @@ -470,6 +484,7 @@ impl MemoryManifestEntry { if other.is_modified() { return other .save( + ctx.clone(), &blobstore, &logger, &incomplete_filenodes, @@ -478,6 +493,7 @@ impl MemoryManifestEntry { .map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash())) .and_then(move |saved| { self.merge_with_conflicts( + ctx, saved, blobstore, logger, @@ -545,13 +561,14 @@ impl MemoryManifestEntry { future::ok(self.clone()).boxify() } else { // Otherwise, merge on an entry-by-entry basis - self.get_new_children(&blobstore) - .join(other.get_new_children(&blobstore)) + self.get_new_children(ctx.clone(), &blobstore) + .join(other.get_new_children(ctx.clone(), &blobstore)) .and_then({ let p1 = p1.clone(); let p2 = p2.clone(); move |(children, other_children)| { Self::merge_trees( + ctx, children, other_children, blobstore, @@ -645,6 +662,7 @@ impl MemoryManifestEntry { /// way through the path) pub fn find_mut( &self, + ctx: CoreContext, mut path: impl Iterator + Send + 'static, blobstore: RepoBlobstore, ) -> BoxFuture, Error> { @@ -673,12 +691,10 @@ impl MemoryManifestEntry { // Do the lookup in base_manifest_id if let Some(manifest_id) = base_manifest_id { let manifest_id = HgManifestId::new(*manifest_id); - BlobManifest::load(&blobstore, &manifest_id) - .and_then( - move |m| { - m.ok_or(ErrorKind::ManifestMissing(manifest_id).into()) - } - ) + BlobManifest::load(ctx.clone(), &blobstore, &manifest_id) + .and_then(move |m| { + m.ok_or(ErrorKind::ManifestMissing(manifest_id).into()) + }) .map({ let entry_changes = entry_changes.clone(); let element = element.clone(); @@ -696,9 +712,16 @@ impl MemoryManifestEntry { } else { future::ok(()).boxify() } - }.and_then(move |_| { - let mut changes = entry_changes.lock().expect("lock poisoned"); - Self::find_mut_helper(&mut changes, element).find_mut(path, blobstore) + 
}.and_then({ + cloned!(ctx); + move |_| { + let mut changes = entry_changes.lock().expect("lock poisoned"); + Self::find_mut_helper(&mut changes, element).find_mut( + ctx, + path, + blobstore, + ) + } }) .boxify() } @@ -747,36 +770,38 @@ impl MemoryManifestEntry { /// Resolve conflicts when blobs point to the same data but have different parents pub fn resolve_trivial_conflicts( &self, + ctx: CoreContext, repo: BlobRepo, incomplete_filenodes: IncompleteFilenodes, ) -> impl Future + Send { fn merge_content( + ctx: CoreContext, entries: Vec, ) -> impl Future, Error = Error> + Send { - if let Some(Type::File(file_type)) = entries.first().map(|e| e.get_type()) { - let fut = future::join_all(entries.into_iter().map(|e| e.get_content())).map( - move |content| { - let mut iter = content.iter(); - if let Some(first) = iter.next() { - if iter.all(|other| match (first, other) { - (Content::File(c0), Content::File(c1)) - | (Content::Executable(c0), Content::Executable(c1)) - | (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1, - _ => false, - }) { - return match first { - Content::Executable(file_content) - | Content::File(file_content) - | Content::Symlink(file_content) => { - Some((file_type, file_content.clone())) - } - _ => unreachable!(), - }; + if let Some(Type::File(file_type)) = entries.first().map(move |e| e.get_type()) { + let fut = future::join_all( + entries.into_iter().map(move |e| e.get_content(ctx.clone())), + ).map(move |content| { + let mut iter = content.iter(); + if let Some(first) = iter.next() { + if iter.all(|other| match (first, other) { + (Content::File(c0), Content::File(c1)) + | (Content::Executable(c0), Content::Executable(c1)) + | (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1, + _ => false, + }) { + return match first { + Content::Executable(file_content) + | Content::File(file_content) + | Content::Symlink(file_content) => { + Some((file_type, file_content.clone())) + } + _ => unreachable!(), }; }; - None - }, - ); + }; + None + }); Either::A(fut) } else { Either::B(future::ok(None)) @@ -784,6 +809,7 @@ impl MemoryManifestEntry { } fn merge_entries( + ctx: CoreContext, path: Option, entries: Vec, repo: BlobRepo, @@ -793,7 +819,7 @@ impl MemoryManifestEntry { .iter() .map(|e| e.get_hash().into_nodehash()) .collect::>(); - merge_content(entries).and_then(move |content| { + merge_content(ctx.clone(), entries).and_then(move |content| { let mut parents = parents.into_iter(); if let Some((file_type, file_content)) = content { let path = try_boxfuture!(path.ok_or(ErrorKind::EmptyFilePath).into()); @@ -809,7 +835,7 @@ impl MemoryManifestEntry { p2: p2.clone(), path: path, }; - let (_, upload_future) = try_boxfuture!(upload_entry.upload(&repo)); + let (_, upload_future) = try_boxfuture!(upload_entry.upload(ctx, &repo)); upload_future .map(move |(entry, path)| { incomplete_filenodes.add(IncompleteFilenodeInfo { @@ -829,6 +855,7 @@ impl MemoryManifestEntry { } fn resolve_rec( + ctx: CoreContext, path: Option, node: MemoryManifestEntry, repo: BlobRepo, @@ -843,11 +870,16 @@ impl MemoryManifestEntry { .flat_map(|(k, v)| v.clone().map(|v| (k, v))) .map(|(name, child)| { let path = MPath::join_opt(path.as_ref(), name); - resolve_rec(path, child, repo.clone(), incomplete_filenodes.clone()) - .map({ - let name = name.clone(); - move |v| v.map(|v| (name, v)) - }) + resolve_rec( + ctx.clone(), + path, + child, + repo.clone(), + incomplete_filenodes.clone(), + ).map({ + let name = name.clone(); + move |v| v.map(|v| (name, v)) + }) }) .collect::>() }; @@ -874,7 +906,7 @@ 
impl MemoryManifestEntry { }) .collect::>>(); if let Some(entries) = entries { - merge_entries(path, entries, repo, incomplete_filenodes).boxify() + merge_entries(ctx, path, entries, repo, incomplete_filenodes).boxify() } else { future::ok(None).boxify() } @@ -882,7 +914,7 @@ impl MemoryManifestEntry { _ => future::ok(None).boxify(), } } - resolve_rec(None, self.clone(), repo, incomplete_filenodes).map(|_| ()) + resolve_rec(ctx, None, self.clone(), repo, incomplete_filenodes).map(|_| ()) } } @@ -907,6 +939,7 @@ impl MemoryRootManifest { } fn create_conflict( + ctx: CoreContext, repo: BlobRepo, incomplete_filenodes: IncompleteFilenodes, p1_root: MemoryManifestEntry, @@ -914,6 +947,7 @@ impl MemoryRootManifest { ) -> BoxFuture { p1_root .merge_with_conflicts( + ctx, p2_root, repo.get_blobstore(), repo.get_logger(), @@ -926,6 +960,7 @@ impl MemoryRootManifest { /// Create an in-memory manifest, backed by the given blobstore, and based on mp1 and mp2 pub fn new( + ctx: CoreContext, repo: BlobRepo, incomplete_filenodes: IncompleteFilenodes, mp1: Option<&HgNodeHash>, @@ -943,6 +978,7 @@ impl MemoryRootManifest { MemoryManifestEntry::convert_treenode(p), )).boxify(), (Some(p1), Some(p2)) => Self::create_conflict( + ctx, repo, incomplete_filenodes, MemoryManifestEntry::convert_treenode(p1), @@ -961,9 +997,10 @@ impl MemoryRootManifest { /// manifest contains dir1/file1 and dir2/file2 and dir2 contains a conflict for file2, dir1 /// can still be saved to the blobstore. /// Returns the saved manifest ID - pub fn save(&self) -> BoxFuture { + pub fn save(&self, ctx: CoreContext) -> BoxFuture { self.root_entry .save( + ctx, &self.repo.get_blobstore(), &self.repo.get_logger(), &self.incomplete_filenodes, @@ -974,6 +1011,7 @@ impl MemoryRootManifest { fn find_path( &self, + ctx: CoreContext, path: &MPath, ) -> ( impl Future + Send, @@ -984,7 +1022,7 @@ impl MemoryRootManifest { None => Either::A(future::ok(self.root_entry.clone())), Some(filepath) => Either::B( self.root_entry - .find_mut(filepath.into_iter(), self.repo.get_blobstore()) + .find_mut(ctx, filepath.into_iter(), self.repo.get_blobstore()) .and_then({ let path = path.clone(); |entry| entry.ok_or(ErrorKind::PathNotFound(path).into()) @@ -996,17 +1034,28 @@ impl MemoryRootManifest { } /// Apply an add or remove based on whether the change is None (remove) or Some(blobentry) (add) - pub fn change_entry(&self, path: &MPath, entry: Option) -> BoxFuture<(), Error> { - let (target, filename) = self.find_path(path); + pub fn change_entry( + &self, + ctx: CoreContext, + path: &MPath, + entry: Option, + ) -> BoxFuture<(), Error> { + let (target, filename) = self.find_path(ctx, path); target .and_then(|target| target.change(filename, entry).into_future()) .boxify() } - pub fn resolve_trivial_conflicts(&self) -> impl Future + Send { - self.root_entry - .resolve_trivial_conflicts(self.repo.clone(), self.incomplete_filenodes.clone()) + pub fn resolve_trivial_conflicts( + &self, + ctx: CoreContext, + ) -> impl Future + Send { + self.root_entry.resolve_trivial_conflicts( + ctx, + self.repo.clone(), + self.incomplete_filenodes.clone(), + ) } pub fn unittest_root(&self) -> &MemoryManifestEntry { diff --git a/blobrepo/src/repo.rs b/blobrepo/src/repo.rs index 370e4d97d0..01bd7c08ca 100644 --- a/blobrepo/src/repo.rs +++ b/blobrepo/src/repo.rs @@ -409,13 +409,17 @@ impl BlobRepo { ) } - fn fetch(&self, key: &K) -> impl Future + Send + fn fetch( + &self, + ctx: CoreContext, + key: &K, + ) -> impl Future + Send where K: MononokeId, { let blobstore_key = 
key.blobstore_key(); self.blobstore - .get(blobstore_key.clone()) + .get(ctx, blobstore_key.clone()) .and_then(move |blob| { blob.ok_or(ErrorKind::MissingTypedKeyEntry(blobstore_key).into()) .and_then(|blob| <::Value>::from_blob(blob.into())) @@ -423,14 +427,18 @@ impl BlobRepo { } // this is supposed to be used only from unittest - pub fn unittest_fetch(&self, key: &K) -> impl Future + Send + pub fn unittest_fetch( + &self, + ctx: CoreContext, + key: &K, + ) -> impl Future + Send where K: MononokeId, { - self.fetch(key) + self.fetch(ctx, key) } - fn store(&self, value: V) -> impl Future + Send + fn store(&self, ctx: CoreContext, value: V) -> impl Future + Send where V: BlobstoreValue, K: MononokeId, @@ -438,65 +446,83 @@ impl BlobRepo { let blob = value.into_blob(); let key = *blob.id(); self.blobstore - .put(key.blobstore_key(), blob.into()) + .put(ctx, key.blobstore_key(), blob.into()) .map(move |_| key) } // this is supposed to be used only from unittest - pub fn unittest_store(&self, value: V) -> impl Future + Send + pub fn unittest_store( + &self, + ctx: CoreContext, + value: V, + ) -> impl Future + Send where V: BlobstoreValue, K: MononokeId, { - self.store(value) + self.store(ctx, value) } - pub fn get_file_content(&self, key: &HgNodeHash) -> BoxFuture { + pub fn get_file_content( + &self, + ctx: CoreContext, + key: &HgNodeHash, + ) -> BoxFuture { STATS::get_file_content.add_value(1); - fetch_file_content_from_blobstore(&self.blobstore, *key).boxify() + fetch_file_content_from_blobstore(ctx, &self.blobstore, *key).boxify() } pub fn get_file_content_by_content_id( &self, + ctx: CoreContext, id: ContentId, ) -> impl Future { - fetch_file_contents(&self.blobstore, id) + fetch_file_contents(ctx, &self.blobstore, id) } - pub fn get_file_size(&self, key: &HgFileNodeId) -> impl Future { - fetch_file_size_from_blobstore(&self.blobstore, *key) + pub fn get_file_size( + &self, + ctx: CoreContext, + key: &HgFileNodeId, + ) -> impl Future { + fetch_file_size_from_blobstore(ctx, &self.blobstore, *key) } pub fn get_file_content_id( &self, + ctx: CoreContext, key: &HgFileNodeId, ) -> impl Future { - fetch_file_content_id_from_blobstore(&self.blobstore, *key) + fetch_file_content_id_from_blobstore(ctx, &self.blobstore, *key) } pub fn get_file_sha256( &self, + ctx: CoreContext, content_id: ContentId, ) -> impl Future { let blobrepo = self.clone(); cloned!(content_id, self.blobstore); // try to get sha256 from blobstore from a blob to avoid calculation - self.get_alias_content_id_to_sha256(content_id) + self.get_alias_content_id_to_sha256(ctx.clone(), content_id) .and_then(move |res| match res { Some(file_content_sha256) => Ok(file_content_sha256).into_future().left_future(), - None => fetch_file_content_sha256_from_blobstore(&blobstore, content_id) - .and_then(move |alias| { - blobrepo - .put_alias_content_id_to_sha256(content_id, alias) - .map(move |()| alias) - }) - .right_future(), + None => { + fetch_file_content_sha256_from_blobstore(ctx.clone(), &blobstore, content_id) + .and_then(move |alias| { + blobrepo + .put_alias_content_id_to_sha256(ctx, content_id, alias) + .map(move |()| alias) + }) + .right_future() + } }) } fn put_alias_content_id_to_sha256( &self, + ctx: CoreContext, content_id: ContentId, alias_content: Sha256, ) -> impl Future { @@ -504,11 +530,13 @@ impl BlobRepo { // Contents = alias.sha256.SHA256HASH (BlobstoreBytes) let contents = BlobstoreBytes::from_bytes(Bytes::from(alias_content.as_ref())); - self.upload_blobstore_bytes(alias_key, contents).map(|_| ()) + 
self.upload_blobstore_bytes(ctx, alias_key, contents) + .map(|_| ()) } fn get_alias_content_id_to_sha256( &self, + ctx: CoreContext, content_id: ContentId, ) -> impl Future, Error = Error> { // Ok: Some(value) - found alias blob, None - alias blob nor found (lazy upload) @@ -517,7 +545,7 @@ impl BlobRepo { let alias_content_id = get_content_id_alias_key(content_id); self.blobstore - .get(alias_content_id.clone()) + .get(ctx, alias_content_id.clone()) .map(|content_key_bytes| { content_key_bytes.and_then(|bytes| Sha256::from_bytes(bytes.as_bytes()).ok()) }) @@ -525,6 +553,7 @@ impl BlobRepo { pub fn upload_file_content_by_alias( &self, + ctx: CoreContext, _alias: Sha256, raw_file_content: Bytes, ) -> impl Future { @@ -532,24 +561,26 @@ impl BlobRepo { let alias_key = get_sha256_alias(&raw_file_content); // Raw contents = file content only, excluding metadata in the beginning let contents = FileContents::Bytes(raw_file_content); - self.upload_blob(contents.into_blob(), alias_key) + self.upload_blob(ctx, contents.into_blob(), alias_key) .map(|_| ()) .boxify() } pub fn get_file_content_by_alias( &self, + ctx: CoreContext, alias: Sha256, ) -> impl Future { let blobstore = self.blobstore.clone(); - self.get_file_content_id_by_alias(alias) - .and_then(move |content_id| fetch_file_contents(&blobstore, content_id)) + self.get_file_content_id_by_alias(ctx.clone(), alias) + .and_then(move |content_id| fetch_file_contents(ctx, &blobstore, content_id)) .from_err() } pub fn get_file_content_id_by_alias( &self, + ctx: CoreContext, alias: Sha256, ) -> impl Future { STATS::get_file_content.add_value(1); @@ -557,7 +588,7 @@ impl BlobRepo { let blobstore = self.blobstore.clone(); blobstore - .get(prefixed_key.clone()) + .get(ctx, prefixed_key.clone()) .and_then(move |bytes| { let content_key_bytes = match bytes { Some(bytes) => bytes, @@ -595,10 +626,11 @@ impl BlobRepo { pub fn generate_lfs_file( &self, + ctx: CoreContext, content_id: ContentId, file_size: u64, ) -> impl Future { - self.get_file_sha256(content_id) + self.get_file_sha256(ctx, content_id) .and_then(move |alias| File::generate_lfs_file(alias, file_size)) .map(|bytes| FileContents::Bytes(bytes)) } @@ -608,19 +640,24 @@ impl BlobRepo { /// The raw filenode content is crucial for operation like delta application. It is stored in /// untouched represenation that came from Mercurial client. - pub fn get_raw_hg_content(&self, key: &HgNodeHash) -> BoxFuture { + pub fn get_raw_hg_content( + &self, + ctx: CoreContext, + key: &HgNodeHash, + ) -> BoxFuture { STATS::get_raw_hg_content.add_value(1); - fetch_raw_filenode_bytes(&self.blobstore, *key) + fetch_raw_filenode_bytes(ctx, &self.blobstore, *key) } // Fetches copy data from blobstore instead of from filenodes db. This should be used only // during committing. 
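
The function that follows reads copy data out of the file envelope rather than the filenodes table, which only works because hg embeds copy metadata in the file blob itself: a text beginning with `\x01\n` carries `key: value` lines (including `copy` and `copyrev`) up to a closing `\x01\n`. A hedged, std-only illustration of that framing; `parse_copy_info` is a made-up helper, and the real parsing lives in the mercurial crates:

```rust
// Extract (copy-from path, copy-from revision) from an hg filelog text,
// assuming the "\x01\n ... \x01\n" metadata framing described above.
fn parse_copy_info(raw: &[u8]) -> Option<(String, String)> {
    let raw = std::str::from_utf8(raw).ok()?;
    let rest = raw.strip_prefix("\x01\n")?;
    let (meta, _body) = rest.split_once("\x01\n")?;
    let mut path = None;
    let mut rev = None;
    for line in meta.lines() {
        match line.split_once(": ")? {
            ("copy", v) => path = Some(v.to_string()),
            ("copyrev", v) => rev = Some(v.to_string()),
            _ => {}
        }
    }
    Some((path?, rev?))
}

fn main() {
    let blob = b"\x01\ncopy: old/name.txt\ncopyrev: abc123\n\x01\nfile body";
    assert_eq!(
        parse_copy_info(blob),
        Some(("old/name.txt".to_string(), "abc123".to_string()))
    );
}
```

During commit the filenodes DB rows are not yet written, hence the comment above restricting this path to committing.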
pub(crate) fn get_hg_file_copy_from_blobstore( &self, + ctx: CoreContext, key: &HgNodeHash, ) -> BoxFuture, Error> { STATS::get_hg_file_copy_from_blobstore.add_value(1); - fetch_rename_from_blobstore(&self.blobstore, *key) + fetch_rename_from_blobstore(ctx, &self.blobstore, *key) .map(|rename| rename.map(|(path, hash)| (RepoPath::FilePath(path), hash))) .boxify() } @@ -628,6 +665,7 @@ impl BlobRepo { pub fn get_changesets(&self, ctx: CoreContext) -> BoxStream { STATS::get_changesets.add_value(1); HgBlobChangesetStream { + ctx: ctx.clone(), repo: self.clone(), state: BCState::Idle, heads: self.get_heads_maybe_stale(ctx).boxify(), @@ -778,21 +816,23 @@ impl BlobRepo { pub fn get_changeset_by_changesetid( &self, + ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture { STATS::get_changeset_by_changesetid.add_value(1); let chid = changesetid.clone(); - HgBlobChangeset::load(&self.blobstore, &chid) + HgBlobChangeset::load(ctx, &self.blobstore, &chid) .and_then(move |cs| cs.ok_or(ErrorKind::ChangesetMissing(chid).into())) .boxify() } pub fn get_manifest_by_nodeid( &self, + ctx: CoreContext, manifestid: &HgManifestId, ) -> BoxFuture, Error> { STATS::get_manifest_by_nodeid.add_value(1); - BlobManifest::load(&self.blobstore, &manifestid) + BlobManifest::load(ctx, &self.blobstore, &manifestid) .and_then({ let manifestid = *manifestid; move |mf| mf.ok_or(ErrorKind::ManifestMissing(manifestid).into()) @@ -908,10 +948,11 @@ impl BlobRepo { pub fn get_bonsai_changeset( &self, + ctx: CoreContext, bonsai_cs_id: ChangesetId, ) -> BoxFuture { STATS::get_bonsai_changeset.add_value(1); - self.fetch(&bonsai_cs_id).boxify() + self.fetch(ctx, &bonsai_cs_id).boxify() } // TODO(stash): make it accept ChangesetId @@ -954,6 +995,7 @@ impl BlobRepo { fn upload_blobstore_bytes( &self, + ctx: CoreContext, key: String, contents: BlobstoreBytes, ) -> impl Future + Send { @@ -972,7 +1014,7 @@ impl BlobRepo { ); } - self.blobstore.put(key.clone(), contents).timed({ + self.blobstore.put(ctx, key.clone(), contents).timed({ let logger = self.logger.clone(); move |stats, result| { if result.is_ok() { @@ -985,6 +1027,7 @@ impl BlobRepo { pub fn upload_blob( &self, + ctx: CoreContext, blob: Blob, alias_key: String, ) -> impl Future + Send @@ -999,12 +1042,12 @@ impl BlobRepo { // Upload {alias.sha256.sha256(blob_contents): blobstore_key} let alias_key_operation = { let contents = BlobstoreBytes::from_bytes(blobstore_key.as_bytes()); - self.upload_blobstore_bytes(alias_key, contents) + self.upload_blobstore_bytes(ctx.clone(), alias_key, contents) }; // Upload {blobstore_key: blob_contents} let blobstore_key_operation = - self.upload_blobstore_bytes(blobstore_key, blob_contents.clone()); + self.upload_blobstore_bytes(ctx, blobstore_key, blob_contents.clone()); blobstore_key_operation .join(alias_key_operation) @@ -1013,10 +1056,12 @@ impl BlobRepo { pub fn upload_alias_to_file_content_id( &self, + ctx: CoreContext, alias: Sha256, content_id: ContentId, ) -> impl Future + Send { self.upload_blobstore_bytes( + ctx, get_sha256_alias_key(alias.to_hex().to_string()), BlobstoreBytes::from_bytes(content_id.blobstore_key().as_bytes()), ) @@ -1054,8 +1099,8 @@ impl BlobRepo { (Some(parent), None, Some(change)) | (None, Some(parent), Some(change)) => { let store = self.get_blobstore(); let parent = parent.into_nodehash(); - cloned!(change, path); - fetch_file_envelope(&store, parent) + cloned!(ctx, change, path); + fetch_file_envelope(ctx.clone(), &store, parent) .map(move |parent_envelope| { if parent_envelope.content_id() == 
change.content_id() && change.copy_from().is_none() @@ -1104,14 +1149,14 @@ impl BlobRepo { Some(change) => { let copy_from_fut = match change.copy_from() { None => future::ok(None).left_future(), - Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx, *bcs_id) + Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx.clone(), *bcs_id) .and_then({ - cloned!(repo); - move |cs_id| repo.get_changeset_by_changesetid(&cs_id) + cloned!(ctx, repo); + move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id) }) .and_then({ - cloned!(repo, path); - move |cs| repo.find_file_in_manifest(&path, *cs.manifestid()) + cloned!(ctx, repo, path); + move |cs| repo.find_file_in_manifest(ctx, &path, *cs.manifestid()) }) .and_then({ cloned!(path); @@ -1123,7 +1168,7 @@ impl BlobRepo { .right_future(), }; let upload_fut = copy_from_fut.and_then({ - cloned!(repo, path, change); + cloned!(ctx, repo, path, change); move |copy_from| { let upload_entry = UploadHgFileEntry { upload_node_id: UploadHgNodeHash::Generate, @@ -1136,7 +1181,7 @@ impl BlobRepo { p2: p2.clone().map(|h| h.into_nodehash()), path: path.clone(), }; - let upload_fut = match upload_entry.upload(&repo) { + let upload_fut = match upload_entry.upload(ctx, &repo) { Ok((_, upload_fut)) => upload_fut.map(move |(entry, _)| { let node_info = IncompleteFilenodeInfo { path: RepoPath::FilePath(path), @@ -1165,13 +1210,14 @@ impl BlobRepo { /// child manifest contains this entry, because it might have been removed. pub fn check_case_conflict_in_manifest( &self, + ctx: CoreContext, parent_mf_id: &HgManifestId, child_mf_id: &HgManifestId, path: MPath, ) -> impl Future { let repo = self.clone(); let child_mf_id = child_mf_id.clone(); - self.get_manifest_by_nodeid(&parent_mf_id) + self.get_manifest_by_nodeid(ctx.clone(), &parent_mf_id) .and_then(move |mf| { loop_fn( (None, mf, path.into_iter()), @@ -1189,7 +1235,7 @@ impl BlobRepo { match entry.get_type() { Type::File(_) => future::ok(Loop::Break(false)).boxify(), Type::Tree => entry - .get_content() + .get_content(ctx.clone()) .map(move |content| match content { Content::Tree(mf) => { Loop::Continue((Some(cur_path), mf, elements)) @@ -1227,9 +1273,11 @@ impl BlobRepo { // this has been deleted and it's no longer a conflict. 
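
An aside on the check this hunk threads `ctx` through: two paths are a case conflict when they differ but case-fold to the same string, which is presumably what `check_case_conflict_in_manifest` walks the parent manifest to detect before confirming against the child. A std-only illustration of the comparison rule, with a hypothetical `case_conflicts` helper:

```rust
// Return the existing paths that collide with `new_path` on a
// case-insensitive filesystem (differ as bytes, equal when lowercased).
fn case_conflicts<'a>(existing: &[&'a str], new_path: &str) -> Vec<&'a str> {
    existing
        .iter()
        .copied()
        .filter(|p| *p != new_path && p.to_lowercase() == new_path.to_lowercase())
        .collect()
}

fn main() {
    let existing = ["dir1/File1", "dir2/file2"];
    // "dir1/file1" collides with "dir1/File1".
    assert_eq!(case_conflicts(&existing, "dir1/file1"), vec!["dir1/File1"]);
    assert!(case_conflicts(&existing, "dir3/file3").is_empty());
}
```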
let mut check_futs = vec![]; for fullpath in potential_conflicts { - let check_fut = - repo.find_path_in_manifest(fullpath, child_mf_id.clone()) - .map(|content| content.is_some()); + let check_fut = repo.find_path_in_manifest( + ctx.clone(), + fullpath, + child_mf_id.clone(), + ).map(|content| content.is_some()); check_futs.push(check_fut); } @@ -1249,11 +1297,13 @@ impl BlobRepo { pub fn find_path_in_manifest( &self, + ctx: CoreContext, path: Option, manifest: HgManifestId, ) -> impl Future, Error = Error> + Send { // single fold step, converts path elemnt in content to content, if any fn find_content_in_content( + ctx: CoreContext, content: BoxFuture, Error>, path_element: MPathElement, ) -> BoxFuture, Error> { @@ -1262,30 +1312,36 @@ impl BlobRepo { None => future::ok(None).left_future(), Some(Content::Tree(manifest)) => match manifest.lookup(&path_element) { None => future::ok(None).left_future(), - Some(entry) => entry.get_content().map(Some).right_future(), + Some(entry) => entry.get_content(ctx).map(Some).right_future(), }, Some(_) => future::ok(None).left_future(), }) .boxify() } - self.get_manifest_by_nodeid(&manifest) + self.get_manifest_by_nodeid(ctx.clone(), &manifest) .and_then(move |manifest| { let content_init = future::ok(Some(Content::Tree(manifest))).boxify(); match path { None => content_init, - Some(path) => path.into_iter().fold(content_init, find_content_in_content), + Some(path) => { + path.into_iter() + .fold(content_init, move |content, path_element| { + find_content_in_content(ctx.clone(), content, path_element) + }) + } } }) } pub fn find_file_in_manifest( &self, + ctx: CoreContext, path: &MPath, manifest: HgManifestId, ) -> impl Future, Error = Error> + Send { let (dirname, basename) = path.split_dirname(); - self.find_path_in_manifest(dirname, manifest).map({ + self.find_path_in_manifest(ctx, dirname, manifest).map({ let basename = basename.clone(); move |content| match content { None => None, @@ -1312,6 +1368,7 @@ impl BlobRepo { let p1 = manifest_p1.map(|id| id.into_nodehash()); let p2 = manifest_p2.map(|id| id.into_nodehash()); MemoryRootManifest::new( + ctx.clone(), self.clone(), IncompleteFilenodes::new(), p1.as_ref(), @@ -1329,13 +1386,13 @@ impl BlobRepo { cloned!(path, memory_manifest, incomplete_filenodes); let p1 = manifest_p1 .map(|manifest| { - repo.find_file_in_manifest(&path, manifest) + repo.find_file_in_manifest(ctx.clone(), &path, manifest) .map(|o| o.map(|(_, x)| x)) }) .into_future(); let p2 = manifest_p2 .map(|manifest| { - repo.find_file_in_manifest(&path, manifest) + repo.find_file_in_manifest(ctx.clone(), &path, manifest) .map(|o| o.map(|(_, x)| x)) }) .into_future(); @@ -1354,13 +1411,16 @@ impl BlobRepo { ) } }) - .and_then(move |entry| match entry { - None => memory_manifest.change_entry(&path, None), - Some((entry, node_infos)) => { - for node_info in node_infos { - incomplete_filenodes.add(node_info); + .and_then({ + cloned!(ctx); + move |entry| match entry { + None => memory_manifest.change_entry(ctx, &path, None), + Some((entry, node_infos)) => { + for node_info in node_infos { + incomplete_filenodes.add(node_info); + } + memory_manifest.change_entry(ctx, &path, Some(entry)) } - memory_manifest.change_entry(&path, Some(entry)) } }); futures.push(future); @@ -1368,10 +1428,10 @@ impl BlobRepo { future::join_all(futures) .and_then({ - let memory_manifest = memory_manifest.clone(); - move |_| memory_manifest.resolve_trivial_conflicts() + cloned!(ctx, memory_manifest); + move |_| memory_manifest.resolve_trivial_conflicts(ctx) }) - 
.and_then(move |_| memory_manifest.save()) + .and_then(move |_| memory_manifest.save(ctx)) .map({ cloned!(incomplete_filenodes); move |m| { @@ -1397,7 +1457,7 @@ impl BlobRepo { repo: &BlobRepo, bcs_id: ChangesetId, ) -> BoxFuture { - repo.fetch(&bcs_id) + repo.fetch(ctx.clone(), &bcs_id) .and_then({ cloned!(ctx, repo); move |bcs| { @@ -1405,8 +1465,10 @@ impl BlobRepo { .map(|p_bcs_id| { repo.get_hg_from_bonsai_changeset(ctx.clone(), *p_bcs_id) .and_then({ - cloned!(repo); - move |p_cs_id| repo.get_changeset_by_changesetid(&p_cs_id) + cloned!(ctx, repo); + move |p_cs_id| { + repo.get_changeset_by_changesetid(ctx, &p_cs_id) + } }) }) .collect::>(); @@ -1469,7 +1531,7 @@ impl BlobRepo { let cs = try_boxfuture!(HgBlobChangeset::new(content)); let cs_id = cs.get_changeset_id(); - cs.save(repo.blobstore.clone()) + cs.save(ctx.clone(), repo.blobstore.clone()) .and_then({ cloned!(ctx, repo); move |_| incomplete_filenodes.upload(ctx, cs_id, &repo) @@ -1539,13 +1601,15 @@ impl UploadHgTreeEntry { // adding the entries to a changeset. pub fn upload( self, + ctx: CoreContext, repo: &BlobRepo, ) -> Result<(HgNodeHash, BoxFuture<(HgBlobEntry, RepoPath), Error>)> { - self.upload_to_blobstore(&repo.blobstore, &repo.logger) + self.upload_to_blobstore(ctx, &repo.blobstore, &repo.logger) } pub(crate) fn upload_to_blobstore( self, + ctx: CoreContext, blobstore: &RepoBlobstore, logger: &Logger, ) -> Result<(HgNodeHash, BoxFuture<(HgBlobEntry, RepoPath), Error>)> { @@ -1616,7 +1680,7 @@ impl UploadHgTreeEntry { // Upload the blob. let upload = blobstore - .put(blobstore_key, envelope_blob.into()) + .put(ctx, blobstore_key, envelope_blob.into()) .map({ let path = path.clone(); move |()| (blob_entry, path) @@ -1650,6 +1714,7 @@ impl UploadHgFileContents { /// and metadata. fn execute( self, + ctx: CoreContext, repo: &BlobRepo, p1: Option, p2: Option, @@ -1664,7 +1729,7 @@ impl UploadHgFileContents { match self { UploadHgFileContents::ContentUploaded(cbmeta) => { let upload_fut = future::ok(()); - let compute_fut = Self::compute(cbmeta.clone(), repo, p1, p2); + let compute_fut = Self::compute(ctx, cbmeta.clone(), repo, p1, p2); let cbinfo = ContentBlobInfo { path, meta: cbmeta }; (cbinfo, Either::A(upload_fut), Either::A(compute_fut)) } @@ -1693,7 +1758,7 @@ impl UploadHgFileContents { }, }; - let upload_fut = repo.upload_blob(contents_blob, alias_key) + let upload_fut = repo.upload_blob(ctx, contents_blob, alias_key) .map(|_content_id| ()) .timed({ let logger = repo.logger.clone(); @@ -1718,6 +1783,7 @@ impl UploadHgFileContents { } fn compute( + ctx: CoreContext, cbmeta: ContentBlobMeta, repo: &BlobRepo, p1: Option, @@ -1725,7 +1791,7 @@ impl UploadHgFileContents { ) -> impl Future { // Computing the file node hash requires fetching the blob and gluing it together with the // metadata. 
- repo.fetch(&cbmeta.id).map(move |file_contents| { + repo.fetch(ctx, &cbmeta.id).map(move |file_contents| { let size = file_contents.size() as u64; let mut metadata = Vec::new(); File::generate_metadata(cbmeta.copy_from.as_ref(), &file_contents, &mut metadata) @@ -1764,6 +1830,7 @@ pub struct UploadHgFileEntry { impl UploadHgFileEntry { pub fn upload( self, + ctx: CoreContext, repo: &BlobRepo, ) -> Result<(ContentBlobInfo, BoxFuture<(HgBlobEntry, RepoPath), Error>)> { STATS::upload_hg_file_entry.add_value(1); @@ -1776,7 +1843,8 @@ impl UploadHgFileEntry { path, } = self; - let (cbinfo, content_upload, compute_fut) = contents.execute(repo, p1, p2, path.clone()); + let (cbinfo, content_upload, compute_fut) = + contents.execute(ctx.clone(), repo, p1, p2, path.clone()); let content_id = cbinfo.meta.id; let blobstore = repo.blobstore.clone(); @@ -1821,7 +1889,7 @@ impl UploadHgFileEntry { ); let envelope_upload = blobstore - .put(blobstore_key, envelope_blob.into()) + .put(ctx, blobstore_key, envelope_blob.into()) .timed({ let path = path.clone(); move |stats, result| { @@ -1904,7 +1972,11 @@ pub fn save_bonsai_changesets( // Order of inserting bonsai changesets objects doesn't matter, so we can join them let mut bonsai_object_futs = FuturesUnordered::new(); for bcs in bonsai_changesets.values() { - bonsai_object_futs.push(save_bonsai_changeset_object(blobstore.clone(), bcs.clone())); + bonsai_object_futs.push(save_bonsai_changeset_object( + ctx.clone(), + blobstore.clone(), + bcs.clone(), + )); } let bonsai_objects = bonsai_object_futs.collect(); @@ -1995,7 +2067,6 @@ impl CreateChangeset { scuba_logger.add("changeset_uuid", format!("{}", uuid)); let entry_processor = UploadEntries::new( - ctx.clone(), repo.blobstore.clone(), repo.repoid.clone(), scuba_logger.clone(), @@ -2004,6 +2075,7 @@ impl CreateChangeset { let expected_nodeid = self.expected_nodeid; let upload_entries = process_entries( + ctx.clone(), repo.clone(), &entry_processor, self.root_manifest, @@ -2020,7 +2092,7 @@ impl CreateChangeset { .join(parents_data) .from_err() .and_then({ - cloned!(repo, repo.filenodes, repo.blobstore, mut scuba_logger); + cloned!(ctx, repo, repo.filenodes, repo.blobstore, mut scuba_logger); let expected_files = self.expected_files; let cs_metadata = self.cs_metadata; @@ -2035,21 +2107,32 @@ impl CreateChangeset { future::ok(expected_files).boxify() } else { STATS::create_changeset_compute_cf.add_value(1); - fetch_parent_manifests(repo.clone(), &parent_manifest_hashes) - .and_then(move |(p1_manifest, p2_manifest)| { + fetch_parent_manifests( + ctx.clone(), + repo.clone(), + &parent_manifest_hashes, + ).and_then({ + cloned!(ctx); + move |(p1_manifest, p2_manifest)| { compute_changed_files( + ctx.clone(), &root_manifest, p1_manifest.as_ref(), p2_manifest.as_ref(), ) - }) + } + }) .boxify() }; let p1_mf = parent_manifest_hashes.get(0).cloned(); let check_case_conflicts = if must_check_case_conflicts { - check_case_conflicts(repo.clone(), root_hash.clone(), p1_mf) - .left_future() + check_case_conflicts( + ctx.clone(), + repo.clone(), + root_hash.clone(), + p1_mf, + ).left_future() } else { future::ok(()).right_future() }; @@ -2060,13 +2143,17 @@ impl CreateChangeset { STATS::create_changeset_cf_count.add_value(files.len() as i64); make_new_changeset(parents, root_hash, cs_metadata, files) }) - .and_then(move |hg_cs| { - create_bonsai_changeset_object( - hg_cs.clone(), - parent_manifest_hashes, - bonsai_parents, - repo.clone(), - ).map(|bonsai_cs| (hg_cs, bonsai_cs)) + .and_then({ + cloned!(ctx); + move 
|hg_cs| { + create_bonsai_changeset_object( + ctx, + hg_cs.clone(), + parent_manifest_hashes, + bonsai_parents, + repo.clone(), + ).map(|bonsai_cs| (hg_cs, bonsai_cs)) + } }); changesets @@ -2112,17 +2199,18 @@ impl CreateChangeset { signal_parent_ready.send((bcs_id, cs_id, manifest_id)); let bonsai_cs_fut = save_bonsai_changeset_object( + ctx.clone(), blobstore.clone(), bonsai_cs.clone(), ); blobcs - .save(blobstore) + .save(ctx.clone(), blobstore) .join(bonsai_cs_fut) .context("While writing to blobstore") .join( entry_processor - .finalize(filenodes, cs_id) + .finalize(ctx, filenodes, cs_id) .context("While finalizing processing"), ) .from_err() @@ -2252,6 +2340,7 @@ impl Clone for BlobRepo { } pub struct HgBlobChangesetStream { + ctx: CoreContext, repo: BlobRepo, seen: HashSet, heads: BoxStream, @@ -2278,8 +2367,10 @@ impl Stream for HgBlobChangesetStream { // haven't seen before WaitCS( next, - self.repo - .get_changeset_by_changesetid(&HgChangesetId::new(next)), + self.repo.get_changeset_by_changesetid( + self.ctx.clone(), + &HgChangesetId::new(next), + ), ) } else { Idle // already done it diff --git a/blobrepo/src/repo_commit.rs b/blobrepo/src/repo_commit.rs index b31184999e..02dfdfdf92 100644 --- a/blobrepo/src/repo_commit.rs +++ b/blobrepo/src/repo_commit.rs @@ -79,16 +79,16 @@ impl ChangesetHandle { } pub fn ready_cs_handle(ctx: CoreContext, repo: Arc, hg_cs: HgChangesetId) -> Self { - let bonsai_cs = repo.get_bonsai_from_hg(ctx, &hg_cs) + let bonsai_cs = repo.get_bonsai_from_hg(ctx.clone(), &hg_cs) .and_then(move |bonsai_id| { bonsai_id.ok_or(ErrorKind::BonsaiMappingNotFound(hg_cs).into()) }) .and_then({ - cloned!(repo); - move |bonsai_id| repo.get_bonsai_changeset(bonsai_id) + cloned!(ctx, repo); + move |bonsai_id| repo.get_bonsai_changeset(ctx, bonsai_id) }); - let cs = repo.get_changeset_by_changesetid(&hg_cs); + let cs = repo.get_changeset_by_changesetid(ctx, &hg_cs); let (trigger, can_be_parent) = oneshot::channel(); let fut = bonsai_cs.join(cs); @@ -132,20 +132,17 @@ struct UploadEntriesState { #[derive(Clone)] pub struct UploadEntries { - ctx: CoreContext, scuba_logger: ScubaSampleBuilder, inner: Arc>, } impl UploadEntries { pub fn new( - ctx: CoreContext, blobstore: RepoBlobstore, repoid: RepositoryId, scuba_logger: ScubaSampleBuilder, ) -> Self { Self { - ctx, scuba_logger, inner: Arc::new(Mutex::new(UploadEntriesState { required_entries: HashMap::new(), @@ -163,14 +160,19 @@ impl UploadEntries { /// Parse a manifest and record the referenced blobs so that we know whether or not we have /// a complete changeset with all blobs, or whether there is missing data. 
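// Illustrative sketch, not a hunk of this patch: my reading of the
// bookkeeping these methods feed. Parsing a manifest records every entry
// it references as required, every upload is recorded as seen, and
// finalize must then prove the remainder already exists in the blobstore.
// The names are simplified stand-ins for UploadEntriesState's fields.
use std::collections::{HashMap, HashSet};

struct EntryBookkeeping {
    required_entries: HashMap<String, u64>, // path -> expected entry id
    uploaded_entries: HashSet<String>,      // paths written by this commit
}

impl EntryBookkeeping {
    /// Paths that finalize still has to find via blobstore presence checks.
    fn must_check(&self) -> Vec<&String> {
        self.required_entries
            .keys()
            .filter(|path| !self.uploaded_entries.contains(*path))
            .collect()
    }
}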
- fn process_manifest(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> { + fn process_manifest( + &self, + ctx: CoreContext, + entry: &HgBlobEntry, + path: RepoPath, + ) -> BoxFuture<(), Error> { let inner_mutex = self.inner.clone(); - let parents_found = self.find_parents(entry, path.clone()); + let parents_found = self.find_parents(ctx.clone(), entry, path.clone()); let entry_hash = entry.get_hash().into_nodehash(); let entry_type = entry.get_type(); entry - .get_content() + .get_content(ctx) .and_then(move |content| match content { Content::Tree(manifest) => { for entry in manifest.list() { @@ -199,10 +201,15 @@ impl UploadEntries { .boxify() } - fn find_parents(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> { + fn find_parents( + &self, + ctx: CoreContext, + entry: &HgBlobEntry, + path: RepoPath, + ) -> BoxFuture<(), Error> { let inner_mutex = self.inner.clone(); entry - .get_parents() + .get_parents(ctx) .and_then(move |parents| { let mut inner = inner_mutex.lock().expect("Lock poisoned"); let node_keys = parents.into_iter().map(move |hash| HgNodeKey { @@ -222,7 +229,11 @@ impl UploadEntries { /// `process_one_entry` and can be called after it. /// It is safe to call this multiple times, but not recommended - every manifest passed to /// this function is assumed required for this commit, even if it is not the root. - pub fn process_root_manifest(&self, entry: &HgBlobEntry) -> BoxFuture<(), Error> { + pub fn process_root_manifest( + &self, + ctx: CoreContext, + entry: &HgBlobEntry, + ) -> BoxFuture<(), Error> { if entry.get_type() != manifest::Type::Tree { return future::err( ErrorKind::NotAManifest(entry.get_hash().into_nodehash(), entry.get_type()).into(), @@ -234,10 +245,15 @@ impl UploadEntries { .required_entries .insert(RepoPath::root(), *entry.get_hash()); } - self.process_one_entry(entry, RepoPath::root()) + self.process_one_entry(ctx, entry, RepoPath::root()) } - pub fn process_one_entry(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> { + pub fn process_one_entry( + &self, + ctx: CoreContext, + entry: &HgBlobEntry, + path: RepoPath, + ) -> BoxFuture<(), Error> { { let mut inner = self.inner.lock().expect("Lock poisoned"); inner.uploaded_entries.insert(path.clone(), entry.clone()); @@ -251,7 +267,7 @@ impl UploadEntries { entry.get_hash(), path ), - self.process_manifest(entry, path), + self.process_manifest(ctx, entry, path), ) } else { STATS::process_file_entry.add_value(1); @@ -261,7 +277,7 @@ impl UploadEntries { entry.get_hash(), path ), - self.find_parents(&entry, path), + self.find_parents(ctx, &entry, path), ) }; @@ -270,6 +286,7 @@ impl UploadEntries { // Check the blobstore to see whether a particular node is present. 
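// Illustrative sketch, not a hunk of this patch: with ctx threaded
// through, a presence assertion can be phrased on top of is_present using
// the futures-0.1 / futures_ext combinators this file already uses (the
// real Blobstore::assert_present may well be implemented differently):
fn assert_present_sketch(
    blobstore: RepoBlobstore,
    ctx: CoreContext,
    key: String,
) -> BoxFuture<(), Error> {
    blobstore
        .is_present(ctx, key.clone())
        .and_then(move |found| {
            if found {
                Ok(())
            } else {
                Err(err_msg(format!("blob {} is not present", key)))
            }
        })
        .boxify()
}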
fn assert_in_blobstore( + ctx: CoreContext, blobstore: RepoBlobstore, node_id: HgNodeHash, is_tree: bool, @@ -279,10 +296,15 @@ impl UploadEntries { } else { HgFileNodeId::new(node_id).blobstore_key() }; - blobstore.assert_present(key) + blobstore.assert_present(ctx, key) } - pub fn finalize(self, filenodes: Arc, cs_id: HgNodeHash) -> BoxFuture<(), Error> { + pub fn finalize( + self, + ctx: CoreContext, + filenodes: Arc, + cs_id: HgNodeHash, + ) -> BoxFuture<(), Error> { let required_checks = { let inner = self.inner.lock().expect("Lock poisoned"); let required_len = inner.required_entries.len(); @@ -296,6 +318,7 @@ impl UploadEntries { } else { let path = path.clone(); let assert = Self::assert_in_blobstore( + ctx.clone(), inner.blobstore.clone(), entryid.into_nodehash(), path.is_tree(), @@ -333,6 +356,7 @@ impl UploadEntries { .iter() .map(|node_key| { let assert = Self::assert_in_blobstore( + ctx.clone(), inner.blobstore.clone(), node_key.hash, node_key.path.is_tree(), @@ -385,23 +409,30 @@ impl UploadEntries { let filenodeinfos = stream::futures_unordered(uploaded_entries.into_iter().map(|(path, blobentry)| { - blobentry.get_parents().and_then(move |parents| { - compute_copy_from_info(&path, &blobentry, &parents).map(move |copyfrom| { - let (p1, p2) = parents.get_nodes(); - FilenodeInfo { - path, - filenode: HgFileNodeId::new(blobentry.get_hash().into_nodehash()), - p1: p1.cloned().map(HgFileNodeId::new), - p2: p2.cloned().map(HgFileNodeId::new), - copyfrom, - linknode: HgChangesetId::new(cs_id), - } - }) + blobentry.get_parents(ctx.clone()).and_then({ + cloned!(ctx); + move |parents| { + compute_copy_from_info(ctx, &path, &blobentry, &parents).map( + move |copyfrom| { + let (p1, p2) = parents.get_nodes(); + FilenodeInfo { + path, + filenode: HgFileNodeId::new( + blobentry.get_hash().into_nodehash(), + ), + p1: p1.cloned().map(HgFileNodeId::new), + p2: p2.cloned().map(HgFileNodeId::new), + copyfrom, + linknode: HgChangesetId::new(cs_id), + } + }, + ) + } }) })).boxify(); filenodes - .add_filenodes(self.ctx.clone(), filenodeinfos, &inner.repoid) + .add_filenodes(ctx, filenodeinfos, &inner.repoid) .timed({ let mut scuba_logger = self.scuba_logger(); move |stats, result| { @@ -423,6 +454,7 @@ impl UploadEntries { } fn compute_copy_from_info( + ctx: CoreContext, path: &RepoPath, blobentry: &HgBlobEntry, parents: &HgParents, @@ -432,7 +464,7 @@ fn compute_copy_from_info( &RepoPath::FilePath(_) => { STATS::finalize_compute_copy_from_info.add_value(1); blobentry - .get_raw_content() + .get_raw_content(ctx) .and_then({ let parents = parents.clone(); move |blob| { @@ -459,10 +491,11 @@ fn compute_copy_from_info( } fn compute_changed_files_pair( + ctx: CoreContext, to: &Box, from: &Box, ) -> BoxFuture, Error> { - changed_entry_stream(to, from, None) + changed_entry_stream(ctx, to, from, None) .filter_map(|change| match change.status { EntryStatus::Deleted(entry) | EntryStatus::Added(entry) @@ -495,18 +528,19 @@ fn compute_changed_files_pair( /// Changesets might as well make this function obsolete and that I am not familiar with creating /// mock Manifests I will postpone writing tests for this pub fn compute_changed_files( + ctx: CoreContext, root: &Box, p1: Option<&Box>, p2: Option<&Box>, ) -> BoxFuture, Error> { let empty = manifest::EmptyManifest {}.boxed(); match (p1, p2) { - (None, None) => compute_changed_files_pair(&root, &empty), + (None, None) => compute_changed_files_pair(ctx, &root, &empty), (Some(manifest), None) | (None, Some(manifest)) => { - compute_changed_files_pair(&root, 
&manifest) + compute_changed_files_pair(ctx, &root, &manifest) } - (Some(p1), Some(p2)) => compute_changed_files_pair(&root, &p1) - .join(compute_changed_files_pair(&root, &p2)) + (Some(p1), Some(p2)) => compute_changed_files_pair(ctx.clone(), &root, &p1) + .join(compute_changed_files_pair(ctx.clone(), &root, &p2)) .map(|(left, right)| { left.intersection(&right) .cloned() @@ -523,14 +557,15 @@ pub fn compute_changed_files( } fn compute_added_files( + ctx: CoreContext, child: &Box, parent: Option<&Box>, ) -> impl Future, Error = Error> { let s = match parent { - Some(parent) => changed_entry_stream(child, parent, None).boxify(), + Some(parent) => changed_entry_stream(ctx, child, parent, None).boxify(), None => { let empty = manifest::EmptyManifest {}; - changed_entry_stream(child, &empty, None).boxify() + changed_entry_stream(ctx, child, &empty, None).boxify() } }; @@ -551,20 +586,24 @@ fn compute_added_files( /// 1) Checks that there are no case conflicts between added files /// 2) Checks that added files do not create new case conflicts with already existing files pub fn check_case_conflicts( + ctx: CoreContext, repo: BlobRepo, child_root_mf: HgManifestId, parent_root_mf: Option, ) -> impl Future { - let child_mf_fut = repo.get_manifest_by_nodeid(&child_root_mf.clone()); + let child_mf_fut = repo.get_manifest_by_nodeid(ctx.clone(), &child_root_mf.clone()); let parent_mf_fut = parent_root_mf.map({ - cloned!(repo); - move |m| repo.get_manifest_by_nodeid(&m) + cloned!(ctx, repo); + move |m| repo.get_manifest_by_nodeid(ctx.clone(), &m) }); child_mf_fut .join(parent_mf_fut) - .and_then(move |(child_mf, parent_mf)| compute_added_files(&child_mf, parent_mf.as_ref())) + .and_then({ + cloned!(ctx); + move |(child_mf, parent_mf)| compute_added_files(ctx, &child_mf, parent_mf.as_ref()) + }) .and_then( |added_files| match mononoke_types::check_case_conflicts(added_files.clone()) { Some(path) => Err(ErrorKind::CaseConflict(path).into()), @@ -576,6 +615,7 @@ pub fn check_case_conflicts( let mut case_conflict_checks = stream::FuturesUnordered::new(); for f in added_files { case_conflict_checks.push(repo.check_case_conflict_in_manifest( + ctx.clone(), &parent_root_mf, &child_root_mf, f.clone(), @@ -603,6 +643,7 @@ fn mercurial_mpath_comparator(a: &MPath, b: &MPath) -> ::std::cmp::Ordering { } pub fn process_entries( + ctx: CoreContext, repo: BlobRepo, entry_processor: &UploadEntries, root_manifest: BoxFuture, Error>, @@ -612,14 +653,14 @@ pub fn process_entries( .context("While uploading root manifest") .from_err() .and_then({ - let entry_processor = entry_processor.clone(); + cloned!(ctx, entry_processor); move |root_manifest| match root_manifest { None => future::ok(None).boxify(), Some((entry, path)) => { let hash = entry.get_hash().into_nodehash(); if entry.get_type() == manifest::Type::Tree && path == RepoPath::RootPath { entry_processor - .process_root_manifest(&entry) + .process_root_manifest(ctx, &entry) .map(move |_| Some(hash)) .boxify() } else { @@ -634,8 +675,8 @@ pub fn process_entries( .context("While uploading child entries") .from_err() .map({ - let entry_processor = entry_processor.clone(); - move |(entry, path)| entry_processor.process_one_entry(&entry, path) + cloned!(ctx, entry_processor); + move |(entry, path)| entry_processor.process_one_entry(ctx.clone(), &entry, path) }) .buffer_unordered(100) .for_each(|()| future::ok(())); @@ -648,7 +689,7 @@ pub fn process_entries( manifest::EmptyManifest.boxed(), HgManifestId::new(NULL_HASH), )).boxify(), - Some(root_hash) => 
repo.get_manifest_by_nodeid(&HgManifestId::new(root_hash)) + Some(root_hash) => repo.get_manifest_by_nodeid(ctx, &HgManifestId::new(root_hash)) .context("While fetching root manifest") .from_err() .map(move |m| (m, HgManifestId::new(root_hash))) @@ -737,16 +778,17 @@ pub fn handle_parents( } pub fn fetch_parent_manifests( + ctx: CoreContext, repo: BlobRepo, parent_manifest_hashes: &Vec, ) -> BoxFuture<(Option>, Option>), Error> { let p1_manifest_hash = parent_manifest_hashes.get(0); let p2_manifest_hash = parent_manifest_hashes.get(1); let p1_manifest = p1_manifest_hash.map({ - cloned!(repo); - move |m| repo.get_manifest_by_nodeid(&m) + cloned!(ctx, repo); + move |m| repo.get_manifest_by_nodeid(ctx, &m) }); - let p2_manifest = p2_manifest_hash.map(move |m| repo.get_manifest_by_nodeid(&m)); + let p2_manifest = p2_manifest_hash.map(move |m| repo.get_manifest_by_nodeid(ctx, &m)); p1_manifest.join(p2_manifest).boxify() } diff --git a/blobrepo/test/main.rs b/blobrepo/test/main.rs index 8c40d559e7..41e0ee9502 100644 --- a/blobrepo/test/main.rs +++ b/blobrepo/test/main.rs @@ -9,6 +9,8 @@ extern crate ascii; extern crate async_unit; extern crate bytes; +#[macro_use] +extern crate cloned; extern crate failure_ext as failure; extern crate futures; extern crate futures_ext; @@ -58,14 +60,15 @@ use utils::{create_changeset_no_parents, create_changeset_one_parent, get_empty_ use tests_utils::{create_commit, store_files}; fn upload_blob_no_parents(repo: BlobRepo) { + let ctx = CoreContext::test_mock(); let expected_hash = string_to_nodehash("c3127cdbf2eae0f09653f9237d85c8436425b246"); let fake_path = RepoPath::file("fake/file").expect("Can't generate fake RepoPath"); // The blob does not exist... - assert!(run_future(repo.get_file_content(&expected_hash)).is_err()); + assert!(run_future(repo.get_file_content(ctx.clone(), &expected_hash)).is_err()); // We upload it... - let (hash, future) = upload_file_no_parents(&repo, "blob", &fake_path); + let (hash, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path); assert!(hash == expected_hash); // The entry we're given is correct... @@ -77,14 +80,14 @@ fn upload_blob_no_parents(repo: BlobRepo) { entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement")) ); - let content = run_future(entry.get_content()).unwrap(); + let content = run_future(entry.get_content(ctx.clone())).unwrap(); match content { manifest::Content::File(FileContents::Bytes(f)) => assert_eq!(f.as_ref(), &b"blob"[..]), _ => panic!(), }; // And the blob now exists - let bytes = run_future(repo.get_file_content(&expected_hash)).unwrap(); + let bytes = run_future(repo.get_file_content(ctx.clone(), &expected_hash)).unwrap(); assert!(&bytes.into_bytes() == &b"blob"[..]); } @@ -95,16 +98,17 @@ test_both_repotypes!( ); fn upload_blob_one_parent(repo: BlobRepo) { + let ctx = CoreContext::test_mock(); let expected_hash = string_to_nodehash("c2d60b35a8e7e034042a9467783bbdac88a0d219"); let fake_path = RepoPath::file("fake/file").expect("Can't generate fake RepoPath"); - let (p1, future) = upload_file_no_parents(&repo, "blob", &fake_path); + let (p1, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path); // The blob does not exist... - run_future(repo.get_file_content(&expected_hash)).is_err(); + run_future(repo.get_file_content(ctx.clone(), &expected_hash)).is_err(); // We upload it... 
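// Illustrative sketch, not a hunk of this patch: the updated test shape,
// condensed from the surrounding tests — build one mock context up front
// and thread clones of it through every helper (no new APIs assumed):
fn upload_roundtrip_sketch(repo: BlobRepo) {
    let ctx = CoreContext::test_mock();
    let path = RepoPath::file("fake/file").expect("valid RepoPath");
    let (hash, fut) = upload_file_no_parents(ctx.clone(), &repo, "blob", &path);
    run_future(fut).expect("upload should succeed");
    let bytes = run_future(repo.get_file_content(ctx, &hash)).expect("blob should now exist");
    assert!(&bytes.into_bytes() == &b"blob"[..]);
}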
- let (hash, future2) = upload_file_one_parent(&repo, "blob", &fake_path, p1); + let (hash, future2) = upload_file_one_parent(ctx.clone(), &repo, "blob", &fake_path, p1); assert!(hash == expected_hash); // The entry we're given is correct... @@ -117,13 +121,13 @@ fn upload_blob_one_parent(repo: BlobRepo) { entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement")) ); - let content = run_future(entry.get_content()).unwrap(); + let content = run_future(entry.get_content(ctx.clone())).unwrap(); match content { manifest::Content::File(FileContents::Bytes(f)) => assert_eq!(f.as_ref(), &b"blob"[..]), _ => panic!(), }; // And the blob now exists - let bytes = run_future(repo.get_file_content(&expected_hash)).unwrap(); + let bytes = run_future(repo.get_file_content(ctx.clone(), &expected_hash)).unwrap(); assert!(&bytes.into_bytes() == &b"blob"[..]); } @@ -136,6 +140,7 @@ test_both_repotypes!( #[test] fn upload_blob_aliases() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); // echo -n "blob" | sha256sum let alias_key = "alias.sha256.fa2c8cc4f28176bbeed4b736df569a34c79cd3723e9ec42f9674b4d46ac6b8b8"; @@ -151,19 +156,19 @@ fn upload_blob_aliases() { // The blob with alias does not exist... assert!( - run_future(prefixed_blobstore.get(alias_key.to_string())) + run_future(prefixed_blobstore.get(ctx.clone(), alias_key.to_string())) .unwrap() .is_none() ); // We upload file and wait until file is uploaded... - let (_, future) = upload_file_no_parents(&repo, "blob", &fake_path); + let (_, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path); run_future(future).unwrap(); let expected_content = "content.blake2.07ccc95f3ee9252a9e1dbdeaef59844d6aabd9dcf911fa29f542e891a4c5e90a"; - let contents = run_future(prefixed_blobstore.get(alias_key.to_string())) + let contents = run_future(prefixed_blobstore.get(ctx.clone(), alias_key.to_string())) .unwrap() .unwrap(); assert_eq!(contents.as_bytes(), expected_content.as_bytes()); @@ -171,6 +176,7 @@ fn upload_blob_aliases() { } fn create_one_changeset(repo: BlobRepo) { + let ctx = CoreContext::test_mock(); let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath"); let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath"); let expected_files = vec![ @@ -182,13 +188,22 @@ fn create_one_changeset(repo: BlobRepo) { ]; let author: String = "author ".into(); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); - let (roothash, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); let commit = create_changeset_no_parents( &repo, @@ -208,7 +223,7 @@ fn create_one_changeset(repo: BlobRepo) { ); // And check the file blob is present - let bytes = run_future(repo.get_file_content(&filehash)).unwrap(); + let bytes = run_future(repo.get_file_content(ctx.clone(), &filehash)).unwrap(); assert!(&bytes.into_bytes() == &b"blob"[..]); } @@ -224,13 +239,22 
@@ fn create_two_changesets(repo: BlobRepo) { let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath"); let utf_author: String = "\u{041F}\u{0451}\u{0442}\u{0440} ".into(); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); - let (roothash, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); let commit1 = create_changeset_no_parents( &repo, @@ -239,8 +263,10 @@ fn create_two_changesets(repo: BlobRepo) { ); let fake_file_path_no_dir = RepoPath::file("file").expect("Can't generate fake RepoPath"); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path_no_dir); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_no_dir); let (roothash, root_manifest_future) = upload_manifest_one_parent( + ctx.clone(), &repo, format!("file\0{}\n", filehash), &RepoPath::root(), @@ -296,13 +322,22 @@ fn check_bonsai_creation(repo: BlobRepo) { let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath"); let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath"); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); - let (_, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (_, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); let commit = create_changeset_no_parents( &repo, @@ -313,9 +348,9 @@ fn check_bonsai_creation(repo: BlobRepo) { let commit = run_future(commit.get_completed_changeset()).unwrap(); let commit = &commit.1; let bonsai_cs_id = - run_future(repo.get_bonsai_from_hg(ctx, &commit.get_changeset_id())).unwrap(); + run_future(repo.get_bonsai_from_hg(ctx.clone(), &commit.get_changeset_id())).unwrap(); assert!(bonsai_cs_id.is_some()); - let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap(); + let bonsai = run_future(repo.get_bonsai_changeset(ctx.clone(), bonsai_cs_id.unwrap())).unwrap(); assert_eq!( bonsai .file_changes() @@ -336,10 +371,15 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) { let parent = { let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath"); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (_, root_manifest_future) = - upload_manifest_no_parents(&repo, 
format!("file\0{}\n", filehash), &RepoPath::root()); + let (_, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &RepoPath::root(), + ); create_changeset_no_parents( &repo, @@ -353,12 +393,14 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) { RepoPath::file("file_rename").expect("Can't generate fake RepoPath"); let (filehash, file_future) = upload_file_no_parents( + ctx.clone(), &repo, "\x01\ncopy: file\ncopyrev: c3127cdbf2eae0f09653f9237d85c8436425b246\x01\nblob", &fake_renamed_file_path, ); let (_, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("file_rename\0{}\n", filehash), &RepoPath::root(), @@ -383,8 +425,8 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) { .unwrap(); let bonsai_cs_id = - run_future(repo.get_bonsai_from_hg(ctx, &child_cs.get_changeset_id())).unwrap(); - let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap(); + run_future(repo.get_bonsai_from_hg(ctx.clone(), &child_cs.get_changeset_id())).unwrap(); + let bonsai = run_future(repo.get_bonsai_changeset(ctx.clone(), bonsai_cs_id.unwrap())).unwrap(); let fc = bonsai.file_changes().collect::>(); let file = MPath::new("file").unwrap(); assert!(!fc[&file].is_some()); @@ -403,10 +445,15 @@ test_both_repotypes!( ); fn create_bad_changeset(repo: BlobRepo) { + let ctx = CoreContext::test_mock(); let dirhash = string_to_nodehash("c2d60b35a8e7e034042a9467783bbdac88a0d219"); - let (_, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (_, root_manifest_future) = upload_manifest_no_parents( + ctx, + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); let commit = create_changeset_no_parents(&repo, root_manifest_future.map(Some).boxify(), vec![]); @@ -427,11 +474,20 @@ fn create_double_linknode(repo: BlobRepo) { let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath"); let (filehash, parent_commit) = { - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); - let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); - let (_, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); + let (dirhash, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); + let (_, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); ( filehash, @@ -445,13 +501,21 @@ fn create_double_linknode(repo: BlobRepo) { let child_commit = { let (filehash, file_future) = - upload_file_one_parent(&repo, "blob", &fake_file_path, filehash); + upload_file_one_parent(ctx.clone(), &repo, "blob", &fake_file_path, filehash); - let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); - let (_, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (_, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", 
dirhash), + &RepoPath::root(), + ); create_changeset_one_parent( &repo, @@ -494,7 +558,8 @@ fn check_linknode_creation(repo: BlobRepo) { let path = RepoPath::file( MPath::new(format!("dir/file{}", id)).expect("String to MPath failed"), ).expect("Can't generate fake RepoPath"); - let (hash, future) = upload_file_no_parents(&repo, format!("blob id {}", id), &path); + let (hash, future) = + upload_file_no_parents(ctx.clone(), &repo, format!("blob id {}", id), &path); ((hash, format!("file{}", id)), future) }) .collect(); @@ -509,10 +574,14 @@ fn check_linknode_creation(repo: BlobRepo) { }); let (dirhash, manifest_dir_future) = - upload_manifest_no_parents(&repo, manifest, &fake_dir_path); + upload_manifest_no_parents(ctx.clone(), &repo, manifest, &fake_dir_path); - let (roothash, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root()); + let (roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("dir\0{}t\n", dirhash), + &RepoPath::root(), + ); uploads.push(manifest_dir_future); @@ -566,13 +635,14 @@ where K::Value: PartialEq + Arbitrary, { fn result(&self, g: &mut G) -> TestResult { + let ctx = CoreContext::test_mock(); let value = ::arbitrary(g); let value_cloned = value.clone(); let store_fetch_future = self.repo - .unittest_store(value) + .unittest_store(ctx.clone(), value) .and_then({ - let repo = self.repo.clone(); - move |key| repo.unittest_fetch(&key) + cloned!(ctx, self.repo); + move |key| repo.unittest_fetch(ctx, &key) }) .map(move |value_fetched| TestResult::from_bool(value_fetched == value_cloned)); run_future(store_fetch_future).expect("valid mononoke type") @@ -593,6 +663,7 @@ test_both_repotypes!( #[test] fn test_compute_changed_files_no_parents() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let nodehash = string_to_nodehash("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480"); let expected = vec![ @@ -602,11 +673,13 @@ fn test_compute_changed_files_no_parents() { MPath::new(b"dir2/file_1_in_dir2").unwrap(), ]; - let cs = - run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(nodehash))).unwrap(); - let mf = run_future(repo.get_manifest_by_nodeid(&cs.manifestid())).unwrap(); + let cs = run_future(repo.get_changeset_by_changesetid( + ctx.clone(), + &HgChangesetId::new(nodehash), + )).unwrap(); + let mf = run_future(repo.get_manifest_by_nodeid(ctx.clone(), &cs.manifestid())).unwrap(); - let diff = run_future(compute_changed_files(&mf, None, None)).unwrap(); + let diff = run_future(compute_changed_files(ctx.clone(), &mf, None, None)).unwrap(); assert!( diff == expected, "Got {:?}, expected {:?}\n", @@ -619,6 +692,7 @@ fn test_compute_changed_files_no_parents() { #[test] fn test_compute_changed_files_one_parent() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); // Note that this is a commit and its parent commit, so you can use: // hg log -T"{node}\n{files % ' MPath::new(b\"{file}\").unwrap(),\\n'}\\n" -r $HASH // to see how Mercurial would compute the files list and confirm that it's the same @@ -635,15 +709,25 @@ fn test_compute_changed_files_one_parent() { MPath::new(b"dir1/subdir1/subsubdir2/file_2").unwrap(), ]; - let cs = - run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(nodehash))).unwrap(); - let mf = run_future(repo.get_manifest_by_nodeid(&cs.manifestid())).unwrap(); + let cs = run_future(repo.get_changeset_by_changesetid( + ctx.clone(), + 
&HgChangesetId::new(nodehash), + )).unwrap(); + let mf = run_future(repo.get_manifest_by_nodeid(ctx.clone(), &cs.manifestid())).unwrap(); - let parent_cs = - run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(parenthash))).unwrap(); - let parent_mf = run_future(repo.get_manifest_by_nodeid(&parent_cs.manifestid())).unwrap(); + let parent_cs = run_future(repo.get_changeset_by_changesetid( + ctx.clone(), + &HgChangesetId::new(parenthash), + )).unwrap(); + let parent_mf = + run_future(repo.get_manifest_by_nodeid(ctx.clone(), &parent_cs.manifestid())).unwrap(); - let diff = run_future(compute_changed_files(&mf, Some(&parent_mf), None)).unwrap(); + let diff = run_future(compute_changed_files( + ctx.clone(), + &mf, + Some(&parent_mf), + None, + )).unwrap(); assert!( diff == expected, "Got {:?}, expected {:?}\n", @@ -675,12 +759,13 @@ fn make_bonsai_changeset( } fn make_file_change( + ctx: CoreContext, content: impl AsRef<[u8]>, repo: &BlobRepo, ) -> impl Future + Send { let content = content.as_ref(); let content_size = content.len() as u64; - repo.unittest_store(FileContents::new_bytes(content.as_ref())) + repo.unittest_store(ctx, FileContents::new_bytes(content.as_ref())) .map(move |content_id| FileChange::new(content_id, FileType::Regular, content_size, None)) } @@ -689,15 +774,20 @@ fn test_get_manifest_from_bonsai() { async_unit::tokio_unit_test(|| { let ctx = CoreContext::test_mock(); let repo = merge_uneven::getrepo(None); - let get_manifest_for_changeset = |cs_nodehash: &str| -> HgManifestId { - *run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new( - string_to_nodehash(cs_nodehash), - ))).unwrap() - .manifestid() + let get_manifest_for_changeset = { + cloned!(ctx, repo); + move |cs_nodehash: &str| -> HgManifestId { + *run_future(repo.get_changeset_by_changesetid( + ctx.clone(), + &HgChangesetId::new(string_to_nodehash(cs_nodehash)), + )).unwrap() + .manifestid() + } }; - let get_entries = - |ms_hash: &HgManifestId| -> BoxFuture>, Error> { - repo.get_manifest_by_nodeid(&ms_hash) + let get_entries = { + cloned!(ctx, repo); + move |ms_hash: &HgManifestId| -> BoxFuture>, Error> { + repo.get_manifest_by_nodeid(ctx.clone(), &ms_hash) .map(|ms| { ms.list() .map(|e| { @@ -707,7 +797,8 @@ fn test_get_manifest_from_bonsai() { .collect::>() }) .boxify() - }; + } + }; // #CONTENT // 1: 1 @@ -779,7 +870,7 @@ fn test_get_manifest_from_bonsai() { ); let br = entries.get("branch").expect("trivial merge should succeed"); - let br_parents = run_future(br.get_parents()) + let br_parents = run_future(br.get_parents(ctx.clone())) .unwrap() .into_iter() .collect::>(); @@ -789,20 +880,20 @@ fn test_get_manifest_from_bonsai() { // add file { let content_expected = &b"some awesome content"[..]; - let fc = run_future(make_file_change(content_expected, &repo)).unwrap(); + let fc = run_future(make_file_change(ctx.clone(), content_expected, &repo)).unwrap(); let bcs = make_bonsai_changeset(None, None, vec![("base", None), ("new", Some(fc))]); let (ms_hash, _) = - run_future(repo.get_manifest_from_bonsai(ctx, bcs, Some(&ms1), Some(&ms2))) + run_future(repo.get_manifest_from_bonsai(ctx.clone(), bcs, Some(&ms1), Some(&ms2))) .expect("adding new file should not produce coflict"); let entries = run_future(get_entries(&ms_hash)).unwrap(); let new = entries.get("new").expect("new file should be in entries"); - match run_future(new.get_content()).unwrap() { + match run_future(new.get_content(ctx.clone())).unwrap() { manifest::Content::File(content) => { assert_eq!(content, 
FileContents::new_bytes(content_expected)); } _ => panic!("content type mismatch"), }; - let new_parents = run_future(new.get_parents()).unwrap(); + let new_parents = run_future(new.get_parents(ctx.clone())).unwrap(); assert_eq!(new_parents, HgParents::None); } }); @@ -814,7 +905,7 @@ fn test_case_conflict_in_manifest() { let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let get_manifest_for_changeset = |cs_id: &HgChangesetId| -> HgManifestId { - *run_future(repo.get_changeset_by_changesetid(cs_id)) + *run_future(repo.get_changeset_by_changesetid(ctx.clone(), cs_id)) .unwrap() .manifestid() }; @@ -838,7 +929,11 @@ fn test_case_conflict_in_manifest() { ctx.clone(), repo.clone(), vec![bonsai_parent], - store_files(btreemap!{*path => Some("caseconflicttest")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{*path => Some("caseconflicttest")}, + repo.clone(), + ), ); let child_hg_cs = @@ -846,6 +941,7 @@ fn test_case_conflict_in_manifest() { let child_mf = get_manifest_for_changeset(&child_hg_cs); assert_eq!( run_future(repo.check_case_conflict_in_manifest( + ctx.clone(), &mf, &child_mf, MPath::new(path).unwrap() @@ -862,13 +958,19 @@ fn test_case_conflict_in_manifest() { #[test] fn test_case_conflict_two_changeset() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = get_empty_lazy_repo(); let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath"); - let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1); + let (filehash_1, file_future_1) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1); - let (_roothash, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash_1), &RepoPath::root()); + let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash_1), + &RepoPath::root(), + ); let commit1 = create_changeset_no_parents( &repo, @@ -879,8 +981,9 @@ fn test_case_conflict_two_changeset() { let commit2 = { let fake_file_path_2 = RepoPath::file("FILE").expect("Can't generate fake RepoPath"); let (filehash_2, file_future_2) = - upload_file_no_parents(&repo, "blob", &fake_file_path_2); + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_2); let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("file\0{}\nFILE\0{}\n", filehash_1, filehash_2), &RepoPath::root(), @@ -907,14 +1010,18 @@ fn test_case_conflict_two_changeset() { #[test] fn test_case_conflict_inside_one_changeset() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = get_empty_lazy_repo(); let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath"); - let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1); + let (filehash_1, file_future_1) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1); let fake_file_path_1 = RepoPath::file("FILE").expect("Can't generate fake RepoPath"); - let (filehash_2, file_future_2) = upload_file_no_parents(&repo, "blob", &fake_file_path_1); + let (filehash_2, file_future_2) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1); let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("file\0{}\nFILE\0{}", filehash_1, filehash_2), &RepoPath::root(), @@ -933,13 +1040,19 @@ fn test_case_conflict_inside_one_changeset() { #[test] fn 
test_no_case_conflict_removal() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = get_empty_lazy_repo(); let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath"); - let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1); + let (filehash_1, file_future_1) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1); - let (_roothash, root_manifest_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash_1), &RepoPath::root()); + let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash_1), + &RepoPath::root(), + ); let commit1 = create_changeset_no_parents( &repo, @@ -950,8 +1063,9 @@ fn test_no_case_conflict_removal() { let commit2 = { let fake_file_path_2 = RepoPath::file("FILE").expect("Can't generate fake RepoPath"); let (filehash_2, file_future_2) = - upload_file_no_parents(&repo, "blob", &fake_file_path_2); + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_2); let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("FILE\0{}\n", filehash_2), &RepoPath::root(), @@ -978,17 +1092,24 @@ fn test_no_case_conflict_removal() { #[test] fn test_no_case_conflict_removal_dir() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = get_empty_lazy_repo(); let commit1 = { let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath"); let fake_dir_path = RepoPath::file("dir").expect("Can't generate fake RepoPath"); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (dirhash_1, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash_1, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("dir\0{}t\n", dirhash_1), &RepoPath::root(), @@ -1004,12 +1125,18 @@ fn test_no_case_conflict_removal_dir() { let commit2 = { let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath"); let fake_dir_path = RepoPath::file("DIR").expect("Can't generate fake RepoPath"); - let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path); + let (filehash, file_future) = + upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path); - let (dirhash_1, manifest_dir_future) = - upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path); + let (dirhash_1, manifest_dir_future) = upload_manifest_no_parents( + ctx.clone(), + &repo, + format!("file\0{}\n", filehash), + &fake_dir_path, + ); let (_roothash, root_manifest_future) = upload_manifest_no_parents( + ctx.clone(), &repo, format!("DIR\0{}t\n", dirhash_1), &RepoPath::root(), diff --git a/blobrepo/test/memory_manifest.rs b/blobrepo/test/memory_manifest.rs index 9a96a16384..f674f12df8 100644 --- a/blobrepo/test/memory_manifest.rs +++ b/blobrepo/test/memory_manifest.rs @@ -12,6 +12,7 @@ use utils::run_future; use blobrepo::HgBlobEntry; use blobrepo::internal::{IncompleteFilenodes, MemoryManifestEntry, MemoryRootManifest}; +use context::CoreContext; use fixtures::many_files_dirs; use mercurial_types::{Entry, FileType, 
HgManifestId, HgNodeHash, MPath, MPathElement, Type, nodehash::HgEntryId}; @@ -31,12 +32,14 @@ fn insert_entry(tree: &MemoryManifestEntry, path: MPathElement, entry: MemoryMan #[test] fn empty_manifest() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); // Create an empty memory manifest - let memory_manifest = MemoryRootManifest::new(repo, IncompleteFilenodes::new(), None, None) - .wait() - .expect("Could not create empty manifest"); + let memory_manifest = + MemoryRootManifest::new(ctx, repo, IncompleteFilenodes::new(), None, None) + .wait() + .expect("Could not create empty manifest"); if let MemoryManifestEntry::MemTree { base_manifest_id, @@ -59,16 +62,21 @@ fn empty_manifest() { #[test] fn load_manifest() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let manifest_id = HgNodeHash::from_static_str("907f5b20e06dfb91057861d984423e84b64b5b7b") .expect("Could not get nodehash"); // Load a memory manifest - let memory_manifest = - MemoryRootManifest::new(repo, IncompleteFilenodes::new(), Some(&manifest_id), None) - .wait() - .expect("Could not load manifest"); + let memory_manifest = MemoryRootManifest::new( + ctx.clone(), + repo, + IncompleteFilenodes::new(), + Some(&manifest_id), + None, + ).wait() + .expect("Could not load manifest"); if let MemoryManifestEntry::MemTree { base_manifest_id, @@ -104,13 +112,18 @@ fn load_manifest() { #[test] fn save_manifest() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); // Create an empty memory manifest - let memory_manifest = - MemoryRootManifest::new(repo.clone(), IncompleteFilenodes::new(), None, None) - .wait() - .expect("Could not create empty manifest"); + let memory_manifest = MemoryRootManifest::new( + ctx.clone(), + repo.clone(), + IncompleteFilenodes::new(), + None, + None, + ).wait() + .expect("Could not create empty manifest"); // Add an unmodified entry let dir_nodehash = HgNodeHash::from_static_str("907f5b20e06dfb91057861d984423e84b64b5b7b") @@ -126,12 +139,12 @@ fn save_manifest() { insert_entry(&memory_manifest.unittest_root(), path.clone(), dir); let manifest_entry = memory_manifest - .save() + .save(ctx.clone()) .wait() .expect("Could not save manifest"); let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash()); - let refound = repo.get_manifest_by_nodeid(&manifest_id) + let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id) .map(|m| m.lookup(&path)) .wait() .expect("Lookup of entry just saved failed") @@ -149,6 +162,7 @@ fn save_manifest() { #[test] fn remove_item() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let blobstore = repo.get_blobstore(); @@ -159,6 +173,7 @@ fn remove_item() { // Load a memory manifest let memory_manifest = MemoryRootManifest::new( + ctx.clone(), repo.clone(), IncompleteFilenodes::new(), Some(&manifest_id), @@ -173,6 +188,7 @@ fn remove_item() { // Remove a file memory_manifest .change_entry( + ctx.clone(), &MPath::new(b"dir2/file_1_in_dir2").expect("Can't create MPath"), None, ) @@ -187,7 +203,9 @@ fn remove_item() { .get(&dir2) .expect("dir2 is missing") .clone() - .map_or(false, |e| e.is_empty(&blobstore).wait().unwrap()), + .map_or(false, |e| e.is_empty(ctx.clone(), &blobstore) + .wait() + .unwrap()), "Bad after remove" ); if let Some(MemoryManifestEntry::MemTree { changes, .. 
}) = @@ -206,12 +224,12 @@ fn remove_item() { // And check that dir2 disappears over a save/reload operation let manifest_entry = memory_manifest - .save() + .save(ctx.clone()) .wait() .expect("Could not save manifest"); let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash()); - let refound = repo.get_manifest_by_nodeid(&manifest_id) + let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id) .map(|m| m.lookup(&dir2)) .wait() .expect("Lookup of entry just saved failed"); @@ -226,6 +244,7 @@ fn remove_item() { #[test] fn add_item() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let blobstore = repo.get_blobstore(); @@ -237,6 +256,7 @@ fn add_item() { // Load a memory manifest let memory_manifest = MemoryRootManifest::new( + ctx.clone(), repo.clone(), IncompleteFilenodes::new(), Some(&manifest_id), @@ -249,6 +269,7 @@ fn add_item() { .expect("Could not get nodehash"); memory_manifest .change_entry( + ctx.clone(), &MPath::new(b"new_file").expect("Could not create MPath"), Some(HgBlobEntry::new( blobstore.clone(), @@ -262,12 +283,12 @@ fn add_item() { // And check that new_file persists let manifest_entry = memory_manifest - .save() + .save(ctx.clone()) .wait() .expect("Could not save manifest"); let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash()); - let refound = repo.get_manifest_by_nodeid(&manifest_id) + let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id) .map(|m| m.lookup(&new_file)) .wait() .expect("Lookup of entry just saved failed") @@ -283,6 +304,7 @@ fn add_item() { #[test] fn replace_item() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let blobstore = repo.get_blobstore(); @@ -293,6 +315,7 @@ fn replace_item() { // Load a memory manifest let memory_manifest = MemoryRootManifest::new( + ctx.clone(), repo.clone(), IncompleteFilenodes::new(), Some(&manifest_id), @@ -305,6 +328,7 @@ fn replace_item() { .expect("Could not get nodehash"); memory_manifest .change_entry( + ctx.clone(), &MPath::new(b"1").expect("Could not create MPath"), Some(HgBlobEntry::new( blobstore.clone(), @@ -318,12 +342,12 @@ fn replace_item() { // And check that new_file persists let manifest_entry = memory_manifest - .save() + .save(ctx.clone()) .wait() .expect("Could not save manifest"); let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash()); - let refound = repo.get_manifest_by_nodeid(&manifest_id) + let refound = repo.get_manifest_by_nodeid(ctx, &manifest_id) .map(|m| m.lookup(&new_file)) .wait() .expect("Lookup of entry just saved failed") @@ -339,6 +363,7 @@ fn replace_item() { #[test] fn conflict_resolution() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let blobstore = repo.get_blobstore(); let logger = repo.get_logger(); @@ -398,6 +423,7 @@ fn conflict_resolution() { }; let merge = run_future(base.merge_with_conflicts( + ctx, other, blobstore, logger, @@ -436,6 +462,7 @@ fn conflict_resolution() { #[test] fn merge_manifests() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let blobstore = repo.get_blobstore(); let logger = repo.get_logger(); @@ -521,6 +548,7 @@ fn merge_manifests() { }; let merged = base.merge_with_conflicts( + ctx, other, blobstore, logger, diff --git a/blobrepo/test/utils.rs b/blobrepo/test/utils.rs index 
83fd61ea4e..081ee701f5 100644 --- a/blobrepo/test/utils.rs +++ b/blobrepo/test/utils.rs @@ -69,6 +69,7 @@ macro_rules! test_both_repotypes { } pub fn upload_file_no_parents( + ctx: CoreContext, repo: &BlobRepo, data: B, path: &RepoPath, @@ -77,6 +78,7 @@ where B: Into, { upload_hg_file_entry( + ctx, repo, data.into(), FileType::Regular, @@ -87,6 +89,7 @@ where } pub fn upload_file_one_parent( + ctx: CoreContext, repo: &BlobRepo, data: B, path: &RepoPath, @@ -96,6 +99,7 @@ where B: Into, { upload_hg_file_entry( + ctx, repo, data.into(), FileType::Regular, @@ -106,6 +110,7 @@ where } pub fn upload_manifest_no_parents( + ctx: CoreContext, repo: &BlobRepo, data: B, path: &RepoPath, @@ -113,10 +118,11 @@ pub fn upload_manifest_no_parents( where B: Into, { - upload_hg_tree_entry(repo, data.into(), path.clone(), None, None) + upload_hg_tree_entry(ctx, repo, data.into(), path.clone(), None, None) } pub fn upload_manifest_one_parent( + ctx: CoreContext, repo: &BlobRepo, data: B, path: &RepoPath, @@ -125,10 +131,11 @@ pub fn upload_manifest_one_parent( where B: Into, { - upload_hg_tree_entry(repo, data.into(), path.clone(), Some(p1), None) + upload_hg_tree_entry(ctx, repo, data.into(), path.clone(), Some(p1), None) } fn upload_hg_tree_entry( + ctx: CoreContext, repo: &BlobRepo, contents: Bytes, path: RepoPath, @@ -142,10 +149,11 @@ fn upload_hg_tree_entry( p2, path, }; - upload.upload(repo).unwrap() + upload.upload(ctx, repo).unwrap() } fn upload_hg_file_entry( + ctx: CoreContext, repo: &BlobRepo, contents: Bytes, file_type: FileType, @@ -166,7 +174,7 @@ fn upload_hg_file_entry( path: path.into_mpath().expect("expected a path to be present"), }; - let (_, upload_fut) = upload.upload(repo).unwrap(); + let (_, upload_fut) = upload.upload(ctx, repo).unwrap(); (node_id, upload_fut) } diff --git a/blobrepo_utils/src/bonsai/manifest.rs b/blobrepo_utils/src/bonsai/manifest.rs index 48c76ba7cf..c0372656a1 100644 --- a/blobrepo_utils/src/bonsai/manifest.rs +++ b/blobrepo_utils/src/bonsai/manifest.rs @@ -70,29 +70,39 @@ pub struct BonsaiMFVerifyDifference { impl BonsaiMFVerifyDifference { /// What entries changed from the original manifest to the roundtripped one. - pub fn changes(&self) -> impl Stream + Send { + pub fn changes( + &self, + ctx: CoreContext, + ) -> impl Stream + Send { let lookup_mf_id = HgManifestId::new(self.lookup_mf_id); let roundtrip_mf_id = HgManifestId::new(self.roundtrip_mf_id); - let original_mf = self.repo.get_manifest_by_nodeid(&lookup_mf_id); - let roundtrip_mf = self.repo.get_manifest_by_nodeid(&roundtrip_mf_id); + let original_mf = self.repo.get_manifest_by_nodeid(ctx.clone(), &lookup_mf_id); + let roundtrip_mf = self.repo + .get_manifest_by_nodeid(ctx.clone(), &roundtrip_mf_id); original_mf .join(roundtrip_mf) - .map(|(original_mf, roundtrip_mf)| { - changed_entry_stream(&roundtrip_mf, &original_mf, None) + .map({ + cloned!(ctx); + move |(original_mf, roundtrip_mf)| { + changed_entry_stream(ctx, &roundtrip_mf, &original_mf, None) + } }) .flatten_stream() } /// Whether there are any changes beyond the root manifest ID being different. #[inline] - pub fn has_changes(&self) -> impl Future + Send { - self.changes().not_empty() + pub fn has_changes(&self, ctx: CoreContext) -> impl Future + Send { + self.changes(ctx).not_empty() } /// Whether there are any files that changed. 
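// Illustrative sketch, not a hunk of this patch: the consumer-side shape
// once ctx is required — an Invalid verification result only materializes
// its diff when a ctx is supplied (this mirrors the blobrepo_utils test
// change below; the ChangedEntry item type is my assumption about what
// the stream yields):
fn collect_differences(
    difference: BonsaiMFVerifyDifference,
    ctx: CoreContext,
) -> impl Future<Item = Vec<ChangedEntry>, Error = Error> + Send {
    difference.changes(ctx).collect()
}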
#[inline] - pub fn has_file_changes(&self) -> impl Future + Send { - self.changes() + pub fn has_file_changes( + &self, + ctx: CoreContext, + ) -> impl Future + Send { + self.changes(ctx) .filter(|item| !item.status.is_tree()) .not_empty() } @@ -184,19 +194,20 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { debug!(logger, "Starting bonsai diff computation"); - let parents_fut = repo.get_changeset_parents(ctx, &changeset_id).and_then({ - let repo = repo.clone(); - move |parent_hashes| { - let changesets = parent_hashes - .into_iter() - .map(move |parent_id| repo.get_changeset_by_changesetid(&parent_id)); - future::join_all(changesets) - } - }); + let parents_fut = repo.get_changeset_parents(ctx.clone(), &changeset_id) + .and_then({ + cloned!(ctx, repo); + move |parent_hashes| { + let changesets = parent_hashes.into_iter().map(move |parent_id| { + repo.get_changeset_by_changesetid(ctx.clone(), &parent_id) + }); + future::join_all(changesets) + } + }); // Convert to bonsai first. let bonsai_diff_fut = parents_fut.and_then({ - let repo = repo.clone(); + cloned!(ctx, repo); move |parents| { let mut parents = parents.into_iter(); let p1: Option<_> = parents.next(); @@ -214,9 +225,10 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { // Also fetch the manifest as we're interested in the computed node id. let root_mf_id = HgManifestId::new(root_entry.get_hash().into_nodehash()); - let root_mf_fut = BlobManifest::load(&repo.get_blobstore(), &root_mf_id); + let root_mf_fut = + BlobManifest::load(ctx.clone(), &repo.get_blobstore(), &root_mf_id); - bonsai_diff(root_entry, p1_entry, p2_entry) + bonsai_diff(ctx.clone(), root_entry, p1_entry, p2_entry) .collect() .join(root_mf_fut) .and_then(move |(diff, root_mf)| match root_mf { @@ -245,6 +257,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { } apply_diff( + ctx.clone(), logger.clone(), repo.clone(), diff_result, @@ -285,7 +298,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { if broken_merge { // This is a (potentially) broken merge. Ignore tree changes and // only check for file changes. - Either::B(Either::A(difference.has_file_changes().map( + Either::B(Either::A(difference.has_file_changes(ctx).map( move |has_file_changes| { if has_file_changes { BonsaiMFVerifyResult::Invalid(difference) @@ -298,7 +311,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { // This is an empty changeset. Mercurial is relatively inconsistent // about creating new manifest nodes for such changesets, so it can // happen. - Either::B(Either::B(difference.has_changes().map( + Either::B(Either::B(difference.has_changes(ctx).map( move |has_changes| { if has_changes { BonsaiMFVerifyResult::Invalid(difference) @@ -322,6 +335,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor { // https://github.com/rust-lang/rust/issues/50865. 
// TODO: (rain1) T31595868 make apply_diff private once Rust 1.29 is released pub fn apply_diff( + ctx: CoreContext, logger: Logger, repo: BlobRepo, diff_result: Vec, @@ -329,6 +343,7 @@ pub fn apply_diff( manifest_p2: Option<&HgNodeHash>, ) -> impl Future + Send { MemoryRootManifest::new( + ctx.clone(), repo.clone(), IncompleteFilenodes::new(), manifest_p1, @@ -340,21 +355,21 @@ pub fn apply_diff( .into_iter() .map(|result| { let entry = make_entry(&repo, &result); - memory_manifest.change_entry(result.path(), entry) + memory_manifest.change_entry(ctx.clone(), result.path(), entry) }) .collect(); future::join_all(futures) .and_then({ - let memory_manifest = memory_manifest.clone(); - move |_| memory_manifest.resolve_trivial_conflicts() + cloned!(ctx, memory_manifest); + move |_| memory_manifest.resolve_trivial_conflicts(ctx) }) .and_then(move |_| { // This will cause tree entries to be written to the blobstore, but // those entries will be redirected to memory because of // repo.in_memory_writes(). debug!(logger, "Applying complete: now saving"); - memory_manifest.save() + memory_manifest.save(ctx) }) .map(|m| m.get_hash().into_nodehash()) } diff --git a/blobrepo_utils/src/changeset.rs b/blobrepo_utils/src/changeset.rs index 34c59ad0fd..5a907e9a18 100644 --- a/blobrepo_utils/src/changeset.rs +++ b/blobrepo_utils/src/changeset.rs @@ -199,7 +199,7 @@ where let visit_fut = shared .repo - .get_changeset_by_changesetid(&changeset_id) + .get_changeset_by_changesetid(ctx.clone(), &changeset_id) .and_then({ cloned!(ctx, shared.visitor, shared.repo); move |changeset| visitor.visit(ctx, logger, repo, changeset, follow_remaining) diff --git a/blobrepo_utils/test/main.rs b/blobrepo_utils/test/main.rs index 2d85a5b250..444fd3e099 100644 --- a/blobrepo_utils/test/main.rs +++ b/blobrepo_utils/test/main.rs @@ -54,7 +54,7 @@ mod test { .map(|heads| heads.into_iter().map(HgChangesetId::new)); let verify = BonsaiMFVerify { - ctx, + ctx: ctx.clone(), logger, repo, follow_limit: 1024, @@ -68,13 +68,13 @@ mod test { .and_then(|heads| verify.verify(heads).collect()); tokio::spawn( results - .and_then(|results| { - let diffs = results.into_iter().filter_map(|(res, meta)| { + .and_then(move |results| { + let diffs = results.into_iter().filter_map(move |(res, meta)| { match res { BonsaiMFVerifyResult::Invalid(difference) => { let cs_id = meta.changeset_id; Some(difference - .changes() + .changes(ctx.clone()) .collect() .map(move |changes| (cs_id, changes))) } diff --git a/blobstore/delayblob/src/lib.rs b/blobstore/delayblob/src/lib.rs index c00e936c76..43a9c662bf 100644 --- a/blobstore/delayblob/src/lib.rs +++ b/blobstore/delayblob/src/lib.rs @@ -12,6 +12,7 @@ extern crate tokio; extern crate futures_ext; extern crate blobstore; +extern crate context; extern crate mononoke_types; use std::fmt; @@ -26,6 +27,7 @@ use tokio::prelude::*; use tokio::timer::Delay; use blobstore::Blobstore; +use context::CoreContext; use mononoke_types::BlobstoreBytes; /// A blobstore that imposes a delay on all its operations, where the delay is generated by a @@ -75,27 +77,27 @@ impl Blobstore for DelayBlob where F: FnMut(()) -> Duration + 'static + Send + Sync, { - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture, Error> { let sleep = self.sleep(self.get_roundtrips); - let get = self.blobstore.get(key); + let get = self.blobstore.get(ctx, key); sleep.and_then(move |_| get).boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn 
put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let sleep = self.sleep(self.put_roundtrips); - let put = self.blobstore.put(key, value); + let put = self.blobstore.put(ctx, key, value); sleep.and_then(move |_| put).boxify() } - fn is_present(&self, key: String) -> BoxFuture { + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture { let sleep = self.sleep(self.is_present_roundtrips); - let is_present = self.blobstore.is_present(key); + let is_present = self.blobstore.is_present(ctx, key); sleep.and_then(move |_| is_present).boxify() } - fn assert_present(&self, key: String) -> BoxFuture<(), Error> { + fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> { let sleep = self.sleep(self.assert_present_roundtrips); - let assert_present = self.blobstore.assert_present(key); + let assert_present = self.blobstore.assert_present(ctx, key); sleep.and_then(move |_| assert_present).boxify() } } diff --git a/blobstore/fileblob/src/lib.rs b/blobstore/fileblob/src/lib.rs index 3e4146958a..33607dce9a 100644 --- a/blobstore/fileblob/src/lib.rs +++ b/blobstore/fileblob/src/lib.rs @@ -14,6 +14,7 @@ extern crate url; extern crate futures_ext; extern crate blobstore; +extern crate context; extern crate mononoke_types; use std::fs::{create_dir_all, File}; @@ -28,6 +29,7 @@ use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use futures_ext::{BoxFuture, FutureExt}; use blobstore::Blobstore; +use context::CoreContext; use mononoke_types::BlobstoreBytes; const PREFIX: &str = "blob"; @@ -63,7 +65,7 @@ impl Fileblob { } impl Blobstore for Fileblob { - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture, Error> { let p = self.path(&key); poll_fn(move || { @@ -81,7 +83,7 @@ impl Blobstore for Fileblob { .boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let p = self.path(&key); poll_fn::<_, Error, _>(move || { diff --git a/blobstore/glusterblob/src/lib.rs b/blobstore/glusterblob/src/lib.rs index 5ccdf726ca..12f4a64cc7 100644 --- a/blobstore/glusterblob/src/lib.rs +++ b/blobstore/glusterblob/src/lib.rs @@ -7,6 +7,7 @@ extern crate blobstore; extern crate bytes; extern crate cloned; +extern crate context; extern crate failure_ext as failure; extern crate futures; extern crate futures_ext; @@ -38,6 +39,7 @@ use rand::prelude::*; use twox_hash::{XxHash, XxHash32}; use blobstore::Blobstore; +use context::CoreContext; use mononoke_types::BlobstoreBytes; // UID and GID we're using for file ownership and permissions checking. @@ -218,7 +220,7 @@ impl fmt::Debug for Glusterblob { impl Blobstore for Glusterblob { /// Fetch the value associated with `key`, or None if no value is present - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture, Error> { let path = self.keydir(&*key); let datapath = path.join(Self::keyfile(&*key)); let metapath = path.join(Self::metafile(&*key)); @@ -297,7 +299,7 @@ impl Blobstore for Glusterblob { /// Associate `value` with `key` for future gets; if `put` is called with different `value`s /// for the same key, the implementation may return any `value` it's been given in response /// to a `get` for that `key`. 
- fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { self.create_keydir(&*key) .and_then({ cloned!(self.ctxt); @@ -390,7 +392,7 @@ impl Blobstore for Glusterblob { /// implementation just calls `get`, and discards the return value; this can be overridden to /// avoid transferring data. In the absence of concurrent `put` calls, this must return /// `false` if `get` would return `None`, and `true` if `get` would return `Some(_)`. - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { + fn is_present(&self, _ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { let path = self.keydir(&*key); let datapath = path.join(Self::keyfile(&*key)); let metapath = path.join(Self::metafile(&*key)); diff --git a/blobstore/rocksblob/src/lib.rs b/blobstore/rocksblob/src/lib.rs index 063eb37d13..22d0b45c31 100644 --- a/blobstore/rocksblob/src/lib.rs +++ b/blobstore/rocksblob/src/lib.rs @@ -13,6 +13,7 @@ extern crate futures_ext; extern crate rocksdb; extern crate blobstore; +extern crate context; extern crate mononoke_types; use std::path::Path; @@ -24,6 +25,7 @@ use futures_ext::{BoxFuture, FutureExt}; use rocksdb::{Db, ReadOptions, WriteOptions}; use blobstore::Blobstore; +use context::CoreContext; use mononoke_types::BlobstoreBytes; pub type Result<T> = std::result::Result<T, Error>; @@ -84,13 +86,13 @@ impl Future for PutBlob { } impl Blobstore for Rocksblob where { - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { let db = self.db.clone(); GetBlob(db, key).boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let db = self.db.clone(); PutBlob(db, key, value).boxify() diff --git a/blobstore/src/counted_blobstore.rs b/blobstore/src/counted_blobstore.rs index 4b7afba607..1a1d8bcec2 100644 --- a/blobstore/src/counted_blobstore.rs +++ b/blobstore/src/counted_blobstore.rs @@ -9,6 +9,7 @@ use futures::Future; use futures_ext::{BoxFuture, FutureExt}; use stats::DynamicTimeseries; +use context::CoreContext; use mononoke_types::BlobstoreBytes; use {Blobstore, CacheBlobstoreExt}; @@ -54,11 +55,11 @@ impl CountedBlobstore { } impl Blobstore for CountedBlobstore { - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { let name = self.name; STATS::get.add_value(1, (name,)); self.blobstore - .get(key) + .get(ctx, key) .then(move |res| { match res { Ok(Some(_)) => STATS::get_hit.add_value(1, (name,)), @@ -70,11 +71,11 @@ impl Blobstore for CountedBlobstore { .boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let name = self.name; STATS::put.add_value(1, (name,)); self.blobstore - .put(key, value) + .put(ctx, key, value) .then(move |res| { match res { Ok(()) => STATS::put_ok.add_value(1, (name,)), @@ -85,11 +86,11 @@ impl Blobstore for CountedBlobstore { .boxify() } - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { let name = self.name; STATS::is_present.add_value(1, (name,)); self.blobstore - .is_present(key) + .is_present(ctx, key) .then(move |res| { match res { Ok(true) => STATS::is_present_hit.add_value(1, (name,)), @@ -101,11 +102,11 @@ impl Blobstore
for CountedBlobstore { .boxify() } - fn assert_present(&self, key: String) -> BoxFuture<(), Error> { + fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> { let name = self.name; STATS::assert_present.add_value(1, (name,)); self.blobstore - .assert_present(key) + .assert_present(ctx, key) .then(move |res| { match res { Ok(()) => STATS::assert_present_ok.add_value(1, (name,)), @@ -119,8 +120,12 @@ impl Blobstore for CountedBlobstore { impl CacheBlobstoreExt for CountedBlobstore { #[inline] - fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { - self.as_inner().get_no_cache_fill(key) + fn get_no_cache_fill( + &self, + ctx: CoreContext, + key: String, + ) -> BoxFuture<Option<BlobstoreBytes>, Error> { + self.as_inner().get_no_cache_fill(ctx, key) } #[inline] diff --git a/blobstore/src/lib.rs b/blobstore/src/lib.rs index e253165ed6..3d287acbb4 100644 --- a/blobstore/src/lib.rs +++ b/blobstore/src/lib.rs @@ -15,6 +15,7 @@ extern crate tokio; extern crate tokio_timer; extern crate cachelib; +extern crate context; extern crate fbwhoami; extern crate futures_ext; extern crate memcache; @@ -31,6 +32,7 @@ use failure::Error; use futures::{future, Future}; use futures_ext::{BoxFuture, FutureExt}; +use context::CoreContext; use mononoke_types::BlobstoreBytes; mod cachelib_cache; @@ -87,22 +89,22 @@ pub use errors::ErrorKind; /// uses of Blobstores pub trait Blobstore: fmt::Debug + Send + Sync + 'static { /// Fetch the value associated with `key`, or None if no value is present - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>; + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>; /// Associate `value` with `key` for future gets; if `put` is called with different `value`s /// for the same key, the implementation may return any `value` it's been given in response /// to a `get` for that `key`. - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error>; + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error>; /// Check that `get` will return a value for a given `key`, and not None. The provided /// implementation just calls `get`, and discards the return value; this can be overridden to /// avoid transferring data. In the absence of concurrent `put` calls, this must return /// `false` if `get` would return `None`, and `true` if `get` would return `Some(_)`. - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { - self.get(key).map(|opt| opt.is_some()).boxify() + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { + self.get(ctx, key).map(|opt| opt.is_some()).boxify() } /// Errors if a given `key` is not present in the blob store.
Useful to abort a chained /// future computation early if it cannot succeed unless the `key` is present - fn assert_present(&self, key: String) -> BoxFuture<(), Error> { - self.is_present(key.clone()) + fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> { + self.is_present(ctx, key.clone()) .and_then(|present| { if present { future::ok(()) @@ -115,31 +117,31 @@ pub trait Blobstore: fmt::Debug + Send + Sync + 'static { } impl Blobstore for Arc<Blobstore> { - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { - self.as_ref().get(key) + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + self.as_ref().get(ctx, key) } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { - self.as_ref().put(key, value) + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + self.as_ref().put(ctx, key, value) } - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { - self.as_ref().is_present(key) + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { + self.as_ref().is_present(ctx, key) } - fn assert_present(&self, key: String) -> BoxFuture<(), Error> { - self.as_ref().assert_present(key) + fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> { + self.as_ref().assert_present(ctx, key) } } impl Blobstore for Box<Blobstore> { - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { - self.as_ref().get(key) + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + self.as_ref().get(ctx, key) } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { - self.as_ref().put(key, value) + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + self.as_ref().put(ctx, key, value) } - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { - self.as_ref().is_present(key) + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { + self.as_ref().is_present(ctx, key) } - fn assert_present(&self, key: String) -> BoxFuture<(), Error> { - self.as_ref().assert_present(key) + fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> { + self.as_ref().assert_present(ctx, key) } } diff --git a/blobstore/src/locking_cache.rs b/blobstore/src/locking_cache.rs index ff6fb43700..9406a9d9b4 100644 --- a/blobstore/src/locking_cache.rs +++ b/blobstore/src/locking_cache.rs @@ -12,6 +12,7 @@ use futures::{future, Future, IntoFuture, future::Either}; use futures_ext::{BoxFuture, FutureExt}; use tokio; +use context::CoreContext; use mononoke_types::BlobstoreBytes; use Blobstore; @@ -21,7 +22,11 @@ use Blobstore; /// /// This is primarily used by the admin command to manually check memcache.
pub trait CacheBlobstoreExt: Blobstore { - fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>; + fn get_no_cache_fill( + &self, + ctx: CoreContext, + key: String, + ) -> BoxFuture<Option<BlobstoreBytes>, Error>; fn get_cache_only(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>; } @@ -224,12 +229,12 @@ where L: LeaseOps + Clone, T: Blobstore + Clone, { - fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { let cache_get = self.cache_get(&key); let cache_put = self.cache_put_closure(&key); let blobstore_get = future::lazy({ let blobstore = self.blobstore.clone(); - move || blobstore.get(key) + move || blobstore.get(ctx, key) }); cache_get @@ -245,7 +250,7 @@ where .boxify() } - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let can_put = self.take_put_lease(&key); let cache_put = self.cache_put(&key, value.clone()) .join(future::lazy({ @@ -261,7 +266,7 @@ where let key = key.clone(); move || { blobstore - .put(key.clone(), value) + .put(ctx, key.clone(), value) .or_else(move |r| lease.release_lease(&key, false).then(|_| Err(r))) } }); @@ -277,11 +282,11 @@ where .boxify() } - fn is_present(&self, key: String) -> BoxFuture<bool, Error> { + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> { let cache_check = self.cache_is_present(&key); let blobstore_check = future::lazy({ let blobstore = self.blobstore.clone(); - move || blobstore.is_present(key) + move || blobstore.is_present(ctx, key) }); cache_check @@ -302,9 +307,13 @@ where L: LeaseOps + Clone, T: Blobstore + Clone, { - fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> { + fn get_no_cache_fill( + &self, + ctx: CoreContext, + key: String, + ) -> BoxFuture<Option<BlobstoreBytes>, Error> { let cache_get = self.cache_get(&key); - let blobstore_get = self.blobstore.get(key); + let blobstore_get = self.blobstore.get(ctx, key); cache_get .and_then(move |blob| { diff --git a/blobstore/src/mem_writes.rs b/blobstore/src/mem_writes.rs index a3e6d35e13..edd8c6b3fb 100644 --- a/blobstore/src/mem_writes.rs +++ b/blobstore/src/mem_writes.rs @@ -10,6 +10,7 @@ use futures::future::Either; use futures_ext::{BoxFuture, FutureExt}; +use context::CoreContext; use mononoke_types::BlobstoreBytes; use {Blobstore, EagerMemblob}; @@ -32,45 +33,45 @@ impl MemWritesBlobstore { } impl Blobstore for MemWritesBlobstore { - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { // Don't write the key if it's already present.
- self.is_present(key.clone()) + self.is_present(ctx.clone(), key.clone()) .and_then({ let memblob = self.memblob.clone(); move |is_present| { if is_present { Either::A(Ok(()).into_future()) } else { - Either::B(memblob.put(key, value)) + Either::B(memblob.put(ctx, key, value)) } } }) .boxify() } - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture, Error> { self.memblob - .get(key.clone()) + .get(ctx.clone(), key.clone()) .and_then({ let inner = self.inner.clone(); move |val| match val { Some(val) => Either::A(Ok(Some(val)).into_future()), - None => Either::B(inner.get(key)), + None => Either::B(inner.get(ctx, key)), } }) .boxify() } - fn is_present(&self, key: String) -> BoxFuture { + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture { self.memblob - .is_present(key.clone()) + .is_present(ctx.clone(), key.clone()) .and_then({ let inner = self.inner.clone(); move |is_present| { if is_present { Either::A(Ok(true).into_future()) } else { - Either::B(inner.is_present(key)) + Either::B(inner.is_present(ctx, key)) } } }) @@ -86,24 +87,29 @@ mod test { #[test] fn basic_read() { + let ctx = CoreContext::test_mock(); let inner = EagerMemblob::new(); let foo_key = "foo".to_string(); inner - .put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar")) + .put( + ctx.clone(), + foo_key.clone(), + BlobstoreBytes::from_bytes("foobar"), + ) .wait() .expect("initial put should work"); let outer = MemWritesBlobstore::new(inner.clone()); assert!( outer - .is_present(foo_key.clone()) + .is_present(ctx.clone(), foo_key.clone()) .wait() .expect("is_present to inner should work") ); assert_eq!( outer - .get(foo_key.clone()) + .get(ctx, foo_key.clone()) .wait() .expect("get to inner should work") .expect("value should be present") @@ -114,18 +120,23 @@ mod test { #[test] fn redirect_writes() { + let ctx = CoreContext::test_mock(); let inner = EagerMemblob::new(); let foo_key = "foo".to_string(); let outer = MemWritesBlobstore::new(inner.clone()); outer - .put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar")) + .put( + ctx.clone(), + foo_key.clone(), + BlobstoreBytes::from_bytes("foobar"), + ) .wait() .expect("put should work"); assert!( !inner - .is_present(foo_key.clone()) + .is_present(ctx.clone(), foo_key.clone()) .wait() .expect("is_present on inner should work"), "foo should not be present in inner", @@ -133,7 +144,7 @@ mod test { assert!( outer - .is_present(foo_key.clone()) + .is_present(ctx.clone(), foo_key.clone()) .wait() .expect("is_present on outer should work"), "foo should be present in outer", @@ -141,7 +152,7 @@ mod test { assert_eq!( outer - .get(foo_key.clone()) + .get(ctx, foo_key.clone()) .wait() .expect("get to outer should work") .expect("value should be present") @@ -152,21 +163,30 @@ mod test { #[test] fn present_in_inner() { + let ctx = CoreContext::test_mock(); let inner = EagerMemblob::new(); let foo_key = "foo".to_string(); inner - .put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar")) + .put( + ctx.clone(), + foo_key.clone(), + BlobstoreBytes::from_bytes("foobar"), + ) .wait() .expect("initial put should work"); let outer = MemWritesBlobstore::new(inner.clone()); outer - .put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar")) + .put( + ctx.clone(), + foo_key.clone(), + BlobstoreBytes::from_bytes("foobar"), + ) .wait() .expect("put should work"); assert!( outer - .is_present(foo_key.clone()) + .is_present(ctx.clone(), foo_key.clone()) .wait() .expect("is_present on outer should work"), "foo should 
be present in outer", @@ -174,12 +194,16 @@ mod test { // Change the value in inner. inner - .put(foo_key.clone(), BlobstoreBytes::from_bytes("bazquux")) + .put( + ctx.clone(), + foo_key.clone(), + BlobstoreBytes::from_bytes("bazquux"), + ) .wait() .expect("second put should work"); assert_eq!( outer - .get(foo_key.clone()) + .get(ctx, foo_key.clone()) .wait() .expect("get to outer should work") .expect("value should be present") diff --git a/blobstore/src/memblob.rs b/blobstore/src/memblob.rs index 3201991396..46cc3ada0e 100644 --- a/blobstore/src/memblob.rs +++ b/blobstore/src/memblob.rs @@ -12,6 +12,7 @@ use failure::Error; use futures::future::{lazy, IntoFuture}; use futures_ext::{BoxFuture, FutureExt}; +use context::CoreContext; use mononoke_types::BlobstoreBytes; use Blobstore; @@ -47,14 +48,14 @@ impl LazyMemblob { } impl Blobstore for EagerMemblob { - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let mut inner = self.hash.lock().expect("lock poison"); inner.insert(key, value); Ok(()).into_future().boxify() } - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture, Error> { let inner = self.hash.lock().expect("lock poison"); Ok(inner.get(&key).map(Clone::clone)).into_future().boxify() @@ -62,7 +63,7 @@ impl Blobstore for EagerMemblob { } impl Blobstore for LazyMemblob { - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { let hash = self.hash.clone(); lazy(move || { @@ -73,7 +74,7 @@ impl Blobstore for LazyMemblob { }).boxify() } - fn get(&self, key: String) -> BoxFuture, Error> { + fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture, Error> { let hash = self.hash.clone(); lazy(move || { diff --git a/blobstore/src/prefix.rs b/blobstore/src/prefix.rs index 062170c476..dcf1b025c5 100644 --- a/blobstore/src/prefix.rs +++ b/blobstore/src/prefix.rs @@ -9,6 +9,7 @@ use inlinable_string::InlinableString; use futures_ext::BoxFuture; +use context::CoreContext; use mononoke_types::BlobstoreBytes; use {Blobstore, CacheBlobstoreExt}; @@ -40,8 +41,12 @@ impl PrefixBlobstore { impl CacheBlobstoreExt for PrefixBlobstore { #[inline] - fn get_no_cache_fill(&self, key: String) -> BoxFuture, Error> { - self.blobstore.get_no_cache_fill(self.prepend(key)) + fn get_no_cache_fill( + &self, + ctx: CoreContext, + key: String, + ) -> BoxFuture, Error> { + self.blobstore.get_no_cache_fill(ctx, self.prepend(key)) } #[inline] @@ -52,18 +57,18 @@ impl CacheBlobstoreExt for PrefixBlobstore { impl Blobstore for PrefixBlobstore { #[inline] - fn get(&self, key: String) -> BoxFuture, Error> { - self.blobstore.get(self.prepend(key)) + fn get(&self, ctx: CoreContext, key: String) -> BoxFuture, Error> { + self.blobstore.get(ctx, self.prepend(key)) } #[inline] - fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { - self.blobstore.put(self.prepend(key), value) + fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> { + self.blobstore.put(ctx, self.prepend(key), value) } #[inline] - fn is_present(&self, key: String) -> BoxFuture { - self.blobstore.is_present(self.prepend(key)) + fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture { + self.blobstore.is_present(ctx, self.prepend(key)) } } @@ -78,6 +83,7 @@ mod test { #[test] fn test_prefix() { 
+ let ctx = CoreContext::test_mock(); let base = EagerMemblob::new(); let prefixed = PrefixBlobstore::new(base.clone(), "prefix123-"); let unprefixed_key = "foobar".to_string(); @@ -86,6 +92,7 @@ mod test { // This is EagerMemblob (immediate future completion) so calling wait() is fine. prefixed .put( + ctx.clone(), unprefixed_key.clone(), BlobstoreBytes::from_bytes("test foobar"), ) @@ -95,7 +102,7 @@ mod test { // Test that both the prefixed and the unprefixed stores can access the key. assert_eq!( prefixed - .get(unprefixed_key.clone()) + .get(ctx.clone(), unprefixed_key.clone()) .wait() .expect("get should succeed") .expect("value should be present") @@ -103,7 +110,7 @@ mod test { Bytes::from("test foobar"), ); assert_eq!( - base.get(prefixed_key.clone()) + base.get(ctx.clone(), prefixed_key.clone()) .wait() .expect("get should succeed") .expect("value should be present") @@ -114,12 +121,12 @@ mod test { // Test that is_present works for both the prefixed and unprefixed stores. assert!( prefixed - .is_present(unprefixed_key.clone()) + .is_present(ctx.clone(), unprefixed_key.clone()) .wait() .expect("is_present should succeed") ); assert!( - base.is_present(prefixed_key.clone()) + base.is_present(ctx.clone(), prefixed_key.clone()) .wait() .expect("is_present should succeed") ); diff --git a/blobstore/test/main.rs b/blobstore/test/main.rs index faa0945262..903bba1960 100644 --- a/blobstore/test/main.rs +++ b/blobstore/test/main.rs @@ -18,6 +18,7 @@ extern crate tempdir; extern crate tokio; extern crate blobstore; +extern crate context; extern crate fileblob; extern crate glusterblob; extern crate mononoke_types; @@ -33,6 +34,7 @@ use tempdir::TempDir; use tokio::{prelude::*, runtime::Runtime}; use blobstore::{Blobstore, EagerMemblob}; +use context::CoreContext; use fileblob::Fileblob; use glusterblob::Glusterblob; use mononoke_types::BlobstoreBytes; @@ -45,6 +47,7 @@ where B::Future: Send + 'static, Error: From, { + let ctx = CoreContext::test_mock(); let blobstore = blobstore.into_future().map_err(|err| err.into()); let foo = "foo".to_string(); @@ -52,8 +55,12 @@ where let fut = future::lazy(|| { blobstore.and_then(|blobstore| { blobstore - .put(foo.clone(), BlobstoreBytes::from_bytes(&b"bar"[..])) - .and_then(move |_| blobstore.get(foo)) + .put( + ctx.clone(), + foo.clone(), + BlobstoreBytes::from_bytes(&b"bar"[..]), + ) + .and_then(move |_| blobstore.get(ctx, foo)) }) }); @@ -73,10 +80,12 @@ where B::Future: Send + 'static, Error: From, { + let ctx = CoreContext::test_mock(); let blobstore = blobstore.into_future().map_err(|err| err.into()); - let fut = - future::lazy(move || blobstore.and_then(|blobstore| blobstore.get("missing".to_string()))); + let fut = future::lazy(move || { + blobstore.and_then(|blobstore| blobstore.get(ctx, "missing".to_string())) + }); let mut runtime = Runtime::new().expect("runtime creation failed"); let out = runtime.block_on(fut).expect("get failed"); @@ -91,6 +100,7 @@ where B::Future: Send + 'static, Error: From, { + let ctx = CoreContext::test_mock(); let blobstore = Box::new(blobstore.into_future().map_err(|err| err.into())); let foo = "foo".to_string(); @@ -98,8 +108,12 @@ where let fut = future::lazy(|| { blobstore.and_then(|blobstore| { blobstore - .put(foo.clone(), BlobstoreBytes::from_bytes(&b"bar"[..])) - .and_then(move |_| blobstore.get(foo)) + .put( + ctx.clone(), + foo.clone(), + BlobstoreBytes::from_bytes(&b"bar"[..]), + ) + .and_then(move |_| blobstore.get(ctx, foo)) }) }); let mut runtime = Runtime::new().expect("runtime creation failed"); 
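Before moving from the blobstore crates to bonsai-utils, it helps to see the post-patch `Blobstore` contract in one self-contained place. This is a hedged sketch, not the real API: the actual trait returns `futures_ext::BoxFuture` values and stores `mononoke_types::BlobstoreBytes`, while here synchronous returns and a `Vec<u8>` alias stand in so the example compiles on its own.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the real `context` and `mononoke_types` types.
#[derive(Clone, Debug)]
pub struct CoreContext;

impl CoreContext {
    pub fn test_mock() -> Self {
        CoreContext
    }
}

type BlobstoreBytes = Vec<u8>;

// Post-patch shape of the trait: every operation takes the context first.
trait Blobstore: Send + Sync + 'static {
    fn get(&self, ctx: CoreContext, key: String) -> Option<BlobstoreBytes>;
    fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes);

    // Default methods forward the context to the calls they delegate to,
    // mirroring `is_present`/`assert_present` in blobstore/src/lib.rs above.
    fn is_present(&self, ctx: CoreContext, key: String) -> bool {
        self.get(ctx, key).is_some()
    }
}

// An eager in-memory store in the spirit of EagerMemblob.
#[derive(Clone, Default)]
struct Memblob {
    hash: Arc<Mutex<HashMap<String, BlobstoreBytes>>>,
}

impl Blobstore for Memblob {
    fn get(&self, _ctx: CoreContext, key: String) -> Option<BlobstoreBytes> {
        self.hash.lock().expect("lock poison").get(&key).cloned()
    }

    fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) {
        self.hash.lock().expect("lock poison").insert(key, value);
    }
}

fn main() {
    let ctx = CoreContext::test_mock();
    let store = Memblob::default();
    store.put(ctx.clone(), "foo".to_string(), b"bar".to_vec());
    assert!(store.is_present(ctx.clone(), "foo".to_string()));
    assert_eq!(store.get(ctx, "foo".to_string()), Some(b"bar".to_vec()));
}
```

As in the patch itself, backends with no use for the context simply bind it as `_ctx`; keeping the parameter anyway means call sites stay uniform and later instrumentation needs no further signature change.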
diff --git a/bonsai-utils/src/composite.rs b/bonsai-utils/src/composite.rs index a2f9c51806..21fc0fa4ec 100644 --- a/bonsai-utils/src/composite.rs +++ b/bonsai-utils/src/composite.rs @@ -9,6 +9,7 @@ use std::collections::{btree_map, BTreeMap, HashMap}; use failure::Error; use futures::{future, stream, Future, Stream}; +use context::CoreContext; use mercurial_types::{Entry, HgEntryId, Type, manifest::Content}; use mononoke_types::{FileType, MPathElement}; @@ -72,11 +73,14 @@ impl CompositeEntry { self.trees.contains_key(hash) } - pub fn manifest(&self) -> impl Future + Send { + pub fn manifest( + &self, + ctx: CoreContext, + ) -> impl Future + Send { // Manifests can only exist for tree entries. If self.trees is empty then an empty // composite manifest will be returned. This is by design. let mf_futs = self.trees.values().map(|entry| { - entry.get_content().map({ + entry.get_content(ctx.clone()).map({ move |content| match content { Content::Tree(mf) => mf, _other => unreachable!("tree content must be a manifest"), diff --git a/bonsai-utils/src/diff.rs b/bonsai-utils/src/diff.rs index ea156d8f66..90c78fe900 100644 --- a/bonsai-utils/src/diff.rs +++ b/bonsai-utils/src/diff.rs @@ -13,6 +13,7 @@ use failure::Error; use futures::{stream, Future, Stream, future::{self, Either}}; use itertools::{EitherOrBoth, Itertools}; +use context::CoreContext; use futures_ext::{select_all, BoxStream, StreamExt}; use mercurial_types::{Entry, HgEntryId, Manifest, Type}; use mercurial_types::manifest::{Content, EmptyManifest}; @@ -25,6 +26,7 @@ use composite::CompositeEntry; /// /// Items may be returned in arbitrary order. pub fn bonsai_diff( + ctx: CoreContext, root_entry: Box, p1_entry: Option>, p2_entry: Option>, @@ -37,7 +39,7 @@ pub fn bonsai_diff( composite_entry.add_parent(entry); } - WorkingEntry::new(root_entry).bonsai_diff_tree(None, composite_entry) + WorkingEntry::new(root_entry).bonsai_diff_tree(ctx, None, composite_entry) } #[derive(Clone, Debug, Eq, PartialEq)] @@ -146,10 +148,13 @@ impl WorkingEntry { } #[inline] - fn manifest(&self) -> impl Future, Error = Error> + Send { + fn manifest( + &self, + ctx: CoreContext, + ) -> impl Future, Error = Error> + Send { match self { WorkingEntry::Tree(entry) => { - Either::A(entry.get_content().map(|content| match content { + Either::A(entry.get_content(ctx).map(|content| match content { Content::Tree(mf) => mf, _ => unreachable!("tree entries can only return manifests"), })) @@ -161,6 +166,7 @@ impl WorkingEntry { /// The path here corresponds to the path associated with this working entry. fn bonsai_diff( self, + ctx: CoreContext, path: MPath, composite_entry: CompositeEntry, ) -> impl Stream + Send { @@ -171,7 +177,7 @@ impl WorkingEntry { stream::empty().boxify() } Some(BonsaiDiffResult::Deleted(..)) | None => { - self.bonsai_diff_tree(Some(path), composite_entry) + self.bonsai_diff_tree(ctx, Some(path), composite_entry) } }; let file_stream = stream::iter_ok(file_result); @@ -227,6 +233,7 @@ impl WorkingEntry { /// methods. 
fn bonsai_diff_tree( self, + ctx: CoreContext, path: Option, composite_entry: CompositeEntry, ) -> BoxStream { @@ -248,9 +255,9 @@ impl WorkingEntry { } } - let working_mf_fut = self.manifest(); + let working_mf_fut = self.manifest(ctx.clone()); composite_entry - .manifest() + .manifest(ctx.clone()) .join(working_mf_fut) .map(move |(composite_mf, working_mf)| { let sub_streams = composite_mf @@ -261,12 +268,12 @@ impl WorkingEntry { .expect("manifest entries should have names"); cname.cmp(wname) }) - .map(|entry_pair| { + .map(move |entry_pair| { match entry_pair { EitherOrBoth::Left((name, centry)) => { // This entry was removed from the working set. let sub_path = MPath::join_opt_element(path.as_ref(), &name); - WorkingEntry::absent().bonsai_diff(sub_path, centry) + WorkingEntry::absent().bonsai_diff(ctx.clone(), sub_path, centry) } EitherOrBoth::Right(wentry) => { // This entry was added to the working set. @@ -276,14 +283,17 @@ impl WorkingEntry { .expect("manifest entries should have names"); MPath::join_opt_element(path.as_ref(), name) }; - WorkingEntry::new(wentry) - .bonsai_diff(sub_path, CompositeEntry::new()) + WorkingEntry::new(wentry).bonsai_diff( + ctx.clone(), + sub_path, + CompositeEntry::new(), + ) } EitherOrBoth::Both((name, centry), wentry) => { // This entry is present in both the working set and at least one of // the parents. let sub_path = MPath::join_opt_element(path.as_ref(), &name); - WorkingEntry::new(wentry).bonsai_diff(sub_path, centry) + WorkingEntry::new(wentry).bonsai_diff(ctx.clone(), sub_path, centry) } } }); diff --git a/bonsai-utils/src/lib.rs b/bonsai-utils/src/lib.rs index 322792b666..acb098dd61 100644 --- a/bonsai-utils/src/lib.rs +++ b/bonsai-utils/src/lib.rs @@ -12,6 +12,7 @@ extern crate failure_ext as failure; extern crate futures; extern crate itertools; +extern crate context; extern crate futures_ext; extern crate mercurial_types; extern crate mononoke_types; diff --git a/bonsai-utils/test/main.rs b/bonsai-utils/test/main.rs index 1040e78244..a03012d894 100644 --- a/bonsai-utils/test/main.rs +++ b/bonsai-utils/test/main.rs @@ -13,6 +13,7 @@ extern crate pretty_assertions; extern crate async_unit; extern crate bonsai_utils; +extern crate context; extern crate mercurial_types; extern crate mercurial_types_mocks; extern crate mononoke_types; @@ -24,6 +25,7 @@ use futures::{Future, Stream}; use async_unit::tokio_unit_test; use bonsai_utils::{bonsai_diff, BonsaiDiffResult}; +use context::CoreContext; use mercurial_types::{Entry, HgEntryId}; use mercurial_types_mocks::manifest::{MockEntry, MockManifest}; use mercurial_types_mocks::nodehash::*; @@ -34,10 +36,11 @@ use fixtures::ManifestFixture; #[test] fn diff_basic() { tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let parent_entry = root_entry(&fixtures::BASIC1); let working_entry = root_entry(&fixtures::BASIC2); - let diff = compute_diff(working_entry, Some(parent_entry), None); + let diff = compute_diff(ctx.clone(), working_entry, Some(parent_entry), None); let expected_diff = vec![ deleted("dir1/file-to-dir"), // dir1/file-to-dir/foobar *is* a result, because it has changed and its parent is @@ -58,7 +61,7 @@ fn diff_basic() { let parent2 = root_entry(&fixtures::BASIC1); let working_entry = root_entry(&fixtures::BASIC2); - let diff = compute_diff(working_entry, Some(parent1), Some(parent2)); + let diff = compute_diff(ctx.clone(), working_entry, Some(parent1), Some(parent2)); assert_eq!(diff, expected_diff); }) } @@ -66,10 +69,11 @@ fn diff_basic() { #[test] fn diff_truncate() { 
tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let parent_entry = root_entry(&fixtures::TRUNCATE1); let working_entry = root_entry(&fixtures::TRUNCATE2); - let diff = bonsai_diff(working_entry, Some(parent_entry), None); + let diff = bonsai_diff(ctx, working_entry, Some(parent_entry), None); let paths = diff.collect().wait().expect("computing diff failed"); assert_eq!(paths, vec![]); }) @@ -78,11 +82,12 @@ fn diff_truncate() { #[test] fn diff_merge1() { tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let parent1 = root_entry(&fixtures::BASIC1); let parent2 = root_entry(&fixtures::BASIC2); let working_entry = root_entry(&fixtures::BASIC2); - let diff = compute_diff(working_entry, Some(parent1), Some(parent2)); + let diff = compute_diff(ctx.clone(), working_entry, Some(parent1), Some(parent2)); // Compare this result to expected_diff in diff_basic. let expected_diff = vec![ @@ -114,11 +119,12 @@ fn root_entry(mf: &ManifestFixture) -> Box { } fn compute_diff( + ctx: CoreContext, working_entry: Box, p1_entry: Option>, p2_entry: Option>, ) -> Vec { - let diff_stream = bonsai_diff(working_entry, p1_entry, p2_entry); + let diff_stream = bonsai_diff(ctx, working_entry, p1_entry, p2_entry); let mut paths = diff_stream.collect().wait().expect("computing diff failed"); paths.sort_unstable(); diff --git a/bundle2-resolver/src/changegroup/filelog.rs b/bundle2-resolver/src/changegroup/filelog.rs index 66065050c6..5d01f36b6d 100644 --- a/bundle2-resolver/src/changegroup/filelog.rs +++ b/bundle2-resolver/src/changegroup/filelog.rs @@ -9,6 +9,7 @@ use std::mem; use std::sync::Arc; use bytes::Bytes; +use context::CoreContext; use failure::Compat; use futures::{Future, IntoFuture, Stream}; use futures::future::Shared; @@ -60,7 +61,7 @@ impl UploadableHgBlob for Filelog { Shared>>, ); - fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> { + fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> { let node_key = self.node_key; let path = match &node_key.path { RepoPath::FilePath(path) => path.clone(), @@ -83,7 +84,7 @@ impl UploadableHgBlob for Filelog { path, }; - let (cbinfo, fut) = upload.upload(repo)?; + let (cbinfo, fut) = upload.upload(ctx, repo)?; Ok(( node_key, (cbinfo, fut.map_err(Error::compat).boxify().shared()), @@ -91,7 +92,11 @@ impl UploadableHgBlob for Filelog { } } -pub fn convert_to_revlog_filelog(repo: Arc, deltaed: S) -> BoxStream +pub fn convert_to_revlog_filelog( + ctx: CoreContext, + repo: Arc, + deltaed: S, +) -> BoxStream where S: Stream + Send + 'static, { @@ -109,15 +114,15 @@ where } = chunk; delta_cache - .decode(node.clone(), base.into_option(), delta) + .decode(ctx.clone(), node.clone(), base.into_option(), delta) .and_then({ - cloned!(node, path, repo); + cloned!(ctx, node, path, repo); move |data| { parse_rev_flags(flags_value) .into_future() .and_then(move |flags| { - get_filelog_data(repo, data, flags).map(move |file_log_data| { - Filelog { + get_filelog_data(ctx.clone(), repo, data, flags).map( + move |file_log_data| Filelog { node_key: HgNodeKey { path: RepoPath::FilePath(path), hash: node, @@ -127,8 +132,8 @@ where linknode, data: file_log_data, flags, - } - }) + }, + ) }) } }) @@ -145,6 +150,7 @@ where } fn generate_lfs_meta_data( + ctx: CoreContext, repo: Arc, data: Bytes, ) -> impl Future { @@ -154,7 +160,7 @@ fn generate_lfs_meta_data( .into_future() .and_then(move |lfs_content| { ( - repo.get_file_content_id_by_alias(lfs_content.oid()), + repo.get_file_content_id_by_alias(ctx, lfs_content.oid()), 
Ok(lfs_content.copy_from()), ) }) @@ -165,12 +171,13 @@ fn generate_lfs_meta_data( } fn get_filelog_data( + ctx: CoreContext, repo: Arc, data: Bytes, flags: RevFlags, ) -> impl Future { if flags.contains(RevFlags::REVIDX_EXTSTORED) { - generate_lfs_meta_data(repo, data) + generate_lfs_meta_data(ctx, repo, data) .map(|cbmeta| FilelogData::LfsMetaData(cbmeta)) .left_future() } else { @@ -193,6 +200,7 @@ impl DeltaCache { fn decode( &mut self, + ctx: CoreContext, node: HgNodeHash, base: Option, delta: Delta, @@ -225,7 +233,7 @@ impl DeltaCache { }) .boxify(), None => self.repo - .get_raw_hg_content(&base) + .get_raw_hg_content(ctx, &base) .and_then(move |blob| { let bytes = blob.into_inner(); delta::apply(bytes.as_ref(), &delta) @@ -365,12 +373,13 @@ mod tests { } } - fn check_conversion(inp: I, exp: J) + fn check_conversion(ctx: CoreContext, inp: I, exp: J) where I: IntoIterator, J: IntoIterator, { let result = convert_to_revlog_filelog( + ctx, Arc::new(BlobRepo::new_memblob_empty(None, None).unwrap()), iter_ok(inp.into_iter().collect::>()), ).collect() @@ -456,6 +465,7 @@ mod tests { #[test] fn two_fulltext_files() { + let ctx = CoreContext::test_mock(); let f1 = Filelog { node_key: HgNodeKey { path: RepoPath::FilePath(MPath::new(b"test").unwrap()), @@ -481,12 +491,13 @@ mod tests { }; check_conversion( + ctx, vec![filelog_to_deltaed(&f1), filelog_to_deltaed(&f2)], vec![f1, f2], ); } - fn files_check_order(correct_order: bool) { + fn files_check_order(ctx: CoreContext, correct_order: bool) { let f1 = Filelog { node_key: HgNodeKey { path: RepoPath::FilePath(MPath::new(b"test").unwrap()), @@ -524,6 +535,7 @@ mod tests { }; let result = convert_to_revlog_filelog( + ctx, Arc::new(BlobRepo::new_memblob_empty(None, None).unwrap()), iter_ok(inp), ).collect() @@ -543,12 +555,12 @@ mod tests { #[test] fn files_order_correct() { - files_check_order(true); + files_check_order(CoreContext::test_mock(), true); } #[test] fn files_order_incorrect() { - files_check_order(false); + files_check_order(CoreContext::test_mock(), false); } quickcheck! 
{ @@ -558,7 +570,9 @@ mod tests { } fn correct_conversion_single(f: Filelog) -> bool { + let ctx = CoreContext::test_mock(); check_conversion( + ctx, vec![filelog_to_deltaed(&f)], vec![f], ); @@ -567,6 +581,7 @@ mod tests { } fn correct_conversion_delta_against_first(f: Filelog, fs: Vec) -> bool { + let ctx = CoreContext::test_mock(); let mut hash_gen = NodeHashGen::new(); let mut f = f.clone(); @@ -586,12 +601,13 @@ mod tests { deltas.push(delta); } - check_conversion(deltas, vec![f].into_iter().chain(fs)); + check_conversion(ctx, deltas, vec![f].into_iter().chain(fs)); true } fn correct_conversion_delta_against_next(fs: Vec) -> bool { + let ctx = CoreContext::test_mock(); let mut hash_gen = NodeHashGen::new(); let mut fs = fs.clone(); @@ -617,7 +633,7 @@ mod tests { deltas }; - check_conversion(deltas, fs); + check_conversion(ctx, deltas, fs); true } diff --git a/bundle2-resolver/src/getbundle_response.rs b/bundle2-resolver/src/getbundle_response.rs index 71649126ac..46da3eb3f0 100644 --- a/bundle2-resolver/src/getbundle_response.rs +++ b/bundle2-resolver/src/getbundle_response.rs @@ -89,10 +89,10 @@ pub fn create_getbundle_response( .get_hg_from_bonsai_changeset(ctx.clone(), bonsai) .map(|cs| cs.into_nodehash()) .and_then({ - cloned!(blobrepo); + cloned!(ctx, blobrepo); move |node| { blobrepo - .get_changeset_by_changesetid(&HgChangesetId::new(node)) + .get_changeset_by_changesetid(ctx, &HgChangesetId::new(node)) .map(move |cs| (node, cs)) } }) diff --git a/bundle2-resolver/src/pushrebase.rs b/bundle2-resolver/src/pushrebase.rs index e57da17920..7fb1c5719a 100644 --- a/bundle2-resolver/src/pushrebase.rs +++ b/bundle2-resolver/src/pushrebase.rs @@ -231,8 +231,8 @@ fn fetch_bonsai_changesets( move |bcs_cs| bcs_cs.ok_or(ErrorKind::BonsaiNotFoundForHgChangeset(hg_cs).into()) }) .and_then({ - cloned!(repo); - move |bcs_id| repo.get_bonsai_changeset(bcs_id).from_err() + cloned!(ctx, repo); + move |bcs_id| repo.get_bonsai_changeset(ctx, bcs_id).from_err() }) .with_context(move |_| format!("While intitial bonsai changesets fetching")) .map_err(Error::from) @@ -285,7 +285,7 @@ fn find_closest_root( roots: Vec, ) -> impl Future { let roots: HashSet<_> = roots.into_iter().collect(); - get_bookmark_value(ctx, repo, &bookmark) + get_bookmark_value(ctx.clone(), repo, &bookmark) .from_err() .and_then({ cloned!(repo); @@ -306,7 +306,7 @@ fn find_closest_root( if roots.contains(&id) { ok(Loop::Break(id)).left_future() } else { - repo.get_bonsai_changeset(id) + repo.get_bonsai_changeset(ctx.clone(), id) .map(move |bcs| { queue.extend(bcs.parents()); Loop::Continue((queue, depth + 1)) @@ -329,12 +329,12 @@ fn find_changed_files_between_manfiests( descendant: ChangesetId, ) -> impl Future, Error = PushrebaseError> { let id_to_manifest = { - cloned!(repo); + cloned!(ctx, repo); move |bcs_id| { repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id) .and_then({ - cloned!(repo); - move |cs_id| repo.get_changeset_by_changesetid(&cs_id) + cloned!(ctx, repo); + move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id) }) .map({ cloned!(repo); @@ -345,14 +345,17 @@ fn find_changed_files_between_manfiests( (id_to_manifest(descendant), id_to_manifest(ancestor)) .into_future() - .and_then(|(d_mf, a_mf)| { - bonsai_diff(d_mf, Some(a_mf), None) - .map(|diff| match diff { - BonsaiDiffResult::Changed(path, ..) - | BonsaiDiffResult::ChangedReusedId(path, ..) 
- | BonsaiDiffResult::Deleted(path) => path, - }) - .collect() + .and_then({ + cloned!(ctx); + move |(d_mf, a_mf)| { + bonsai_diff(ctx, d_mf, Some(a_mf), None) + .map(|diff| match diff { + BonsaiDiffResult::Changed(path, ..) + | BonsaiDiffResult::ChangedReusedId(path, ..) + | BonsaiDiffResult::Deleted(path) => path, + }) + .collect() + } }) .from_err() } @@ -365,8 +368,8 @@ fn fetch_bonsai_range( descendant: ChangesetId, ) -> impl Future, Error = PushrebaseError> { cloned!(repo); - RangeNodeStream::new(ctx, &repo, ancestor, descendant) - .map(move |id| repo.get_bonsai_changeset(id)) + RangeNodeStream::new(ctx.clone(), &repo, ancestor, descendant) + .map(move |id| repo.get_bonsai_changeset(ctx.clone(), id)) .buffered(100) .collect() .from_err() @@ -382,9 +385,9 @@ fn find_changed_files( cloned!(repo); RangeNodeStream::new(ctx.clone(), &repo, ancestor, descendant) .map({ - cloned!(repo); + cloned!(ctx, repo); move |bcs_id| { - repo.get_bonsai_changeset(bcs_id) + repo.get_bonsai_changeset(ctx.clone(), bcs_id) .map(move |bcs| (bcs_id, bcs)) } }) @@ -602,10 +605,10 @@ fn find_rebased_set( root: ChangesetId, head: ChangesetId, ) -> impl Future, Error = PushrebaseError> { - RangeNodeStream::new(ctx, &repo, root, head) + RangeNodeStream::new(ctx.clone(), &repo, root, head) .map({ cloned!(repo); - move |bcs_id| repo.get_bonsai_changeset(bcs_id) + move |bcs_id| repo.get_bonsai_changeset(ctx.clone(), bcs_id) }) .buffered(100) .collect() @@ -679,7 +682,11 @@ mod tests { ctx.clone(), repo.clone(), parents, - store_files(btreemap!{"file" => Some("content")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"file" => Some("content")}, + repo.clone(), + ), ); let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id) .wait() @@ -714,13 +721,21 @@ mod tests { ctx.clone(), repo.clone(), vec![p], - store_files(btreemap!{"file" => Some("content")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"file" => Some("content")}, + repo.clone(), + ), ); let bcs_id_2 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_1], - store_files(btreemap!{"file2" => Some("content")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"file2" => Some("content")}, + repo.clone(), + ), ); assert_eq!( @@ -770,10 +785,15 @@ mod tests { ctx.clone(), repo.clone(), vec![p], - store_files(btreemap!{"file" => Some("content")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"file" => Some("content")}, + repo.clone(), + ), ); let rename = store_rename( + ctx.clone(), (MPath::new("file").unwrap(), bcs_id_1), "file_renamed", "content", @@ -855,19 +875,23 @@ mod tests { ctx.clone(), repo.clone(), vec![root0], - store_files(btreemap!{"f0" => Some("f0"), "files" => None}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"f0" => Some("f0"), "files" => None}, + repo.clone(), + ), ); let bcs_id_2 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_1, root1], - store_files(btreemap!{"f1" => Some("f1")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"f1" => Some("f1")}, repo.clone()), ); let bcs_id_3 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_2], - store_files(btreemap!{"f2" => Some("f2")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"f2" => Some("f2")}, repo.clone()), ); let book = Bookmark::new("master").unwrap(); @@ -961,19 +985,23 @@ mod tests { ctx.clone(), repo.clone(), vec![root], - store_files(btreemap!{"f0" => Some("f0")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"f0" => Some("f0")}, repo.clone()), ); let bcs_id_2 = create_commit( ctx.clone(), 
repo.clone(), vec![bcs_id_1], - store_files(btreemap!{"9/file" => Some("file")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{"9/file" => Some("file")}, + repo.clone(), + ), ); let bcs_id_3 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_2], - store_files(btreemap!{"f1" => Some("f1")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"f1" => Some("f1")}, repo.clone()), ); let book = Bookmark::new("master").unwrap(); @@ -1033,19 +1061,19 @@ mod tests { ctx.clone(), repo.clone(), vec![root], - store_files(btreemap!{"FILE" => Some("file")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"FILE" => Some("file")}, repo.clone()), ); let bcs_id_2 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_1], - store_files(btreemap!{"FILE" => None}, repo.clone()), + store_files(ctx.clone(), btreemap!{"FILE" => None}, repo.clone()), ); let bcs_id_3 = create_commit( ctx.clone(), repo.clone(), vec![bcs_id_2], - store_files(btreemap!{"file" => Some("file")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"file" => Some("file")}, repo.clone()), ); let hgcss = vec![ repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1) @@ -1090,6 +1118,7 @@ mod tests { repo.clone(), vec![root], store_files( + ctx.clone(), btreemap!{"DIR/a" => Some("a"), "DIR/b" => Some("b")}, repo.clone(), ), @@ -1099,6 +1128,7 @@ mod tests { repo.clone(), vec![bcs_id_1], store_files( + ctx.clone(), btreemap!{"dir/a" => Some("a"), "DIR/a" => None, "DIR/b" => None}, repo.clone(), ), @@ -1148,6 +1178,7 @@ mod tests { repo.clone(), vec![head], store_files( + ctx.clone(), btreemap!{file.as_ref() => Some(content.as_ref())}, repo.clone(), ), @@ -1183,7 +1214,7 @@ mod tests { ctx.clone(), repo.clone(), vec![root], - store_files(btreemap!{"file" => Some("data")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"file" => Some("data")}, repo.clone()), ); let hgcss = vec![ repo_arc @@ -1235,7 +1266,7 @@ mod tests { ctx.clone(), repo.clone(), vec![root], - store_files(btreemap!{"file" => Some("data")}, repo.clone()), + store_files(ctx.clone(), btreemap!{"file" => Some("data")}, repo.clone()), ); let hgcss = vec![ repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs) @@ -1272,15 +1303,16 @@ mod tests { rewritedates: true, ..Default::default() }; - let bcs_rewrite_date = do_pushrebase(ctx, Arc::new(repo.clone()), config, book, hgcss) - .wait() - .expect("push-rebase failed"); + let bcs_rewrite_date = + do_pushrebase(ctx.clone(), Arc::new(repo.clone()), config, book, hgcss) + .wait() + .expect("push-rebase failed"); - let bcs = repo.get_bonsai_changeset(bcs).wait().unwrap(); - let bcs_keep_date = repo.get_bonsai_changeset(bcs_keep_date.head) + let bcs = repo.get_bonsai_changeset(ctx.clone(), bcs).wait().unwrap(); + let bcs_keep_date = repo.get_bonsai_changeset(ctx.clone(), bcs_keep_date.head) .wait() .unwrap(); - let bcs_rewrite_date = repo.get_bonsai_changeset(bcs_rewrite_date.head) + let bcs_rewrite_date = repo.get_bonsai_changeset(ctx.clone(), bcs_rewrite_date.head) .wait() .unwrap(); @@ -1306,6 +1338,7 @@ mod tests { repo.clone(), vec![root], store_files( + ctx.clone(), btreemap!{"Dir1/file_1_in_dir1" => Some("data")}, repo.clone(), ), @@ -1372,8 +1405,10 @@ mod tests { let root_hg = &HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap(); - let root_cs = repo.get_changeset_by_changesetid(&root_hg).wait().unwrap(); - let root_1_id = repo.find_file_in_manifest(&path_1, *root_cs.manifestid()) + let root_cs = repo.get_changeset_by_changesetid(ctx.clone(), &root_hg) + .wait() + .unwrap(); + let 
root_1_id = repo.find_file_in_manifest(ctx.clone(), &path_1, *root_cs.manifestid()) .wait() .unwrap() .unwrap(); @@ -1383,7 +1418,7 @@ mod tests { .wait() .unwrap() .unwrap(); - let root_bcs = repo.get_bonsai_changeset(root).wait().unwrap(); + let root_bcs = repo.get_bonsai_changeset(ctx.clone(), root).wait().unwrap(); let file_1 = root_bcs .file_changes() .find(|(path, _)| path == &&path_1) @@ -1427,7 +1462,9 @@ mod tests { hgcss, ).wait() .expect("pushrebase failed"); - let result_bcs = repo.get_bonsai_changeset(result.head).wait().unwrap(); + let result_bcs = repo.get_bonsai_changeset(ctx.clone(), result.head) + .wait() + .unwrap(); let file_1_result = result_bcs .file_changes() .find(|(path, _)| path == &&path_1) @@ -1436,16 +1473,17 @@ mod tests { .unwrap(); assert_eq!(file_1_result, &file_1_exec); - let result_hg = repo.get_hg_from_bonsai_changeset(ctx, result.head) + let result_hg = repo.get_hg_from_bonsai_changeset(ctx.clone(), result.head) .wait() .unwrap(); - let result_cs = repo.get_changeset_by_changesetid(&result_hg) + let result_cs = repo.get_changeset_by_changesetid(ctx.clone(), &result_hg) .wait() .unwrap(); - let result_1_id = repo.find_file_in_manifest(&path_1, *result_cs.manifestid()) - .wait() - .unwrap() - .unwrap(); + let result_1_id = + repo.find_file_in_manifest(ctx.clone(), &path_1, *result_cs.manifestid()) + .wait() + .unwrap() + .unwrap(); // `result_1_id` should be equal to `root_1_id`, because executable flag // is not a part of file envelope @@ -1482,7 +1520,11 @@ mod tests { ctx.clone(), repo.clone(), parents.clone(), - store_files(btreemap!{ f.as_ref() => Some("content")}, repo.clone()), + store_files( + ctx.clone(), + btreemap!{ f.as_ref() => Some("content")}, + repo.clone(), + ), ); let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id) .wait() diff --git a/bundle2-resolver/src/resolver.rs b/bundle2-resolver/src/resolver.rs index 2641ec545a..911e3b5b92 100644 --- a/bundle2-resolver/src/resolver.rs +++ b/bundle2-resolver/src/resolver.rs @@ -81,7 +81,7 @@ fn resolve_push( bundle2: BoxStream, ) -> BoxFuture { resolver - .maybe_resolve_changegroup(bundle2) + .maybe_resolve_changegroup(ctx.clone(), bundle2) .and_then({ cloned!(resolver); move |(cg_push, bundle2)| { @@ -103,11 +103,11 @@ fn resolve_push( } }) .and_then({ - cloned!(resolver); + cloned!(ctx, resolver); move |(cg_push, bookmark_push, bundle2)| { if let Some(cg_push) = cg_push { resolver - .resolve_b2xtreegroup2(bundle2) + .resolve_b2xtreegroup2(ctx, bundle2) .map(|(manifests, bundle2)| { (Some((cg_push, manifests)), bookmark_push, bundle2) }) @@ -201,18 +201,18 @@ fn resolve_pushrebase( resolver .maybe_resolve_pushvars(bundle2) .and_then({ - cloned!(resolver); + cloned!(ctx, resolver); move |(maybe_pushvars, bundle2)| { resolver - .resolve_b2xtreegroup2(bundle2) + .resolve_b2xtreegroup2(ctx, bundle2) .map(move |(manifests, bundle2)| (manifests, maybe_pushvars, bundle2)) } }) .and_then({ - cloned!(resolver); + cloned!(ctx, resolver); move |(manifests, maybe_pushvars, bundle2)| { resolver - .maybe_resolve_changegroup(bundle2) + .maybe_resolve_changegroup(ctx, bundle2) .map(move |(cg_push, bundle2)| (cg_push, manifests, maybe_pushvars, bundle2)) } }) @@ -270,7 +270,7 @@ fn resolve_pushrebase( cloned!(ctx, resolver); move |(changesets, bookmark_pushes, maybe_pushvars, onto)| { resolver - .run_hooks(changesets.clone(), maybe_pushvars, &onto) + .run_hooks(ctx.clone(), changesets.clone(), maybe_pushvars, &onto) .map_err(|err| match err { RunHooksError::Failures((cs_hook_failures, 
file_hook_failures)) => { let mut err_msgs = vec![]; @@ -483,6 +483,7 @@ impl Bundle2Resolver { /// their upload should be used for uploading changesets fn maybe_resolve_changegroup( &self, + ctx: CoreContext, bundle2: BoxStream, ) -> BoxFuture<(Option, BoxStream), Error> { let repo = self.repo.clone(); @@ -498,10 +499,11 @@ impl Bundle2Resolver { let (c, f) = split_changegroup(parts); convert_to_revlog_changesets(c) .collect() - .and_then(|changesets| { + .and_then(move |changesets| { upload_hg_blobs( + ctx.clone(), repo.clone(), - convert_to_revlog_filelog(repo, f), + convert_to_revlog_filelog(ctx.clone(), repo, f), UploadBlobsType::EnsureNoDuplicates, ).map(move |upload_map| { let mut filelogs = HashMap::new(); @@ -591,6 +593,7 @@ impl Bundle2Resolver { /// their upload as well as their parsed content should be used for uploading changesets. fn resolve_b2xtreegroup2( &self, + ctx: CoreContext, bundle2: BoxStream, ) -> BoxFuture<(Manifests, BoxStream), Error> { let repo = self.repo.clone(); @@ -600,6 +603,7 @@ impl Bundle2Resolver { Some(Bundle2Item::B2xTreegroup2(_, parts)) | Some(Bundle2Item::B2xRebasePack(_, parts)) => { upload_hg_blobs( + ctx, repo, TreemanifestBundle2Parser::new(parts), UploadBlobsType::IgnoreDuplicates, @@ -943,6 +947,7 @@ impl Bundle2Resolver { fn run_hooks( &self, + ctx: CoreContext, changesets: Changesets, pushvars: Option>, onto_bookmark: &Bookmark, @@ -953,11 +958,13 @@ impl Bundle2Resolver { futs.push( self.hook_manager .run_changeset_hooks_for_bookmark( + ctx.clone(), hg_cs_id.clone(), onto_bookmark, pushvars.clone(), ) .join(self.hook_manager.run_file_hooks_for_bookmark( + ctx.clone(), hg_cs_id, onto_bookmark, pushvars.clone(), diff --git a/bundle2-resolver/src/upload_blobs.rs b/bundle2-resolver/src/upload_blobs.rs index b18a4ff2c6..f006181014 100644 --- a/bundle2-resolver/src/upload_blobs.rs +++ b/bundle2-resolver/src/upload_blobs.rs @@ -11,6 +11,7 @@ use futures::Stream; use futures_ext::{BoxFuture, FutureExt}; use blobrepo::BlobRepo; +use context::CoreContext; use mercurial_types::HgNodeKey; use errors::*; @@ -19,14 +20,14 @@ use errors::*; pub trait UploadableHgBlob { type Value: Send + 'static; - fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>; + fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>; } /// Represents data that is Thrift-encoded and can be uploaded to the blobstore. 
pub trait UploadableBlob { type Value: Send + 'static; - fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>; + fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>; } #[derive(PartialEq, Eq)] @@ -37,6 +38,7 @@ pub enum UploadBlobsType { use self::UploadBlobsType::*; pub fn upload_hg_blobs( + ctx: CoreContext, repo: Arc, blobs: S, ubtype: UploadBlobsType, @@ -47,7 +49,7 @@ where { blobs .fold(HashMap::new(), move |mut map, item| { - let (key, value) = item.upload(&repo)?; + let (key, value) = item.upload(ctx.clone(), &repo)?; ensure_msg!( map.insert(key.clone(), value).is_none() || ubtype == IgnoreDuplicates, "HgBlob {:?} already provided before", diff --git a/bundle2-resolver/src/wirepackparser.rs b/bundle2-resolver/src/wirepackparser.rs index ee16256ea9..462b7d765e 100644 --- a/bundle2-resolver/src/wirepackparser.rs +++ b/bundle2-resolver/src/wirepackparser.rs @@ -8,6 +8,7 @@ use std::fmt::Debug; use std::mem; use bytes::Bytes; +use context::CoreContext; use failure::Compat; use futures::{Future, Poll, Stream}; use futures::future::Shared; @@ -91,7 +92,7 @@ impl UploadableHgBlob for TreemanifestEntry { Shared>>, ); - fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> { + fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> { let node_key = self.node_key; let manifest_content = self.manifest_content; let p1 = self.p1; @@ -110,7 +111,7 @@ impl UploadableHgBlob for TreemanifestEntry { p2: self.p2, path: node_key.path.clone(), }; - upload.upload(repo).map(move |(_node, value)| { + upload.upload(ctx, repo).map(move |(_node, value)| { ( node_key, ( diff --git a/cache-warmup/src/cache-warmup.rs b/cache-warmup/src/cache-warmup.rs index 208eb548f5..385df3f7c8 100644 --- a/cache-warmup/src/cache-warmup.rs +++ b/cache-warmup/src/cache-warmup.rs @@ -55,7 +55,7 @@ fn blobstore_and_filenodes_warmup( ) -> BoxFuture<(), Error> { // TODO(stash): Arbitrary number. Tweak somehow? let buffer_size = 100; - repo.get_changeset_by_changesetid(&revision) + repo.get_changeset_by_changesetid(ctx.clone(), &revision) .map({ let repo = repo.clone(); move |cs| repo.get_root_entry(&cs.manifestid()) @@ -65,7 +65,7 @@ fn blobstore_and_filenodes_warmup( info!(logger, "starting precaching"); let rootpath = None; let mut i = 0; - recursive_entry_stream(rootpath, root_entry) + recursive_entry_stream(ctx.clone(), rootpath, root_entry) .filter(|&(ref _path, ref entry)| entry.get_type() == Type::Tree) .map(move |(path, entry)| { let hash = entry.get_hash(); diff --git a/cmdlib/src/args.rs b/cmdlib/src/args.rs index bde9078dd5..4d59d3faed 100644 --- a/cmdlib/src/args.rs +++ b/cmdlib/src/args.rs @@ -23,6 +23,7 @@ use cachelib; use slog_glog_fmt::default_drain as glog_drain; use changesets::{SqlChangesets, SqlConstructors}; +use context::CoreContext; use hooks::HookManager; use mercurial_types::RepositoryId; use metaconfig::{ManifoldArgs, RepoConfigs, RepoReadOnly, RepoType}; @@ -246,14 +247,22 @@ pub fn open_sql_changesets(matches: &ArgMatches) -> Result { /// Create a new `MononokeRepo` -- for local instances, expect its contents to be empty. #[inline] -pub fn create_repo<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result { - open_repo_internal(logger, matches, true) +pub fn create_repo<'a>( + ctx: CoreContext, + logger: &Logger, + matches: &ArgMatches<'a>, +) -> Result { + open_repo_internal(ctx, logger, matches, true) } /// Open an existing `BlobRepo` -- for local instances, expect contents to already be there. 
#[inline] -pub fn open_repo<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result { - open_repo_internal(logger, matches, false) +pub fn open_repo<'a>( + ctx: CoreContext, + logger: &Logger, + matches: &ArgMatches<'a>, +) -> Result { + open_repo_internal(ctx, logger, matches, false) } pub fn setup_repo_dir>(data_dir: P, create: bool) -> Result<()> { @@ -451,6 +460,7 @@ fn find_repo_type<'a>(matches: &ArgMatches<'a>) -> Result<(String, RepoType)> { } fn open_repo_internal<'a>( + ctx: CoreContext, logger: &Logger, matches: &ArgMatches<'a>, create: bool, @@ -481,7 +491,7 @@ fn open_repo_internal<'a>( let blobrepo = open_blobrepo(logger.clone(), repotype.clone(), repo_id, myrouter_port)?; let hook_manager = - HookManager::new_with_blobrepo(Default::default(), blobrepo.clone(), logger.clone()); + HookManager::new_with_blobrepo(ctx, Default::default(), blobrepo.clone(), logger); // TODO fixup imports Ok(MononokeRepo::new( blobrepo, diff --git a/cmdlib/src/blobimport_lib/changeset.rs b/cmdlib/src/blobimport_lib/changeset.rs index 61db53d168..ae06f66168 100644 --- a/cmdlib/src/blobimport_lib/changeset.rs +++ b/cmdlib/src/blobimport_lib/changeset.rs @@ -147,6 +147,7 @@ fn parse_changeset(revlog_repo: RevlogRepo, csid: HgChangesetId) -> ParseChanges } fn upload_entry( + ctx: CoreContext, blobrepo: &BlobRepo, entry: RevlogEntry, path: Option, @@ -183,7 +184,7 @@ fn upload_entry( p2: p2.cloned(), path: RepoPath::DirectoryPath(path), }; - let (_, upload_fut) = try_boxfuture!(upload.upload(&blobrepo)); + let (_, upload_fut) = try_boxfuture!(upload.upload(ctx, &blobrepo)); upload_fut } Type::File(ft) => { @@ -195,7 +196,7 @@ fn upload_entry( p2: p2.cloned(), path, }; - let (_, upload_fut) = try_boxfuture!(upload.upload(&blobrepo)); + let (_, upload_fut) = try_boxfuture!(upload.upload(ctx, &blobrepo)); upload_fut } } @@ -243,8 +244,7 @@ impl UploadChangesets { changesets .and_then({ - let revlogrepo = revlogrepo.clone(); - let blobrepo = blobrepo.clone(); + cloned!(ctx, revlogrepo, blobrepo); move |csid| { let ParseChangeset { revlogcs, @@ -253,7 +253,7 @@ impl UploadChangesets { } = parse_changeset(revlogrepo.clone(), HgChangesetId::new(csid)); let rootmf = rootmf.map({ - let blobrepo = blobrepo.clone(); + cloned!(ctx, blobrepo); move |rootmf| { match rootmf { None => future::ok(None).boxify(), @@ -271,7 +271,7 @@ impl UploadChangesets { path: RepoPath::root(), }; upload - .upload(&blobrepo) + .upload(ctx, &blobrepo) .into_future() .and_then(|(_, entry)| entry) .map(Some) @@ -282,8 +282,8 @@ impl UploadChangesets { }); let entries = entries.map({ - let blobrepo = blobrepo.clone(); - move |(path, entry)| upload_entry(&blobrepo, entry, path) + cloned!(ctx, blobrepo); + move |(path, entry)| upload_entry(ctx.clone(), &blobrepo, entry, path) }); revlogcs diff --git a/cmds/admin/main.rs b/cmds/admin/main.rs index e2fa149881..9734acdd54 100644 --- a/cmds/admin/main.rs +++ b/cmds/admin/main.rs @@ -186,6 +186,7 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> { } fn fetch_content_from_manifest( + ctx: CoreContext, logger: Logger, mf: Box, element: MPathElement, @@ -198,7 +199,7 @@ fn fetch_content_from_manifest( element, entry.get_hash() ); - entry.get_content() + entry.get_content(ctx) } None => try_boxfuture!(Err(format_err!("failed to lookup element {:?}", element))), } @@ -228,38 +229,38 @@ fn fetch_content( path: &str, ) -> BoxFuture { let path = try_boxfuture!(MPath::new(path)); - let resolved_cs_id = resolve_hg_rev(ctx, repo, rev); + let resolved_cs_id = resolve_hg_rev(ctx.clone(), repo, rev); let mf = 
resolved_cs_id .and_then({ - cloned!(repo); - move |cs_id| repo.get_changeset_by_changesetid(&cs_id) + cloned!(ctx, repo); + move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id) }) .map(|cs| cs.manifestid().clone()) .and_then({ - cloned!(repo); - move |root_mf_id| repo.get_manifest_by_nodeid(&root_mf_id) + cloned!(ctx, repo); + move |root_mf_id| repo.get_manifest_by_nodeid(ctx, &root_mf_id) }); let all_but_last = iter_ok::<_, Error>(path.clone().into_iter().rev().skip(1).rev()); let folded: BoxFuture<_, Error> = mf.and_then({ - cloned!(logger); + cloned!(ctx, logger); move |mf| { all_but_last.fold(mf, move |mf, element| { - fetch_content_from_manifest(logger.clone(), mf, element).and_then(|content| { - match content { + fetch_content_from_manifest(ctx.clone(), logger.clone(), mf, element).and_then( + |content| match content { Content::Tree(mf) => Ok(mf), content => Err(format_err!("expected tree entry, found {:?}", content)), - } - }) + }, + ) }) } }).boxify(); let basename = path.basename().clone(); folded - .and_then(move |mf| fetch_content_from_manifest(logger.clone(), mf, basename)) + .and_then(move |mf| fetch_content_from_manifest(ctx, logger.clone(), mf, basename)) .boxify() } @@ -272,7 +273,7 @@ pub fn fetch_bonsai_changeset( hg_changeset_id .and_then({ - let repo = repo.clone(); + cloned!(ctx, repo); move |hg_cs| repo.get_bonsai_from_hg(ctx, &hg_cs) }) .and_then({ @@ -280,12 +281,13 @@ pub fn fetch_bonsai_changeset( move |maybe_bonsai| maybe_bonsai.ok_or(err_msg(format!("bonsai not found for {}", rev))) }) .and_then({ - cloned!(repo); - move |bonsai| repo.get_bonsai_changeset(bonsai) + cloned!(ctx, repo); + move |bonsai| repo.get_bonsai_changeset(ctx, bonsai) }) } fn get_cache( + ctx: CoreContext, blobstore: &B, key: String, mode: String, @@ -293,9 +295,9 @@ fn get_cache( if mode == "cache-only" { blobstore.get_cache_only(key) } else if mode == "no-fill" { - blobstore.get_no_cache_fill(key) + blobstore.get_no_cache_fill(ctx, key) } else { - blobstore.get(key) + blobstore.get(ctx, key) } } @@ -331,11 +333,13 @@ fn slice_to_str(slice: &[u8]) -> String { } fn hg_manifest_diff( + ctx: CoreContext, repo: BlobRepo, left: &HgManifestId, right: &HgManifestId, ) -> impl Future, Error = Error> { bonsai_diff( + ctx, repo.get_root_entry(left), Some(repo.get_root_entry(right)), None, @@ -366,13 +370,14 @@ fn hg_manifest_diff( } fn hg_changeset_diff( + ctx: CoreContext, repo: BlobRepo, left_id: &HgChangesetId, right_id: &HgChangesetId, ) -> impl Future { ( - repo.get_changeset_by_changesetid(left_id), - repo.get_changeset_by_changesetid(right_id), + repo.get_changeset_by_changesetid(ctx.clone(), left_id), + repo.get_changeset_by_changesetid(ctx.clone(), right_id), ).into_future() .and_then({ cloned!(repo, left_id, right_id); @@ -418,10 +423,12 @@ fn hg_changeset_diff( )) } - hg_manifest_diff(repo, left.manifestid(), right.manifestid()).map(move |mdiff| { - diff.diff.extend(mdiff); - diff - }) + hg_manifest_diff(ctx, repo, left.manifestid(), right.manifestid()).map( + move |mdiff| { + diff.diff.extend(mdiff); + diff + }, + ) } }) } @@ -441,24 +448,27 @@ fn build_skiplist_index( repo.get_bonsai_heads_maybe_stale(ctx.clone()) .collect() - .and_then(move |heads| { - loop_fn( - (heads.into_iter(), skiplist_index), - move |(mut heads, skiplist_index)| match heads.next() { - Some(head) => { - let f = skiplist_index.add_node( - ctx.clone(), - cs_fetcher.clone(), - head, - max_index_depth, - ); + .and_then({ + cloned!(ctx); + move |heads| { + loop_fn( + (heads.into_iter(), skiplist_index), + move 
|(mut heads, skiplist_index)| match heads.next() { + Some(head) => { + let f = skiplist_index.add_node( + ctx.clone(), + cs_fetcher.clone(), + head, + max_index_depth, + ); - f.map(move |()| Loop::Continue((heads, skiplist_index))) - .boxify() - } - None => ok(Loop::Break(skiplist_index)).boxify(), - }, - ) + f.map(move |()| Loop::Continue((heads, skiplist_index))) + .boxify() + } + None => ok(Loop::Break(skiplist_index)).boxify(), + }, + ) + } }) .inspect(|skiplist_index| { println!( @@ -484,20 +494,24 @@ fn build_skiplist_index( compact_protocol::serialize(&thrift_merge_graph) }) - .and_then(move |bytes| { - debug!(logger, "storing {} bytes", bytes.len()); - blobstore.put(key, BlobstoreBytes::from_bytes(bytes)) + .and_then({ + cloned!(ctx); + move |bytes| { + debug!(logger, "storing {} bytes", bytes.len()); + blobstore.put(ctx, key, BlobstoreBytes::from_bytes(bytes)) + } }) .boxify() } fn read_skiplist_index( + ctx: CoreContext, repo: BlobRepo, key: S, logger: Logger, ) -> BoxFuture<(), Error> { repo.get_blobstore() - .get(key.to_string()) + .get(ctx, key.to_string()) .and_then(move |maybebytes| { match maybebytes { Some(bytes) => { @@ -525,6 +539,7 @@ fn main() -> Result<()> { let future = match matches.subcommand() { (BLOBSTORE_FETCH, Some(sub_m)) => { + let ctx = CoreContext::test_mock(); let key = sub_m.value_of("KEY").unwrap().to_string(); let decode_as = sub_m.value_of("decode-as").map(|val| val.to_string()); let use_memcache = sub_m.value_of("use-memcache").map(|val| val.to_string()); @@ -536,9 +551,9 @@ fn main() -> Result<()> { match (use_memcache, no_prefix) { (None, false) => { let blobstore = PrefixBlobstore::new(blobstore, repo_id.prefix()); - blobstore.get(key.clone()).boxify() + blobstore.get(ctx, key.clone()).boxify() } - (None, true) => blobstore.get(key.clone()).boxify(), + (None, true) => blobstore.get(ctx, key.clone()).boxify(), (Some(mode), false) => { let blobstore = new_memcache_blobstore( blobstore, @@ -546,7 +561,7 @@ fn main() -> Result<()> { manifold_args.bucket.as_ref(), ).unwrap(); let blobstore = PrefixBlobstore::new(blobstore, repo_id.prefix()); - get_cache(&blobstore, key.clone(), mode) + get_cache(ctx.clone(), &blobstore, key.clone(), mode) } (Some(mode), true) => { let blobstore = new_memcache_blobstore( @@ -554,7 +569,7 @@ fn main() -> Result<()> { "manifold", manifold_args.bucket.as_ref(), ).unwrap(); - get_cache(&blobstore, key.clone(), mode) + get_cache(ctx.clone(), &blobstore, key.clone(), mode) } }.map(move |value| { println!("{:?}", value); @@ -588,7 +603,7 @@ fn main() -> Result<()> { // TODO(T37478150, luk) This is not a test case, fix it up in future diffs let ctx = CoreContext::test_mock(); - let repo = args::open_repo(&logger, &matches)?; + let repo = args::open_repo(ctx.clone(), &logger, &matches)?; fetch_bonsai_changeset(ctx, rev, repo.blobrepo()) .map(|bcs| { println!("{:?}", bcs); @@ -604,7 +619,7 @@ fn main() -> Result<()> { // TODO(T37478150, luk) This is not a test case, fix it up in future diffs let ctx = CoreContext::test_mock(); - let repo = args::open_repo(&logger, &matches)?; + let repo = args::open_repo(ctx.clone(), &logger, &matches)?; fetch_content(ctx, logger.clone(), repo.blobrepo(), rev, path) .and_then(|content| { match content { @@ -652,16 +667,19 @@ fn main() -> Result<()> { config_repo::handle_command(sub_m) } (BOOKMARKS, Some(sub_m)) => { - args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?; - // TODO(T37478150, luk) This is not a test case, fix it up in future diffs let ctx = 
CoreContext::test_mock(); + args::init_cachelib(&matches); + let repo = args::open_repo(ctx.clone(), &logger, &matches)?; + bookmarks_manager::handle_command(ctx, &repo.blobrepo(), sub_m, logger) } (HG_CHANGESET, Some(sub_m)) => match sub_m.subcommand() { (HG_CHANGESET_DIFF, Some(sub_m)) => { + // TODO(T37478150, luk) This is not a test case, fix it up in future diffs + let ctx = CoreContext::test_mock(); + let left_cs = sub_m .value_of("LEFT_CS") .ok_or(format_err!("LEFT_CS argument expected")) @@ -672,12 +690,14 @@ fn main() -> Result<()> { .and_then(HgChangesetId::from_str); args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?.blobrepo().clone(); + let repo = args::open_repo(ctx.clone(), &logger, &matches)? + .blobrepo() + .clone(); (left_cs, right_cs) .into_future() .and_then(move |(left_cs, right_cs)| { - hg_changeset_diff(repo, &left_cs, &right_cs) + hg_changeset_diff(ctx, repo, &left_cs, &right_cs) }) .and_then(|diff| { serde_json::to_writer(io::stdout(), &diff) @@ -696,8 +716,13 @@ fn main() -> Result<()> { .ok_or(format_err!("STOP_CS argument expected")) .and_then(HgChangesetId::from_str); + // TODO(T37478150, luk) This is not a test case, fix it up in future diffs + let ctx = CoreContext::test_mock(); + args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?.blobrepo().clone(); + let repo = args::open_repo(ctx.clone(), &logger, &matches)? + .blobrepo() + .clone(); let ctx = CoreContext::test_mock(); (start_cs, stop_cs) @@ -746,16 +771,22 @@ fn main() -> Result<()> { (SKIPLIST, Some(sub_m)) => match sub_m.subcommand() { (SKIPLIST_BUILD, Some(sub_m)) => { args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?.blobrepo().clone(); let ctx = CoreContext::test_mock(); + let repo = args::open_repo(ctx.clone(), &logger, &matches)? + .blobrepo() + .clone(); build_skiplist_index(ctx, repo, sub_m.value_of("BLOBSTORE_KEY").unwrap(), logger) } (SKIPLIST_READ, Some(sub_m)) => { args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?.blobrepo().clone(); + let ctx = CoreContext::test_mock(); + let repo = args::open_repo(ctx.clone(), &logger, &matches)? 
+ .blobrepo() + .clone(); read_skiplist_index( + ctx.clone(), repo, sub_m .value_of("BLOBSTORE_KEY") diff --git a/cmds/aliasverify.rs b/cmds/aliasverify.rs index 20ce693059..c749d4611d 100644 --- a/cmds/aliasverify.rs +++ b/cmds/aliasverify.rs @@ -12,6 +12,7 @@ extern crate clap; #[macro_use] extern crate cloned; extern crate cmdlib; +extern crate context; extern crate failure_ext as failure; extern crate futures; extern crate futures_ext; @@ -35,6 +36,7 @@ use blobrepo::BlobRepo; use blobrepo::alias::get_sha256; use changesets::SqlChangesets; use cmdlib::args; +use context::CoreContext; use mercurial_types::RepositoryId; use mononoke_types::{ChangesetId, ContentId, FileChange, hash::Sha256}; @@ -79,6 +81,7 @@ impl AliasVerification { fn get_file_changes_vector( &self, + ctx: CoreContext, bcs_id: ChangesetId, ) -> BoxFuture>, Error> { let cs_cnt = self.cs_processed.fetch_add(1, Ordering::Relaxed); @@ -88,7 +91,7 @@ impl AliasVerification { } self.blobrepo - .get_bonsai_changeset(bcs_id) + .get_bonsai_changeset(ctx, bcs_id) .map(|bcs| { let file_changes: Vec<_> = bcs.file_changes() .map(|(_, file_change)| file_change.cloned()) @@ -119,6 +122,7 @@ impl AliasVerification { fn process_missing_alias_blob( &self, + ctx: CoreContext, alias: Sha256, content_id: ContentId, ) -> impl Future { @@ -133,19 +137,20 @@ impl AliasVerification { match mode { Mode::Verify => Ok(()).into_future().left_future(), Mode::Generate => blobrepo - .upload_alias_to_file_content_id(alias, content_id) + .upload_alias_to_file_content_id(ctx, alias, content_id) .right_future(), } } fn process_alias( &self, + ctx: CoreContext, alias: Sha256, content_id: ContentId, ) -> impl Future { let av = self.clone(); self.blobrepo - .get_file_content_id_by_alias(alias) + .get_file_content_id_by_alias(ctx.clone(), alias) .then(move |result| match result { Ok(content_id_from_blobstore) => { av.check_alias_blob(alias, content_id, content_id_from_blobstore) @@ -153,7 +158,7 @@ impl AliasVerification { } Err(_) => { // the blob with alias is not found - av.process_missing_alias_blob(alias, content_id) + av.process_missing_alias_blob(ctx, alias, content_id) .right_future() } }) @@ -161,14 +166,15 @@ impl AliasVerification { pub fn process_file_content( &self, + ctx: CoreContext, content_id: ContentId, ) -> impl Future { let repo = self.blobrepo.clone(); let av = self.clone(); - repo.get_file_content_by_content_id(content_id) + repo.get_file_content_by_content_id(ctx.clone(), content_id) .map(|content| get_sha256(&content.into_bytes())) - .and_then(move |alias| av.process_alias(alias, content_id)) + .and_then(move |alias| av.process_alias(ctx, alias, content_id)) } fn print_report(&self, partial: bool) { @@ -183,7 +189,12 @@ impl AliasVerification { ); } - fn get_bounded(&self, min_id: u64, max_id: u64) -> impl Future { + fn get_bounded( + &self, + ctx: CoreContext, + min_id: u64, + max_id: u64, + ) -> impl Future { let av = self.clone(); let av_for_process = self.clone(); let av_for_report = self.clone(); @@ -196,7 +207,10 @@ impl AliasVerification { // stream of cs_id .get_list_bs_cs_id_in_range(self.repoid, min_id, max_id) // future of vectors of file changes - .map(move |bcs_id| av.get_file_changes_vector(bcs_id)) + .map({ + cloned!(ctx); + move |bcs_id| av.get_file_changes_vector(ctx.clone(), bcs_id) + }) .buffer_unordered(1000) // Stream of file_changes .map( move |file_changes_vec| { @@ -212,7 +226,7 @@ impl AliasVerification { if let Some(file_change) = file_change { let content_id = file_change.content_id().clone(); av_for_process 
- .process_file_content(content_id) + .process_file_content(ctx.clone(), content_id) .left_future() } else { Ok(()).into_future().right_future() @@ -225,6 +239,7 @@ impl AliasVerification { pub fn verify_all( &self, + ctx: CoreContext, step: u64, min_cs_db_id: u64, ) -> impl Future { @@ -247,7 +262,7 @@ impl AliasVerification { stream::iter_ok(bounds.into_iter()) }) .flatten_stream() - .and_then(move |(min_val, max_val)| av.get_bounded(min_val, max_val)) + .and_then(move |(min_val, max_val)| av.get_bounded(ctx.clone(), min_val, max_val)) .for_each(|()| Ok(())) .map(move |()| av_for_report.print_report(false)) } @@ -290,10 +305,11 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> { fn main() -> Result<()> { let matches = setup_app().get_matches(); + let ctx = CoreContext::test_mock(); let logger = args::get_logger(&matches); args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?; + let repo = args::open_repo(ctx.clone(), &logger, &matches)?; let blobrepo = Arc::new(repo.blobrepo().clone()); let sqlchangesets = Arc::new(args::open_sql_changesets(&matches)?); @@ -316,7 +332,7 @@ fn main() -> Result<()> { let repoid = args::get_repo_id(&matches); let aliasimport = AliasVerification::new(logger, blobrepo, repoid, sqlchangesets, mode) - .verify_all(step, min_cs_db_id); + .verify_all(ctx, step, min_cs_db_id); let mut runtime = tokio::runtime::Runtime::new()?; let result = runtime.block_on(aliasimport); diff --git a/cmds/blobimport.rs b/cmds/blobimport.rs index 99cb3f861c..3c0371210c 100644 --- a/cmds/blobimport.rs +++ b/cmds/blobimport.rs @@ -58,10 +58,11 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> { fn main() -> Result<()> { let matches = setup_app().get_matches(); + let ctx = CoreContext::test_mock(); let logger = args::get_logger(&matches); args::init_cachelib(&matches); - let repo = args::create_repo(&logger, &matches)?; + let repo = args::create_repo(ctx, &logger, &matches)?; let blobrepo = Arc::new(repo.blobrepo().clone()); let revlogrepo_path = matches diff --git a/cmds/bonsai_verify/main.rs b/cmds/bonsai_verify/main.rs index 5a0d512189..87bcf26205 100644 --- a/cmds/bonsai_verify/main.rs +++ b/cmds/bonsai_verify/main.rs @@ -7,6 +7,8 @@ #![deny(warnings)] extern crate clap; +#[macro_use] +extern crate cloned; extern crate failure_ext as failure; extern crate futures; #[macro_use] @@ -86,7 +88,10 @@ fn main() -> Result<()> { let matches = setup_app().get_matches(); let logger = args::get_logger(&matches); args::init_cachelib(&matches); - let repo = args::open_repo(&logger, &matches)?; + + // TODO(luk): This is not a test use case, fix it in next diffs + let ctx = CoreContext::test_mock(); + let repo = args::open_repo(ctx.clone(), &logger, &matches)?; let config = config::get_config(&matches).expect("getting configuration failed"); let start_points = get_start_points(&matches); @@ -102,9 +107,6 @@ fn main() -> Result<()> { // matter much. let (end_sender, end_receiver) = ::std::sync::mpsc::channel(); - // TODO(luk): This is not a test use case, fix it in next diffs - let ctx = CoreContext::test_mock(); - // The future::lazy is to ensure that bonsai_verify (which calls tokio::spawn) is called after // tokio::run, not before. 
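That ordering constraint is easy to trip over: in the futures 0.1 / tokio 0.1 world this code targets, `tokio::spawn` panics unless it executes on a running executor, so anything that spawns has to be deferred inside a `future::lazy` that the runtime itself polls. A sketch under those version assumptions:

```rust
// Sketch assuming futures 0.1 / tokio 0.1, the APIs this code is written
// against. `tokio::spawn` must run inside the executor, so the spawning
// closure is deferred with `future::lazy` until `tokio::run` polls it.
extern crate futures; // futures = "0.1"
extern crate tokio; // tokio = "0.1"

use futures::future::{self, Future};

fn main() {
    // Calling tokio::spawn(...) here, before tokio::run, would panic:
    // there is no executor yet.
    tokio::run(future::lazy(|| {
        // Now we are inside the runtime, so spawning is safe.
        tokio::spawn(future::ok(()).map(|()| println!("spawned task ran")));
        future::ok(())
    }));
}
```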
let verify_fut = future::lazy({ @@ -115,7 +117,7 @@ fn main() -> Result<()> { let ignored = ignored.clone(); move || { let bonsai_verify = BonsaiMFVerify { - ctx: ctx, + ctx: ctx.clone(), logger: logger.clone(), repo: repo.blobrepo().clone(), follow_limit, @@ -126,7 +128,7 @@ fn main() -> Result<()> { bonsai_verify .verify(start_points) .and_then({ - let logger = logger.clone(); + cloned!(ctx, logger); move |(result, meta)| { let logger = logger.new(o!["changeset_id" => format!("{}", meta.changeset_id)]); @@ -175,7 +177,7 @@ fn main() -> Result<()> { if print_changes { let logger = logger.clone(); let diff_fut = difference - .changes() + .changes(ctx.clone()) .map(move |changed_entry| { info!( logger, diff --git a/cmds/runhook.rs b/cmds/runhook.rs index 7558e5d1ee..b996dded8f 100644 --- a/cmds/runhook.rs +++ b/cmds/runhook.rs @@ -18,6 +18,7 @@ extern crate blobstore; extern crate cachelib; extern crate clap; extern crate cmdlib; +extern crate context; extern crate failure_ext as failure; extern crate futures; #[macro_use] @@ -42,6 +43,7 @@ extern crate tempdir; use blobrepo::BlobRepo; use bookmarks::Bookmark; use clap::{App, ArgMatches}; +use context::CoreContext; use failure::{Error, Result}; use futures::Future; use futures_ext::{BoxFuture, FutureExt}; @@ -77,6 +79,7 @@ fn run_hook( cmdlib::args::init_cachelib(&matches); + let ctx = CoreContext::test_mock(); let logger = { let level = if matches.is_present("debug") { Level::Debug @@ -109,7 +112,8 @@ fn run_hook( println!("Hook file is {} revision is {:?}", hook_file, revstr); println!("Hook code is {}", code); println!("=============================="); - let mut hook_manager = HookManager::new_with_blobrepo(Default::default(), repo.clone(), logger); + let mut hook_manager = + HookManager::new_with_blobrepo(ctx.clone(), Default::default(), repo.clone(), logger); let hook = LuaHook { name: String::from("testhook"), code, @@ -124,7 +128,7 @@ fn run_hook( let id = try_boxfuture!(HgChangesetId::from_str(revstr)); if file_hook { hook_manager - .run_file_hooks_for_bookmark(id, &bookmark, None) + .run_file_hooks_for_bookmark(ctx, id, &bookmark, None) .map(|executions| { for execution in executions.iter() { if let (_, HookExecution::Rejected(_)) = execution { @@ -137,7 +141,7 @@ fn run_hook( .boxify() } else { hook_manager - .run_changeset_hooks_for_bookmark(id, &bookmark, None) + .run_changeset_hooks_for_bookmark(ctx, id, &bookmark, None) .map(|executions| executions.get(0).unwrap().1.clone()) .boxify() } diff --git a/hgproto/src/commands.rs b/hgproto/src/commands.rs index 8c67e8bb24..b155b304ed 100644 --- a/hgproto/src/commands.rs +++ b/hgproto/src/commands.rs @@ -549,6 +549,7 @@ pub trait HgCommands { mod test { use super::*; + use context::CoreContext; use futures::{future, stream}; use hooks::{InMemoryChangesetStore, InMemoryFileContentStore}; use slog::{Discard, Drain}; @@ -678,10 +679,12 @@ mod test { } fn create_hook_manager() -> Arc { + let ctx = CoreContext::test_mock(); let changeset_store = InMemoryChangesetStore::new(); let content_store = InMemoryFileContentStore::new(); let logger = Logger::root(Discard {}.ignore_res(), o!()); Arc::new(HookManager::new( + ctx, "some_repo".into(), Box::new(changeset_store), Arc::new(content_store), diff --git a/hgproto/src/lib.rs b/hgproto/src/lib.rs index 28429514f6..be4728a517 100644 --- a/hgproto/src/lib.rs +++ b/hgproto/src/lib.rs @@ -31,6 +31,8 @@ extern crate maplit; #[macro_use] extern crate nom; +#[cfg(test)] +extern crate context; extern crate futures_ext; extern crate mercurial; extern 
crate mercurial_bundles; diff --git a/hook_tailer/tailer.rs b/hook_tailer/tailer.rs index b7324bcc66..d7ca65a8f2 100644 --- a/hook_tailer/tailer.rs +++ b/hook_tailer/tailer.rs @@ -47,6 +47,7 @@ impl Tailer { let content_store = BlobRepoFileContentStore::new((*repo).clone()); let mut hook_manager = HookManager::new( + ctx.clone(), repo_name, Box::new(changeset_store), Arc::new(content_store), @@ -230,15 +231,15 @@ fn run_hooks_for_changeset( cs: ChangesetId, logger: Logger, ) -> impl Future { - repo.get_hg_from_bonsai_changeset(ctx, cs) + repo.get_hg_from_bonsai_changeset(ctx.clone(), cs) .and_then(move |hg_cs| { debug!(logger, "Running file hooks for changeset {:?}", hg_cs); - hm.run_file_hooks_for_bookmark(hg_cs.clone(), &bm, None) + hm.run_file_hooks_for_bookmark(ctx.clone(), hg_cs.clone(), &bm, None) .map(move |res| (hg_cs, res)) .and_then(move |(hg_cs, file_res)| { let hg_cs = hg_cs.clone(); debug!(logger, "Running changeset hooks for changeset {:?}", hg_cs); - hm.run_changeset_hooks_for_bookmark(hg_cs.clone(), &bm, None) + hm.run_changeset_hooks_for_bookmark(ctx, hg_cs.clone(), &bm, None) .map(move |res| { let hook_results = HookResults { file_hooks_results: file_res, diff --git a/hooks/src/hook_loader.rs b/hooks/src/hook_loader.rs index 48e168348f..762e44d93f 100644 --- a/hooks/src/hook_loader.rs +++ b/hooks/src/hook_loader.rs @@ -84,6 +84,7 @@ mod test { use super::ErrorKind; use super::super::*; use async_unit; + use context::CoreContext; use fixtures::many_files_dirs; use metaconfig::repoconfig::{BookmarkParams, HookParams, RepoReadOnly, RepoType}; use slog::{Discard, Drain}; @@ -231,9 +232,10 @@ mod test { } fn hook_manager_blobrepo() -> HookManager { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let logger = Logger::root(Discard {}.ignore_res(), o!()); - HookManager::new_with_blobrepo(Default::default(), repo, logger) + HookManager::new_with_blobrepo(ctx, Default::default(), repo, logger) } } diff --git a/hooks/src/lib.rs b/hooks/src/lib.rs index 3bd1d5e927..a563a28dda 100644 --- a/hooks/src/lib.rs +++ b/hooks/src/lib.rs @@ -51,6 +51,7 @@ extern crate regex; extern crate slog; extern crate tempdir; +extern crate context; extern crate srclient; extern crate thrift; @@ -65,6 +66,7 @@ use asyncmemo::{Asyncmemo, Filler, Weight}; use blobrepo::{BlobRepo, HgBlobChangeset}; use bookmarks::Bookmark; use bytes::Bytes; +use context::CoreContext; pub use errors::*; use failure::{Error, FutureFailureErrorExt}; use futures::{failed, finished, Future, IntoFuture, Stream}; @@ -102,6 +104,7 @@ pub struct HookManager { impl HookManager { pub fn new( + ctx: CoreContext, repo_name: String, changeset_store: Box, content_store: Arc, @@ -112,6 +115,7 @@ impl HookManager { let file_hooks = Arc::new(Mutex::new(HashMap::new())); let filler = HookCacheFiller { + ctx, file_hooks: file_hooks.clone(), repo_name: repo_name.clone(), }; @@ -145,11 +149,13 @@ impl HookManager { } pub fn new_with_blobrepo( + ctx: CoreContext, hook_manager_params: HookManagerParams, blobrepo: BlobRepo, logger: Logger, ) -> HookManager { HookManager::new( + ctx, format!("repo-{:?}", blobrepo.get_repoid()), Box::new(BlobRepoChangesetStore::new(blobrepo.clone())), Arc::new(BlobRepoFileContentStore::new(blobrepo.clone())), @@ -202,6 +208,7 @@ impl HookManager { pub fn run_changeset_hooks_for_bookmark( &self, + ctx: CoreContext, changeset_id: HgChangesetId, bookmark: &Bookmark, maybe_pushvars: Option>, @@ -213,7 +220,7 @@ impl HookManager { .into_iter() .filter(|name| 
self.changeset_hooks.contains_key(name)) .collect(); - self.run_changeset_hooks_for_changeset_id(changeset_id, hooks, maybe_pushvars) + self.run_changeset_hooks_for_changeset_id(ctx, changeset_id, hooks, maybe_pushvars) } None => return finished(Vec::new()).boxify(), } @@ -221,6 +228,7 @@ impl HookManager { fn run_changeset_hooks_for_changeset_id( &self, + ctx: CoreContext, changeset_id: HgChangesetId, hooks: Vec, maybe_pushvars: Option>, @@ -236,7 +244,7 @@ impl HookManager { .collect(); let hooks = try_boxfuture!(hooks); let repo_name = self.repo_name.clone(); - self.get_hook_changeset(changeset_id) + self.get_hook_changeset(ctx.clone(), changeset_id) .and_then({ move |hcs| { let hooks = HookManager::filter_bypassed_hooks( @@ -246,6 +254,7 @@ impl HookManager { ); HookManager::run_changeset_hooks_for_changeset( + ctx, repo_name, hcs.clone(), hooks.clone(), @@ -269,6 +278,7 @@ impl HookManager { } fn run_changeset_hooks_for_changeset( + ctx: CoreContext, repo_name: String, changeset: HookChangeset, hooks: Vec<(String, Arc>)>, @@ -278,18 +288,19 @@ impl HookManager { .map(move |(hook_name, hook)| { let hook_context: HookContext = HookContext::new(hook_name.clone(), repo_name.clone(), changeset.clone()); - HookManager::run_changeset_hook(hook.clone(), hook_context) + HookManager::run_changeset_hook(ctx.clone(), hook.clone(), hook_context) }) .collect(); futures::future::join_all(v).boxify() } fn run_changeset_hook( + ctx: CoreContext, hook: Arc>, hook_context: HookContext, ) -> BoxFuture<(String, HookExecution), Error> { let hook_name = hook_context.hook_name.clone(); - hook.run(hook_context) + hook.run(ctx, hook_context) .map({ cloned!(hook_name); move |he| (hook_name, he) @@ -303,6 +314,7 @@ impl HookManager { pub fn run_file_hooks_for_bookmark( &self, + ctx: CoreContext, changeset_id: HgChangesetId, bookmark: &Bookmark, maybe_pushvars: Option>, @@ -321,6 +333,7 @@ impl HookManager { .filter_map(|name| file_hooks.get(&name).map(|hook| (name, hook.clone()))) .collect(); self.run_file_hooks_for_changeset_id( + ctx, changeset_id, hooks, maybe_pushvars, @@ -333,6 +346,7 @@ impl HookManager { fn run_file_hooks_for_changeset_id( &self, + ctx: CoreContext, changeset_id: HgChangesetId, hooks: Vec<(String, (Arc>, Option))>, maybe_pushvars: Option>, @@ -343,7 +357,7 @@ impl HookManager { "Running file hooks for changeset id {:?}", changeset_id ); let cache = self.cache.clone(); - self.get_hook_changeset(changeset_id) + self.get_hook_changeset(ctx.clone(), changeset_id) .and_then(move |hcs| { let hooks = HookManager::filter_bypassed_hooks( hooks.clone(), @@ -433,11 +447,15 @@ impl HookManager { .boxify() } - fn get_hook_changeset(&self, changeset_id: HgChangesetId) -> BoxFuture { + fn get_hook_changeset( + &self, + ctx: CoreContext, + changeset_id: HgChangesetId, + ) -> BoxFuture { let content_store = self.content_store.clone(); let hg_changeset = self.changeset_store - .get_changeset_by_changesetid(&changeset_id); - let changed_files = self.changeset_store.get_changed_files(&changeset_id); + .get_changeset_by_changesetid(ctx.clone(), &changeset_id); + let changed_files = self.changeset_store.get_changed_files(ctx, &changeset_id); let reviewers_acl_checker = self.reviewers_acl_checker.clone(); Box::new((hg_changeset, changed_files).into_future().and_then( move |(changeset, changed_files)| { @@ -512,7 +530,11 @@ pub trait Hook: Send + Sync where T: Clone, { - fn run(&self, hook_context: HookContext) -> BoxFuture; + fn run( + &self, + ctx: CoreContext, + hook_context: HookContext, + ) -> BoxFuture; } 
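Every `Hook` implementation in the tests below is updated against this new signature the same way: `run` receives the context and forwards a clone into whichever `HookFile` or `HookChangeset` accessor it calls. Reduced to synchronous std types — the names here are an illustrative reduction, not the real async API — the shape is:

```rust
// Synchronous, std-only reduction of the updated Hook trait and one
// implementor; the real API returns futures and uses Mononoke types.
#[derive(Clone)]
struct Ctx;

struct HookFile {
    path: String,
    content: String,
}

impl HookFile {
    // Mirrors HookFile::file_content(ctx): accessors now take a context.
    fn file_content(&self, _ctx: Ctx) -> String {
        self.content.clone()
    }
}

enum HookExecution {
    Accepted,
    Rejected(String),
}

trait Hook {
    fn run(&self, ctx: Ctx, file: &HookFile) -> HookExecution;
}

struct ContainsString(String);

impl Hook for ContainsString {
    fn run(&self, ctx: Ctx, file: &HookFile) -> HookExecution {
        if file.file_content(ctx).contains(&self.0) {
            HookExecution::Accepted
        } else {
            HookExecution::Rejected(format!("{}: missing {:?}", file.path, self.0))
        }
    }
}
```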
/// Represents a changeset - more user friendly than the blob changeset @@ -615,9 +637,9 @@ impl HookFile { } } - pub fn contains_string(&self, data: &str) -> BoxFuture { + pub fn contains_string(&self, ctx: CoreContext, data: &str) -> BoxFuture { let data = data.to_string(); - self.file_content() + self.file_content(ctx) .and_then(move |bytes| { let str_content = str::from_utf8(&bytes)?.to_string(); Ok(str_content.contains(&data)) @@ -625,17 +647,17 @@ impl HookFile { .boxify() } - pub fn len(&self) -> BoxFuture { - self.file_content() + pub fn len(&self, ctx: CoreContext) -> BoxFuture { + self.file_content(ctx) .and_then(|bytes| Ok(bytes.len() as u64)) .boxify() } - pub fn file_content(&self) -> BoxFuture { + pub fn file_content(&self, ctx: CoreContext) -> BoxFuture { let path = try_boxfuture!(MPath::new(self.path.as_bytes())); let changeset_id = self.changeset_id.clone(); self.content_store - .get_file_content_for_changeset(self.changeset_id, path.clone()) + .get_file_content_for_changeset(ctx, self.changeset_id, path.clone()) .and_then(move |opt| { opt.ok_or(ErrorKind::NoFileContent(changeset_id, path.into()).into()) }) @@ -643,11 +665,11 @@ impl HookFile { .boxify() } - pub fn file_type(&self) -> BoxFuture { + pub fn file_type(&self, ctx: CoreContext) -> BoxFuture { let path = try_boxfuture!(MPath::new(self.path.as_bytes())); let changeset_id = self.changeset_id.clone(); self.content_store - .get_file_content_for_changeset(self.changeset_id, path.clone()) + .get_file_content_for_changeset(ctx, self.changeset_id, path.clone()) .and_then(move |opt| { opt.ok_or(ErrorKind::NoFileContent(changeset_id, path.into()).into()) }) @@ -677,10 +699,10 @@ impl HookChangeset { } } - pub fn file_content(&self, path: String) -> BoxFuture, Error> { + pub fn file_content(&self, ctx: CoreContext, path: String) -> BoxFuture, Error> { let path = try_boxfuture!(MPath::new(path.as_bytes())); self.content_store - .get_file_content_for_changeset(self.changeset_id, path.clone()) + .get_file_content_for_changeset(ctx, self.changeset_id, path.clone()) .map(|opt| opt.map(|(_, bytes)| bytes)) .boxify() } @@ -726,11 +748,13 @@ impl HookRejectionInfo { pub trait ChangesetStore: Send + Sync { fn get_changeset_by_changesetid( &self, + ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture; fn get_changed_files( &self, + ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture, Error>; } @@ -742,37 +766,44 @@ pub struct BlobRepoChangesetStore { impl ChangesetStore for BlobRepoChangesetStore { fn get_changeset_by_changesetid( &self, + ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture { - self.repo.get_changeset_by_changesetid(changesetid) + self.repo.get_changeset_by_changesetid(ctx, changesetid) } fn get_changed_files( &self, + ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture, Error> { cloned!(self.repo); self.repo - .get_changeset_by_changesetid(changesetid) - .and_then(move |cs| { - let mf_id = cs.manifestid(); - let mf = repo.get_manifest_by_nodeid(&mf_id); - let parents = cs.parents(); - let (maybe_p1, _) = parents.get_nodes(); - // TODO(stash): generate changed file stream correctly for merges - let p_mf = match maybe_p1.cloned() { - Some(p1) => repo.get_changeset_by_changesetid(&HgChangesetId::new(p1)) - .and_then({ - cloned!(repo); - move |p1| repo.get_manifest_by_nodeid(&p1.manifestid()) - }) - .left_future(), - None => finished(get_empty_manifest()).right_future(), - }; - (mf, p_mf) + .get_changeset_by_changesetid(ctx.clone(), changesetid) + .and_then({ + cloned!(ctx); + 
move |cs| { + let mf_id = cs.manifestid(); + let mf = repo.get_manifest_by_nodeid(ctx.clone(), &mf_id); + let parents = cs.parents(); + let (maybe_p1, _) = parents.get_nodes(); + // TODO(stash): generate changed file stream correctly for merges + let p_mf = match maybe_p1.cloned() { + Some(p1) => { + repo.get_changeset_by_changesetid(ctx.clone(), &HgChangesetId::new(p1)) + .and_then({ + cloned!(repo); + move |p1| repo.get_manifest_by_nodeid(ctx, &p1.manifestid()) + }) + .left_future() + } + None => finished(get_empty_manifest()).right_future(), + }; + (mf, p_mf) + } }) - .and_then(|(mf, p_mf)| { - manifest_utils::changed_file_stream(&mf, &p_mf, None) + .and_then(move |(mf, p_mf)| { + manifest_utils::changed_file_stream(ctx, &mf, &p_mf, None) .map(|changed_entry| { let path = changed_entry .get_full_path() @@ -799,6 +830,7 @@ pub struct InMemoryChangesetStore { impl ChangesetStore for InMemoryChangesetStore { fn get_changeset_by_changesetid( &self, + _ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture { match self.map.get(changesetid) { @@ -811,6 +843,7 @@ impl ChangesetStore for InMemoryChangesetStore { fn get_changed_files( &self, + _ctx: CoreContext, changesetid: &HgChangesetId, ) -> BoxFuture, Error> { match self.map.get(changesetid) { @@ -843,6 +876,7 @@ impl InMemoryChangesetStore { pub trait FileContentStore: Send + Sync { fn get_file_content_for_changeset( &self, + ctx: CoreContext, changesetid: HgChangesetId, path: MPath, ) -> BoxFuture, Error>; @@ -856,6 +890,7 @@ pub struct InMemoryFileContentStore { impl FileContentStore for InMemoryFileContentStore { fn get_file_content_for_changeset( &self, + _ctx: CoreContext, changesetid: HgChangesetId, path: MPath, ) -> BoxFuture, Error> { @@ -888,18 +923,22 @@ pub struct BlobRepoFileContentStore { impl FileContentStore for BlobRepoFileContentStore { fn get_file_content_for_changeset( &self, + ctx: CoreContext, changesetid: HgChangesetId, path: MPath, ) -> BoxFuture, Error> { let repo = self.repo.clone(); let repo2 = repo.clone(); - repo.get_changeset_by_changesetid(&changesetid) - .and_then(move |changeset| { - repo.find_file_in_manifest(&path, changeset.manifestid().clone()) + repo.get_changeset_by_changesetid(ctx.clone(), &changesetid) + .and_then({ + cloned!(ctx); + move |changeset| { + repo.find_file_in_manifest(ctx, &path, changeset.manifestid().clone()) + } }) .and_then(move |opt| match opt { Some((file_type, hash)) => repo2 - .get_file_content(&hash.into_nodehash()) + .get_file_content(ctx, &hash.into_nodehash()) .map(move |content| Some((file_type, content))) .boxify(), None => finished(None).boxify(), @@ -922,6 +961,7 @@ impl BlobRepoFileContentStore { } struct HookCacheFiller { + ctx: CoreContext, repo_name: String, file_hooks: FileHooks, } @@ -940,7 +980,7 @@ impl Filler for HookCacheFiller { self.repo_name.clone(), key.file.clone(), ); - arc_hook.0.run(hook_context) + arc_hook.0.run(self.ctx.clone(), hook_context) } None => panic!("Can't find hook {}", key.hook_name), // TODO } @@ -1033,7 +1073,11 @@ mod test { } impl Hook for FnChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + _ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { finished((self.f)(context)).boxify() } } @@ -1054,7 +1098,11 @@ mod test { } impl Hook for ContextMatchingChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + _ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { assert_eq!(self.expected_context, context); Box::new(finished(HookExecution::Accepted)) 
} @@ -1072,11 +1120,15 @@ mod test { } impl Hook for ContainsStringMatchingChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let mut futs = stream::FuturesUnordered::new(); for file in context.data.files { let fut = match self.expected_content.get(&file.path) { - Some(content) => file.contains_string(&content), + Some(content) => file.contains_string(ctx.clone(), &content), None => Box::new(finished(false)), }; futs.push(fut); @@ -1107,13 +1159,17 @@ mod test { } impl Hook for FileContentMatchingChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let mut futs = stream::FuturesUnordered::new(); for file in context.data.files { let fut = match self.expected_content.get(&file.path) { Some(expected_content) => { let expected_content = expected_content.clone(); - file.file_content() + file.file_content(ctx.clone()) .map(move |content| { let content = str::from_utf8(&*content).unwrap().to_string(); content.contains(&expected_content) @@ -1150,13 +1206,17 @@ mod test { } impl Hook for LengthMatchingChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let mut futs = stream::FuturesUnordered::new(); for file in context.data.files { let fut = match self.expected_lengths.get(&file.path) { Some(expected_length) => { let expected_length = *expected_length; - file.len() + file.len(ctx.clone()) .map(move |length| length == expected_length) .boxify() } @@ -1191,11 +1251,15 @@ mod test { } impl Hook for OtherFileMatchingChangesetHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let expected_content = self.expected_content.clone(); context .data - .file_content(self.file_path.clone()) + .file_content(ctx, self.file_path.clone()) .map(|opt| opt.map(|content| str::from_utf8(&*content).unwrap().to_string())) .map(move |opt| { if opt == expected_content { @@ -1230,7 +1294,11 @@ mod test { } impl Hook for FnFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + _ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { finished((self.f)(context)).boxify() } } @@ -1251,7 +1319,11 @@ mod test { } impl Hook for PathMatchingFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + _ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { finished(if self.paths.contains(&context.data.path) { HookExecution::Accepted } else { @@ -1270,10 +1342,14 @@ mod test { } impl Hook for ContainsStringMatchingFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { context .data - .contains_string(&self.content) + .contains_string(ctx, &self.content) .map(|contains| { if contains { HookExecution::Accepted @@ -1295,11 +1371,15 @@ mod test { } impl Hook for FileContentMatchingFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let expected_content = self.content.clone(); context .data - .file_content() + .file_content(ctx) .map(move |content| { let content = str::from_utf8(&*content).unwrap().to_string(); if content.contains(&expected_content) { @@ -1322,11 +1402,15 @@ mod test { } impl Hook for 
IsSymLinkMatchingFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let is_symlink = self.is_symlink; context .data - .file_type() + .file_type(ctx) .map(move |file_type| { let actual = match file_type { FileType::Symlink => true, @@ -1352,11 +1436,15 @@ mod test { } impl Hook for LengthMatchingFileHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let exp_length = self.length; context .data - .len() + .len(ctx) .map(move |length| { if length == exp_length { HookExecution::Accepted @@ -1375,6 +1463,7 @@ mod test { #[test] fn test_changeset_hook_accepted() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_accepting_changeset_hook() }; @@ -1384,13 +1473,14 @@ mod test { let expected = hashmap! { "hook1".to_string() => HookExecution::Accepted }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_rejected() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_rejecting_changeset_hook() }; @@ -1400,13 +1490,14 @@ mod test { let expected = hashmap! { "hook1".to_string() => default_rejection() }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_mix() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_accepting_changeset_hook(), "hook2".to_string() => always_rejecting_changeset_hook(), @@ -1421,13 +1512,14 @@ mod test { "hook2".to_string() => default_rejection(), "hook3".to_string() => HookExecution::Accepted, }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_context() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let files = vec![ "dir1/subdir1/subsubdir1/file_1".to_string(), "dir1/subdir1/subsubdir2/file_1".to_string(), @@ -1472,13 +1564,14 @@ mod test { let expected = hashmap! { "hook1".to_string() => HookExecution::Accepted }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_contains_string() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook1_map = hashmap![ "dir1/subdir1/subsubdir1/file_1".to_string() => "elephants".to_string(), "dir1/subdir1/subsubdir2/file_1".to_string() => "hippopatami".to_string(), @@ -1507,13 +1600,14 @@ mod test { "hook2".to_string() => default_rejection(), "hook3".to_string() => default_rejection(), }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_other_file_content() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! 
{ "hook1".to_string() => other_file_matching_changeset_hook("dir1/subdir1/subsubdir1/file_1".to_string(), Some("elephants".to_string())), "hook2".to_string() => other_file_matching_changeset_hook("dir1/subdir1/subsubdir1/file_1".to_string(), Some("giraffes".to_string())), @@ -1531,13 +1625,14 @@ mod test { "hook4".to_string() => HookExecution::Accepted, "hook5".to_string() => default_rejection(), }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_file_content() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook1_map = hashmap![ "dir1/subdir1/subsubdir1/file_1".to_string() => "elephants".to_string(), "dir1/subdir1/subsubdir2/file_1".to_string() => "hippopatami".to_string(), @@ -1566,13 +1661,14 @@ mod test { "hook2".to_string() => default_rejection(), "hook3".to_string() => default_rejection(), }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_changeset_hook_lengths() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook1_map = hashmap![ "dir1/subdir1/subsubdir1/file_1".to_string() => 9, "dir1/subdir1/subsubdir2/file_1".to_string() => 11, @@ -1603,13 +1699,14 @@ mod test { "hook2".to_string() => default_rejection(), "hook3".to_string() => default_rejection(), }; - run_changeset_hooks("bm1", hooks, bookmarks, expected); + run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_accepted() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_accepting_file_hook() }; @@ -1623,13 +1720,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, } }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_rejected() { async_unit::tokio_unit_test(move || { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_rejecting_file_hook() }; @@ -1643,13 +1741,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(), } }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_mix() { async_unit::tokio_unit_test(move || { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! 
{ "hook1".to_string() => always_rejecting_file_hook(), "hook2".to_string() => always_accepting_file_hook() @@ -1669,13 +1768,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, } }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hooks_paths() { async_unit::tokio_unit_test(move || { + let ctx = CoreContext::test_mock(); let matching_paths = hashset![ "dir1/subdir1/subsubdir2/file_1".to_string(), "dir1/subdir1/subsubdir2/file_2".to_string(), @@ -1693,13 +1793,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, } }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hooks_paths_mix() { async_unit::tokio_unit_test(move || { + let ctx = CoreContext::test_mock(); let matching_paths1 = hashset![ "dir1/subdir1/subsubdir2/file_1".to_string(), "dir1/subdir1/subsubdir2/file_2".to_string(), @@ -1724,13 +1825,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(), } }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_contains_string() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => contains_string_matching_file_hook("elephants".to_string()), "hook2".to_string() => contains_string_matching_file_hook("hippopatami".to_string()), @@ -1758,13 +1860,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, }, }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_file_content() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => file_content_matching_file_hook("elephants".to_string()), "hook2".to_string() => file_content_matching_file_hook("hippopatami".to_string()), @@ -1792,13 +1895,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, }, }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_is_symlink() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => is_symlink_matching_file_hook(true), "hook2".to_string() => is_symlink_matching_file_hook(false), @@ -1820,13 +1924,14 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted, }, }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } #[test] fn test_file_hook_length() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! 
{ "hook1".to_string() => length_matching_file_hook("elephants".len() as u64), "hook2".to_string() => length_matching_file_hook("hippopatami".len() as u64), @@ -1860,7 +1965,7 @@ mod test { "dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(), }, }; - run_file_hooks("bm1", hooks, bookmarks, expected); + run_file_hooks(ctx, "bm1", hooks, bookmarks, expected); }); } @@ -1883,6 +1988,7 @@ mod test { #[test] fn test_with_blob_store() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hooks: HashMap>> = hashmap! { "hook1".to_string() => always_accepting_changeset_hook() }; @@ -1892,20 +1998,22 @@ mod test { let expected = hashmap! { "hook1".to_string() => HookExecution::Accepted }; - run_changeset_hooks_with_mgr("bm1", hooks, bookmarks, expected, false); + run_changeset_hooks_with_mgr(ctx, "bm1", hooks, bookmarks, expected, false); }); } fn run_changeset_hooks( + ctx: CoreContext, bookmark_name: &str, hooks: HashMap>>, bookmarks: HashMap>, expected: HashMap, ) { - run_changeset_hooks_with_mgr(bookmark_name, hooks, bookmarks, expected, true) + run_changeset_hooks_with_mgr(ctx, bookmark_name, hooks, bookmarks, expected, true) } fn run_changeset_hooks_with_mgr( + ctx: CoreContext, bookmark_name: &str, hooks: HashMap>>, bookmarks: HashMap>, @@ -1917,6 +2025,7 @@ mod test { hook_manager.register_changeset_hook(&hook_name, hook.into(), None); } let fut = hook_manager.run_changeset_hooks_for_bookmark( + ctx, default_changeset_id(), &Bookmark::new(bookmark_name).unwrap(), None, @@ -1929,15 +2038,17 @@ mod test { } fn run_file_hooks( + ctx: CoreContext, bookmark_name: &str, hooks: HashMap>>, bookmarks: HashMap>, expected: HashMap>, ) { - run_file_hooks_with_mgr(bookmark_name, hooks, bookmarks, expected, true) + run_file_hooks_with_mgr(ctx, bookmark_name, hooks, bookmarks, expected, true) } fn run_file_hooks_with_mgr( + ctx: CoreContext, bookmark_name: &str, hooks: HashMap>>, bookmarks: HashMap>, @@ -1950,6 +2061,7 @@ mod test { } let fut: BoxFuture, Error> = hook_manager .run_file_hooks_for_bookmark( + ctx, default_changeset_id(), &Bookmark::new(bookmark_name).unwrap(), None, @@ -1989,11 +2101,13 @@ mod test { } fn hook_manager_blobrepo() -> HookManager { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); let changeset_store = BlobRepoChangesetStore::new(repo.clone()); let content_store = BlobRepoFileContentStore::new(repo); let logger = Logger::root(Discard {}.ignore_res(), o!()); HookManager::new( + ctx, "some_repo".into(), Box::new(changeset_store), Arc::new(content_store), @@ -2003,10 +2117,13 @@ mod test { } fn hook_manager_inmem() -> HookManager { + let ctx = CoreContext::test_mock(); let repo = many_files_dirs::getrepo(None); // Load up an in memory store with a single commit from the many_files_dirs store let cs_id = HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); - let cs = repo.get_changeset_by_changesetid(&cs_id).wait().unwrap(); + let cs = repo.get_changeset_by_changesetid(ctx.clone(), &cs_id) + .wait() + .unwrap(); let mut changeset_store = InMemoryChangesetStore::new(); changeset_store.insert(&cs_id, &cs); let mut content_store = InMemoryFileContentStore::new(); @@ -2024,6 +2141,7 @@ mod test { ); let logger = Logger::root(Discard {}.ignore_res(), o!()); HookManager::new( + ctx, "some_repo".into(), Box::new(changeset_store), Arc::new(content_store), diff --git a/hooks/src/lua_hook.rs b/hooks/src/lua_hook.rs index fbb661264a..04dec84f7d 100644 --- a/hooks/src/lua_hook.rs +++ 
b/hooks/src/lua_hook.rs @@ -12,6 +12,7 @@ use super::{ChangedFileType, Hook, HookChangeset, HookChangesetParents, HookCont HookExecution, HookFile, HookRejectionInfo}; use super::errors::*; use aclchecker::Identity; +use context::CoreContext; use failure::Error; use futures::{failed, Future}; use futures::future::{ok, result}; @@ -105,7 +106,11 @@ pub struct LuaHook { } impl Hook for LuaHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let mut hook_info = hashmap! { "repo_name" => context.repo_name.to_string(), "author" => context.data.author.to_string(), @@ -134,10 +139,11 @@ impl Hook for LuaHook { let files_map2 = files_map.clone(); let contains_string = { + cloned!(ctx); move |path: String, string: String| -> Result { match files_map.get(&path) { Some(file) => { - let future = file.contains_string(&string) + let future = file.contains_string(ctx.clone(), &string) .map_err(|err| { LuaError::ExecutionError(format!( "failed to get file content: {}", @@ -153,11 +159,11 @@ impl Hook for LuaHook { }; let contains_string = function2(contains_string); let file_content = { - let context2 = context.clone(); + cloned!(ctx, context); move |path: String| -> Result { - let future = context2 + let future = context .data - .file_content(path) + .file_content(ctx.clone(), path) .map_err(|err| { LuaError::ExecutionError(format!("failed to get file content: {}", err)) }) @@ -170,10 +176,11 @@ impl Hook for LuaHook { }; let file_content = function1(file_content); let file_len = { + cloned!(ctx); move |path: String| -> Result { match files_map2.get(&path) { Some(file) => { - let future = file.len() + let future = file.len(ctx.clone()) .map_err(|err| { LuaError::ExecutionError(format!( "failed to get file content: {}", @@ -271,7 +278,11 @@ impl Hook for LuaHook { } impl Hook for LuaHook { - fn run(&self, context: HookContext) -> BoxFuture { + fn run( + &self, + ctx: CoreContext, + context: HookContext, + ) -> BoxFuture { let hook_info = hashmap! 
{ "repo_name" => context.repo_name.to_string(), }; @@ -279,11 +290,11 @@ impl Hook for LuaHook { code.push_str(HOOK_START_CODE_BASE); code.push_str(&self.code); let contains_string = { - cloned!(context); + cloned!(ctx, context); move |string: String| -> Result { let future = context .data - .contains_string(&string) + .contains_string(ctx.clone(), &string) .map_err(|err| { LuaError::ExecutionError(format!("failed to get file content: {}", err)) }) @@ -293,11 +304,11 @@ impl Hook for LuaHook { }; let contains_string = function1(contains_string); let file_content = { - cloned!(context); + cloned!(ctx, context); move || -> Result { let future = context .data - .file_content() + .file_content(ctx.clone()) .map_err(|err| { LuaError::ExecutionError(format!("failed to get file content: {}", err)) }) @@ -307,11 +318,11 @@ impl Hook for LuaHook { }; let file_content = function0(file_content); let is_symlink = { - cloned!(context); + cloned!(ctx, context); move || -> Result { let future = context .data - .file_type() + .file_type(ctx.clone()) .map_err(|err| { LuaError::ExecutionError(format!("failed to get file content: {}", err)) }) @@ -327,11 +338,11 @@ impl Hook for LuaHook { }; let is_symlink = function0(is_symlink); let file_len = { - cloned!(context); + cloned!(ctx, context); move || -> Result { let future = context .data - .len() + .len(ctx.clone()) .map_err(|err| { LuaError::ExecutionError(format!("failed to get file content: {}", err)) }) @@ -754,6 +765,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_simple_rejected() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -761,7 +773,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Rejected(_)) ); }); @@ -770,6 +782,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_reviewers() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -778,7 +791,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); @@ -801,13 +814,17 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", return #reviewers == 2\n\ end", ); - assert_matches!(run_changeset_hook(code, hcs), Ok(HookExecution::Accepted)); + assert_matches!( + run_changeset_hook(ctx.clone(), code, hcs), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_cs_hook_test_plan() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -816,7 +833,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); @@ -839,13 +856,17 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", return test_plan == 'testinprod'\n\ end", ); - assert_matches!(run_changeset_hook(code, hcs), Ok(HookExecution::Accepted)); + assert_matches!( + run_changeset_hook(ctx.clone(), code, hcs), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_cs_hook_author_unixname() { async_unit::tokio_unit_test(|| { + let ctx = 
CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -853,7 +874,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); @@ -875,13 +896,17 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", return ctx.info.author_unixname == 'stash'\n\ end", ); - assert_matches!(run_changeset_hook(code, hcs), Ok(HookExecution::Accepted)); + assert_matches!( + run_changeset_hook(ctx.clone(), code, hcs), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_cs_hook_valid_reviewer() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -889,7 +914,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -898,6 +923,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_not_valid_reviewer() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -905,7 +931,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -914,6 +940,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_rejected_short_and_long_desc() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -921,7 +948,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Rejected(HookRejectionInfo{ref description, ref long_description})) if description==&"emus" && long_description==&"ostriches" @@ -932,6 +959,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_author() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -939,7 +967,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -948,6 +976,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_file_paths() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); // Arrays passed from rust -> lua appear to be 1 indexed in Lua land let code = String::from( @@ -958,7 +987,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -967,6 +996,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_file_contains_string_match() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -976,7 +1006,7 @@ Signature: 
111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -985,6 +1015,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_path_regex_match() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -994,7 +1025,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1003,6 +1034,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_regex_match() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1012,7 +1044,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1021,6 +1053,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_file_content_match() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1031,7 +1064,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1040,6 +1073,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_other_file_content_match() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1049,7 +1083,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1058,6 +1092,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_content_not_found_returns_nil() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1065,7 +1100,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1074,6 +1109,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_check_type() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1093,7 +1129,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1102,6 +1138,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_deleted() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1114,7 +1151,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", 
end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1123,6 +1160,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_file_len() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1133,7 +1171,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1142,6 +1180,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_comments() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1149,7 +1188,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1158,6 +1197,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_repo_name() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1165,7 +1205,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1174,6 +1214,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_one_parent() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1182,7 +1223,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1191,6 +1232,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_two_parents() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let mut changeset = default_changeset(); changeset.parents = HookChangesetParents::Two("p1-hash".into(), "p2-hash".into()); let code = String::from( @@ -1200,7 +1242,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1209,6 +1251,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_no_parents() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let mut changeset = default_changeset(); changeset.parents = HookChangesetParents::None; let code = String::from( @@ -1218,7 +1261,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1227,6 +1270,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_no_hook_func() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "elephants = function (ctx)\n\ @@ -1234,7 +1278,7 @@ Signature: 
111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref msg)) if msg.contains("no hook function") ); }); @@ -1243,10 +1287,11 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_invalid_hook() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from("invalid code"); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookParseError(ref err_msg)) if err_msg.starts_with("Syntax error:") ); @@ -1256,6 +1301,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_exception() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1266,7 +1312,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.starts_with("LuaError") ); @@ -1276,6 +1322,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_invalid_return_val() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1283,7 +1330,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid hook return type") ); @@ -1293,6 +1340,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_invalid_short_desc() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1300,7 +1348,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid hook failure short description type") ); @@ -1310,6 +1358,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_invalid_long_desc() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1317,7 +1366,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid hook 
failure long description type") ); @@ -1327,6 +1376,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_desc_when_hooks_is_accepted() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1334,7 +1384,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("failure description must only be set if hook fails") ); @@ -1344,6 +1394,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_long_desc_when_hooks_is_accepted() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1351,7 +1402,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_changeset_hook(code, changeset).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_changeset_hook(ctx.clone(), code, changeset).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("failure long description must only be set if hook fails") ); @@ -1361,6 +1412,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_cs_hook_no_io_nor_os() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let changeset = default_changeset(); let code = String::from( "hook = function (ctx)\n\ @@ -1368,7 +1420,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_changeset_hook(code, changeset), + run_changeset_hook(ctx.clone(), code, changeset), Ok(HookExecution::Accepted) ); }); @@ -1377,32 +1429,41 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_path() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.path == \"/a/b/c.txt\"\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_contains_string_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.contains_string(\"sausages\")\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_contains_string_no_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1410,7 +1471,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1419,6 +1480,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_path_regex_match_no_matches() { async_unit::tokio_unit_test(|| { + let ctx = 
CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1426,7 +1488,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1435,6 +1497,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_regex_match_no_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1442,7 +1505,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1451,32 +1514,41 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_path_regex_match_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.path_regex_match(\"a*.txt\")\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_regex_match_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.regex_match(\"a*.txt\", ctx.file.path)\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_path_regex_match_invalid_regex() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1484,7 +1556,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(), code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid regex") ); @@ -1494,6 +1566,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_regex_match_invalid_regex() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1501,7 +1574,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(), code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid regex") ); @@ -1511,32 +1584,41 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_content_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.content() == \"sausages\"\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( +
run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_is_symlink() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_symlink_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.is_symlink()\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_is_not_symlink() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1544,7 +1626,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1553,32 +1635,41 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_removed() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_removed_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.path == \"/a/b/c.txt\" and ctx.file.is_deleted()\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_len_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.file.len() == 8\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_len_no_matches() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1586,7 +1677,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1595,19 +1686,24 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_repo_name() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return ctx.info.repo_name == \"some-repo\"\n\ end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } #[test] fn test_file_hook_rejected() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1615,7 +1711,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - run_file_hook(code, hook_file), + run_file_hook(ctx.clone(), code, hook_file), Ok(HookExecution::Rejected(_)) ); }); @@ -1624,6 +1720,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_no_hook_func() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( 
"elephants = function (ctx)\n\ @@ -1631,7 +1728,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("no hook function") ); }); @@ -1640,10 +1737,11 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_invalid_hook() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from("invalid code"); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookParseError(ref err_msg)) if err_msg.starts_with("Syntax error:") ); @@ -1653,6 +1751,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_exception() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1663,7 +1762,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.starts_with("LuaError") ); @@ -1673,6 +1772,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_invalid_return_val() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1680,7 +1780,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid hook return type") ); @@ -1690,6 +1790,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_invalid_short_desc() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1697,7 +1798,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref err_msg)) if err_msg.contains("invalid hook failure short description type") ); @@ -1707,6 +1808,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_invalid_long_desc() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ @@ -1714,7 +1816,7 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", end", ); assert_matches!( - err_downcast!(run_file_hook(code, hook_file).unwrap_err(), err: ErrorKind => err), + err_downcast!(run_file_hook(ctx.clone(),code, hook_file).unwrap_err(), err: ErrorKind => err), Ok(ErrorKind::HookRuntimeError(ref 
err_msg)) if err_msg.contains("invalid hook failure long description type") ); @@ -1724,26 +1826,38 @@ Signature: 111111111:1111111111:bbbbbbbbbbbbbbbb", #[test] fn test_file_hook_no_io_nor_os() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_file = default_hook_added_file(); let code = String::from( "hook = function (ctx)\n\ return io == nil and os == nil\n end", ); - assert_matches!(run_file_hook(code, hook_file), Ok(HookExecution::Accepted)); + assert_matches!( + run_file_hook(ctx.clone(), code, hook_file), + Ok(HookExecution::Accepted) + ); }); } - fn run_changeset_hook(code: String, changeset: HookChangeset) -> Result { + fn run_changeset_hook( + ctx: CoreContext, + code: String, + changeset: HookChangeset, + ) -> Result { let hook = LuaHook::new(String::from("testhook"), code.to_string()); let context = HookContext::new(hook.name.clone(), "some-repo".into(), changeset); - hook.run(context).wait() + hook.run(ctx, context).wait() } - fn run_file_hook(code: String, hook_file: HookFile) -> Result { + fn run_file_hook( + ctx: CoreContext, + code: String, + hook_file: HookFile, + ) -> Result { let hook = LuaHook::new(String::from("testhook"), code.to_string()); let context = HookContext::new(hook.name.clone(), "some-repo".into(), hook_file); - hook.run(context).wait() + hook.run(ctx, context).wait() } fn default_changeset() -> HookChangeset { diff --git a/hooks/src/rust_hook.rs b/hooks/src/rust_hook.rs index 3c979e9b6f..fcd145cfe9 100644 --- a/hooks/src/rust_hook.rs +++ b/hooks/src/rust_hook.rs @@ -10,6 +10,7 @@ #![deny(warnings)] use super::{Hook, HookChangeset, HookContext, HookExecution}; +use context::CoreContext; use failure::Error; use futures::finished; use futures_ext::{BoxFuture, FutureExt}; @@ -19,7 +20,11 @@ pub struct RustHook { } impl Hook for RustHook { - fn run(&self, _context: HookContext) -> BoxFuture { + fn run( + &self, + _ctx: CoreContext, + _context: HookContext, + ) -> BoxFuture { finished(HookExecution::Accepted).boxify() } } diff --git a/hooks_old/src/lib.rs b/hooks_old/src/lib.rs index b75213295c..0119435232 100644 --- a/hooks_old/src/lib.rs +++ b/hooks_old/src/lib.rs @@ -20,6 +20,7 @@ extern crate maplit; extern crate tempdir; extern crate blobrepo; +extern crate context; extern crate hlua_futures; extern crate mercurial; extern crate mercurial_types; @@ -38,6 +39,7 @@ use futures::Future; use hlua::{AnyLuaValue, Lua, LuaError, PushGuard}; use blobrepo::BlobRepo; +use context::CoreContext; use hlua_futures::{AnyFuture, LuaCoroutine, LuaCoroutineBuilder}; use mercurial_types::{Changeset, HgNodeHash}; use mercurial_types::nodehash::HgChangesetId; @@ -67,6 +69,7 @@ pub struct HookContext<'hook> { impl<'hook> HookContext<'hook> { fn run<'a, 'lua>( &self, + ctx: CoreContext, lua: &'a mut Lua<'lua>, ) -> Result>, bool>> { let repo = self.repo.clone(); @@ -78,7 +81,7 @@ impl<'hook> HookContext<'hook> { let changesetid = HgChangesetId::from_ascii_str(&hash) .with_context(|_| ErrorKind::InvalidHash(name.clone(), hash.into()))?; - let future = repo.get_changeset_by_changesetid(&changesetid) + let future = repo.get_changeset_by_changesetid(ctx.clone(), &changesetid) .map_err(|err| LuaError::ExecutionError(format!("failed to get author: {}", err))) .map(|cs| AnyLuaValue::LuaString(String::from_utf8_lossy(cs.user()).into_owned())); Ok(AnyFuture::new(future)) @@ -113,11 +116,12 @@ impl<'lua> HookManager<'lua> { pub fn run_hook<'hook>( &mut self, + ctx: CoreContext, hook: HookContext<'hook>, ) -> Result>, bool>> { // TODO: with multiple Lua 
contexts, choose a context to run in. Probably use a queue or // something. - hook.run(&mut self.lua) + hook.run(ctx, &mut self.lua) } } @@ -128,6 +132,7 @@ mod test { #[test] fn test_hook() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let hook_info = hashmap! { "repo" => "fbsource".into(), "bookmark" => "master".into(), @@ -151,7 +156,7 @@ mod test { end", }; - let coroutine_fut = hook_manager.run_hook(hook).unwrap(); + let coroutine_fut = hook_manager.run_hook(ctx, hook).unwrap(); let result = coroutine_fut.wait(); assert!(result.unwrap()); }) diff --git a/mercurial-types/mocks/lib.rs b/mercurial-types/mocks/lib.rs index d85a17461c..a824c24725 100644 --- a/mercurial-types/mocks/lib.rs +++ b/mercurial-types/mocks/lib.rs @@ -17,6 +17,7 @@ extern crate maplit; #[cfg(test)] extern crate async_unit; +extern crate context; extern crate futures_ext; extern crate mercurial_types; extern crate mononoke_types; diff --git a/mercurial-types/mocks/manifest.rs b/mercurial-types/mocks/manifest.rs index 8648d72d6c..c431f4c19b 100644 --- a/mercurial-types/mocks/manifest.rs +++ b/mercurial-types/mocks/manifest.rs @@ -13,6 +13,7 @@ use failure::{Error, ResultExt}; use futures::IntoFuture; use futures_ext::{BoxFuture, FutureExt}; +use context::CoreContext; use mercurial_types::{Entry, FileType, HgBlob, MPath, MPathElement, Manifest, RepoPath, Type}; use mercurial_types::blobnode::HgParents; use mercurial_types::manifest::Content; @@ -249,16 +250,16 @@ impl Entry for MockEntry { fn get_type(&self) -> Type { self.ty.expect("ty is not set!") } - fn get_parents(&self) -> BoxFuture { + fn get_parents(&self, _ctx: CoreContext) -> BoxFuture { unimplemented!(); } - fn get_raw_content(&self) -> BoxFuture { + fn get_raw_content(&self, _ctx: CoreContext) -> BoxFuture { unimplemented!(); } - fn get_content(&self) -> BoxFuture { + fn get_content(&self, _ctx: CoreContext) -> BoxFuture { Ok((self.content_factory)()).into_future().boxify() } - fn get_size(&self) -> BoxFuture, Error> { + fn get_size(&self, _ctx: CoreContext) -> BoxFuture, Error> { unimplemented!(); } fn get_hash(&self) -> &HgEntryId { @@ -286,6 +287,7 @@ mod test { #[test] fn lookup() { async_unit::tokio_unit_test(|| { + let ctx = CoreContext::test_mock(); let paths = btreemap! 
{ "foo/bar1" => (FileType::Regular, "bar1"), "foo/bar2" => (FileType::Symlink, "bar2"), @@ -304,7 +306,7 @@ mod test { .lookup(&MPathElement::new(b"foo".to_vec()).unwrap()) .expect("foo should be present"); let foo_content = foo_entry - .get_content() + .get_content(ctx.clone()) .wait() .expect("content fetch should work"); let foo_manifest = match foo_content { @@ -316,7 +318,7 @@ mod test { .lookup(&MPathElement::new(b"bar1".to_vec()).unwrap()) .expect("bar1 should be present"); let bar1_content = bar1_entry - .get_content() + .get_content(ctx.clone()) .wait() .expect("content fetch should work"); match bar1_content { @@ -330,7 +332,7 @@ mod test { .lookup(&MPathElement::new(b"bar2".to_vec()).unwrap()) .expect("bar2 should be present"); let bar2_content = bar2_entry - .get_content() + .get_content(ctx.clone()) .wait() .expect("content fetch should work"); match bar2_content { diff --git a/mercurial-types/src/lib.rs b/mercurial-types/src/lib.rs index f5e77b9ee1..46a6cd3ca0 100644 --- a/mercurial-types/src/lib.rs +++ b/mercurial-types/src/lib.rs @@ -77,6 +77,7 @@ extern crate serde; #[macro_use] extern crate serde_derive; +extern crate context; extern crate futures_ext; extern crate mercurial_thrift; extern crate mononoke_types; diff --git a/mercurial-types/src/manifest.rs b/mercurial-types/src/manifest.rs index 7a8f81cd29..8fa9734c7c 100644 --- a/mercurial-types/src/manifest.rs +++ b/mercurial-types/src/manifest.rs @@ -9,6 +9,7 @@ use std::iter; use failure::Error; +use context::CoreContext; use futures_ext::{BoxFuture, FutureExt}; use mononoke_types::{FileContents, FileType, MPathElement}; @@ -176,17 +177,17 @@ pub trait Entry: Send + 'static { fn get_type(&self) -> Type; /// Get the parents (in the history graph) of the referred-to object - fn get_parents(&self) -> BoxFuture; + fn get_parents(&self, ctx: CoreContext) -> BoxFuture; /// Get the raw content of the object as it exists in the blobstore, /// without any interpretation. This is only really useful for doing a bit-level duplication. - fn get_raw_content(&self) -> BoxFuture; + fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture; /// Get the interpreted content of the object. This will likely require IO - fn get_content(&self) -> BoxFuture; + fn get_content(&self, ctx: CoreContext) -> BoxFuture; /// Get the logical size of the entry. Some entries don't really have a meaningful size. - fn get_size(&self) -> BoxFuture, Error>; + fn get_size(&self, ctx: CoreContext) -> BoxFuture, Error>; /// Get the identity of the object this entry refers to. 
fn get_hash(&self) -> &HgEntryId; @@ -231,20 +232,20 @@ where self.entry.get_type() } - fn get_parents(&self) -> BoxFuture { - self.entry.get_parents().boxify() + fn get_parents(&self, ctx: CoreContext) -> BoxFuture { + self.entry.get_parents(ctx).boxify() } - fn get_raw_content(&self) -> BoxFuture { - self.entry.get_raw_content().boxify() + fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture { + self.entry.get_raw_content(ctx).boxify() } - fn get_content(&self) -> BoxFuture { - self.entry.get_content().boxify() + fn get_content(&self, ctx: CoreContext) -> BoxFuture { + self.entry.get_content(ctx).boxify() } - fn get_size(&self) -> BoxFuture, Error> { - self.entry.get_size().boxify() + fn get_size(&self, ctx: CoreContext) -> BoxFuture, Error> { + self.entry.get_size(ctx).boxify() } fn get_hash(&self) -> &HgEntryId { @@ -261,20 +262,20 @@ impl Entry for Box { (**self).get_type() } - fn get_parents(&self) -> BoxFuture { - (**self).get_parents() + fn get_parents(&self, ctx: CoreContext) -> BoxFuture { + (**self).get_parents(ctx) } - fn get_raw_content(&self) -> BoxFuture { - (**self).get_raw_content() + fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture { + (**self).get_raw_content(ctx) } - fn get_content(&self) -> BoxFuture { - (**self).get_content() + fn get_content(&self, ctx: CoreContext) -> BoxFuture { + (**self).get_content(ctx) } - fn get_size(&self) -> BoxFuture, Error> { - (**self).get_size() + fn get_size(&self, ctx: CoreContext) -> BoxFuture, Error> { + (**self).get_size(ctx) } fn get_hash(&self) -> &HgEntryId { diff --git a/mercurial-types/src/manifest_utils.rs b/mercurial-types/src/manifest_utils.rs index 89ebbe4348..e139a31577 100644 --- a/mercurial-types/src/manifest_utils.rs +++ b/mercurial-types/src/manifest_utils.rs @@ -10,6 +10,7 @@ use std::hash::{Hash, Hasher}; use std::iter::FromIterator; use std::sync::{Arc, Mutex}; +use context::CoreContext; use futures::IntoFuture; use futures::future::{self, Future}; use futures::stream::{empty, once, Stream}; @@ -215,6 +216,7 @@ impl Hash for NewEntry { /// parents simultaniously and produce the intersection result while /// traversing pub fn new_entry_intersection_stream( + ctx: CoreContext, root: &M, p1: Option<&P1M>, p2: Option<&P2M>, @@ -226,21 +228,21 @@ where { if p1.is_none() || p2.is_none() { let ces = if let Some(p1) = p1 { - changed_entry_stream(root, p1, None) + changed_entry_stream(ctx, root, p1, None) } else if let Some(p2) = p2 { - changed_entry_stream(root, p2, None) + changed_entry_stream(ctx, root, p2, None) } else { - changed_entry_stream(root, &EmptyManifest {}, None) + changed_entry_stream(ctx, root, &EmptyManifest {}, None) }; ces.filter_map(NewEntry::from_changed_entry) .map(NewEntry::into_tuple) .boxify() } else { - let p1 = - changed_entry_stream(root, p1.unwrap(), None).filter_map(NewEntry::from_changed_entry); - let p2 = - changed_entry_stream(root, p2.unwrap(), None).filter_map(NewEntry::from_changed_entry); + let p1 = changed_entry_stream(ctx.clone(), root, p1.unwrap(), None) + .filter_map(NewEntry::from_changed_entry); + let p2 = changed_entry_stream(ctx, root, p2.unwrap(), None) + .filter_map(NewEntry::from_changed_entry); p2.collect() .map(move |p2| { @@ -350,6 +352,7 @@ impl Pruner for CombinatorPruner { /// and Added directory entry. The same *does not* apply for changes between the various /// file types (Regular, Executable and Symlink): those will only be one Modified entry. 
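// A minimal caller-side sketch of the new signature (illustrative only;
// `repo`, `mfid` and `basemfid` are assumed to be in scope, as in the tests
// further down):
//
//     let ctx = CoreContext::test_mock();
//     let to_mf = repo.get_manifest_by_nodeid(ctx.clone(), &mfid).wait().unwrap();
//     let from_mf = repo.get_manifest_by_nodeid(ctx.clone(), &basemfid).wait().unwrap();
//     let changed = changed_entry_stream(ctx.clone(), &to_mf, &from_mf, None)
//         .collect()
//         .wait()
//         .unwrap();
//
// The context is cloned once per call rather than borrowed, which is what
// lets each spawned sub-stream own its own copy.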
pub fn changed_entry_stream( + ctx: CoreContext, to: &TM, from: &FM, path: Option, @@ -358,10 +361,11 @@ where TM: Manifest, FM: Manifest, { - changed_entry_stream_with_pruner(to, from, path, NoopPruner, None).boxify() + changed_entry_stream_with_pruner(ctx, to, from, path, NoopPruner, None).boxify() } pub fn changed_file_stream( + ctx: CoreContext, to: &TM, from: &FM, path: Option, @@ -370,12 +374,13 @@ where TM: Manifest, FM: Manifest, { - changed_entry_stream_with_pruner(to, from, path, NoopPruner, None) + changed_entry_stream_with_pruner(ctx, to, from, path, NoopPruner, None) .filter(|changed_entry| !changed_entry.status.is_tree()) .boxify() } pub fn changed_entry_stream_with_pruner( + ctx: CoreContext, to: &TM, from: &FM, path: Option, @@ -399,7 +404,13 @@ where move |entry| pruner.keep(entry) }) .map(|entry| { - recursive_changed_entry_stream(entry, 1, pruner.clone(), max_depth) + recursive_changed_entry_stream( + ctx.clone(), + entry, + 1, + pruner.clone(), + max_depth, + ) }), ) }) @@ -411,6 +422,7 @@ where /// that differ. If input isn't a tree, then a stream with a single entry is returned, otherwise /// subtrees are recursively compared. fn recursive_changed_entry_stream( + ctx: CoreContext, changed_entry: ChangedEntry, depth: usize, pruner: impl Pruner + Send + Clone + 'static, @@ -423,7 +435,10 @@ fn recursive_changed_entry_stream( let (to_mf, from_mf, path) = match &changed_entry.status { EntryStatus::Added(entry) => { let empty_mf: Box = Box::new(EmptyManifest {}); - let to_mf = entry.get_content().map(get_tree_content).boxify(); + let to_mf = entry + .get_content(ctx.clone()) + .map(get_tree_content) + .boxify(); let from_mf = Ok(empty_mf).into_future().boxify(); let dirname = changed_entry.dirname.clone(); @@ -435,7 +450,10 @@ fn recursive_changed_entry_stream( EntryStatus::Deleted(entry) => { let empty_mf: Box = Box::new(EmptyManifest {}); let to_mf = Ok(empty_mf).into_future().boxify(); - let from_mf = entry.get_content().map(get_tree_content).boxify(); + let from_mf = entry + .get_content(ctx.clone()) + .map(get_tree_content) + .boxify(); let dirname = changed_entry.dirname.clone(); let entry_path = entry.get_name().cloned(); @@ -450,8 +468,14 @@ fn recursive_changed_entry_stream( debug_assert!(to_entry.get_type().is_tree() == from_entry.get_type().is_tree()); debug_assert!(to_entry.get_type().is_tree()); - let to_mf = to_entry.get_content().map(get_tree_content).boxify(); - let from_mf = from_entry.get_content().map(get_tree_content).boxify(); + let to_mf = to_entry + .get_content(ctx.clone()) + .map(get_tree_content) + .boxify(); + let from_mf = from_entry + .get_content(ctx.clone()) + .map(get_tree_content) + .boxify(); let dirname = changed_entry.dirname.clone(); let entry_path = to_entry.get_name().cloned(); @@ -474,6 +498,7 @@ fn recursive_changed_entry_stream( }) .map(|entry| { recursive_changed_entry_stream( + ctx.clone(), entry, depth + 1, pruner.clone(), @@ -493,28 +518,28 @@ fn recursive_changed_entry_stream( /// their path from the root of the repo. /// For a non-tree entry returns a stream with a single (entry, path) pair. 
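// Usage sketch for the migrated signature (it mirrors
// test_recursive_entry_stream below; `root_entry` is assumed to come from
// repo.get_root_entry(&manifestid)):
//
//     let ctx = CoreContext::test_mock();
//     let entries = recursive_entry_stream(ctx.clone(), None, root_entry)
//         .collect()
//         .wait()
//         .unwrap();
//
// Each yielded pair gives the path from the repo root plus the entry itself,
// and every recursion level clones `ctx` before descending into a subtree.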
pub fn recursive_entry_stream( + ctx: CoreContext, rootpath: Option, entry: Box, ) -> BoxStream<(Option, Box), Error> { - let subentries = match entry.get_type() { - Type::File(_) => empty().boxify(), - Type::Tree => { - let entry_basename = entry.get_name(); - let path = MPath::join_opt(rootpath.as_ref(), entry_basename); + let subentries = + match entry.get_type() { + Type::File(_) => empty().boxify(), + Type::Tree => { + let entry_basename = entry.get_name(); + let path = MPath::join_opt(rootpath.as_ref(), entry_basename); - entry - .get_content() - .map(|content| { - select_all( - get_tree_content(content) - .list() - .map(move |entry| recursive_entry_stream(path.clone(), entry)), - ) - }) - .flatten_stream() - .boxify() - } - }; + entry + .get_content(ctx.clone()) + .map(|content| { + select_all(get_tree_content(content).list().map(move |entry| { + recursive_entry_stream(ctx.clone(), path.clone(), entry) + })) + }) + .flatten_stream() + .boxify() + } + }; once(Ok((rootpath, entry))).chain(subentries).boxify() } diff --git a/mercurial-types/tests/src/lib.rs b/mercurial-types/tests/src/lib.rs index d899e5f23f..8c16350e82 100644 --- a/mercurial-types/tests/src/lib.rs +++ b/mercurial-types/tests/src/lib.rs @@ -9,6 +9,7 @@ extern crate async_unit; extern crate blobrepo; +extern crate context; extern crate fixtures; extern crate futures; extern crate futures_ext; @@ -24,6 +25,7 @@ use std::str::FromStr; use std::sync::Arc; use blobrepo::BlobRepo; +use context::CoreContext; use fixtures::{linear, many_files_dirs}; use futures::{Future, Stream}; use futures::executor::spawn; @@ -39,12 +41,18 @@ use mercurial_types::nodehash::{HgChangesetId, HgEntryId, HgNodeHash}; use mercurial_types_mocks::manifest::{ContentFactory, MockEntry, MockManifest}; use mercurial_types_mocks::nodehash; -fn get_root_manifest(repo: Arc, changesetid: &HgChangesetId) -> Box { - let cs = repo.get_changeset_by_changesetid(changesetid) +fn get_root_manifest( + ctx: CoreContext, + repo: Arc, + changesetid: &HgChangesetId, +) -> Box { + let cs = repo.get_changeset_by_changesetid(ctx.clone(), changesetid) .wait() .unwrap(); let manifestid = cs.manifestid(); - repo.get_manifest_by_nodeid(&manifestid).wait().unwrap() + repo.get_manifest_by_nodeid(ctx, &manifestid) + .wait() + .unwrap() } fn get_hash(c: char) -> HgEntryId { @@ -192,12 +200,14 @@ fn test_diff_sorted_vecs_one_empty() { } fn find_changed_entry_status_stream( + ctx: CoreContext, manifest: Box, basemanifest: Box, pruner: impl Pruner + Send + Clone + 'static, max_depth: Option, ) -> Vec { let mut stream = spawn(changed_entry_stream_with_pruner( + ctx, &manifest, &basemanifest, None, @@ -274,6 +284,7 @@ fn check_changed_paths( } fn do_check_with_pruner( + ctx: CoreContext, repo: Arc, main_hash: HgNodeHash, base_hash: HgNodeHash, @@ -284,11 +295,17 @@ fn do_check_with_pruner( max_depth: Option, ) { { - let manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash)); - let base_manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash)); + let manifest = get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash)); + let base_manifest = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash)); - let res = - find_changed_entry_status_stream(manifest, base_manifest, pruner.clone(), max_depth); + let res = find_changed_entry_status_stream( + ctx.clone(), + manifest, + base_manifest, + pruner.clone(), + max_depth, + ); check_changed_paths( res, @@ -301,10 +318,17 @@ fn do_check_with_pruner( // Vice-versa: 
compare base_hash to main_hash. Deleted paths become added, added become // deleted. { - let manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash)); - let base_manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash)); + let manifest = get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash)); + let base_manifest = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash)); - let res = find_changed_entry_status_stream(manifest, base_manifest, pruner, max_depth); + let res = find_changed_entry_status_stream( + ctx.clone(), + manifest, + base_manifest, + pruner, + max_depth, + ); check_changed_paths( res, @@ -316,6 +340,7 @@ fn do_check_with_pruner( } fn do_check( + ctx: CoreContext, repo: Arc, main_hash: HgNodeHash, base_hash: HgNodeHash, @@ -324,6 +349,7 @@ fn do_check( expected_modified: Vec<&str>, ) { do_check_with_pruner( + ctx, repo, main_hash, base_hash, @@ -338,12 +364,14 @@ fn do_check( #[test] fn test_recursive_changed_entry_stream_linear() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(linear::getrepo(None)); let main_hash = HgNodeHash::from_str("79a13814c5ce7330173ec04d279bf95ab3f652fb").unwrap(); let base_hash = HgNodeHash::from_str("a5ffa77602a066db7d5cfb9fb5823a0895717c5a").unwrap(); let expected_modified = vec!["10"]; do_check( + ctx.clone(), repo, main_hash, base_hash, @@ -358,6 +386,7 @@ fn test_recursive_changed_entry_stream_linear() { #[test] fn test_recursive_changed_entry_stream_simple() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); let base_hash = HgNodeHash::from_str("5a28e25f924a5d209b82ce0713d8d83e68982bc8").unwrap(); @@ -383,7 +412,15 @@ fn test_recursive_changed_entry_stream_simple() { "dir2", "dir2/file_1_in_dir2", ]; - do_check(repo, main_hash, base_hash, expected_added, vec![], vec![]); + do_check( + ctx.clone(), + repo, + main_hash, + base_hash, + expected_added, + vec![], + vec![], + ); Ok(()) }).expect("test failed") } @@ -391,6 +428,7 @@ fn test_recursive_changed_entry_stream_simple() { #[test] fn test_recursive_entry_stream() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let changesetid = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); @@ -403,13 +441,13 @@ fn test_recursive_entry_stream() { // dir1/subdir1/file_1 // dir2/file_1_in_dir2 - let cs = repo.get_changeset_by_changesetid(&HgChangesetId::new(changesetid)) + let cs = repo.get_changeset_by_changesetid(ctx.clone(), &HgChangesetId::new(changesetid)) .wait() .unwrap(); let manifestid = cs.manifestid(); let root_entry = repo.get_root_entry(&manifestid); - let fut = recursive_entry_stream(None, root_entry).collect(); + let fut = recursive_entry_stream(ctx.clone(), None, root_entry).collect(); let res = fut.wait().unwrap(); let mut actual = hashset![]; @@ -432,14 +470,14 @@ fn test_recursive_entry_stream() { assert_eq!(actual, expected); - let root_mf = repo.get_manifest_by_nodeid(&manifestid) + let root_mf = repo.get_manifest_by_nodeid(ctx.clone(), &manifestid) .wait() .unwrap(); let path_element = MPathElement::new(Vec::from("dir1".as_bytes())).unwrap(); let subentry = root_mf.lookup(&path_element).unwrap(); - let res = recursive_entry_stream(None, subentry) + let 
res = recursive_entry_stream(ctx.clone(), None, subentry) .collect() .wait() .unwrap(); @@ -465,6 +503,7 @@ fn test_recursive_entry_stream() { #[test] fn test_recursive_changed_entry_stream_changed_dirs() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); let base_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); @@ -482,6 +521,7 @@ fn test_recursive_changed_entry_stream_changed_dirs() { ]; let expected_modified = vec!["dir1", "dir1/subdir1"]; do_check( + ctx.clone(), repo, main_hash, base_hash, @@ -496,6 +536,7 @@ fn test_recursive_changed_entry_stream_changed_dirs() { #[test] fn test_recursive_changed_entry_stream_dirs_replaced_with_file() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash = HgNodeHash::from_str("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480").unwrap(); let base_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); @@ -523,6 +564,7 @@ fn test_recursive_changed_entry_stream_dirs_replaced_with_file() { "dir1/subdir1/subsubdir2/file_2", ]; do_check( + ctx.clone(), repo, main_hash, base_hash, @@ -537,6 +579,7 @@ fn test_recursive_changed_entry_stream_dirs_replaced_with_file() { #[test] fn test_depth_parameter() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); let base_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); @@ -554,6 +597,7 @@ fn test_depth_parameter() { ]; let expected_modified = vec!["dir1", "dir1/subdir1"]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -567,6 +611,7 @@ fn test_depth_parameter() { let expected_added = vec!["dir1/subdir1/subsubdir1", "dir1/subdir1/subsubdir2"]; let expected_modified = vec!["dir1", "dir1/subdir1"]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -580,6 +625,7 @@ fn test_depth_parameter() { let expected_added = vec![]; let expected_modified = vec!["dir1", "dir1/subdir1"]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -593,6 +639,7 @@ fn test_depth_parameter() { let expected_added = vec![]; let expected_modified = vec!["dir1"]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -606,6 +653,7 @@ fn test_depth_parameter() { let expected_added = vec![]; let expected_modified = vec![]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -636,6 +684,7 @@ where #[test] fn test_recursive_changed_entry_prune() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash = HgNodeHash::from_str("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480").unwrap(); let base_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); @@ -652,6 +701,7 @@ fn test_recursive_changed_entry_prune() { let expected_added = vec!["dir1"]; let expected_deleted = vec!["dir1", "dir1/file_1_in_dir1", "dir1/file_2_in_dir1"]; do_check_with_pruner( + ctx.clone(), repo.clone(), main_hash, base_hash, @@ -685,6 +735,7 @@ fn test_recursive_changed_entry_prune() { 
"dir1/subdir1/subsubdir2/file_1", ]; do_check_with_pruner( + ctx.clone(), repo, main_hash, base_hash, @@ -712,6 +763,7 @@ fn test_recursive_changed_entry_prune() { #[test] fn test_recursive_changed_entry_prune_visited() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash_1 = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); let main_hash_2 = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); @@ -737,28 +789,38 @@ fn test_recursive_changed_entry_prune_visited() { // A dir1/subdir1/subsubdir2/file_1 // A dir1/subdir1/subsubdir2/file_2 - let manifest_1 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_1)); - let manifest_2 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_2)); - let basemanifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash)); + let manifest_1 = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_1)); + let manifest_2 = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_2)); + let basemanifest = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash)); let pruner = VisitedPruner::new(); let first_stream = changed_entry_stream_with_pruner( + ctx.clone(), &manifest_1, &basemanifest, None, pruner.clone(), None, ); - let second_stream = - changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, pruner, None); + let second_stream = changed_entry_stream_with_pruner( + ctx.clone(), + &manifest_2, + &basemanifest, + None, + pruner, + None, + ); let mut res = spawn(select_all(vec![first_stream, second_stream]).collect()); let res = res.wait_future().unwrap(); let unique_len = res.len(); assert_eq!(unique_len, 15); - let first_stream = changed_entry_stream(&manifest_1, &basemanifest, None); - let second_stream = changed_entry_stream(&manifest_2, &basemanifest, None); + let first_stream = changed_entry_stream(ctx.clone(), &manifest_1, &basemanifest, None); + let second_stream = changed_entry_stream(ctx.clone(), &manifest_2, &basemanifest, None); let mut res = spawn(select_all(vec![first_stream, second_stream]).collect()); let res = res.wait_future().unwrap(); // Make sure that more entries are produced without VisitedPruner i.e. 
some entries are @@ -772,6 +834,7 @@ fn test_recursive_changed_entry_prune_visited() { #[test] fn test_recursive_changed_entry_prune_visited_no_files() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let repo = Arc::new(many_files_dirs::getrepo(None)); let main_hash_1 = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap(); let main_hash_2 = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap(); @@ -797,29 +860,51 @@ fn test_recursive_changed_entry_prune_visited_no_files() { // A dir1/subdir1/subsubdir2/file_1 // A dir1/subdir1/subsubdir2/file_2 - let manifest_1 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_1)); - let manifest_2 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_2)); - let basemanifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash)); + let manifest_1 = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_1)); + let manifest_2 = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_2)); + let basemanifest = + get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash)); let pruner = CombinatorPruner::new(FilePruner, VisitedPruner::new()); let first_stream = changed_entry_stream_with_pruner( + ctx.clone(), &manifest_1, &basemanifest, None, pruner.clone(), None, ); - let second_stream = - changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, pruner, None); + let second_stream = changed_entry_stream_with_pruner( + ctx.clone(), + &manifest_2, + &basemanifest, + None, + pruner, + None, + ); let mut res = spawn(select_all(vec![first_stream, second_stream]).collect()); let res = res.wait_future().unwrap(); let unique_len = res.len(); assert_eq!(unique_len, 7); - let first_stream = - changed_entry_stream_with_pruner(&manifest_1, &basemanifest, None, FilePruner, None); - let second_stream = - changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, FilePruner, None); + let first_stream = changed_entry_stream_with_pruner( + ctx.clone(), + &manifest_1, + &basemanifest, + None, + FilePruner, + None, + ); + let second_stream = changed_entry_stream_with_pruner( + ctx.clone(), + &manifest_2, + &basemanifest, + None, + FilePruner, + None, + ); let mut res = spawn(select_all(vec![first_stream, second_stream]).collect()); let res = res.wait_future().unwrap(); // Make sure that more entries are produced without VisitedPruner i.e. some entries are @@ -833,6 +918,7 @@ fn test_recursive_changed_entry_prune_visited_no_files() { #[test] fn test_visited_pruner_different_files_same_hash() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let paths = btreemap! 
{ "foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)), "foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)), @@ -841,8 +927,14 @@ fn test_visited_pruner_different_files_same_hash() { MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid"); let pruner = VisitedPruner::new(); - let stream = - changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None); + let stream = changed_entry_stream_with_pruner( + ctx.clone(), + &root_manifest, + &EmptyManifest {}, + None, + pruner, + None, + ); let mut res = spawn(stream.collect()); let res = res.wait_future().unwrap(); @@ -854,6 +946,7 @@ fn test_visited_pruner_different_files_same_hash() { #[test] fn test_file_pruner() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let paths = btreemap! { "foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)), "foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)), @@ -862,8 +955,14 @@ fn test_file_pruner() { MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid"); let pruner = FilePruner; - let stream = - changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None); + let stream = changed_entry_stream_with_pruner( + ctx.clone(), + &root_manifest, + &EmptyManifest {}, + None, + pruner, + None, + ); let mut res = spawn(stream.collect()); let res = res.wait_future().unwrap(); @@ -875,6 +974,7 @@ fn test_file_pruner() { #[test] fn test_deleted_pruner() { async_unit::tokio_unit_test(|| -> Result<_, !> { + let ctx = CoreContext::test_mock(); let paths = btreemap! { "foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)), "foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)), @@ -883,8 +983,14 @@ fn test_deleted_pruner() { MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid"); let pruner = DeletedPruner; - let stream = - changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None); + let stream = changed_entry_stream_with_pruner( + ctx.clone(), + &root_manifest, + &EmptyManifest {}, + None, + pruner, + None, + ); let mut res = spawn(stream.collect()); let res = res.wait_future().unwrap(); @@ -895,8 +1001,14 @@ fn test_deleted_pruner() { ); let pruner = DeletedPruner; - let stream = - changed_entry_stream_with_pruner(&EmptyManifest {}, &root_manifest, None, pruner, None); + let stream = changed_entry_stream_with_pruner( + ctx.clone(), + &EmptyManifest {}, + &root_manifest, + None, + pruner, + None, + ); let mut res = spawn(stream.collect()); let res = res.wait_future().unwrap(); diff --git a/mononoke-api/src/lib.rs b/mononoke-api/src/lib.rs index eb96a0b8ec..57c864ebb3 100644 --- a/mononoke-api/src/lib.rs +++ b/mononoke-api/src/lib.rs @@ -36,15 +36,16 @@ use mononoke_types::MPath; use errors::ErrorKind; pub fn get_content_by_path( + ctx: CoreContext, repo: Arc, changesetid: HgChangesetId, path: Option, ) -> impl Future { - repo.get_changeset_by_changesetid(&changesetid) + repo.get_changeset_by_changesetid(ctx.clone(), &changesetid) .from_err() .and_then({ - let path = path.clone(); - move |changeset| repo.find_path_in_manifest(path, *changeset.manifestid()) + cloned!(ctx, path); + move |changeset| repo.find_path_in_manifest(ctx, path, *changeset.manifestid()) }) .and_then(|content| { content.ok_or_else(move || { diff --git a/repo_client/src/client/mod.rs b/repo_client/src/client/mod.rs index ba215056a1..53fae1f015 100644 --- 
diff --git a/repo_client/src/client/mod.rs b/repo_client/src/client/mod.rs
index ba215056a1..53fae1f015 100644
--- a/repo_client/src/client/mod.rs
+++ b/repo_client/src/client/mod.rs
@@ -349,6 +349,7 @@ impl RepoClient {
             let visited_pruner = VisitedPruner::new();
             select_all(params.mfnodes.iter().map(|manifest_id| {
                 get_changed_manifests_stream(
+                    self.ctx.clone(),
                     self.repo.blobrepo(),
                     &manifest_id,
                     &basemfnode,
@@ -361,6 +362,7 @@ impl RepoClient {
         } else {
             match params.mfnodes.get(0) {
                 Some(mfnode) => get_changed_manifests_stream(
+                    self.ctx.clone(),
                     self.repo.blobrepo(),
                     &mfnode,
                     &basemfnode,
@@ -429,6 +431,7 @@ impl HgCommands for RepoClient {
         info!(self.logger(), "between pairs {:?}", pairs);

         struct ParentStream {
+            ctx: CoreContext,
             repo: MononokeRepo,
             n: HgNodeHash,
             bottom: HgNodeHash,
@@ -436,8 +439,14 @@ impl HgCommands for RepoClient {
         };

         impl ParentStream {
-            fn new(repo: &MononokeRepo, top: HgNodeHash, bottom: HgNodeHash) -> Self {
+            fn new(
+                ctx: CoreContext,
+                repo: &MononokeRepo,
+                top: HgNodeHash,
+                bottom: HgNodeHash,
+            ) -> Self {
                 ParentStream {
+                    ctx,
                     repo: repo.clone(),
                     n: top,
                     bottom: bottom,
@@ -456,11 +465,10 @@ impl HgCommands for RepoClient {
                 }

                 self.wait_cs = self.wait_cs.take().or_else(|| {
-                    Some(
-                        self.repo
-                            .blobrepo()
-                            .get_changeset_by_changesetid(&HgChangesetId::new(self.n)),
-                    )
+                    Some(self.repo.blobrepo().get_changeset_by_changesetid(
+                        self.ctx.clone(),
+                        &HgChangesetId::new(self.n),
+                    ))
                 });
                 let cs = try_ready!(self.wait_cs.as_mut().unwrap().poll());
                 self.wait_cs = None; // got it
@@ -476,11 +484,11 @@ impl HgCommands for RepoClient {
         // TODO(jsgf): do pairs in parallel?
         // TODO: directly return stream of streams
-        let repo = self.repo.clone();
+        cloned!(self.ctx, self.repo);
         stream::iter_ok(pairs.into_iter())
             .and_then(move |(top, bottom)| {
                 let mut f = 1;
-                ParentStream::new(&repo, top, bottom)
+                ParentStream::new(ctx.clone(), &repo, top, bottom)
                     .enumerate()
                     .filter(move |&(i, _)| {
                         if i == f {
@@ -867,7 +875,7 @@ impl HgCommands for RepoClient {
                 fetcher,
                 repoid,
             }) => fetcher
-                .fetch_changelog(*repoid, blobstore.clone())
+                .fetch_changelog(self.ctx.clone(), *repoid, blobstore.clone())
                 .right_future(),
         };
@@ -922,6 +930,7 @@ impl HgCommands for RepoClient {
 }

 fn get_changed_manifests_stream(
+    ctx: CoreContext,
     repo: &BlobRepo,
     mfid: &HgNodeHash,
     basemfid: &HgNodeHash,
@@ -931,12 +940,17 @@ fn get_changed_manifests_stream(
     trace: TraceContext,
 ) -> BoxStream<(Box, Option), Error> {
     let mfid = HgManifestId::new(*mfid);
-    let manifest = repo.get_manifest_by_nodeid(&mfid)
-        .traced(&trace, "fetch rootmf", trace_args!());
+    let manifest = repo.get_manifest_by_nodeid(ctx.clone(), &mfid).traced(
+        &trace,
+        "fetch rootmf",
+        trace_args!(),
+    );
     let basemfid = HgManifestId::new(*basemfid);
-    let basemanifest =
-        repo.get_manifest_by_nodeid(&basemfid)
-            .traced(&trace, "fetch baserootmf", trace_args!());
+    let basemanifest = repo.get_manifest_by_nodeid(ctx.clone(), &basemfid).traced(
+        &trace,
+        "fetch baserootmf",
+        trace_args!(),
+    );

     let root_entry_stream = stream::once(Ok((repo.get_root_entry(&mfid), rootpath.clone())));
@@ -947,9 +961,16 @@ fn get_changed_manifests_stream(
     let changed_entries = manifest
         .join(basemanifest)
         .map({
-            let rootpath = rootpath.clone();
+            cloned!(ctx, rootpath);
             move |(mf, basemf)| {
-                changed_entry_stream_with_pruner(&mf, &basemf, rootpath, pruner, Some(max_depth))
+                changed_entry_stream_with_pruner(
+                    ctx,
+                    &mf,
+                    &basemf,
+                    rootpath,
+                    pruner,
+                    Some(max_depth),
+                )
             }
         })
         .flatten_stream();
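The `cloned!` macro that replaces the hand-written clones above is a small utility used throughout this codebase: it clones each listed binding into a same-named local (the field name, for `self.ctx`), so the following `move` closure can take ownership without consuming the original. Roughly, as a sketch:

    // cloned!(self.ctx, self.repo); expands to approximately:
    let ctx = self.ctx.clone();
    let repo = self.repo.clone();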
@@ -995,7 +1016,7 @@ fn fetch_treepack_part_input(
     let node = entry.get_hash().clone();
     let path = repo_path.clone();

-    let parents = entry.get_parents().traced(
+    let parents = entry.get_parents(ctx.clone()).traced(
         &trace,
         "fetching parents",
         trace_args!(
@@ -1004,18 +1025,19 @@ fn fetch_treepack_part_input(
         ),
     );

-    let linknode_fut = repo.get_linknode(ctx, &repo_path, &entry.get_hash().into_nodehash())
-        .traced(
-            &trace,
-            "fetching linknode",
-            trace_args!(
+    let linknode_fut =
+        repo.get_linknode(ctx.clone(), &repo_path, &entry.get_hash().into_nodehash())
+            .traced(
+                &trace,
+                "fetching linknode",
+                trace_args!(
                 "node" => node.to_string(),
                 "path" => path.to_string()
             ),
-        );
+            );

     let content_fut = entry
-        .get_raw_content()
+        .get_raw_content(ctx.clone())
         .map(|blob| blob.into_inner())
         .traced(
             &trace,
@@ -1028,8 +1050,8 @@ fn fetch_treepack_part_input(
     let validate_content = if validate_content {
         entry
-            .get_raw_content()
-            .join(entry.get_parents())
+            .get_raw_content(ctx.clone())
+            .join(entry.get_parents(ctx))
             .and_then(move |(content, parents)| {
                 let (p1, p2) = parents.get_nodes();
                 let actual = node.into_nodehash();
diff --git a/repo_client/src/client/remotefilelog.rs b/repo_client/src/client/remotefilelog.rs
index 051063020a..bbe6067473 100644
--- a/repo_client/src/client/remotefilelog.rs
+++ b/repo_client/src/client/remotefilelog.rs
@@ -45,7 +45,7 @@ pub fn create_remotefilelog_blob(
     let trace_args = trace_args!("node" => node.to_string(), "path" => path.to_string());

     // raw_content includes copy information
-    let raw_content_bytes = repo.get_file_size(&HgFileNodeId::new(node))
+    let raw_content_bytes = repo.get_file_size(ctx.clone(), &HgFileNodeId::new(node))
         .map({
             move |file_size| match lfs_params.threshold {
                 Some(threshold) => (file_size <= threshold, file_size),
@@ -53,20 +53,20 @@ pub fn create_remotefilelog_blob(
             }
         })
         .and_then({
-            cloned!(repo);
+            cloned!(ctx, repo);
             move |(direct_fetching_file, file_size)| {
                 if direct_fetching_file {
                     (
-                        repo.get_file_content(&node).left_future(),
+                        repo.get_file_content(ctx, &node).left_future(),
                         Ok(RevFlags::REVIDX_DEFAULT_FLAGS).into_future(),
                     )
                 } else {
                     // pass content id to prevent envelope fetching
                     cloned!(repo);
                     (
-                        repo.get_file_content_id(&HgFileNodeId::new(node))
+                        repo.get_file_content_id(ctx.clone(), &HgFileNodeId::new(node))
                             .and_then(move |content_id| {
-                                repo.generate_lfs_file(content_id, file_size)
+                                repo.generate_lfs_file(ctx, content_id, file_size)
                             })
                             .right_future(),
                         Ok(RevFlags::REVIDX_EXTSTORED).into_future(),
@@ -184,7 +184,7 @@ fn validate_content(
     actual: HgNodeHash,
     mut scuba_logger: ScubaSampleBuilder,
 ) -> impl Future {
-    let file_content = repo.get_file_content(&actual);
+    let file_content = repo.get_file_content(ctx.clone(), &actual);
     let repopath = RepoPath::FilePath(path.clone());
     let filenode = repo.get_filenode(ctx, &repopath, &actual);
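The `create_remotefilelog_blob` hunk above chooses between inlining file content and emitting an LFS pointer purely by size: files at or under `lfs_params.threshold` are fetched directly and flagged `REVIDX_DEFAULT_FLAGS`, while larger ones go through `generate_lfs_file` and are flagged `REVIDX_EXTSTORED`. A sketch of the predicate; the `None` arm is not visible in the hunk, so treating a missing threshold as "always inline" is an assumption here:

    // Hypothetical helper mirroring the match in create_remotefilelog_blob.
    fn fetch_directly(file_size: u64, threshold: Option<u64>) -> bool {
        match threshold {
            Some(threshold) => file_size <= threshold,
            None => true, // assumed: no threshold configured means no LFS redirection
        }
    }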
diff --git a/repo_client/src/client/streaming_clone.rs b/repo_client/src/client/streaming_clone.rs
index 60b8d29283..5c2cb051a2 100644
--- a/repo_client/src/client/streaming_clone.rs
+++ b/repo_client/src/client/streaming_clone.rs
@@ -13,6 +13,7 @@ use futures_ext::{BoxFuture, FutureExt};
 use sql::Connection;

 use blobstore::Blobstore;
+use context::CoreContext;
 use mercurial_types::RepositoryId;
 use mononoke_types::BlobstoreBytes;
@@ -62,6 +63,7 @@ impl SqlStreamingChunksFetcher {
     pub fn fetch_changelog(
         &self,
+        ctx: CoreContext,
         repo_id: RepositoryId,
         blobstore: impl Blobstore + Clone,
     ) -> BoxFuture {
@@ -75,7 +77,7 @@ impl SqlStreamingChunksFetcher {
                     let data_blob_key = String::from_utf8_lossy(&data_blob_name).into_owned();
                     res.data_blobs.push(
                         blobstore
-                            .get(data_blob_key.clone())
+                            .get(ctx.clone(), data_blob_key.clone())
                             .and_then(|data| {
                                 data.ok_or(
                                     ErrorKind::MissingStreamingBlob(data_blob_key).into(),
@@ -87,7 +89,7 @@ impl SqlStreamingChunksFetcher {
                     let idx_blob_key = String::from_utf8_lossy(&idx_blob_name).into_owned();
                     res.index_blobs.push(
                         blobstore
-                            .get(idx_blob_key.clone())
+                            .get(ctx.clone(), idx_blob_key.clone())
                             .and_then(|data| {
                                 data.ok_or(ErrorKind::MissingStreamingBlob(idx_blob_key).into())
                             })
diff --git a/revset/src/range.rs b/revset/src/range.rs
index c8e663fccf..0cedb6bc8b 100644
--- a/revset/src/range.rs
+++ b/revset/src/range.rs
@@ -55,7 +55,7 @@ fn make_pending(
     Box::new(
         {
             let repo = repo.clone();
-            repo.get_bonsai_changeset(child.hash)
+            repo.get_bonsai_changeset(ctx.clone(), child.hash)
                 .map(move |cs| {
                     let parents: Vec<_> = cs.parents().cloned().collect();
                     (child, parents)
diff --git a/server/repo_listener/src/repo_handlers.rs b/server/repo_listener/src/repo_handlers.rs
index 77a877312e..cb4907ff2a 100644
--- a/server/repo_listener/src/repo_handlers.rs
+++ b/server/repo_listener/src/repo_handlers.rs
@@ -54,6 +54,8 @@ pub fn repo_handlers(
                 root_log,
                 "Start warming for repo {}, type {:?}", reponame, config.repotype
             );
+            // TODO(T37478150, luk): this is not a test use case, need to address this later
+            let ctx = CoreContext::test_mock();

             let ensure_myrouter_ready = match config.get_db_address() {
                 None => future::ok(()).left_future(),
                 Some(db_address) => {
@@ -82,8 +84,12 @@ pub fn repo_handlers(
                 None => Default::default(),
             };

-            let mut hook_manager =
-                HookManager::new_with_blobrepo(hook_manager_params, blobrepo.clone(), logger);
+            let mut hook_manager = HookManager::new_with_blobrepo(
+                ctx.clone(),
+                hook_manager_params,
+                blobrepo.clone(),
+                logger,
+            );

             info!(root_log, "Loading hooks");
             try_boxfuture!(load_hooks(&mut hook_manager, config.clone()));
@@ -118,7 +124,7 @@ pub fn repo_handlers(
                 Some(skiplist_index_blobstore_key) => {
                     let blobstore = repo.blobrepo().get_blobstore();
                     blobstore
-                        .get(skiplist_index_blobstore_key)
+                        .get(ctx.clone(), skiplist_index_blobstore_key)
                         .and_then(|maybebytes| {
                             let map = match maybebytes {
                                 Some(bytes) => {
@@ -136,9 +142,7 @@ pub fn repo_handlers(
             // TODO (T32873881): Arc should become BlobRepo
             let initial_warmup = ensure_myrouter_ready.and_then({
-                cloned!(reponame, listen_log);
-                // TODO(T37478150, luk): this is not a test use case, need to address this later
-                let ctx = CoreContext::test_mock();
+                cloned!(ctx, reponame, listen_log);
                 let blobrepo = repo.blobrepo().clone();
                 move |()| {
                     cache_warmup(ctx, Arc::new(blobrepo), config.cache_warmup, listen_log)
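Both streaming_clone.rs and repo_handlers.rs above pick up the same underlying change to the `Blobstore` trait: `get` now takes the context as its first argument. A rough sketch of the call shape as it appears in these hunks, with `key` as a placeholder:

    // Assumed shape, based on the call sites above: get yields a future of
    // Option<BlobstoreBytes>, and a missing blob is turned into an error.
    let fut = blobstore
        .get(ctx.clone(), key.clone())
        .and_then(|data| data.ok_or(ErrorKind::MissingStreamingBlob(key).into()));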
diff --git a/tests/fixtures/src/lib.rs b/tests/fixtures/src/lib.rs
index 8bd46d7a4d..1461ab630d 100644
--- a/tests/fixtures/src/lib.rs
+++ b/tests/fixtures/src/lib.rs
@@ -33,6 +33,7 @@ use mononoke_types::{BonsaiChangesetMut, DateTime, FileChange, FileContents, Fil
 use slog::Logger;

 fn store_files(
+    ctx: CoreContext,
     files: BTreeMap<&str, Option<&str>>,
     repo: BlobRepo,
 ) -> BTreeMap> {
@@ -44,7 +45,7 @@ fn store_files(
             Some(content) => {
                 let size = content.len();
                 let content = FileContents::Bytes(Bytes::from(content));
-                let content_id = repo.unittest_store(content).wait().unwrap();
+                let content_id = repo.unittest_store(ctx.clone(), content).wait().unwrap();
                 let file_change =
                     FileChange::new(content_id, FileType::Regular, size as u64, None);
                 res.insert(path, Some(file_change));
@@ -63,7 +64,7 @@ fn create_bonsai_changeset_from_test_data(
     commit_metadata: BTreeMap<&str, &str>,
 ) {
     let ctx = CoreContext::test_mock();
-    let file_changes = store_files(files, blobrepo.clone());
+    let file_changes = store_files(ctx.clone(), files, blobrepo.clone());
     let date: Vec<_> = commit_metadata
         .get("author_date")
         .unwrap()
diff --git a/tests/utils/src/lib.rs b/tests/utils/src/lib.rs
index 9f3eb81e0a..5750217ffe 100644
--- a/tests/utils/src/lib.rs
+++ b/tests/utils/src/lib.rs
@@ -24,6 +24,7 @@ use mononoke_types::{BonsaiChangesetMut, ChangesetId, DateTime, FileChange, File
 use std::collections::BTreeMap;

 pub fn store_files(
+    ctx: CoreContext,
     files: BTreeMap<&str, Option<&str>>,
     repo: BlobRepo,
 ) -> BTreeMap> {
@@ -35,7 +36,7 @@ pub fn store_files(
             Some(content) => {
                 let size = content.len();
                 let content = FileContents::Bytes(Bytes::from(content));
-                let content_id = repo.unittest_store(content).wait().unwrap();
+                let content_id = repo.unittest_store(ctx.clone(), content).wait().unwrap();
                 let file_change =
                     FileChange::new(content_id, FileType::Regular, size as u64, None);
                 res.insert(path, Some(file_change));
@@ -49,6 +50,7 @@ pub fn store_files(
 }

 pub fn store_rename(
+    ctx: CoreContext,
     copy_src: (MPath, ChangesetId),
     path: &str,
     content: &str,
@@ -57,7 +59,7 @@ pub fn store_rename(
     let path = MPath::new(path).unwrap();
     let size = content.len();
     let content = FileContents::Bytes(Bytes::from(content));
-    let content_id = repo.unittest_store(content).wait().unwrap();
+    let content_id = repo.unittest_store(ctx, content).wait().unwrap();
     let file_change =
         FileChange::new(content_id, FileType::Regular, size as u64, Some(copy_src));
     (path, Some(file_change))
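With both test-utility crates updated, fixture code follows the same shape end to end. A hypothetical snippet using the new signatures; `blobrepo` and `parent_cs_id` are placeholders, and the trailing `repo` parameter of `store_rename` is inferred from its body, since the hunk cuts off after `content`:

    let ctx = CoreContext::test_mock();
    let files = btreemap! { "dir/file" => Some("content") };
    let file_changes = store_files(ctx.clone(), files, blobrepo.clone());
    let (path, rename_change) = store_rename(
        ctx,
        (MPath::new("dir/file").unwrap(), parent_cs_id), // copy source: path + changeset
        "dir/file_moved",
        "content",
        blobrepo.clone(),
    );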