mononoke: pass CoreContext down to blobstore

Reviewed By: jsgf

Differential Revision: D13324220

fbshipit-source-id: af7a2a650338ea66e504ea0acae1d103af10f8e3
Author: Lukas Piatkowski, 2018-12-04 11:28:47 -08:00; committed by Facebook Github Bot
parent ba5db40d9a
commit 5d9a151d85
67 changed files with 2189 additions and 1110 deletions
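The change is mechanical but broad: every blobstore read and write now carries the request's CoreContext, so each helper between the API surface and storage grows a ctx parameter. Judging from the call sites and the TestBlobstore impl later in this diff, the updated trait looks roughly like the sketch below (import paths and trait bounds are assumptions; the real definition lives in the blobstore crate):

use context::CoreContext;
use failure::Error;
use futures_ext::BoxFuture;
use mononoke_types::BlobstoreBytes; // assumed import path

pub trait Blobstore: Send + Sync + 'static {
    // Reads take the per-request context (useful for logging, tracing and
    // cache accounting); implementations that ignore it bind `_ctx`.
    fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>;
    // Writes carry the context too.
    fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error>;
}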


@ -13,6 +13,7 @@ use chrono::{DateTime, FixedOffset};
use failure::{err_msg, Error};
use blobrepo::HgBlobChangeset;
use context::CoreContext;
use futures::prelude::*;
use futures_ext::{spawn_future, BoxFuture, FutureExt};
use mercurial_types::{Changeset as HgChangeset, Entry as HgEntry, Type};
@ -77,7 +78,10 @@ pub struct EntryWithSizeAndContentHash {
}
impl EntryWithSizeAndContentHash {
pub fn materialize_future(entry: Box<HgEntry + Sync>) -> BoxFuture<Self, Error> {
pub fn materialize_future(
ctx: CoreContext,
entry: Box<HgEntry + Sync>,
) -> BoxFuture<Self, Error> {
let name = try_boxfuture!(
entry
.get_name()
@ -89,7 +93,7 @@ impl EntryWithSizeAndContentHash {
let ttype = entry.get_type().into();
let hash = entry.get_hash().to_string();
spawn_future(entry.get_content().and_then(move |content| {
spawn_future(entry.get_content(ctx).and_then(move |content| {
let size = match &content {
Content::File(contents)
| Content::Executable(contents)


@ -89,6 +89,7 @@ impl MononokeRepo {
fn get_raw_file(
&self,
ctx: CoreContext,
changeset: String,
path: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
@ -101,7 +102,7 @@ impl MononokeRepo {
let changesetid = try_boxfuture!(FS::get_changeset_id(changeset));
let repo = self.repo.clone();
api::get_content_by_path(repo, changesetid, Some(mpath))
api::get_content_by_path(ctx, repo, changesetid, Some(mpath))
.and_then(move |content| match content {
Content::File(content)
| Content::Executable(content)
@ -175,11 +176,15 @@ impl MononokeRepo {
.boxify()
}
fn get_blob_content(&self, hash: String) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
fn get_blob_content(
&self,
ctx: CoreContext,
hash: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
let blobhash = try_boxfuture!(FS::get_nodehash(&hash));
self.repo
.get_file_content(&blobhash)
.get_file_content(ctx, &blobhash)
.and_then(move |content| match content {
FileContents::Bytes(content) => {
Ok(MononokeRepoResponse::GetBlobContent { content })
@ -191,6 +196,7 @@ impl MononokeRepo {
fn list_directory(
&self,
ctx: CoreContext,
changeset: String,
path: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
@ -202,7 +208,7 @@ impl MononokeRepo {
let changesetid = try_boxfuture!(FS::get_changeset_id(changeset));
let repo = self.repo.clone();
api::get_content_by_path(repo, changesetid, mpath)
api::get_content_by_path(ctx, repo, changesetid, mpath)
.and_then(move |content| match content {
Content::Tree(tree) => Ok(tree),
_ => Err(ErrorKind::InvalidInput(path.to_string(), None).into()),
@ -218,16 +224,19 @@ impl MononokeRepo {
.boxify()
}
fn get_tree(&self, hash: String) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
fn get_tree(
&self,
ctx: CoreContext,
hash: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
let treehash = try_boxfuture!(FS::get_nodehash(&hash));
let treemanifestid = HgManifestId::new(treehash);
self.repo
.get_manifest_by_nodeid(&treemanifestid)
.map(|tree| {
join_all(
tree.list()
.map(|entry| EntryWithSizeAndContentHash::materialize_future(entry)),
)
.get_manifest_by_nodeid(ctx.clone(), &treemanifestid)
.map(move |tree| {
join_all(tree.list().map(move |entry| {
EntryWithSizeAndContentHash::materialize_future(ctx.clone(), entry)
}))
})
.flatten()
.map(|files| MononokeRepoResponse::GetTree { files })
@ -235,22 +244,30 @@ impl MononokeRepo {
.boxify()
}
fn get_changeset(&self, hash: String) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
fn get_changeset(
&self,
ctx: CoreContext,
hash: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
let changesetid = try_boxfuture!(FS::get_changeset_id(hash));
self.repo
.get_changeset_by_changesetid(&changesetid)
.get_changeset_by_changesetid(ctx, &changesetid)
.and_then(|changeset| changeset.try_into().map_err(From::from))
.map(|changeset| MononokeRepoResponse::GetChangeset { changeset })
.from_err()
.boxify()
}
fn download_large_file(&self, oid: String) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
fn download_large_file(
&self,
ctx: CoreContext,
oid: String,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
let sha256_oid = try_boxfuture!(FS::get_sha256_oid(oid));
self.repo
.get_file_content_by_alias(sha256_oid)
.get_file_content_by_alias(ctx, sha256_oid)
.and_then(move |content| match content {
FileContents::Bytes(content) => {
Ok(MononokeRepoResponse::DownloadLargeFile { content })
@ -262,6 +279,7 @@ impl MononokeRepo {
fn upload_large_file(
&self,
ctx: CoreContext,
oid: String,
body: Bytes,
) -> BoxFuture<MononokeRepoResponse, ErrorKind> {
@ -278,7 +296,7 @@ impl MononokeRepo {
}
self.repo
.upload_file_content_by_alias(sha256_oid, body)
.upload_file_content_by_alias(ctx, sha256_oid, body)
.and_then(|_| Ok(MononokeRepoResponse::UploadLargeFile {}))
.from_err()
.boxify()
@ -312,23 +330,23 @@ impl MononokeRepo {
use MononokeRepoQuery::*;
match msg {
GetRawFile { changeset, path } => self.get_raw_file(changeset, path),
GetBlobContent { hash } => self.get_blob_content(hash),
ListDirectory { changeset, path } => self.list_directory(changeset, path),
GetTree { hash } => self.get_tree(hash),
GetChangeset { hash } => self.get_changeset(hash),
GetRawFile { changeset, path } => self.get_raw_file(ctx, changeset, path),
GetBlobContent { hash } => self.get_blob_content(ctx, hash),
ListDirectory { changeset, path } => self.list_directory(ctx, changeset, path),
GetTree { hash } => self.get_tree(ctx, hash),
GetChangeset { hash } => self.get_changeset(ctx, hash),
IsAncestor {
proposed_ancestor,
proposed_descendent,
} => self.is_ancestor(ctx, proposed_ancestor, proposed_descendent),
DownloadLargeFile { oid } => self.download_large_file(oid),
DownloadLargeFile { oid } => self.download_large_file(ctx, oid),
LfsBatch {
repo_name,
req,
lfs_url,
} => self.lfs_batch(repo_name, req, lfs_url),
UploadLargeFile { oid, body } => self.upload_large_file(oid, body),
UploadLargeFile { oid, body } => self.upload_large_file(ctx, oid, body),
}
}
}
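A pattern worth noting in the dispatch above: ctx is moved into whichever match arm runs (only one executes, so no clone is needed there), while any function that uses the context more than once clones it first. A dependency-free sketch of the idiom; Ctx is a hypothetical stand-in for CoreContext, which is cheap to clone:

use std::sync::Arc;

#[derive(Clone)]
struct Ctx(Arc<String>); // hypothetical stand-in for CoreContext

fn get_tree(ctx: Ctx, hash: &str) -> String {
    format!("tree {} for {}", hash, ctx.0)
}

fn get_changeset(ctx: Ctx, hash: &str) -> String {
    format!("changeset {} for {}", hash, ctx.0)
}

fn main() {
    let ctx = Ctx(Arc::new("request-42".to_string()));
    // Clone for the first consumer so the original can still be moved
    // into the second.
    println!("{}", get_tree(ctx.clone(), "abc123"));
    println!("{}", get_changeset(ctx, "def456"));
}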


@ -13,6 +13,7 @@ use futures::future::{join_all, Future};
use futures_ext::FutureExt;
use bonsai_utils;
use context::CoreContext;
use mercurial_types::{Changeset, HgFileNodeId, HgManifestId, HgNodeHash, MPath};
use mononoke_types::{BlobstoreValue, BonsaiChangeset, BonsaiChangesetMut, ChangesetId, FileChange,
MononokeId};
@ -24,12 +25,14 @@ use errors::*;
/// Creates a bonsai changeset from an already-created HgBlobChangeset.
pub fn create_bonsai_changeset_object(
ctx: CoreContext,
cs: HgBlobChangeset,
parent_manifests: Vec<HgManifestId>,
bonsai_parents: Vec<ChangesetId>,
repo: BlobRepo,
) -> impl Future<Item = BonsaiChangeset, Error = Error> {
let file_changes = find_file_changes(
ctx,
cs.clone(),
parent_manifests,
repo.clone(),
@ -67,6 +70,7 @@ pub fn create_bonsai_changeset_object(
}
pub fn save_bonsai_changeset_object(
ctx: CoreContext,
blobstore: RepoBlobstore,
bonsai_cs: BonsaiChangeset,
) -> impl Future<Item = (), Error = Error> {
@ -74,11 +78,14 @@ pub fn save_bonsai_changeset_object(
let bcs_id = bonsai_blob.id().clone();
let blobstore_key = bcs_id.blobstore_key();
blobstore.put(blobstore_key, bonsai_blob.into()).map(|_| ())
blobstore
.put(ctx, blobstore_key, bonsai_blob.into())
.map(|_| ())
}
// Finds files that were changed in the commit and returns them in the format suitable for BonsaiChangeset
fn find_file_changes(
ctx: CoreContext,
cs: HgBlobChangeset,
parent_manifests: Vec<HgManifestId>,
repo: BlobRepo,
@ -93,17 +100,18 @@ fn find_file_changes(
.get(1)
.map(|root_mf| repo.get_root_entry(root_mf));
bonsai_utils::bonsai_diff(root_entry, p1_root_entry, p2_root_entry)
bonsai_utils::bonsai_diff(ctx.clone(), root_entry, p1_root_entry, p2_root_entry)
.map(move |changed_file| match changed_file {
bonsai_utils::BonsaiDiffResult::Changed(path, ty, entry_id) => {
let file_node_id = entry_id.into_nodehash();
cloned!(bonsai_parents, repo, parent_manifests);
repo.get_file_content(&file_node_id)
cloned!(ctx, bonsai_parents, repo, parent_manifests);
repo.get_file_content(ctx.clone(), &file_node_id)
.and_then(move |file_contents| {
let size = file_contents.size();
let content_id = file_contents.into_blob().id().clone();
get_copy_info(
ctx,
repo,
bonsai_parents,
path.clone(),
@ -122,8 +130,8 @@ fn find_file_changes(
}
bonsai_utils::BonsaiDiffResult::ChangedReusedId(path, ty, entry_id) => {
let file_node_id = entry_id.into_nodehash();
cloned!(repo);
repo.get_file_content(&file_node_id).and_then(move |file_contents| {
cloned!(ctx, repo);
repo.get_file_content(ctx, &file_node_id).and_then(move |file_contents| {
let size = file_contents.size();
let content_id = file_contents.into_blob().id().clone();
@ -150,61 +158,64 @@ fn find_file_changes(
// In hg, copy information is (path, filenode); in bonsai it's (path, parent cs id). That means that
// we need to find a parent from which this filenode was copied.
fn get_copy_info(
ctx: CoreContext,
repo: BlobRepo,
bonsai_parents: Vec<ChangesetId>,
copy_from_path: MPath,
nodehash: HgNodeHash,
parent_manifests: Vec<HgManifestId>,
) -> impl Future<Item = Option<(MPath, ChangesetId)>, Error = Error> {
repo.get_hg_file_copy_from_blobstore(&nodehash).and_then({
cloned!(repo);
move |maybecopy| match maybecopy {
Some((repopath, copyfromnode)) => {
let repopath: Result<MPath> = repopath
.mpath()
.cloned()
.ok_or(ErrorKind::UnexpectedRootPath.into());
repo.get_hg_file_copy_from_blobstore(ctx.clone(), &nodehash)
.and_then({
cloned!(repo);
move |maybecopy| match maybecopy {
Some((repopath, copyfromnode)) => {
let repopath: Result<MPath> = repopath
.mpath()
.cloned()
.ok_or(ErrorKind::UnexpectedRootPath.into());
let parents_bonsai_and_mfs =
bonsai_parents.into_iter().zip(parent_manifests.into_iter());
let parents_bonsai_and_mfs =
bonsai_parents.into_iter().zip(parent_manifests.into_iter());
repopath
.into_future()
.and_then(move |repopath| {
join_all(parents_bonsai_and_mfs.map({
cloned!(repopath);
move |(bonsai_parent, parent_mf)| {
repo.find_file_in_manifest(&repopath, parent_mf).map(
move |res| match res {
Some((_, node)) if node == HgFileNodeId::new(copyfromnode) => {
Some(bonsai_parent)
}
_ => None,
},
)
repopath
.into_future()
.and_then(move |repopath| {
join_all(parents_bonsai_and_mfs.map({
cloned!(ctx, repopath);
move |(bonsai_parent, parent_mf)| {
repo.find_file_in_manifest(ctx.clone(), &repopath, parent_mf)
.map(move |res| match res {
Some((_, node))
if node == HgFileNodeId::new(copyfromnode) =>
{
Some(bonsai_parent)
}
_ => None,
})
}
})).map(move |res| (res, repopath))
})
.and_then(move |(copied_from_bonsai_commits, repopath)| {
let copied_from: Vec<_> = copied_from_bonsai_commits
.into_iter()
.filter_map(|x| x)
.collect();
match copied_from.get(0) {
Some(bonsai_cs_copied_from) => {
Ok(Some((repopath, bonsai_cs_copied_from.clone())))
}
None => Err(ErrorKind::IncorrectCopyInfo {
from_path: copy_from_path,
from_node: nodehash,
to_path: repopath.clone(),
to_node: copyfromnode,
}.into()),
}
})).map(move |res| (res, repopath))
})
.and_then(move |(copied_from_bonsai_commits, repopath)| {
let copied_from: Vec<_> = copied_from_bonsai_commits
.into_iter()
.filter_map(|x| x)
.collect();
match copied_from.get(0) {
Some(bonsai_cs_copied_from) => {
Ok(Some((repopath, bonsai_cs_copied_from.clone())))
}
None => Err(ErrorKind::IncorrectCopyInfo {
from_path: copy_from_path,
from_node: nodehash,
to_path: repopath.clone(),
to_node: copyfromnode,
}.into()),
}
})
.boxify()
})
.boxify()
}
None => Ok(None).into_future().boxify(),
}
None => Ok(None).into_future().boxify(),
}
})
})
}
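This file leans on Mononoke's cloned! macro to move fresh clones of ctx, repo and friends into each move closure. A minimal reconstruction of what it expands to (an assumed equivalent; the real macro also accepts field accesses such as cloned!(self.repo_id), which this sketch omits):

macro_rules! cloned {
    ($($name:ident),+) => {
        $( let $name = $name.clone(); )+
    };
}

fn main() {
    let ctx = String::from("ctx");
    let repo = String::from("repo");
    let closure = {
        // Shadow the outer bindings with clones, then move the clones in.
        cloned!(ctx, repo);
        move || format!("{} / {}", ctx, repo)
    };
    println!("{}", closure());
    // The originals are untouched and still usable here.
    println!("{} / {}", ctx, repo);
}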


@ -13,6 +13,7 @@ use futures::future::{Either, Future, IntoFuture};
use blobstore::Blobstore;
use context::CoreContext;
use mercurial;
use mercurial::changeset::Extra;
use mercurial::revlogrepo::RevlogChangeset;
@ -142,6 +143,7 @@ impl HgBlobChangeset {
}
pub fn load(
ctx: CoreContext,
blobstore: &RepoBlobstore,
changesetid: &HgChangesetId,
) -> impl Future<Item = Option<Self>, Error = Error> + Send + 'static {
@ -157,7 +159,7 @@ impl HgBlobChangeset {
let key = changesetid.blobstore_key();
let fut = blobstore
.get(key.clone())
.get(ctx, key.clone())
.and_then(move |got| match got {
None => Ok(None),
Some(bytes) => {
@ -185,6 +187,7 @@ impl HgBlobChangeset {
pub fn save(
&self,
ctx: CoreContext,
blobstore: RepoBlobstore,
) -> impl Future<Item = (), Error = Error> + Send + 'static {
let key = self.changesetid.blobstore_key();
@ -207,7 +210,7 @@ impl HgBlobChangeset {
Ok(envelope.into_blob())
})
.into_future()
.and_then(move |blob| blobstore.put(key, blob.into()))
.and_then(move |blob| blobstore.put(ctx, key, blob.into()))
}
#[inline]


@ -179,7 +179,7 @@ impl CachingChangesetFetcher {
format!("changesetscache_{}", bucket * self.cache_bucket_size)
}
fn fill_cache(&self, gen_num: u64) -> impl Future<Item = (), Error = Error> {
fn fill_cache(&self, ctx: CoreContext, gen_num: u64) -> impl Future<Item = (), Error = Error> {
let blobstore_cache_key = self.get_blobstore_cache_key(gen_num);
if !self.already_fetched_blobs
.lock()
@ -188,7 +188,7 @@ impl CachingChangesetFetcher {
{
cloned!(self.fetches_from_blobstore);
self.blobstore
.get(blobstore_cache_key.clone())
.get(ctx, blobstore_cache_key.clone())
.map({
let cs_fetcher = self.clone();
move |val| {
@ -228,15 +228,21 @@ impl CachingChangesetFetcher {
cloned!(self.repo_id, self.max_request_latency);
self.cache_requests.fetch_add(1, Ordering::Relaxed);
cachelib::get_cached_or_fill(&self.cache_pool, cache_key, move || {
self.cache_misses.fetch_add(1, Ordering::Relaxed);
self.changesets.get(ctx, repo_id, cs_id)
cachelib::get_cached_or_fill(&self.cache_pool, cache_key, {
cloned!(ctx);
move || {
self.cache_misses.fetch_add(1, Ordering::Relaxed);
self.changesets.get(ctx.clone(), repo_id, cs_id)
}
}).and_then(move |maybe_cs| maybe_cs.ok_or_else(|| err_msg(format!("{} not found", cs_id))))
.and_then({
let cs_fetcher = self.clone();
move |cs| {
if cs_fetcher.too_many_cache_misses() {
cs_fetcher.fill_cache(cs.gen).map(|()| cs).left_future()
cs_fetcher
.fill_cache(ctx, cs.gen)
.map(|()| cs)
.left_future()
} else {
future::ok(cs).right_future()
}
@ -393,13 +399,18 @@ mod tests {
}
impl Blobstore for TestBlobstore {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let blobstore = self.blobstore.lock().unwrap();
self.get_counter.fetch_add(1, Ordering::Relaxed);
Ok(blobstore.get(&key).cloned()).into_future().boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(
&self,
_ctx: CoreContext,
key: String,
value: BlobstoreBytes,
) -> BoxFuture<(), Error> {
let mut blobstore = self.blobstore.lock().unwrap();
blobstore.insert(key, value);
Ok(()).into_future().boxify()
@ -552,6 +563,7 @@ mod tests {
// Blob cache entries with gen number 0 up to 4
blobstore.put(
ctx.clone(),
"changesetscache_4".to_string(),
BlobstoreBytes::from_bytes(serialize_cs_entries(vec![
cs.get(ctx.clone(), REPO_ZERO, FIVES_CSID)


@ -20,6 +20,7 @@ use mercurial_types::nodehash::HgEntryId;
use mononoke_types::{ContentId, FileContents, MononokeId, hash::Sha256};
use blobstore::Blobstore;
use context::CoreContext;
use errors::*;
@ -44,15 +45,16 @@ impl PartialEq for HgBlobEntry {
impl Eq for HgBlobEntry {}
pub fn fetch_raw_filenode_bytes(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> BoxFuture<HgBlob, Error> {
fetch_file_envelope(blobstore, node_id)
fetch_file_envelope(ctx.clone(), blobstore, node_id)
.and_then({
let blobstore = blobstore.clone();
move |envelope| {
let envelope = envelope.into_mut();
let file_contents_fut = fetch_file_contents(&blobstore, envelope.content_id);
let file_contents_fut = fetch_file_contents(ctx, &blobstore, envelope.content_id);
let mut metadata = envelope.metadata;
if metadata.is_empty() {
@ -76,47 +78,52 @@ pub fn fetch_raw_filenode_bytes(
}
pub fn fetch_file_content_from_blobstore(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = FileContents, Error = Error> {
fetch_file_envelope(blobstore, node_id).and_then({
fetch_file_envelope(ctx.clone(), blobstore, node_id).and_then({
let blobstore = blobstore.clone();
move |envelope| {
let content_id = envelope.content_id();
fetch_file_contents(&blobstore, content_id.clone())
fetch_file_contents(ctx, &blobstore, content_id.clone())
}
})
}
pub fn fetch_file_size_from_blobstore(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgFileNodeId,
) -> impl Future<Item = u64, Error = Error> {
fetch_file_envelope(blobstore, node_id.into_nodehash())
fetch_file_envelope(ctx, blobstore, node_id.into_nodehash())
.map({ |envelope| envelope.content_size() })
}
pub fn fetch_file_content_id_from_blobstore(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgFileNodeId,
) -> impl Future<Item = ContentId, Error = Error> {
fetch_file_envelope(blobstore, node_id.into_nodehash())
fetch_file_envelope(ctx, blobstore, node_id.into_nodehash())
.map({ |envelope| *envelope.content_id() })
}
pub fn fetch_file_content_sha256_from_blobstore(
ctx: CoreContext,
blobstore: &RepoBlobstore,
content_id: ContentId,
) -> impl Future<Item = Sha256, Error = Error> {
fetch_file_contents(blobstore, content_id)
fetch_file_contents(ctx, blobstore, content_id)
.map(|file_content| get_sha256(&file_content.into_bytes()))
}
pub fn fetch_rename_from_blobstore(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = Option<(MPath, HgNodeHash)>, Error = Error> {
fetch_file_envelope(blobstore, node_id).and_then(|envelope| {
fetch_file_envelope(ctx, blobstore, node_id).and_then(|envelope| {
let envelope = envelope.into_mut();
// This is a bit of a hack because metadata is not the complete file. However, it's
@ -130,10 +137,11 @@ pub fn fetch_rename_from_blobstore(
}
pub fn fetch_file_envelope(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = HgFileEnvelope, Error = Error> {
fetch_file_envelope_opt(blobstore, node_id)
fetch_file_envelope_opt(ctx, blobstore, node_id)
.and_then(move |envelope| {
let envelope = envelope.ok_or(ErrorKind::HgContentMissing(
node_id,
@ -145,12 +153,13 @@ pub fn fetch_file_envelope(
}
pub fn fetch_file_envelope_opt(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = Option<HgFileEnvelope>, Error = Error> {
let blobstore_key = HgFileNodeId::new(node_id).blobstore_key();
blobstore
.get(blobstore_key.clone())
.get(ctx, blobstore_key.clone())
.context("While fetching manifest envelope blob")
.map_err(Error::from)
.and_then(move |bytes| {
@ -173,12 +182,13 @@ pub fn fetch_file_envelope_opt(
}
pub fn fetch_file_contents(
ctx: CoreContext,
blobstore: &RepoBlobstore,
content_id: ContentId,
) -> impl Future<Item = FileContents, Error = Error> {
let blobstore_key = content_id.blobstore_key();
blobstore
.get(blobstore_key.clone())
.get(ctx, blobstore_key.clone())
.context("While fetching content blob")
.map_err(Error::from)
.and_then(move |bytes| {
@ -212,10 +222,12 @@ impl HgBlobEntry {
}
}
fn get_raw_content_inner(&self) -> BoxFuture<HgBlob, Error> {
fn get_raw_content_inner(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error> {
match self.ty {
Type::Tree => fetch_raw_manifest_bytes(&self.blobstore, self.id.into_nodehash()),
Type::File(_) => fetch_raw_filenode_bytes(&self.blobstore, self.id.into_nodehash()),
Type::Tree => fetch_raw_manifest_bytes(ctx, &self.blobstore, self.id.into_nodehash()),
Type::File(_) => {
fetch_raw_filenode_bytes(ctx, &self.blobstore, self.id.into_nodehash())
}
}
}
}
@ -225,32 +237,36 @@ impl Entry for HgBlobEntry {
self.ty
}
fn get_parents(&self) -> BoxFuture<HgParents, Error> {
fn get_parents(&self, ctx: CoreContext) -> BoxFuture<HgParents, Error> {
match self.ty {
Type::Tree => fetch_manifest_envelope(&self.blobstore, self.id.into_nodehash())
.map(move |envelope| {
let (p1, p2) = envelope.parents();
HgParents::new(p1, p2)
})
.boxify(),
Type::File(_) => fetch_file_envelope(&self.blobstore, self.id.into_nodehash())
.map(move |envelope| {
let (p1, p2) = envelope.parents();
HgParents::new(p1, p2)
})
.boxify(),
Type::Tree => {
fetch_manifest_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash())
.map(move |envelope| {
let (p1, p2) = envelope.parents();
HgParents::new(p1, p2)
})
.boxify()
}
Type::File(_) => {
fetch_file_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash())
.map(move |envelope| {
let (p1, p2) = envelope.parents();
HgParents::new(p1, p2)
})
.boxify()
}
}
}
fn get_raw_content(&self) -> BoxFuture<HgBlob, Error> {
self.get_raw_content_inner()
fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error> {
self.get_raw_content_inner(ctx)
}
fn get_content(&self) -> BoxFuture<Content, Error> {
fn get_content(&self, ctx: CoreContext) -> BoxFuture<Content, Error> {
let blobstore = self.blobstore.clone();
match self.ty {
Type::Tree => {
BlobManifest::load(&blobstore, &HgManifestId::new(self.id.into_nodehash()))
BlobManifest::load(ctx, &blobstore, &HgManifestId::new(self.id.into_nodehash()))
.and_then({
let node_id = self.id.into_nodehash();
move |blob_manifest| {
@ -266,10 +282,11 @@ impl Entry for HgBlobEntry {
.from_err()
.boxify()
}
Type::File(ft) => fetch_file_envelope(&blobstore, self.id.into_nodehash())
Type::File(ft) => fetch_file_envelope(ctx.clone(), &blobstore, self.id.into_nodehash())
.and_then(move |envelope| {
let envelope = envelope.into_mut();
let file_contents_fut = fetch_file_contents(&blobstore, envelope.content_id);
let file_contents_fut =
fetch_file_contents(ctx, &blobstore, envelope.content_id);
file_contents_fut.map(move |contents| match ft {
FileType::Regular => Content::File(contents),
FileType::Executable => Content::Executable(contents),
@ -286,12 +303,14 @@ impl Entry for HgBlobEntry {
}
// XXX get_size should probably return a u64, not a usize
fn get_size(&self) -> BoxFuture<Option<usize>, Error> {
fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error> {
match self.ty {
Type::Tree => future::ok(None).boxify(),
Type::File(_) => fetch_file_envelope(&self.blobstore, self.id.into_nodehash())
.map(|envelope| Some(envelope.content_size() as usize))
.boxify(),
Type::File(_) => {
fetch_file_envelope(ctx.clone(), &self.blobstore, self.id.into_nodehash())
.map(|envelope| Some(envelope.content_size() as usize))
.boxify()
}
}
}
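The same threading happens one layer up: every Entry accessor that can touch storage now takes a CoreContext, while pure accessors such as get_type stay context-free. Reconstructed from this impl, using the types already imported in this file (a sketch only; the real trait lives in mercurial_types and its bounds are assumed):

pub trait Entry: Send + 'static {
    // No I/O involved, so no context needed.
    fn get_type(&self) -> Type;
    // Everything below may hit the blobstore and threads the context.
    fn get_parents(&self, ctx: CoreContext) -> BoxFuture<HgParents, Error>;
    fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error>;
    fn get_content(&self, ctx: CoreContext) -> BoxFuture<Content, Error>;
    fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error>;
}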


@ -13,6 +13,7 @@ use failure::{Error, FutureFailureErrorExt, Result, ResultExt};
use futures::future::{Future, IntoFuture};
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mercurial_types::{Entry, FileType, HgBlob, HgManifestEnvelope, MPathElement, Manifest, Type};
use mercurial_types::nodehash::{HgEntryId, HgManifestId, HgNodeHash, NULL_HASH};
@ -84,10 +85,11 @@ impl ManifestContent {
}
pub fn fetch_raw_manifest_bytes(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> BoxFuture<HgBlob, Error> {
fetch_manifest_envelope(blobstore, node_id)
fetch_manifest_envelope(ctx, blobstore, node_id)
.map(move |envelope| {
let envelope = envelope.into_mut();
HgBlob::from(envelope.contents)
@ -97,10 +99,11 @@ pub fn fetch_raw_manifest_bytes(
}
pub fn fetch_manifest_envelope(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = HgManifestEnvelope, Error = Error> {
fetch_manifest_envelope_opt(blobstore, node_id)
fetch_manifest_envelope_opt(ctx, blobstore, node_id)
.and_then(move |envelope| {
let envelope = envelope.ok_or(ErrorKind::HgContentMissing(node_id, Type::Tree))?;
Ok(envelope)
@ -110,12 +113,13 @@ pub fn fetch_manifest_envelope(
/// Like `fetch_manifest_envelope`, but returns None if the manifest wasn't found.
pub fn fetch_manifest_envelope_opt(
ctx: CoreContext,
blobstore: &RepoBlobstore,
node_id: HgNodeHash,
) -> impl Future<Item = Option<HgManifestEnvelope>, Error = Error> {
let blobstore_key = HgManifestId::new(node_id).blobstore_key();
blobstore
.get(blobstore_key.clone())
.get(ctx, blobstore_key.clone())
.context("While fetching manifest envelope blob")
.map_err(Error::from)
.and_then(move |bytes| {
@ -149,6 +153,7 @@ pub struct BlobManifest {
impl BlobManifest {
pub fn load(
ctx: CoreContext,
blobstore: &RepoBlobstore,
manifestid: &HgManifestId,
) -> BoxFuture<Option<Self>, Error> {
@ -164,7 +169,7 @@ impl BlobManifest {
})).into_future()
.boxify()
} else {
fetch_manifest_envelope_opt(&blobstore, manifestid.into_nodehash())
fetch_manifest_envelope_opt(ctx, &blobstore, manifestid.into_nodehash())
.and_then({
let blobstore = blobstore.clone();
move |envelope| match envelope {


@ -18,6 +18,7 @@ use futures_ext::{BoxFuture, FutureExt};
use slog::Logger;
use context::CoreContext;
use mercurial_types::{Entry, HgFileNodeId, HgManifestId, HgNodeHash, MPath, MPathElement,
Manifest, RepoPath, Type};
use mercurial_types::manifest::Content;
@ -95,6 +96,7 @@ impl MemoryManifestEntry {
/// True iff this entry is a tree with no children
pub fn is_empty(
&self,
ctx: CoreContext,
blobstore: &RepoBlobstore,
) -> impl Future<Item = bool, Error = Error> + Send {
match self {
@ -110,14 +112,14 @@ impl MemoryManifestEntry {
if changes_are_empty {
Either::B(future::ok(base_manifest_id.is_none()))
} else {
let is_empty_rec = self.get_new_children(blobstore)
let is_empty_rec = self.get_new_children(ctx.clone(), blobstore)
.and_then({
let blobstore = blobstore.clone();
cloned!(ctx, blobstore);
move |children| {
future::join_all(
children
.into_iter()
.map(move |(_, child)| child.is_empty(&blobstore)),
.map(move |(_, child)| child.is_empty(ctx.clone(), &blobstore)),
)
}
})
@ -171,6 +173,7 @@ impl MemoryManifestEntry {
/// Save all manifests represented here to the blobstore
pub fn save(
&self,
ctx: CoreContext,
blobstore: &RepoBlobstore,
logger: &Logger,
incomplete_filenodes: &IncompleteFilenodes,
@ -192,21 +195,25 @@ impl MemoryManifestEntry {
let p1 = *p1;
let p2 = *p2;
if self.is_modified() {
self.get_new_children(blobstore)
self.get_new_children(ctx.clone(), blobstore)
.and_then({
cloned!(logger, blobstore, incomplete_filenodes);
move |new_children| {
// First save only the non-empty children
let entries = stream::iter_ok(new_children.into_iter())
.and_then({
cloned!(blobstore);
cloned!(ctx, blobstore);
move |(path_elem, entry)| {
(entry.is_empty(&blobstore), Ok(path_elem), Ok(entry))
(
entry.is_empty(ctx.clone(), &blobstore),
Ok(path_elem),
Ok(entry),
)
}
})
.filter(|(empty, ..)| !empty)
.and_then({
cloned!(logger, blobstore, path, incomplete_filenodes);
cloned!(ctx, logger, blobstore, path, incomplete_filenodes);
move |(_, path_elem, entry)| {
let path_elem = path_elem.clone();
// This is safe, because we only save trees
@ -214,6 +221,7 @@ impl MemoryManifestEntry {
extend_repopath_with_dir(&path, &path_elem);
entry
.save(
ctx.clone(),
&blobstore,
&logger,
&incomplete_filenodes,
@ -247,7 +255,7 @@ impl MemoryManifestEntry {
path,
};
upload_manifest
.upload_to_blobstore(&blobstore, &logger)
.upload_to_blobstore(ctx, &blobstore, &logger)
.map(|(_hash, future)| future)
.into_future()
.flatten()
@ -319,6 +327,7 @@ impl MemoryManifestEntry {
// The list of this node's children, or empty if it's not a tree with children.
fn get_new_children(
&self,
ctx: CoreContext,
blobstore: &RepoBlobstore,
) -> impl Future<Item = BTreeMap<MPathElement, Self>, Error = Error> + Send {
match self {
@ -328,13 +337,13 @@ impl MemoryManifestEntry {
..
} => match base_manifest_id {
Some(manifest_id) => Either::B(
BlobManifest::load(blobstore, &HgManifestId::new(*manifest_id))
BlobManifest::load(ctx.clone(), blobstore, &HgManifestId::new(*manifest_id))
.and_then({
let manifest_id = HgManifestId::new(*manifest_id);
move |m| m.ok_or(ErrorKind::ManifestMissing(manifest_id).into())
})
.and_then({
let blobstore = blobstore.clone();
cloned!(blobstore);
move |m| {
let mut children = BTreeMap::new();
for entry in m.list() {
@ -383,6 +392,7 @@ impl MemoryManifestEntry {
}
fn merge_trees(
ctx: CoreContext,
mut children: BTreeMap<MPathElement, MemoryManifestEntry>,
other_children: BTreeMap<MPathElement, MemoryManifestEntry>,
blobstore: RepoBlobstore,
@ -408,6 +418,7 @@ impl MemoryManifestEntry {
conflicts.push(
conflict_entry
.merge_with_conflicts(
ctx.clone(),
other_entry,
blobstore.clone(),
logger.clone(),
@ -442,6 +453,7 @@ impl MemoryManifestEntry {
/// structure in strict order, so that first entry is p1, second is p2 etc.
pub fn merge_with_conflicts(
self,
ctx: CoreContext,
other: Self,
blobstore: RepoBlobstore,
logger: Logger,
@ -451,6 +463,7 @@ impl MemoryManifestEntry {
use self::MemoryManifestEntry::*;
if self.is_modified() {
return self.save(
ctx.clone(),
&blobstore,
&logger,
&incomplete_filenodes,
@ -458,6 +471,7 @@ impl MemoryManifestEntry {
).map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash()))
.and_then(move |saved| {
saved.merge_with_conflicts(
ctx,
other,
blobstore,
logger,
@ -470,6 +484,7 @@ impl MemoryManifestEntry {
if other.is_modified() {
return other
.save(
ctx.clone(),
&blobstore,
&logger,
&incomplete_filenodes,
@ -478,6 +493,7 @@ impl MemoryManifestEntry {
.map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash()))
.and_then(move |saved| {
self.merge_with_conflicts(
ctx,
saved,
blobstore,
logger,
@ -545,13 +561,14 @@ impl MemoryManifestEntry {
future::ok(self.clone()).boxify()
} else {
// Otherwise, merge on an entry-by-entry basis
self.get_new_children(&blobstore)
.join(other.get_new_children(&blobstore))
self.get_new_children(ctx.clone(), &blobstore)
.join(other.get_new_children(ctx.clone(), &blobstore))
.and_then({
let p1 = p1.clone();
let p2 = p2.clone();
move |(children, other_children)| {
Self::merge_trees(
ctx,
children,
other_children,
blobstore,
@ -645,6 +662,7 @@ impl MemoryManifestEntry {
/// way through the path)
pub fn find_mut(
&self,
ctx: CoreContext,
mut path: impl Iterator<Item = MPathElement> + Send + 'static,
blobstore: RepoBlobstore,
) -> BoxFuture<Option<Self>, Error> {
@ -673,12 +691,10 @@ impl MemoryManifestEntry {
// Do the lookup in base_manifest_id
if let Some(manifest_id) = base_manifest_id {
let manifest_id = HgManifestId::new(*manifest_id);
BlobManifest::load(&blobstore, &manifest_id)
.and_then(
move |m| {
m.ok_or(ErrorKind::ManifestMissing(manifest_id).into())
}
)
BlobManifest::load(ctx.clone(), &blobstore, &manifest_id)
.and_then(move |m| {
m.ok_or(ErrorKind::ManifestMissing(manifest_id).into())
})
.map({
let entry_changes = entry_changes.clone();
let element = element.clone();
@ -696,9 +712,16 @@ impl MemoryManifestEntry {
} else {
future::ok(()).boxify()
}
}.and_then(move |_| {
let mut changes = entry_changes.lock().expect("lock poisoned");
Self::find_mut_helper(&mut changes, element).find_mut(path, blobstore)
}.and_then({
cloned!(ctx);
move |_| {
let mut changes = entry_changes.lock().expect("lock poisoned");
Self::find_mut_helper(&mut changes, element).find_mut(
ctx,
path,
blobstore,
)
}
})
.boxify()
}
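find_mut above is the lazy-descent workhorse: it walks the path one element at a time, faulting entries in from the base manifest the first time a directory is touched. Stripped of the blobstore plumbing and interior mutability, its shape reduces to a recursive lookup like this dependency-free sketch:

use std::collections::BTreeMap;

#[derive(Debug)]
enum Node {
    File(&'static str),
    Tree(BTreeMap<String, Node>),
}

fn find_mut<'a>(node: &'a mut Node, path: &[&str]) -> Option<&'a mut Node> {
    match path.split_first() {
        None => Some(node), // path exhausted: this is the entry
        Some((element, rest)) => match node {
            // The real code inserts a fresh in-memory entry when the child
            // is missing (find_mut_helper); this sketch just fails the lookup.
            Node::Tree(children) => find_mut(children.get_mut(*element)?, rest),
            Node::File(_) => None, // cannot descend through a file
        },
    }
}

fn main() {
    let mut root = Node::Tree(BTreeMap::from([(
        "dir".to_string(),
        Node::Tree(BTreeMap::from([("f".to_string(), Node::File("data"))])),
    )]));
    println!("{:?}", find_mut(&mut root, &["dir", "f"]));
    println!("{:?}", find_mut(&mut root, &["dir", "missing"]));
}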
@ -747,36 +770,38 @@ impl MemoryManifestEntry {
/// Resolve conflicts when blobs point to the same data but have different parents
pub fn resolve_trivial_conflicts(
&self,
ctx: CoreContext,
repo: BlobRepo,
incomplete_filenodes: IncompleteFilenodes,
) -> impl Future<Item = (), Error = Error> + Send {
fn merge_content(
ctx: CoreContext,
entries: Vec<HgBlobEntry>,
) -> impl Future<Item = Option<(FileType, FileContents)>, Error = Error> + Send {
if let Some(Type::File(file_type)) = entries.first().map(|e| e.get_type()) {
let fut = future::join_all(entries.into_iter().map(|e| e.get_content())).map(
move |content| {
let mut iter = content.iter();
if let Some(first) = iter.next() {
if iter.all(|other| match (first, other) {
(Content::File(c0), Content::File(c1))
| (Content::Executable(c0), Content::Executable(c1))
| (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1,
_ => false,
}) {
return match first {
Content::Executable(file_content)
| Content::File(file_content)
| Content::Symlink(file_content) => {
Some((file_type, file_content.clone()))
}
_ => unreachable!(),
};
if let Some(Type::File(file_type)) = entries.first().map(move |e| e.get_type()) {
let fut = future::join_all(
entries.into_iter().map(move |e| e.get_content(ctx.clone())),
).map(move |content| {
let mut iter = content.iter();
if let Some(first) = iter.next() {
if iter.all(|other| match (first, other) {
(Content::File(c0), Content::File(c1))
| (Content::Executable(c0), Content::Executable(c1))
| (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1,
_ => false,
}) {
return match first {
Content::Executable(file_content)
| Content::File(file_content)
| Content::Symlink(file_content) => {
Some((file_type, file_content.clone()))
}
_ => unreachable!(),
};
};
None
},
);
};
None
});
Either::A(fut)
} else {
Either::B(future::ok(None))
@ -784,6 +809,7 @@ impl MemoryManifestEntry {
}
fn merge_entries(
ctx: CoreContext,
path: Option<MPath>,
entries: Vec<HgBlobEntry>,
repo: BlobRepo,
@ -793,7 +819,7 @@ impl MemoryManifestEntry {
.iter()
.map(|e| e.get_hash().into_nodehash())
.collect::<Vec<_>>();
merge_content(entries).and_then(move |content| {
merge_content(ctx.clone(), entries).and_then(move |content| {
let mut parents = parents.into_iter();
if let Some((file_type, file_content)) = content {
let path = try_boxfuture!(path.ok_or(ErrorKind::EmptyFilePath).into());
@ -809,7 +835,7 @@ impl MemoryManifestEntry {
p2: p2.clone(),
path: path,
};
let (_, upload_future) = try_boxfuture!(upload_entry.upload(&repo));
let (_, upload_future) = try_boxfuture!(upload_entry.upload(ctx, &repo));
upload_future
.map(move |(entry, path)| {
incomplete_filenodes.add(IncompleteFilenodeInfo {
@ -829,6 +855,7 @@ impl MemoryManifestEntry {
}
fn resolve_rec(
ctx: CoreContext,
path: Option<MPath>,
node: MemoryManifestEntry,
repo: BlobRepo,
@ -843,11 +870,16 @@ impl MemoryManifestEntry {
.flat_map(|(k, v)| v.clone().map(|v| (k, v)))
.map(|(name, child)| {
let path = MPath::join_opt(path.as_ref(), name);
resolve_rec(path, child, repo.clone(), incomplete_filenodes.clone())
.map({
let name = name.clone();
move |v| v.map(|v| (name, v))
})
resolve_rec(
ctx.clone(),
path,
child,
repo.clone(),
incomplete_filenodes.clone(),
).map({
let name = name.clone();
move |v| v.map(|v| (name, v))
})
})
.collect::<Vec<_>>()
};
@ -874,7 +906,7 @@ impl MemoryManifestEntry {
})
.collect::<Option<Vec<_>>>();
if let Some(entries) = entries {
merge_entries(path, entries, repo, incomplete_filenodes).boxify()
merge_entries(ctx, path, entries, repo, incomplete_filenodes).boxify()
} else {
future::ok(None).boxify()
}
@ -882,7 +914,7 @@ impl MemoryManifestEntry {
_ => future::ok(None).boxify(),
}
}
resolve_rec(None, self.clone(), repo, incomplete_filenodes).map(|_| ())
resolve_rec(ctx, None, self.clone(), repo, incomplete_filenodes).map(|_| ())
}
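The trivial-conflict resolution above keys off one question: do all parents agree on both the file kind and the bytes? If so, the content can be reused and only the filenode parents differ. The core check, reduced to a dependency-free sketch:

#[derive(Clone, Debug, PartialEq)]
enum Content {
    File(Vec<u8>),
    Executable(Vec<u8>),
    Symlink(Vec<u8>),
}

fn merge_trivial(entries: &[Content]) -> Option<Content> {
    let mut iter = entries.iter();
    let first = iter.next()?;
    // Kind and bytes must both match: File vs Executable with identical
    // bytes is still a genuine conflict, as in the match arms above.
    if iter.all(|other| other == first) {
        Some(first.clone())
    } else {
        None
    }
}

fn main() {
    let same = [Content::File(b"a".to_vec()), Content::File(b"a".to_vec())];
    let kind = [Content::File(b"a".to_vec()), Content::Executable(b"a".to_vec())];
    assert_eq!(merge_trivial(&same), Some(Content::File(b"a".to_vec())));
    assert_eq!(merge_trivial(&kind), None);
    println!("trivial-conflict sketch ok");
}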
}
@ -907,6 +939,7 @@ impl MemoryRootManifest {
}
fn create_conflict(
ctx: CoreContext,
repo: BlobRepo,
incomplete_filenodes: IncompleteFilenodes,
p1_root: MemoryManifestEntry,
@ -914,6 +947,7 @@ impl MemoryRootManifest {
) -> BoxFuture<Self, Error> {
p1_root
.merge_with_conflicts(
ctx,
p2_root,
repo.get_blobstore(),
repo.get_logger(),
@ -926,6 +960,7 @@ impl MemoryRootManifest {
/// Create an in-memory manifest, backed by the given blobstore, and based on mp1 and mp2
pub fn new(
ctx: CoreContext,
repo: BlobRepo,
incomplete_filenodes: IncompleteFilenodes,
mp1: Option<&HgNodeHash>,
@ -943,6 +978,7 @@ impl MemoryRootManifest {
MemoryManifestEntry::convert_treenode(p),
)).boxify(),
(Some(p1), Some(p2)) => Self::create_conflict(
ctx,
repo,
incomplete_filenodes,
MemoryManifestEntry::convert_treenode(p1),
@ -961,9 +997,10 @@ impl MemoryRootManifest {
/// manifest contains dir1/file1 and dir2/file2 and dir2 contains a conflict for file2, dir1
/// can still be saved to the blobstore.
/// Returns the saved manifest ID
pub fn save(&self) -> BoxFuture<HgBlobEntry, Error> {
pub fn save(&self, ctx: CoreContext) -> BoxFuture<HgBlobEntry, Error> {
self.root_entry
.save(
ctx,
&self.repo.get_blobstore(),
&self.repo.get_logger(),
&self.incomplete_filenodes,
@ -974,6 +1011,7 @@ impl MemoryRootManifest {
fn find_path(
&self,
ctx: CoreContext,
path: &MPath,
) -> (
impl Future<Item = MemoryManifestEntry, Error = Error> + Send,
@ -984,7 +1022,7 @@ impl MemoryRootManifest {
None => Either::A(future::ok(self.root_entry.clone())),
Some(filepath) => Either::B(
self.root_entry
.find_mut(filepath.into_iter(), self.repo.get_blobstore())
.find_mut(ctx, filepath.into_iter(), self.repo.get_blobstore())
.and_then({
let path = path.clone();
|entry| entry.ok_or(ErrorKind::PathNotFound(path).into())
@ -996,17 +1034,28 @@ impl MemoryRootManifest {
}
/// Apply an add or remove based on whether the change is None (remove) or Some(blobentry) (add)
pub fn change_entry(&self, path: &MPath, entry: Option<HgBlobEntry>) -> BoxFuture<(), Error> {
let (target, filename) = self.find_path(path);
pub fn change_entry(
&self,
ctx: CoreContext,
path: &MPath,
entry: Option<HgBlobEntry>,
) -> BoxFuture<(), Error> {
let (target, filename) = self.find_path(ctx, path);
target
.and_then(|target| target.change(filename, entry).into_future())
.boxify()
}
pub fn resolve_trivial_conflicts(&self) -> impl Future<Item = (), Error = Error> + Send {
self.root_entry
.resolve_trivial_conflicts(self.repo.clone(), self.incomplete_filenodes.clone())
pub fn resolve_trivial_conflicts(
&self,
ctx: CoreContext,
) -> impl Future<Item = (), Error = Error> + Send {
self.root_entry.resolve_trivial_conflicts(
ctx,
self.repo.clone(),
self.incomplete_filenodes.clone(),
)
}
pub fn unittest_root(&self) -> &MemoryManifestEntry {


@ -409,13 +409,17 @@ impl BlobRepo {
)
}
fn fetch<K>(&self, key: &K) -> impl Future<Item = K::Value, Error = Error> + Send
fn fetch<K>(
&self,
ctx: CoreContext,
key: &K,
) -> impl Future<Item = K::Value, Error = Error> + Send
where
K: MononokeId,
{
let blobstore_key = key.blobstore_key();
self.blobstore
.get(blobstore_key.clone())
.get(ctx, blobstore_key.clone())
.and_then(move |blob| {
blob.ok_or(ErrorKind::MissingTypedKeyEntry(blobstore_key).into())
.and_then(|blob| <<K as MononokeId>::Value>::from_blob(blob.into()))
@ -423,14 +427,18 @@ impl BlobRepo {
}
// this is supposed to be used only from unittest
pub fn unittest_fetch<K>(&self, key: &K) -> impl Future<Item = K::Value, Error = Error> + Send
pub fn unittest_fetch<K>(
&self,
ctx: CoreContext,
key: &K,
) -> impl Future<Item = K::Value, Error = Error> + Send
where
K: MononokeId,
{
self.fetch(key)
self.fetch(ctx, key)
}
fn store<K, V>(&self, value: V) -> impl Future<Item = K, Error = Error> + Send
fn store<K, V>(&self, ctx: CoreContext, value: V) -> impl Future<Item = K, Error = Error> + Send
where
V: BlobstoreValue<Key = K>,
K: MononokeId<Value = V>,
@ -438,65 +446,83 @@ impl BlobRepo {
let blob = value.into_blob();
let key = *blob.id();
self.blobstore
.put(key.blobstore_key(), blob.into())
.put(ctx, key.blobstore_key(), blob.into())
.map(move |_| key)
}
// this is supposed to be used only from unittest
pub fn unittest_store<K, V>(&self, value: V) -> impl Future<Item = K, Error = Error> + Send
pub fn unittest_store<K, V>(
&self,
ctx: CoreContext,
value: V,
) -> impl Future<Item = K, Error = Error> + Send
where
V: BlobstoreValue<Key = K>,
K: MononokeId<Value = V>,
{
self.store(value)
self.store(ctx, value)
}
pub fn get_file_content(&self, key: &HgNodeHash) -> BoxFuture<FileContents, Error> {
pub fn get_file_content(
&self,
ctx: CoreContext,
key: &HgNodeHash,
) -> BoxFuture<FileContents, Error> {
STATS::get_file_content.add_value(1);
fetch_file_content_from_blobstore(&self.blobstore, *key).boxify()
fetch_file_content_from_blobstore(ctx, &self.blobstore, *key).boxify()
}
pub fn get_file_content_by_content_id(
&self,
ctx: CoreContext,
id: ContentId,
) -> impl Future<Item = FileContents, Error = Error> {
fetch_file_contents(&self.blobstore, id)
fetch_file_contents(ctx, &self.blobstore, id)
}
pub fn get_file_size(&self, key: &HgFileNodeId) -> impl Future<Item = u64, Error = Error> {
fetch_file_size_from_blobstore(&self.blobstore, *key)
pub fn get_file_size(
&self,
ctx: CoreContext,
key: &HgFileNodeId,
) -> impl Future<Item = u64, Error = Error> {
fetch_file_size_from_blobstore(ctx, &self.blobstore, *key)
}
pub fn get_file_content_id(
&self,
ctx: CoreContext,
key: &HgFileNodeId,
) -> impl Future<Item = ContentId, Error = Error> {
fetch_file_content_id_from_blobstore(&self.blobstore, *key)
fetch_file_content_id_from_blobstore(ctx, &self.blobstore, *key)
}
pub fn get_file_sha256(
&self,
ctx: CoreContext,
content_id: ContentId,
) -> impl Future<Item = Sha256, Error = Error> {
let blobrepo = self.clone();
cloned!(content_id, self.blobstore);
// try to get the sha256 from the blobstore first to avoid recalculating it
self.get_alias_content_id_to_sha256(content_id)
self.get_alias_content_id_to_sha256(ctx.clone(), content_id)
.and_then(move |res| match res {
Some(file_content_sha256) => Ok(file_content_sha256).into_future().left_future(),
None => fetch_file_content_sha256_from_blobstore(&blobstore, content_id)
.and_then(move |alias| {
blobrepo
.put_alias_content_id_to_sha256(content_id, alias)
.map(move |()| alias)
})
.right_future(),
None => {
fetch_file_content_sha256_from_blobstore(ctx.clone(), &blobstore, content_id)
.and_then(move |alias| {
blobrepo
.put_alias_content_id_to_sha256(ctx, content_id, alias)
.map(move |()| alias)
})
.right_future()
}
})
}
fn put_alias_content_id_to_sha256(
&self,
ctx: CoreContext,
content_id: ContentId,
alias_content: Sha256,
) -> impl Future<Item = (), Error = Error> {
@ -504,11 +530,13 @@ impl BlobRepo {
// Contents = alias.sha256.SHA256HASH (BlobstoreBytes)
let contents = BlobstoreBytes::from_bytes(Bytes::from(alias_content.as_ref()));
self.upload_blobstore_bytes(alias_key, contents).map(|_| ())
self.upload_blobstore_bytes(ctx, alias_key, contents)
.map(|_| ())
}
fn get_alias_content_id_to_sha256(
&self,
ctx: CoreContext,
content_id: ContentId,
) -> impl Future<Item = Option<Sha256>, Error = Error> {
// Ok: Some(value) - found alias blob, None - alias blob not found (lazy upload)
@ -517,7 +545,7 @@ impl BlobRepo {
let alias_content_id = get_content_id_alias_key(content_id);
self.blobstore
.get(alias_content_id.clone())
.get(ctx, alias_content_id.clone())
.map(|content_key_bytes| {
content_key_bytes.and_then(|bytes| Sha256::from_bytes(bytes.as_bytes()).ok())
})
@ -525,6 +553,7 @@ impl BlobRepo {
pub fn upload_file_content_by_alias(
&self,
ctx: CoreContext,
_alias: Sha256,
raw_file_content: Bytes,
) -> impl Future<Item = (), Error = Error> {
@ -532,24 +561,26 @@ impl BlobRepo {
let alias_key = get_sha256_alias(&raw_file_content);
// Raw contents = file content only, excluding metadata in the beginning
let contents = FileContents::Bytes(raw_file_content);
self.upload_blob(contents.into_blob(), alias_key)
self.upload_blob(ctx, contents.into_blob(), alias_key)
.map(|_| ())
.boxify()
}
pub fn get_file_content_by_alias(
&self,
ctx: CoreContext,
alias: Sha256,
) -> impl Future<Item = FileContents, Error = Error> {
let blobstore = self.blobstore.clone();
self.get_file_content_id_by_alias(alias)
.and_then(move |content_id| fetch_file_contents(&blobstore, content_id))
self.get_file_content_id_by_alias(ctx.clone(), alias)
.and_then(move |content_id| fetch_file_contents(ctx, &blobstore, content_id))
.from_err()
}
pub fn get_file_content_id_by_alias(
&self,
ctx: CoreContext,
alias: Sha256,
) -> impl Future<Item = ContentId, Error = Error> {
STATS::get_file_content.add_value(1);
@ -557,7 +588,7 @@ impl BlobRepo {
let blobstore = self.blobstore.clone();
blobstore
.get(prefixed_key.clone())
.get(ctx, prefixed_key.clone())
.and_then(move |bytes| {
let content_key_bytes = match bytes {
Some(bytes) => bytes,
@ -595,10 +626,11 @@ impl BlobRepo {
pub fn generate_lfs_file(
&self,
ctx: CoreContext,
content_id: ContentId,
file_size: u64,
) -> impl Future<Item = FileContents, Error = Error> {
self.get_file_sha256(content_id)
self.get_file_sha256(ctx, content_id)
.and_then(move |alias| File::generate_lfs_file(alias, file_size))
.map(|bytes| FileContents::Bytes(bytes))
}
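generate_lfs_file above swaps the file body for a small LFS pointer built from the sha256 alias and the size. For reference, a standard Git LFS pointer looks like the output of this sketch (assuming the usual spec format; Mononoke's exact output may differ):

fn generate_lfs_pointer(sha256_hex: &str, size: u64) -> String {
    format!(
        "version https://git-lfs.github.com/spec/v1\noid sha256:{}\nsize {}\n",
        sha256_hex, size
    )
}

fn main() {
    // Illustrative hash only.
    print!("{}", generate_lfs_pointer("6c17...d2a8", 1048576));
}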
@ -608,19 +640,24 @@ impl BlobRepo {
/// The raw filenode content is crucial for operations like delta application. It is stored in
/// the untouched representation that came from the Mercurial client.
pub fn get_raw_hg_content(&self, key: &HgNodeHash) -> BoxFuture<HgBlob, Error> {
pub fn get_raw_hg_content(
&self,
ctx: CoreContext,
key: &HgNodeHash,
) -> BoxFuture<HgBlob, Error> {
STATS::get_raw_hg_content.add_value(1);
fetch_raw_filenode_bytes(&self.blobstore, *key)
fetch_raw_filenode_bytes(ctx, &self.blobstore, *key)
}
// Fetches copy data from blobstore instead of from filenodes db. This should be used only
// during committing.
pub(crate) fn get_hg_file_copy_from_blobstore(
&self,
ctx: CoreContext,
key: &HgNodeHash,
) -> BoxFuture<Option<(RepoPath, HgNodeHash)>, Error> {
STATS::get_hg_file_copy_from_blobstore.add_value(1);
fetch_rename_from_blobstore(&self.blobstore, *key)
fetch_rename_from_blobstore(ctx, &self.blobstore, *key)
.map(|rename| rename.map(|(path, hash)| (RepoPath::FilePath(path), hash)))
.boxify()
}
@ -628,6 +665,7 @@ impl BlobRepo {
pub fn get_changesets(&self, ctx: CoreContext) -> BoxStream<HgNodeHash, Error> {
STATS::get_changesets.add_value(1);
HgBlobChangesetStream {
ctx: ctx.clone(),
repo: self.clone(),
state: BCState::Idle,
heads: self.get_heads_maybe_stale(ctx).boxify(),
@ -778,21 +816,23 @@ impl BlobRepo {
pub fn get_changeset_by_changesetid(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<HgBlobChangeset, Error> {
STATS::get_changeset_by_changesetid.add_value(1);
let chid = changesetid.clone();
HgBlobChangeset::load(&self.blobstore, &chid)
HgBlobChangeset::load(ctx, &self.blobstore, &chid)
.and_then(move |cs| cs.ok_or(ErrorKind::ChangesetMissing(chid).into()))
.boxify()
}
pub fn get_manifest_by_nodeid(
&self,
ctx: CoreContext,
manifestid: &HgManifestId,
) -> BoxFuture<Box<Manifest + Sync>, Error> {
STATS::get_manifest_by_nodeid.add_value(1);
BlobManifest::load(&self.blobstore, &manifestid)
BlobManifest::load(ctx, &self.blobstore, &manifestid)
.and_then({
let manifestid = *manifestid;
move |mf| mf.ok_or(ErrorKind::ManifestMissing(manifestid).into())
@ -908,10 +948,11 @@ impl BlobRepo {
pub fn get_bonsai_changeset(
&self,
ctx: CoreContext,
bonsai_cs_id: ChangesetId,
) -> BoxFuture<BonsaiChangeset, Error> {
STATS::get_bonsai_changeset.add_value(1);
self.fetch(&bonsai_cs_id).boxify()
self.fetch(ctx, &bonsai_cs_id).boxify()
}
// TODO(stash): make it accept ChangesetId
@ -954,6 +995,7 @@ impl BlobRepo {
fn upload_blobstore_bytes(
&self,
ctx: CoreContext,
key: String,
contents: BlobstoreBytes,
) -> impl Future<Item = (), Error = Error> + Send {
@ -972,7 +1014,7 @@ impl BlobRepo {
);
}
self.blobstore.put(key.clone(), contents).timed({
self.blobstore.put(ctx, key.clone(), contents).timed({
let logger = self.logger.clone();
move |stats, result| {
if result.is_ok() {
@ -985,6 +1027,7 @@ impl BlobRepo {
pub fn upload_blob<Id>(
&self,
ctx: CoreContext,
blob: Blob<Id>,
alias_key: String,
) -> impl Future<Item = Id, Error = Error> + Send
@ -999,12 +1042,12 @@ impl BlobRepo {
// Upload {alias.sha256.sha256(blob_contents): blobstore_key}
let alias_key_operation = {
let contents = BlobstoreBytes::from_bytes(blobstore_key.as_bytes());
self.upload_blobstore_bytes(alias_key, contents)
self.upload_blobstore_bytes(ctx.clone(), alias_key, contents)
};
// Upload {blobstore_key: blob_contents}
let blobstore_key_operation =
self.upload_blobstore_bytes(blobstore_key, blob_contents.clone());
self.upload_blobstore_bytes(ctx, blobstore_key, blob_contents.clone());
blobstore_key_operation
.join(alias_key_operation)
@ -1013,10 +1056,12 @@ impl BlobRepo {
pub fn upload_alias_to_file_content_id(
&self,
ctx: CoreContext,
alias: Sha256,
content_id: ContentId,
) -> impl Future<Item = (), Error = Error> + Send {
self.upload_blobstore_bytes(
ctx,
get_sha256_alias_key(alias.to_hex().to_string()),
BlobstoreBytes::from_bytes(content_id.blobstore_key().as_bytes()),
)
@ -1054,8 +1099,8 @@ impl BlobRepo {
(Some(parent), None, Some(change)) | (None, Some(parent), Some(change)) => {
let store = self.get_blobstore();
let parent = parent.into_nodehash();
cloned!(change, path);
fetch_file_envelope(&store, parent)
cloned!(ctx, change, path);
fetch_file_envelope(ctx.clone(), &store, parent)
.map(move |parent_envelope| {
if parent_envelope.content_id() == change.content_id()
&& change.copy_from().is_none()
@ -1104,14 +1149,14 @@ impl BlobRepo {
Some(change) => {
let copy_from_fut = match change.copy_from() {
None => future::ok(None).left_future(),
Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx, *bcs_id)
Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx.clone(), *bcs_id)
.and_then({
cloned!(repo);
move |cs_id| repo.get_changeset_by_changesetid(&cs_id)
cloned!(ctx, repo);
move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id)
})
.and_then({
cloned!(repo, path);
move |cs| repo.find_file_in_manifest(&path, *cs.manifestid())
cloned!(ctx, repo, path);
move |cs| repo.find_file_in_manifest(ctx, &path, *cs.manifestid())
})
.and_then({
cloned!(path);
@ -1123,7 +1168,7 @@ impl BlobRepo {
.right_future(),
};
let upload_fut = copy_from_fut.and_then({
cloned!(repo, path, change);
cloned!(ctx, repo, path, change);
move |copy_from| {
let upload_entry = UploadHgFileEntry {
upload_node_id: UploadHgNodeHash::Generate,
@ -1136,7 +1181,7 @@ impl BlobRepo {
p2: p2.clone().map(|h| h.into_nodehash()),
path: path.clone(),
};
let upload_fut = match upload_entry.upload(&repo) {
let upload_fut = match upload_entry.upload(ctx, &repo) {
Ok((_, upload_fut)) => upload_fut.map(move |(entry, _)| {
let node_info = IncompleteFilenodeInfo {
path: RepoPath::FilePath(path),
@ -1165,13 +1210,14 @@ impl BlobRepo {
/// child manifest contains this entry, because it might have been removed.
pub fn check_case_conflict_in_manifest(
&self,
ctx: CoreContext,
parent_mf_id: &HgManifestId,
child_mf_id: &HgManifestId,
path: MPath,
) -> impl Future<Item = bool, Error = Error> {
let repo = self.clone();
let child_mf_id = child_mf_id.clone();
self.get_manifest_by_nodeid(&parent_mf_id)
self.get_manifest_by_nodeid(ctx.clone(), &parent_mf_id)
.and_then(move |mf| {
loop_fn(
(None, mf, path.into_iter()),
@ -1189,7 +1235,7 @@ impl BlobRepo {
match entry.get_type() {
Type::File(_) => future::ok(Loop::Break(false)).boxify(),
Type::Tree => entry
.get_content()
.get_content(ctx.clone())
.map(move |content| match content {
Content::Tree(mf) => {
Loop::Continue((Some(cur_path), mf, elements))
@ -1227,9 +1273,11 @@ impl BlobRepo {
// this has been deleted and it's no longer a conflict.
let mut check_futs = vec![];
for fullpath in potential_conflicts {
let check_fut =
repo.find_path_in_manifest(fullpath, child_mf_id.clone())
.map(|content| content.is_some());
let check_fut = repo.find_path_in_manifest(
ctx.clone(),
fullpath,
child_mf_id.clone(),
).map(|content| content.is_some());
check_futs.push(check_fut);
}
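check_case_conflict_in_manifest walks the parent manifest along the path, collecting entries whose names collide with the candidate after case folding, then confirms each survivor still exists in the child manifest. The name comparison at its core, as a sketch using plain Unicode lowercasing (the real traversal works entry-by-entry over manifests and mercurial's own folding rules):

fn case_conflicts<'a>(existing: &[&'a str], candidate: &str) -> Vec<&'a str> {
    existing
        .iter()
        .filter(|name| {
            // Same name after case folding, but not the same literal name.
            name.to_lowercase() == candidate.to_lowercase() && **name != candidate
        })
        .cloned()
        .collect()
}

fn main() {
    let existing = ["README.md", "src", "Readme.md"];
    assert_eq!(case_conflicts(&existing, "readme.md"), ["README.md", "Readme.md"]);
    assert!(case_conflicts(&existing, "src").is_empty());
    println!("case-conflict sketch ok");
}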
@ -1249,11 +1297,13 @@ impl BlobRepo {
pub fn find_path_in_manifest(
&self,
ctx: CoreContext,
path: Option<MPath>,
manifest: HgManifestId,
) -> impl Future<Item = Option<Content>, Error = Error> + Send {
// single fold step: converts a path element in content to content, if any
fn find_content_in_content(
ctx: CoreContext,
content: BoxFuture<Option<Content>, Error>,
path_element: MPathElement,
) -> BoxFuture<Option<Content>, Error> {
@ -1262,30 +1312,36 @@ impl BlobRepo {
None => future::ok(None).left_future(),
Some(Content::Tree(manifest)) => match manifest.lookup(&path_element) {
None => future::ok(None).left_future(),
Some(entry) => entry.get_content().map(Some).right_future(),
Some(entry) => entry.get_content(ctx).map(Some).right_future(),
},
Some(_) => future::ok(None).left_future(),
})
.boxify()
}
self.get_manifest_by_nodeid(&manifest)
self.get_manifest_by_nodeid(ctx.clone(), &manifest)
.and_then(move |manifest| {
let content_init = future::ok(Some(Content::Tree(manifest))).boxify();
match path {
None => content_init,
Some(path) => path.into_iter().fold(content_init, find_content_in_content),
Some(path) => {
path.into_iter()
.fold(content_init, move |content, path_element| {
find_content_in_content(ctx.clone(), content, path_element)
})
}
}
})
}
pub fn find_file_in_manifest(
&self,
ctx: CoreContext,
path: &MPath,
manifest: HgManifestId,
) -> impl Future<Item = Option<(FileType, HgFileNodeId)>, Error = Error> + Send {
let (dirname, basename) = path.split_dirname();
self.find_path_in_manifest(dirname, manifest).map({
self.find_path_in_manifest(ctx, dirname, manifest).map({
let basename = basename.clone();
move |content| match content {
None => None,
@ -1312,6 +1368,7 @@ impl BlobRepo {
let p1 = manifest_p1.map(|id| id.into_nodehash());
let p2 = manifest_p2.map(|id| id.into_nodehash());
MemoryRootManifest::new(
ctx.clone(),
self.clone(),
IncompleteFilenodes::new(),
p1.as_ref(),
@ -1329,13 +1386,13 @@ impl BlobRepo {
cloned!(path, memory_manifest, incomplete_filenodes);
let p1 = manifest_p1
.map(|manifest| {
repo.find_file_in_manifest(&path, manifest)
repo.find_file_in_manifest(ctx.clone(), &path, manifest)
.map(|o| o.map(|(_, x)| x))
})
.into_future();
let p2 = manifest_p2
.map(|manifest| {
repo.find_file_in_manifest(&path, manifest)
repo.find_file_in_manifest(ctx.clone(), &path, manifest)
.map(|o| o.map(|(_, x)| x))
})
.into_future();
@ -1354,13 +1411,16 @@ impl BlobRepo {
)
}
})
.and_then(move |entry| match entry {
None => memory_manifest.change_entry(&path, None),
Some((entry, node_infos)) => {
for node_info in node_infos {
incomplete_filenodes.add(node_info);
.and_then({
cloned!(ctx);
move |entry| match entry {
None => memory_manifest.change_entry(ctx, &path, None),
Some((entry, node_infos)) => {
for node_info in node_infos {
incomplete_filenodes.add(node_info);
}
memory_manifest.change_entry(ctx, &path, Some(entry))
}
memory_manifest.change_entry(&path, Some(entry))
}
});
futures.push(future);
@ -1368,10 +1428,10 @@ impl BlobRepo {
future::join_all(futures)
.and_then({
let memory_manifest = memory_manifest.clone();
move |_| memory_manifest.resolve_trivial_conflicts()
cloned!(ctx, memory_manifest);
move |_| memory_manifest.resolve_trivial_conflicts(ctx)
})
.and_then(move |_| memory_manifest.save())
.and_then(move |_| memory_manifest.save(ctx))
.map({
cloned!(incomplete_filenodes);
move |m| {
@ -1397,7 +1457,7 @@ impl BlobRepo {
repo: &BlobRepo,
bcs_id: ChangesetId,
) -> BoxFuture<HgChangesetId, Error> {
repo.fetch(&bcs_id)
repo.fetch(ctx.clone(), &bcs_id)
.and_then({
cloned!(ctx, repo);
move |bcs| {
@ -1405,8 +1465,10 @@ impl BlobRepo {
.map(|p_bcs_id| {
repo.get_hg_from_bonsai_changeset(ctx.clone(), *p_bcs_id)
.and_then({
cloned!(repo);
move |p_cs_id| repo.get_changeset_by_changesetid(&p_cs_id)
cloned!(ctx, repo);
move |p_cs_id| {
repo.get_changeset_by_changesetid(ctx, &p_cs_id)
}
})
})
.collect::<Vec<_>>();
@ -1469,7 +1531,7 @@ impl BlobRepo {
let cs = try_boxfuture!(HgBlobChangeset::new(content));
let cs_id = cs.get_changeset_id();
cs.save(repo.blobstore.clone())
cs.save(ctx.clone(), repo.blobstore.clone())
.and_then({
cloned!(ctx, repo);
move |_| incomplete_filenodes.upload(ctx, cs_id, &repo)
@ -1539,13 +1601,15 @@ impl UploadHgTreeEntry {
// adding the entries to a changeset.
pub fn upload(
self,
ctx: CoreContext,
repo: &BlobRepo,
) -> Result<(HgNodeHash, BoxFuture<(HgBlobEntry, RepoPath), Error>)> {
self.upload_to_blobstore(&repo.blobstore, &repo.logger)
self.upload_to_blobstore(ctx, &repo.blobstore, &repo.logger)
}
pub(crate) fn upload_to_blobstore(
self,
ctx: CoreContext,
blobstore: &RepoBlobstore,
logger: &Logger,
) -> Result<(HgNodeHash, BoxFuture<(HgBlobEntry, RepoPath), Error>)> {
@ -1616,7 +1680,7 @@ impl UploadHgTreeEntry {
// Upload the blob.
let upload = blobstore
.put(blobstore_key, envelope_blob.into())
.put(ctx, blobstore_key, envelope_blob.into())
.map({
let path = path.clone();
move |()| (blob_entry, path)
@ -1650,6 +1714,7 @@ impl UploadHgFileContents {
/// and metadata.
fn execute(
self,
ctx: CoreContext,
repo: &BlobRepo,
p1: Option<HgNodeHash>,
p2: Option<HgNodeHash>,
@ -1664,7 +1729,7 @@ impl UploadHgFileContents {
match self {
UploadHgFileContents::ContentUploaded(cbmeta) => {
let upload_fut = future::ok(());
let compute_fut = Self::compute(cbmeta.clone(), repo, p1, p2);
let compute_fut = Self::compute(ctx, cbmeta.clone(), repo, p1, p2);
let cbinfo = ContentBlobInfo { path, meta: cbmeta };
(cbinfo, Either::A(upload_fut), Either::A(compute_fut))
}
@ -1693,7 +1758,7 @@ impl UploadHgFileContents {
},
};
let upload_fut = repo.upload_blob(contents_blob, alias_key)
let upload_fut = repo.upload_blob(ctx, contents_blob, alias_key)
.map(|_content_id| ())
.timed({
let logger = repo.logger.clone();
@ -1718,6 +1783,7 @@ impl UploadHgFileContents {
}
fn compute(
ctx: CoreContext,
cbmeta: ContentBlobMeta,
repo: &BlobRepo,
p1: Option<HgNodeHash>,
@ -1725,7 +1791,7 @@ impl UploadHgFileContents {
) -> impl Future<Item = (HgNodeHash, Bytes, u64), Error = Error> {
// Computing the file node hash requires fetching the blob and gluing it together with the
// metadata.
repo.fetch(&cbmeta.id).map(move |file_contents| {
repo.fetch(ctx, &cbmeta.id).map(move |file_contents| {
let size = file_contents.size() as u64;
let mut metadata = Vec::new();
File::generate_metadata(cbmeta.copy_from.as_ref(), &file_contents, &mut metadata)
@ -1764,6 +1830,7 @@ pub struct UploadHgFileEntry {
impl UploadHgFileEntry {
pub fn upload(
self,
ctx: CoreContext,
repo: &BlobRepo,
) -> Result<(ContentBlobInfo, BoxFuture<(HgBlobEntry, RepoPath), Error>)> {
STATS::upload_hg_file_entry.add_value(1);
@ -1776,7 +1843,8 @@ impl UploadHgFileEntry {
path,
} = self;
let (cbinfo, content_upload, compute_fut) = contents.execute(repo, p1, p2, path.clone());
let (cbinfo, content_upload, compute_fut) =
contents.execute(ctx.clone(), repo, p1, p2, path.clone());
let content_id = cbinfo.meta.id;
let blobstore = repo.blobstore.clone();
@ -1821,7 +1889,7 @@ impl UploadHgFileEntry {
);
let envelope_upload = blobstore
.put(blobstore_key, envelope_blob.into())
.put(ctx, blobstore_key, envelope_blob.into())
.timed({
let path = path.clone();
move |stats, result| {
@ -1904,7 +1972,11 @@ pub fn save_bonsai_changesets(
// Order of inserting bonsai changeset objects doesn't matter, so we can join them
let mut bonsai_object_futs = FuturesUnordered::new();
for bcs in bonsai_changesets.values() {
bonsai_object_futs.push(save_bonsai_changeset_object(blobstore.clone(), bcs.clone()));
bonsai_object_futs.push(save_bonsai_changeset_object(
ctx.clone(),
blobstore.clone(),
bcs.clone(),
));
}
let bonsai_objects = bonsai_object_futs.collect();
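
Since write order is irrelevant here, the per-changeset saves are queued on a FuturesUnordered and awaited together. A minimal sketch of the pattern (futures 0.1 style; `do_write` and `items` are placeholder names, not part of this diff):

let mut futs = FuturesUnordered::new();
for item in items {
    // each write is independent, so each future gets its own ctx clone
    futs.push(do_write(ctx.clone(), item));
}
// collect() yields a future that resolves once every queued write completes
let all_done = futs.collect();
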
@ -1995,7 +2067,6 @@ impl CreateChangeset {
scuba_logger.add("changeset_uuid", format!("{}", uuid));
let entry_processor = UploadEntries::new(
ctx.clone(),
repo.blobstore.clone(),
repo.repoid.clone(),
scuba_logger.clone(),
@ -2004,6 +2075,7 @@ impl CreateChangeset {
let expected_nodeid = self.expected_nodeid;
let upload_entries = process_entries(
ctx.clone(),
repo.clone(),
&entry_processor,
self.root_manifest,
@ -2020,7 +2092,7 @@ impl CreateChangeset {
.join(parents_data)
.from_err()
.and_then({
cloned!(repo, repo.filenodes, repo.blobstore, mut scuba_logger);
cloned!(ctx, repo, repo.filenodes, repo.blobstore, mut scuba_logger);
let expected_files = self.expected_files;
let cs_metadata = self.cs_metadata;
@ -2035,21 +2107,32 @@ impl CreateChangeset {
future::ok(expected_files).boxify()
} else {
STATS::create_changeset_compute_cf.add_value(1);
fetch_parent_manifests(repo.clone(), &parent_manifest_hashes)
.and_then(move |(p1_manifest, p2_manifest)| {
fetch_parent_manifests(
ctx.clone(),
repo.clone(),
&parent_manifest_hashes,
).and_then({
cloned!(ctx);
move |(p1_manifest, p2_manifest)| {
compute_changed_files(
ctx.clone(),
&root_manifest,
p1_manifest.as_ref(),
p2_manifest.as_ref(),
)
})
}
})
.boxify()
};
let p1_mf = parent_manifest_hashes.get(0).cloned();
let check_case_conflicts = if must_check_case_conflicts {
check_case_conflicts(repo.clone(), root_hash.clone(), p1_mf)
.left_future()
check_case_conflicts(
ctx.clone(),
repo.clone(),
root_hash.clone(),
p1_mf,
).left_future()
} else {
future::ok(()).right_future()
};
@ -2060,13 +2143,17 @@ impl CreateChangeset {
STATS::create_changeset_cf_count.add_value(files.len() as i64);
make_new_changeset(parents, root_hash, cs_metadata, files)
})
.and_then(move |hg_cs| {
create_bonsai_changeset_object(
hg_cs.clone(),
parent_manifest_hashes,
bonsai_parents,
repo.clone(),
).map(|bonsai_cs| (hg_cs, bonsai_cs))
.and_then({
cloned!(ctx);
move |hg_cs| {
create_bonsai_changeset_object(
ctx,
hg_cs.clone(),
parent_manifest_hashes,
bonsai_parents,
repo.clone(),
).map(|bonsai_cs| (hg_cs, bonsai_cs))
}
});
changesets
@ -2112,17 +2199,18 @@ impl CreateChangeset {
signal_parent_ready.send((bcs_id, cs_id, manifest_id));
let bonsai_cs_fut = save_bonsai_changeset_object(
ctx.clone(),
blobstore.clone(),
bonsai_cs.clone(),
);
blobcs
.save(blobstore)
.save(ctx.clone(), blobstore)
.join(bonsai_cs_fut)
.context("While writing to blobstore")
.join(
entry_processor
.finalize(filenodes, cs_id)
.finalize(ctx, filenodes, cs_id)
.context("While finalizing processing"),
)
.from_err()
@ -2252,6 +2340,7 @@ impl Clone for BlobRepo {
}
pub struct HgBlobChangesetStream {
ctx: CoreContext,
repo: BlobRepo,
seen: HashSet<HgNodeHash>,
heads: BoxStream<HgNodeHash, Error>,
@ -2278,8 +2367,10 @@ impl Stream for HgBlobChangesetStream {
// haven't seen before
WaitCS(
next,
self.repo
.get_changeset_by_changesetid(&HgChangesetId::new(next)),
self.repo.get_changeset_by_changesetid(
self.ctx.clone(),
&HgChangesetId::new(next),
),
)
} else {
Idle // already done it

@ -79,16 +79,16 @@ impl ChangesetHandle {
}
pub fn ready_cs_handle(ctx: CoreContext, repo: Arc<BlobRepo>, hg_cs: HgChangesetId) -> Self {
let bonsai_cs = repo.get_bonsai_from_hg(ctx, &hg_cs)
let bonsai_cs = repo.get_bonsai_from_hg(ctx.clone(), &hg_cs)
.and_then(move |bonsai_id| {
bonsai_id.ok_or(ErrorKind::BonsaiMappingNotFound(hg_cs).into())
})
.and_then({
cloned!(repo);
move |bonsai_id| repo.get_bonsai_changeset(bonsai_id)
cloned!(ctx, repo);
move |bonsai_id| repo.get_bonsai_changeset(ctx, bonsai_id)
});
let cs = repo.get_changeset_by_changesetid(&hg_cs);
let cs = repo.get_changeset_by_changesetid(ctx, &hg_cs);
let (trigger, can_be_parent) = oneshot::channel();
let fut = bonsai_cs.join(cs);
@ -132,20 +132,17 @@ struct UploadEntriesState {
#[derive(Clone)]
pub struct UploadEntries {
ctx: CoreContext,
scuba_logger: ScubaSampleBuilder,
inner: Arc<Mutex<UploadEntriesState>>,
}
impl UploadEntries {
pub fn new(
ctx: CoreContext,
blobstore: RepoBlobstore,
repoid: RepositoryId,
scuba_logger: ScubaSampleBuilder,
) -> Self {
Self {
ctx,
scuba_logger,
inner: Arc::new(Mutex::new(UploadEntriesState {
required_entries: HashMap::new(),
@ -163,14 +160,19 @@ impl UploadEntries {
/// Parse a manifest and record the referenced blobs so that we know whether or not we have
/// a complete changeset with all blobs, or whether there is missing data.
fn process_manifest(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> {
fn process_manifest(
&self,
ctx: CoreContext,
entry: &HgBlobEntry,
path: RepoPath,
) -> BoxFuture<(), Error> {
let inner_mutex = self.inner.clone();
let parents_found = self.find_parents(entry, path.clone());
let parents_found = self.find_parents(ctx.clone(), entry, path.clone());
let entry_hash = entry.get_hash().into_nodehash();
let entry_type = entry.get_type();
entry
.get_content()
.get_content(ctx)
.and_then(move |content| match content {
Content::Tree(manifest) => {
for entry in manifest.list() {
@ -199,10 +201,15 @@ impl UploadEntries {
.boxify()
}
fn find_parents(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> {
fn find_parents(
&self,
ctx: CoreContext,
entry: &HgBlobEntry,
path: RepoPath,
) -> BoxFuture<(), Error> {
let inner_mutex = self.inner.clone();
entry
.get_parents()
.get_parents(ctx)
.and_then(move |parents| {
let mut inner = inner_mutex.lock().expect("Lock poisoned");
let node_keys = parents.into_iter().map(move |hash| HgNodeKey {
@ -222,7 +229,11 @@ impl UploadEntries {
/// `process_one_entry` and can be called after it.
/// It is safe to call this multiple times, but not recommended - every manifest passed to
/// this function is assumed required for this commit, even if it is not the root.
pub fn process_root_manifest(&self, entry: &HgBlobEntry) -> BoxFuture<(), Error> {
pub fn process_root_manifest(
&self,
ctx: CoreContext,
entry: &HgBlobEntry,
) -> BoxFuture<(), Error> {
if entry.get_type() != manifest::Type::Tree {
return future::err(
ErrorKind::NotAManifest(entry.get_hash().into_nodehash(), entry.get_type()).into(),
@ -234,10 +245,15 @@ impl UploadEntries {
.required_entries
.insert(RepoPath::root(), *entry.get_hash());
}
self.process_one_entry(entry, RepoPath::root())
self.process_one_entry(ctx, entry, RepoPath::root())
}
pub fn process_one_entry(&self, entry: &HgBlobEntry, path: RepoPath) -> BoxFuture<(), Error> {
pub fn process_one_entry(
&self,
ctx: CoreContext,
entry: &HgBlobEntry,
path: RepoPath,
) -> BoxFuture<(), Error> {
{
let mut inner = self.inner.lock().expect("Lock poisoned");
inner.uploaded_entries.insert(path.clone(), entry.clone());
@ -251,7 +267,7 @@ impl UploadEntries {
entry.get_hash(),
path
),
self.process_manifest(entry, path),
self.process_manifest(ctx, entry, path),
)
} else {
STATS::process_file_entry.add_value(1);
@ -261,7 +277,7 @@ impl UploadEntries {
entry.get_hash(),
path
),
self.find_parents(&entry, path),
self.find_parents(ctx, &entry, path),
)
};
@ -270,6 +286,7 @@ impl UploadEntries {
// Check the blobstore to see whether a particular node is present.
fn assert_in_blobstore(
ctx: CoreContext,
blobstore: RepoBlobstore,
node_id: HgNodeHash,
is_tree: bool,
@ -279,10 +296,15 @@ impl UploadEntries {
} else {
HgFileNodeId::new(node_id).blobstore_key()
};
blobstore.assert_present(key)
blobstore.assert_present(ctx, key)
}
pub fn finalize(self, filenodes: Arc<Filenodes>, cs_id: HgNodeHash) -> BoxFuture<(), Error> {
pub fn finalize(
self,
ctx: CoreContext,
filenodes: Arc<Filenodes>,
cs_id: HgNodeHash,
) -> BoxFuture<(), Error> {
let required_checks = {
let inner = self.inner.lock().expect("Lock poisoned");
let required_len = inner.required_entries.len();
@ -296,6 +318,7 @@ impl UploadEntries {
} else {
let path = path.clone();
let assert = Self::assert_in_blobstore(
ctx.clone(),
inner.blobstore.clone(),
entryid.into_nodehash(),
path.is_tree(),
@ -333,6 +356,7 @@ impl UploadEntries {
.iter()
.map(|node_key| {
let assert = Self::assert_in_blobstore(
ctx.clone(),
inner.blobstore.clone(),
node_key.hash,
node_key.path.is_tree(),
@ -385,23 +409,30 @@ impl UploadEntries {
let filenodeinfos =
stream::futures_unordered(uploaded_entries.into_iter().map(|(path, blobentry)| {
blobentry.get_parents().and_then(move |parents| {
compute_copy_from_info(&path, &blobentry, &parents).map(move |copyfrom| {
let (p1, p2) = parents.get_nodes();
FilenodeInfo {
path,
filenode: HgFileNodeId::new(blobentry.get_hash().into_nodehash()),
p1: p1.cloned().map(HgFileNodeId::new),
p2: p2.cloned().map(HgFileNodeId::new),
copyfrom,
linknode: HgChangesetId::new(cs_id),
}
})
blobentry.get_parents(ctx.clone()).and_then({
cloned!(ctx);
move |parents| {
compute_copy_from_info(ctx, &path, &blobentry, &parents).map(
move |copyfrom| {
let (p1, p2) = parents.get_nodes();
FilenodeInfo {
path,
filenode: HgFileNodeId::new(
blobentry.get_hash().into_nodehash(),
),
p1: p1.cloned().map(HgFileNodeId::new),
p2: p2.cloned().map(HgFileNodeId::new),
copyfrom,
linknode: HgChangesetId::new(cs_id),
}
},
)
}
})
})).boxify();
filenodes
.add_filenodes(self.ctx.clone(), filenodeinfos, &inner.repoid)
.add_filenodes(ctx, filenodeinfos, &inner.repoid)
.timed({
let mut scuba_logger = self.scuba_logger();
move |stats, result| {
@ -423,6 +454,7 @@ impl UploadEntries {
}
fn compute_copy_from_info(
ctx: CoreContext,
path: &RepoPath,
blobentry: &HgBlobEntry,
parents: &HgParents,
@ -432,7 +464,7 @@ fn compute_copy_from_info(
&RepoPath::FilePath(_) => {
STATS::finalize_compute_copy_from_info.add_value(1);
blobentry
.get_raw_content()
.get_raw_content(ctx)
.and_then({
let parents = parents.clone();
move |blob| {
@ -459,10 +491,11 @@ fn compute_copy_from_info(
}
fn compute_changed_files_pair(
ctx: CoreContext,
to: &Box<Manifest + Sync>,
from: &Box<Manifest + Sync>,
) -> BoxFuture<HashSet<MPath>, Error> {
changed_entry_stream(to, from, None)
changed_entry_stream(ctx, to, from, None)
.filter_map(|change| match change.status {
EntryStatus::Deleted(entry)
| EntryStatus::Added(entry)
@ -495,18 +528,19 @@ fn compute_changed_files_pair(
/// Changesets might as well make this function obsolete, and since I am not familiar with creating
/// mock Manifests I will postpone writing tests for this
pub fn compute_changed_files(
ctx: CoreContext,
root: &Box<Manifest + Sync>,
p1: Option<&Box<Manifest + Sync>>,
p2: Option<&Box<Manifest + Sync>>,
) -> BoxFuture<Vec<MPath>, Error> {
let empty = manifest::EmptyManifest {}.boxed();
match (p1, p2) {
(None, None) => compute_changed_files_pair(&root, &empty),
(None, None) => compute_changed_files_pair(ctx, &root, &empty),
(Some(manifest), None) | (None, Some(manifest)) => {
compute_changed_files_pair(&root, &manifest)
compute_changed_files_pair(ctx, &root, &manifest)
}
(Some(p1), Some(p2)) => compute_changed_files_pair(&root, &p1)
.join(compute_changed_files_pair(&root, &p2))
(Some(p1), Some(p2)) => compute_changed_files_pair(ctx.clone(), &root, &p1)
.join(compute_changed_files_pair(ctx.clone(), &root, &p2))
.map(|(left, right)| {
left.intersection(&right)
.cloned()
@ -523,14 +557,15 @@ pub fn compute_changed_files(
}
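
With the context threaded through, the manifest-diff entry point above is called as in this sketch (test-style, using `run_future` as the unit tests later in this diff do; `root_mf` and `parent_mf` are assumed to be already-loaded manifests):

let ctx = CoreContext::test_mock();
// files changed between a commit's root manifest and its single parent
let changed = run_future(compute_changed_files(
    ctx.clone(),
    &root_mf,
    Some(&parent_mf),
    None, // no second parent
)).unwrap();
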
fn compute_added_files(
ctx: CoreContext,
child: &Box<Manifest + Sync>,
parent: Option<&Box<Manifest + Sync>>,
) -> impl Future<Item = Vec<MPath>, Error = Error> {
let s = match parent {
Some(parent) => changed_entry_stream(child, parent, None).boxify(),
Some(parent) => changed_entry_stream(ctx, child, parent, None).boxify(),
None => {
let empty = manifest::EmptyManifest {};
changed_entry_stream(child, &empty, None).boxify()
changed_entry_stream(ctx, child, &empty, None).boxify()
}
};
@ -551,20 +586,24 @@ fn compute_added_files(
/// 1) Checks that there are no case conflicts between added files
/// 2) Checks that added files do not create new case conflicts with already existing files
pub fn check_case_conflicts(
ctx: CoreContext,
repo: BlobRepo,
child_root_mf: HgManifestId,
parent_root_mf: Option<HgManifestId>,
) -> impl Future<Item = (), Error = Error> {
let child_mf_fut = repo.get_manifest_by_nodeid(&child_root_mf.clone());
let child_mf_fut = repo.get_manifest_by_nodeid(ctx.clone(), &child_root_mf.clone());
let parent_mf_fut = parent_root_mf.map({
cloned!(repo);
move |m| repo.get_manifest_by_nodeid(&m)
cloned!(ctx, repo);
move |m| repo.get_manifest_by_nodeid(ctx.clone(), &m)
});
child_mf_fut
.join(parent_mf_fut)
.and_then(move |(child_mf, parent_mf)| compute_added_files(&child_mf, parent_mf.as_ref()))
.and_then({
cloned!(ctx);
move |(child_mf, parent_mf)| compute_added_files(ctx, &child_mf, parent_mf.as_ref())
})
.and_then(
|added_files| match mononoke_types::check_case_conflicts(added_files.clone()) {
Some(path) => Err(ErrorKind::CaseConflict(path).into()),
@ -576,6 +615,7 @@ pub fn check_case_conflicts(
let mut case_conflict_checks = stream::FuturesUnordered::new();
for f in added_files {
case_conflict_checks.push(repo.check_case_conflict_in_manifest(
ctx.clone(),
&parent_root_mf,
&child_root_mf,
f.clone(),
@ -603,6 +643,7 @@ fn mercurial_mpath_comparator(a: &MPath, b: &MPath) -> ::std::cmp::Ordering {
}
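
check_case_conflicts, shown a few hunks above, likewise takes the context first, ahead of the repo and the child/parent root manifest ids. A hedged call-site sketch (the manifest ids are assumed to come from the changesets being compared):

let conflict_check = check_case_conflicts(
    ctx.clone(),
    repo.clone(),
    child_root_mf,        // HgManifestId of the commit being created
    Some(parent_root_mf), // HgManifestId of p1, or None for a root commit
);
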
pub fn process_entries(
ctx: CoreContext,
repo: BlobRepo,
entry_processor: &UploadEntries,
root_manifest: BoxFuture<Option<(HgBlobEntry, RepoPath)>, Error>,
@ -612,14 +653,14 @@ pub fn process_entries(
.context("While uploading root manifest")
.from_err()
.and_then({
let entry_processor = entry_processor.clone();
cloned!(ctx, entry_processor);
move |root_manifest| match root_manifest {
None => future::ok(None).boxify(),
Some((entry, path)) => {
let hash = entry.get_hash().into_nodehash();
if entry.get_type() == manifest::Type::Tree && path == RepoPath::RootPath {
entry_processor
.process_root_manifest(&entry)
.process_root_manifest(ctx, &entry)
.map(move |_| Some(hash))
.boxify()
} else {
@ -634,8 +675,8 @@ pub fn process_entries(
.context("While uploading child entries")
.from_err()
.map({
let entry_processor = entry_processor.clone();
move |(entry, path)| entry_processor.process_one_entry(&entry, path)
cloned!(ctx, entry_processor);
move |(entry, path)| entry_processor.process_one_entry(ctx.clone(), &entry, path)
})
.buffer_unordered(100)
.for_each(|()| future::ok(()));
@ -648,7 +689,7 @@ pub fn process_entries(
manifest::EmptyManifest.boxed(),
HgManifestId::new(NULL_HASH),
)).boxify(),
Some(root_hash) => repo.get_manifest_by_nodeid(&HgManifestId::new(root_hash))
Some(root_hash) => repo.get_manifest_by_nodeid(ctx, &HgManifestId::new(root_hash))
.context("While fetching root manifest")
.from_err()
.map(move |m| (m, HgManifestId::new(root_hash)))
@ -737,16 +778,17 @@ pub fn handle_parents(
}
pub fn fetch_parent_manifests(
ctx: CoreContext,
repo: BlobRepo,
parent_manifest_hashes: &Vec<HgManifestId>,
) -> BoxFuture<(Option<Box<Manifest + Sync>>, Option<Box<Manifest + Sync>>), Error> {
let p1_manifest_hash = parent_manifest_hashes.get(0);
let p2_manifest_hash = parent_manifest_hashes.get(1);
let p1_manifest = p1_manifest_hash.map({
cloned!(repo);
move |m| repo.get_manifest_by_nodeid(&m)
cloned!(ctx, repo);
move |m| repo.get_manifest_by_nodeid(ctx, &m)
});
let p2_manifest = p2_manifest_hash.map(move |m| repo.get_manifest_by_nodeid(&m));
let p2_manifest = p2_manifest_hash.map(move |m| repo.get_manifest_by_nodeid(ctx, &m));
p1_manifest.join(p2_manifest).boxify()
}

@ -9,6 +9,8 @@
extern crate ascii;
extern crate async_unit;
extern crate bytes;
#[macro_use]
extern crate cloned;
extern crate failure_ext as failure;
extern crate futures;
extern crate futures_ext;
@ -58,14 +60,15 @@ use utils::{create_changeset_no_parents, create_changeset_one_parent, get_empty_
use tests_utils::{create_commit, store_files};
fn upload_blob_no_parents(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let expected_hash = string_to_nodehash("c3127cdbf2eae0f09653f9237d85c8436425b246");
let fake_path = RepoPath::file("fake/file").expect("Can't generate fake RepoPath");
// The blob does not exist...
assert!(run_future(repo.get_file_content(&expected_hash)).is_err());
assert!(run_future(repo.get_file_content(ctx.clone(), &expected_hash)).is_err());
// We upload it...
let (hash, future) = upload_file_no_parents(&repo, "blob", &fake_path);
let (hash, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path);
assert!(hash == expected_hash);
// The entry we're given is correct...
@ -77,14 +80,14 @@ fn upload_blob_no_parents(repo: BlobRepo) {
entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement"))
);
let content = run_future(entry.get_content()).unwrap();
let content = run_future(entry.get_content(ctx.clone())).unwrap();
match content {
manifest::Content::File(FileContents::Bytes(f)) => assert_eq!(f.as_ref(), &b"blob"[..]),
_ => panic!(),
};
// And the blob now exists
let bytes = run_future(repo.get_file_content(&expected_hash)).unwrap();
let bytes = run_future(repo.get_file_content(ctx.clone(), &expected_hash)).unwrap();
assert!(&bytes.into_bytes() == &b"blob"[..]);
}
@ -95,16 +98,17 @@ test_both_repotypes!(
);
fn upload_blob_one_parent(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let expected_hash = string_to_nodehash("c2d60b35a8e7e034042a9467783bbdac88a0d219");
let fake_path = RepoPath::file("fake/file").expect("Can't generate fake RepoPath");
let (p1, future) = upload_file_no_parents(&repo, "blob", &fake_path);
let (p1, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path);
// The blob does not exist...
run_future(repo.get_file_content(&expected_hash)).is_err();
run_future(repo.get_file_content(ctx.clone(), &expected_hash)).is_err();
// We upload it...
let (hash, future2) = upload_file_one_parent(&repo, "blob", &fake_path, p1);
let (hash, future2) = upload_file_one_parent(ctx.clone(), &repo, "blob", &fake_path, p1);
assert!(hash == expected_hash);
// The entry we're given is correct...
@ -117,13 +121,13 @@ fn upload_blob_one_parent(repo: BlobRepo) {
entry.get_name() == Some(&MPathElement::new("file".into()).expect("valid MPathElement"))
);
let content = run_future(entry.get_content()).unwrap();
let content = run_future(entry.get_content(ctx.clone())).unwrap();
match content {
manifest::Content::File(FileContents::Bytes(f)) => assert_eq!(f.as_ref(), &b"blob"[..]),
_ => panic!(),
};
// And the blob now exists
let bytes = run_future(repo.get_file_content(&expected_hash)).unwrap();
let bytes = run_future(repo.get_file_content(ctx.clone(), &expected_hash)).unwrap();
assert!(&bytes.into_bytes() == &b"blob"[..]);
}
@ -136,6 +140,7 @@ test_both_repotypes!(
#[test]
fn upload_blob_aliases() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
// echo -n "blob" | sha256sum
let alias_key =
"alias.sha256.fa2c8cc4f28176bbeed4b736df569a34c79cd3723e9ec42f9674b4d46ac6b8b8";
@ -151,19 +156,19 @@ fn upload_blob_aliases() {
// The blob with alias does not exist...
assert!(
run_future(prefixed_blobstore.get(alias_key.to_string()))
run_future(prefixed_blobstore.get(ctx.clone(), alias_key.to_string()))
.unwrap()
.is_none()
);
// We upload file and wait until file is uploaded...
let (_, future) = upload_file_no_parents(&repo, "blob", &fake_path);
let (_, future) = upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_path);
run_future(future).unwrap();
let expected_content =
"content.blake2.07ccc95f3ee9252a9e1dbdeaef59844d6aabd9dcf911fa29f542e891a4c5e90a";
let contents = run_future(prefixed_blobstore.get(alias_key.to_string()))
let contents = run_future(prefixed_blobstore.get(ctx.clone(), alias_key.to_string()))
.unwrap()
.unwrap();
assert_eq!(contents.as_bytes(), expected_content.as_bytes());
@ -171,6 +176,7 @@ fn upload_blob_aliases() {
}
fn create_one_changeset(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
let expected_files = vec![
@ -182,13 +188,22 @@ fn create_one_changeset(repo: BlobRepo) {
];
let author: String = "author <author@fb.com>".into();
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (roothash, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
let commit = create_changeset_no_parents(
&repo,
@ -208,7 +223,7 @@ fn create_one_changeset(repo: BlobRepo) {
);
// And check the file blob is present
let bytes = run_future(repo.get_file_content(&filehash)).unwrap();
let bytes = run_future(repo.get_file_content(ctx.clone(), &filehash)).unwrap();
assert!(&bytes.into_bytes() == &b"blob"[..]);
}
@ -224,13 +239,22 @@ fn create_two_changesets(repo: BlobRepo) {
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
let utf_author: String = "\u{041F}\u{0451}\u{0442}\u{0440} <peter@fb.com>".into();
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (roothash, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
let commit1 = create_changeset_no_parents(
&repo,
@ -239,8 +263,10 @@ fn create_two_changesets(repo: BlobRepo) {
);
let fake_file_path_no_dir = RepoPath::file("file").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path_no_dir);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_no_dir);
let (roothash, root_manifest_future) = upload_manifest_one_parent(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&RepoPath::root(),
@ -296,13 +322,22 @@ fn check_bonsai_creation(repo: BlobRepo) {
let fake_file_path = RepoPath::file("dir/file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (_, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
let commit = create_changeset_no_parents(
&repo,
@ -313,9 +348,9 @@ fn check_bonsai_creation(repo: BlobRepo) {
let commit = run_future(commit.get_completed_changeset()).unwrap();
let commit = &commit.1;
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx, &commit.get_changeset_id())).unwrap();
run_future(repo.get_bonsai_from_hg(ctx.clone(), &commit.get_changeset_id())).unwrap();
assert!(bonsai_cs_id.is_some());
let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap();
let bonsai = run_future(repo.get_bonsai_changeset(ctx.clone(), bonsai_cs_id.unwrap())).unwrap();
assert_eq!(
bonsai
.file_changes()
@ -336,10 +371,15 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) {
let parent = {
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (_, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &RepoPath::root());
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&RepoPath::root(),
);
create_changeset_no_parents(
&repo,
@ -353,12 +393,14 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) {
RepoPath::file("file_rename").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(
ctx.clone(),
&repo,
"\x01\ncopy: file\ncopyrev: c3127cdbf2eae0f09653f9237d85c8436425b246\x01\nblob",
&fake_renamed_file_path,
);
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file_rename\0{}\n", filehash),
&RepoPath::root(),
@ -383,8 +425,8 @@ fn check_bonsai_creation_with_rename(repo: BlobRepo) {
.unwrap();
let bonsai_cs_id =
run_future(repo.get_bonsai_from_hg(ctx, &child_cs.get_changeset_id())).unwrap();
let bonsai = run_future(repo.get_bonsai_changeset(bonsai_cs_id.unwrap())).unwrap();
run_future(repo.get_bonsai_from_hg(ctx.clone(), &child_cs.get_changeset_id())).unwrap();
let bonsai = run_future(repo.get_bonsai_changeset(ctx.clone(), bonsai_cs_id.unwrap())).unwrap();
let fc = bonsai.file_changes().collect::<BTreeMap<_, _>>();
let file = MPath::new("file").unwrap();
assert!(!fc[&file].is_some());
@ -403,10 +445,15 @@ test_both_repotypes!(
);
fn create_bad_changeset(repo: BlobRepo) {
let ctx = CoreContext::test_mock();
let dirhash = string_to_nodehash("c2d60b35a8e7e034042a9467783bbdac88a0d219");
let (_, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx,
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
let commit =
create_changeset_no_parents(&repo, root_manifest_future.map(Some).boxify(), vec![]);
@ -427,11 +474,20 @@ fn create_double_linknode(repo: BlobRepo) {
let fake_dir_path = RepoPath::dir("dir").expect("Can't generate fake RepoPath");
let (filehash, parent_commit) = {
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (_, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
(
filehash,
@ -445,13 +501,21 @@ fn create_double_linknode(repo: BlobRepo) {
let child_commit = {
let (filehash, file_future) =
upload_file_one_parent(&repo, "blob", &fake_file_path, filehash);
upload_file_one_parent(ctx.clone(), &repo, "blob", &fake_file_path, filehash);
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (_, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (_, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
create_changeset_one_parent(
&repo,
@ -494,7 +558,8 @@ fn check_linknode_creation(repo: BlobRepo) {
let path = RepoPath::file(
MPath::new(format!("dir/file{}", id)).expect("String to MPath failed"),
).expect("Can't generate fake RepoPath");
let (hash, future) = upload_file_no_parents(&repo, format!("blob id {}", id), &path);
let (hash, future) =
upload_file_no_parents(ctx.clone(), &repo, format!("blob id {}", id), &path);
((hash, format!("file{}", id)), future)
})
.collect();
@ -509,10 +574,14 @@ fn check_linknode_creation(repo: BlobRepo) {
});
let (dirhash, manifest_dir_future) =
upload_manifest_no_parents(&repo, manifest, &fake_dir_path);
upload_manifest_no_parents(ctx.clone(), &repo, manifest, &fake_dir_path);
let (roothash, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("dir\0{}t\n", dirhash), &RepoPath::root());
let (roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash),
&RepoPath::root(),
);
uploads.push(manifest_dir_future);
@ -566,13 +635,14 @@ where
K::Value: PartialEq + Arbitrary,
{
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
let ctx = CoreContext::test_mock();
let value = <K::Value as Arbitrary>::arbitrary(g);
let value_cloned = value.clone();
let store_fetch_future = self.repo
.unittest_store(value)
.unittest_store(ctx.clone(), value)
.and_then({
let repo = self.repo.clone();
move |key| repo.unittest_fetch(&key)
cloned!(ctx, self.repo);
move |key| repo.unittest_fetch(ctx, &key)
})
.map(move |value_fetched| TestResult::from_bool(value_fetched == value_cloned));
run_future(store_fetch_future).expect("valid mononoke type")
@ -593,6 +663,7 @@ test_both_repotypes!(
#[test]
fn test_compute_changed_files_no_parents() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let nodehash = string_to_nodehash("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480");
let expected = vec![
@ -602,11 +673,13 @@ fn test_compute_changed_files_no_parents() {
MPath::new(b"dir2/file_1_in_dir2").unwrap(),
];
let cs =
run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(nodehash))).unwrap();
let mf = run_future(repo.get_manifest_by_nodeid(&cs.manifestid())).unwrap();
let cs = run_future(repo.get_changeset_by_changesetid(
ctx.clone(),
&HgChangesetId::new(nodehash),
)).unwrap();
let mf = run_future(repo.get_manifest_by_nodeid(ctx.clone(), &cs.manifestid())).unwrap();
let diff = run_future(compute_changed_files(&mf, None, None)).unwrap();
let diff = run_future(compute_changed_files(ctx.clone(), &mf, None, None)).unwrap();
assert!(
diff == expected,
"Got {:?}, expected {:?}\n",
@ -619,6 +692,7 @@ fn test_compute_changed_files_no_parents() {
#[test]
fn test_compute_changed_files_one_parent() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
// Note that this is a commit and its parent commit, so you can use:
// hg log -T"{node}\n{files % ' MPath::new(b\"{file}\").unwrap(),\\n'}\\n" -r $HASH
// to see how Mercurial would compute the files list and confirm that it's the same
@ -635,15 +709,25 @@ fn test_compute_changed_files_one_parent() {
MPath::new(b"dir1/subdir1/subsubdir2/file_2").unwrap(),
];
let cs =
run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(nodehash))).unwrap();
let mf = run_future(repo.get_manifest_by_nodeid(&cs.manifestid())).unwrap();
let cs = run_future(repo.get_changeset_by_changesetid(
ctx.clone(),
&HgChangesetId::new(nodehash),
)).unwrap();
let mf = run_future(repo.get_manifest_by_nodeid(ctx.clone(), &cs.manifestid())).unwrap();
let parent_cs =
run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(parenthash))).unwrap();
let parent_mf = run_future(repo.get_manifest_by_nodeid(&parent_cs.manifestid())).unwrap();
let parent_cs = run_future(repo.get_changeset_by_changesetid(
ctx.clone(),
&HgChangesetId::new(parenthash),
)).unwrap();
let parent_mf =
run_future(repo.get_manifest_by_nodeid(ctx.clone(), &parent_cs.manifestid())).unwrap();
let diff = run_future(compute_changed_files(&mf, Some(&parent_mf), None)).unwrap();
let diff = run_future(compute_changed_files(
ctx.clone(),
&mf,
Some(&parent_mf),
None,
)).unwrap();
assert!(
diff == expected,
"Got {:?}, expected {:?}\n",
@ -675,12 +759,13 @@ fn make_bonsai_changeset(
}
fn make_file_change(
ctx: CoreContext,
content: impl AsRef<[u8]>,
repo: &BlobRepo,
) -> impl Future<Item = FileChange, Error = Error> + Send {
let content = content.as_ref();
let content_size = content.len() as u64;
repo.unittest_store(FileContents::new_bytes(content.as_ref()))
repo.unittest_store(ctx, FileContents::new_bytes(content.as_ref()))
.map(move |content_id| FileChange::new(content_id, FileType::Regular, content_size, None))
}
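
A usage sketch for the helper above (blocking test style; the byte literal is an arbitrary example):

let ctx = CoreContext::test_mock();
let fc = run_future(make_file_change(ctx.clone(), b"some content", &repo)).unwrap();
// fc records the stored ContentId, FileType::Regular and the content length
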
@ -689,15 +774,20 @@ fn test_get_manifest_from_bonsai() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = merge_uneven::getrepo(None);
let get_manifest_for_changeset = |cs_nodehash: &str| -> HgManifestId {
*run_future(repo.get_changeset_by_changesetid(&HgChangesetId::new(
string_to_nodehash(cs_nodehash),
))).unwrap()
.manifestid()
let get_manifest_for_changeset = {
cloned!(ctx, repo);
move |cs_nodehash: &str| -> HgManifestId {
*run_future(repo.get_changeset_by_changesetid(
ctx.clone(),
&HgChangesetId::new(string_to_nodehash(cs_nodehash)),
)).unwrap()
.manifestid()
}
};
let get_entries =
|ms_hash: &HgManifestId| -> BoxFuture<HashMap<String, Box<Entry + Sync>>, Error> {
repo.get_manifest_by_nodeid(&ms_hash)
let get_entries = {
cloned!(ctx, repo);
move |ms_hash: &HgManifestId| -> BoxFuture<HashMap<String, Box<Entry + Sync>>, Error> {
repo.get_manifest_by_nodeid(ctx.clone(), &ms_hash)
.map(|ms| {
ms.list()
.map(|e| {
@ -707,7 +797,8 @@ fn test_get_manifest_from_bonsai() {
.collect::<HashMap<_, _>>()
})
.boxify()
};
}
};
// #CONTENT
// 1: 1
@ -779,7 +870,7 @@ fn test_get_manifest_from_bonsai() {
);
let br = entries.get("branch").expect("trivial merge should succeed");
let br_parents = run_future(br.get_parents())
let br_parents = run_future(br.get_parents(ctx.clone()))
.unwrap()
.into_iter()
.collect::<HashSet<_>>();
@ -789,20 +880,20 @@ fn test_get_manifest_from_bonsai() {
// add file
{
let content_expected = &b"some awesome content"[..];
let fc = run_future(make_file_change(content_expected, &repo)).unwrap();
let fc = run_future(make_file_change(ctx.clone(), content_expected, &repo)).unwrap();
let bcs = make_bonsai_changeset(None, None, vec![("base", None), ("new", Some(fc))]);
let (ms_hash, _) =
run_future(repo.get_manifest_from_bonsai(ctx, bcs, Some(&ms1), Some(&ms2)))
run_future(repo.get_manifest_from_bonsai(ctx.clone(), bcs, Some(&ms1), Some(&ms2)))
.expect("adding new file should not produce coflict");
let entries = run_future(get_entries(&ms_hash)).unwrap();
let new = entries.get("new").expect("new file should be in entries");
match run_future(new.get_content()).unwrap() {
match run_future(new.get_content(ctx.clone())).unwrap() {
manifest::Content::File(content) => {
assert_eq!(content, FileContents::new_bytes(content_expected));
}
_ => panic!("content type mismatch"),
};
let new_parents = run_future(new.get_parents()).unwrap();
let new_parents = run_future(new.get_parents(ctx.clone())).unwrap();
assert_eq!(new_parents, HgParents::None);
}
});
@ -814,7 +905,7 @@ fn test_case_conflict_in_manifest() {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let get_manifest_for_changeset = |cs_id: &HgChangesetId| -> HgManifestId {
*run_future(repo.get_changeset_by_changesetid(cs_id))
*run_future(repo.get_changeset_by_changesetid(ctx.clone(), cs_id))
.unwrap()
.manifestid()
};
@ -838,7 +929,11 @@ fn test_case_conflict_in_manifest() {
ctx.clone(),
repo.clone(),
vec![bonsai_parent],
store_files(btreemap!{*path => Some("caseconflicttest")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{*path => Some("caseconflicttest")},
repo.clone(),
),
);
let child_hg_cs =
@ -846,6 +941,7 @@ fn test_case_conflict_in_manifest() {
let child_mf = get_manifest_for_changeset(&child_hg_cs);
assert_eq!(
run_future(repo.check_case_conflict_in_manifest(
ctx.clone(),
&mf,
&child_mf,
MPath::new(path).unwrap()
@ -862,13 +958,19 @@ fn test_case_conflict_in_manifest() {
#[test]
fn test_case_conflict_two_changeset() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = get_empty_lazy_repo();
let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath");
let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1);
let (filehash_1, file_future_1) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1);
let (_roothash, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash_1), &RepoPath::root());
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash_1),
&RepoPath::root(),
);
let commit1 = create_changeset_no_parents(
&repo,
@ -879,8 +981,9 @@ fn test_case_conflict_two_changeset() {
let commit2 = {
let fake_file_path_2 = RepoPath::file("FILE").expect("Can't generate fake RepoPath");
let (filehash_2, file_future_2) =
upload_file_no_parents(&repo, "blob", &fake_file_path_2);
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_2);
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\nFILE\0{}\n", filehash_1, filehash_2),
&RepoPath::root(),
@ -907,14 +1010,18 @@ fn test_case_conflict_two_changeset() {
#[test]
fn test_case_conflict_inside_one_changeset() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = get_empty_lazy_repo();
let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath");
let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1);
let (filehash_1, file_future_1) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1);
let fake_file_path_1 = RepoPath::file("FILE").expect("Can't generate fake RepoPath");
let (filehash_2, file_future_2) = upload_file_no_parents(&repo, "blob", &fake_file_path_1);
let (filehash_2, file_future_2) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1);
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\nFILE\0{}", filehash_1, filehash_2),
&RepoPath::root(),
@ -933,13 +1040,19 @@ fn test_case_conflict_inside_one_changeset() {
#[test]
fn test_no_case_conflict_removal() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = get_empty_lazy_repo();
let fake_file_path_1 = RepoPath::file("file").expect("Can't generate fake RepoPath");
let (filehash_1, file_future_1) = upload_file_no_parents(&repo, "blob", &fake_file_path_1);
let (filehash_1, file_future_1) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_1);
let (_roothash, root_manifest_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash_1), &RepoPath::root());
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash_1),
&RepoPath::root(),
);
let commit1 = create_changeset_no_parents(
&repo,
@ -950,8 +1063,9 @@ fn test_no_case_conflict_removal() {
let commit2 = {
let fake_file_path_2 = RepoPath::file("FILE").expect("Can't generate fake RepoPath");
let (filehash_2, file_future_2) =
upload_file_no_parents(&repo, "blob", &fake_file_path_2);
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path_2);
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("FILE\0{}\n", filehash_2),
&RepoPath::root(),
@ -978,17 +1092,24 @@ fn test_no_case_conflict_removal() {
#[test]
fn test_no_case_conflict_removal_dir() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = get_empty_lazy_repo();
let commit1 = {
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::file("dir").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash_1, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash_1, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("dir\0{}t\n", dirhash_1),
&RepoPath::root(),
@ -1004,12 +1125,18 @@ fn test_no_case_conflict_removal_dir() {
let commit2 = {
let fake_file_path = RepoPath::file("file").expect("Can't generate fake RepoPath");
let fake_dir_path = RepoPath::file("DIR").expect("Can't generate fake RepoPath");
let (filehash, file_future) = upload_file_no_parents(&repo, "blob", &fake_file_path);
let (filehash, file_future) =
upload_file_no_parents(ctx.clone(), &repo, "blob", &fake_file_path);
let (dirhash_1, manifest_dir_future) =
upload_manifest_no_parents(&repo, format!("file\0{}\n", filehash), &fake_dir_path);
let (dirhash_1, manifest_dir_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("file\0{}\n", filehash),
&fake_dir_path,
);
let (_roothash, root_manifest_future) = upload_manifest_no_parents(
ctx.clone(),
&repo,
format!("DIR\0{}t\n", dirhash_1),
&RepoPath::root(),

@ -12,6 +12,7 @@ use utils::run_future;
use blobrepo::HgBlobEntry;
use blobrepo::internal::{IncompleteFilenodes, MemoryManifestEntry, MemoryRootManifest};
use context::CoreContext;
use fixtures::many_files_dirs;
use mercurial_types::{Entry, FileType, HgManifestId, HgNodeHash, MPath, MPathElement, Type,
nodehash::HgEntryId};
@ -31,12 +32,14 @@ fn insert_entry(tree: &MemoryManifestEntry, path: MPathElement, entry: MemoryMan
#[test]
fn empty_manifest() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
// Create an empty memory manifest
let memory_manifest = MemoryRootManifest::new(repo, IncompleteFilenodes::new(), None, None)
.wait()
.expect("Could not create empty manifest");
let memory_manifest =
MemoryRootManifest::new(ctx, repo, IncompleteFilenodes::new(), None, None)
.wait()
.expect("Could not create empty manifest");
if let MemoryManifestEntry::MemTree {
base_manifest_id,
@ -59,16 +62,21 @@ fn empty_manifest() {
#[test]
fn load_manifest() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let manifest_id = HgNodeHash::from_static_str("907f5b20e06dfb91057861d984423e84b64b5b7b")
.expect("Could not get nodehash");
// Load a memory manifest
let memory_manifest =
MemoryRootManifest::new(repo, IncompleteFilenodes::new(), Some(&manifest_id), None)
.wait()
.expect("Could not load manifest");
let memory_manifest = MemoryRootManifest::new(
ctx.clone(),
repo,
IncompleteFilenodes::new(),
Some(&manifest_id),
None,
).wait()
.expect("Could not load manifest");
if let MemoryManifestEntry::MemTree {
base_manifest_id,
@ -104,13 +112,18 @@ fn load_manifest() {
#[test]
fn save_manifest() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
// Create an empty memory manifest
let memory_manifest =
MemoryRootManifest::new(repo.clone(), IncompleteFilenodes::new(), None, None)
.wait()
.expect("Could not create empty manifest");
let memory_manifest = MemoryRootManifest::new(
ctx.clone(),
repo.clone(),
IncompleteFilenodes::new(),
None,
None,
).wait()
.expect("Could not create empty manifest");
// Add an unmodified entry
let dir_nodehash = HgNodeHash::from_static_str("907f5b20e06dfb91057861d984423e84b64b5b7b")
@ -126,12 +139,12 @@ fn save_manifest() {
insert_entry(&memory_manifest.unittest_root(), path.clone(), dir);
let manifest_entry = memory_manifest
.save()
.save(ctx.clone())
.wait()
.expect("Could not save manifest");
let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash());
let refound = repo.get_manifest_by_nodeid(&manifest_id)
let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id)
.map(|m| m.lookup(&path))
.wait()
.expect("Lookup of entry just saved failed")
@ -149,6 +162,7 @@ fn save_manifest() {
#[test]
fn remove_item() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let blobstore = repo.get_blobstore();
@ -159,6 +173,7 @@ fn remove_item() {
// Load a memory manifest
let memory_manifest = MemoryRootManifest::new(
ctx.clone(),
repo.clone(),
IncompleteFilenodes::new(),
Some(&manifest_id),
@ -173,6 +188,7 @@ fn remove_item() {
// Remove a file
memory_manifest
.change_entry(
ctx.clone(),
&MPath::new(b"dir2/file_1_in_dir2").expect("Can't create MPath"),
None,
)
@ -187,7 +203,9 @@ fn remove_item() {
.get(&dir2)
.expect("dir2 is missing")
.clone()
.map_or(false, |e| e.is_empty(&blobstore).wait().unwrap()),
.map_or(false, |e| e.is_empty(ctx.clone(), &blobstore)
.wait()
.unwrap()),
"Bad after remove"
);
if let Some(MemoryManifestEntry::MemTree { changes, .. }) =
@ -206,12 +224,12 @@ fn remove_item() {
// And check that dir2 disappears over a save/reload operation
let manifest_entry = memory_manifest
.save()
.save(ctx.clone())
.wait()
.expect("Could not save manifest");
let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash());
let refound = repo.get_manifest_by_nodeid(&manifest_id)
let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id)
.map(|m| m.lookup(&dir2))
.wait()
.expect("Lookup of entry just saved failed");
@ -226,6 +244,7 @@ fn remove_item() {
#[test]
fn add_item() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let blobstore = repo.get_blobstore();
@ -237,6 +256,7 @@ fn add_item() {
// Load a memory manifest
let memory_manifest = MemoryRootManifest::new(
ctx.clone(),
repo.clone(),
IncompleteFilenodes::new(),
Some(&manifest_id),
@ -249,6 +269,7 @@ fn add_item() {
.expect("Could not get nodehash");
memory_manifest
.change_entry(
ctx.clone(),
&MPath::new(b"new_file").expect("Could not create MPath"),
Some(HgBlobEntry::new(
blobstore.clone(),
@ -262,12 +283,12 @@ fn add_item() {
// And check that new_file persists
let manifest_entry = memory_manifest
.save()
.save(ctx.clone())
.wait()
.expect("Could not save manifest");
let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash());
let refound = repo.get_manifest_by_nodeid(&manifest_id)
let refound = repo.get_manifest_by_nodeid(ctx.clone(), &manifest_id)
.map(|m| m.lookup(&new_file))
.wait()
.expect("Lookup of entry just saved failed")
@ -283,6 +304,7 @@ fn add_item() {
#[test]
fn replace_item() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let blobstore = repo.get_blobstore();
@ -293,6 +315,7 @@ fn replace_item() {
// Load a memory manifest
let memory_manifest = MemoryRootManifest::new(
ctx.clone(),
repo.clone(),
IncompleteFilenodes::new(),
Some(&manifest_id),
@ -305,6 +328,7 @@ fn replace_item() {
.expect("Could not get nodehash");
memory_manifest
.change_entry(
ctx.clone(),
&MPath::new(b"1").expect("Could not create MPath"),
Some(HgBlobEntry::new(
blobstore.clone(),
@ -318,12 +342,12 @@ fn replace_item() {
// And check that new_file persists
let manifest_entry = memory_manifest
.save()
.save(ctx.clone())
.wait()
.expect("Could not save manifest");
let manifest_id = HgManifestId::new(manifest_entry.get_hash().into_nodehash());
let refound = repo.get_manifest_by_nodeid(&manifest_id)
let refound = repo.get_manifest_by_nodeid(ctx, &manifest_id)
.map(|m| m.lookup(&new_file))
.wait()
.expect("Lookup of entry just saved failed")
@ -339,6 +363,7 @@ fn replace_item() {
#[test]
fn conflict_resolution() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let blobstore = repo.get_blobstore();
let logger = repo.get_logger();
@ -398,6 +423,7 @@ fn conflict_resolution() {
};
let merge = run_future(base.merge_with_conflicts(
ctx,
other,
blobstore,
logger,
@ -436,6 +462,7 @@ fn conflict_resolution() {
#[test]
fn merge_manifests() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let blobstore = repo.get_blobstore();
let logger = repo.get_logger();
@ -521,6 +548,7 @@ fn merge_manifests() {
};
let merged = base.merge_with_conflicts(
ctx,
other,
blobstore,
logger,

@ -69,6 +69,7 @@ macro_rules! test_both_repotypes {
}
pub fn upload_file_no_parents<B>(
ctx: CoreContext,
repo: &BlobRepo,
data: B,
path: &RepoPath,
@ -77,6 +78,7 @@ where
B: Into<Bytes>,
{
upload_hg_file_entry(
ctx,
repo,
data.into(),
FileType::Regular,
@ -87,6 +89,7 @@ where
}
pub fn upload_file_one_parent<B>(
ctx: CoreContext,
repo: &BlobRepo,
data: B,
path: &RepoPath,
@ -96,6 +99,7 @@ where
B: Into<Bytes>,
{
upload_hg_file_entry(
ctx,
repo,
data.into(),
FileType::Regular,
@ -106,6 +110,7 @@ where
}
pub fn upload_manifest_no_parents<B>(
ctx: CoreContext,
repo: &BlobRepo,
data: B,
path: &RepoPath,
@ -113,10 +118,11 @@ pub fn upload_manifest_no_parents<B>(
where
B: Into<Bytes>,
{
upload_hg_tree_entry(repo, data.into(), path.clone(), None, None)
upload_hg_tree_entry(ctx, repo, data.into(), path.clone(), None, None)
}
pub fn upload_manifest_one_parent<B>(
ctx: CoreContext,
repo: &BlobRepo,
data: B,
path: &RepoPath,
@ -125,10 +131,11 @@ pub fn upload_manifest_one_parent<B>(
where
B: Into<Bytes>,
{
upload_hg_tree_entry(repo, data.into(), path.clone(), Some(p1), None)
upload_hg_tree_entry(ctx, repo, data.into(), path.clone(), Some(p1), None)
}
fn upload_hg_tree_entry(
ctx: CoreContext,
repo: &BlobRepo,
contents: Bytes,
path: RepoPath,
@ -142,10 +149,11 @@ fn upload_hg_tree_entry(
p2,
path,
};
upload.upload(repo).unwrap()
upload.upload(ctx, repo).unwrap()
}
fn upload_hg_file_entry(
ctx: CoreContext,
repo: &BlobRepo,
contents: Bytes,
file_type: FileType,
@ -166,7 +174,7 @@ fn upload_hg_file_entry(
path: path.into_mpath().expect("expected a path to be present"),
};
let (_, upload_fut) = upload.upload(repo).unwrap();
let (_, upload_fut) = upload.upload(ctx, repo).unwrap();
(node_id, upload_fut)
}
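
Taken together, the helpers above are now invoked with an explicit context, as in this sketch (repo construction elided; `run_future` as used in the tests):

let ctx = CoreContext::test_mock();
let path = RepoPath::file("fake/file").expect("valid RepoPath");
let (node_id, upload_fut) = upload_file_no_parents(ctx.clone(), &repo, "blob", &path);
run_future(upload_fut).unwrap(); // wait for the blob and entry to be written
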

@ -70,29 +70,39 @@ pub struct BonsaiMFVerifyDifference {
impl BonsaiMFVerifyDifference {
/// What entries changed from the original manifest to the roundtripped one.
pub fn changes(&self) -> impl Stream<Item = ChangedEntry, Error = Error> + Send {
pub fn changes(
&self,
ctx: CoreContext,
) -> impl Stream<Item = ChangedEntry, Error = Error> + Send {
let lookup_mf_id = HgManifestId::new(self.lookup_mf_id);
let roundtrip_mf_id = HgManifestId::new(self.roundtrip_mf_id);
let original_mf = self.repo.get_manifest_by_nodeid(&lookup_mf_id);
let roundtrip_mf = self.repo.get_manifest_by_nodeid(&roundtrip_mf_id);
let original_mf = self.repo.get_manifest_by_nodeid(ctx.clone(), &lookup_mf_id);
let roundtrip_mf = self.repo
.get_manifest_by_nodeid(ctx.clone(), &roundtrip_mf_id);
original_mf
.join(roundtrip_mf)
.map(|(original_mf, roundtrip_mf)| {
changed_entry_stream(&roundtrip_mf, &original_mf, None)
.map({
cloned!(ctx);
move |(original_mf, roundtrip_mf)| {
changed_entry_stream(ctx, &roundtrip_mf, &original_mf, None)
}
})
.flatten_stream()
}
/// Whether there are any changes beyond the root manifest ID being different.
#[inline]
pub fn has_changes(&self) -> impl Future<Item = bool, Error = Error> + Send {
self.changes().not_empty()
pub fn has_changes(&self, ctx: CoreContext) -> impl Future<Item = bool, Error = Error> + Send {
self.changes(ctx).not_empty()
}
/// Whether there are any files that changed.
#[inline]
pub fn has_file_changes(&self) -> impl Future<Item = bool, Error = Error> + Send {
self.changes()
pub fn has_file_changes(
&self,
ctx: CoreContext,
) -> impl Future<Item = bool, Error = Error> + Send {
self.changes(ctx)
.filter(|item| !item.status.is_tree())
.not_empty()
}
@ -184,19 +194,20 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
debug!(logger, "Starting bonsai diff computation");
let parents_fut = repo.get_changeset_parents(ctx, &changeset_id).and_then({
let repo = repo.clone();
move |parent_hashes| {
let changesets = parent_hashes
.into_iter()
.map(move |parent_id| repo.get_changeset_by_changesetid(&parent_id));
future::join_all(changesets)
}
});
let parents_fut = repo.get_changeset_parents(ctx.clone(), &changeset_id)
.and_then({
cloned!(ctx, repo);
move |parent_hashes| {
let changesets = parent_hashes.into_iter().map(move |parent_id| {
repo.get_changeset_by_changesetid(ctx.clone(), &parent_id)
});
future::join_all(changesets)
}
});
// Convert to bonsai first.
let bonsai_diff_fut = parents_fut.and_then({
let repo = repo.clone();
cloned!(ctx, repo);
move |parents| {
let mut parents = parents.into_iter();
let p1: Option<_> = parents.next();
@ -214,9 +225,10 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
// Also fetch the manifest as we're interested in the computed node id.
let root_mf_id = HgManifestId::new(root_entry.get_hash().into_nodehash());
let root_mf_fut = BlobManifest::load(&repo.get_blobstore(), &root_mf_id);
let root_mf_fut =
BlobManifest::load(ctx.clone(), &repo.get_blobstore(), &root_mf_id);
bonsai_diff(root_entry, p1_entry, p2_entry)
bonsai_diff(ctx.clone(), root_entry, p1_entry, p2_entry)
.collect()
.join(root_mf_fut)
.and_then(move |(diff, root_mf)| match root_mf {
@ -245,6 +257,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
}
apply_diff(
ctx.clone(),
logger.clone(),
repo.clone(),
diff_result,
@ -285,7 +298,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
if broken_merge {
// This is a (potentially) broken merge. Ignore tree changes and
// only check for file changes.
Either::B(Either::A(difference.has_file_changes().map(
Either::B(Either::A(difference.has_file_changes(ctx).map(
move |has_file_changes| {
if has_file_changes {
BonsaiMFVerifyResult::Invalid(difference)
@ -298,7 +311,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
// This is an empty changeset. Mercurial is relatively inconsistent
// about creating new manifest nodes for such changesets, so it can
// happen.
Either::B(Either::B(difference.has_changes().map(
Either::B(Either::B(difference.has_changes(ctx).map(
move |has_changes| {
if has_changes {
BonsaiMFVerifyResult::Invalid(difference)
@ -322,6 +335,7 @@ impl ChangesetVisitor for BonsaiMFVerifyVisitor {
// https://github.com/rust-lang/rust/issues/50865.
// TODO: (rain1) T31595868 make apply_diff private once Rust 1.29 is released
pub fn apply_diff(
ctx: CoreContext,
logger: Logger,
repo: BlobRepo,
diff_result: Vec<BonsaiDiffResult>,
@ -329,6 +343,7 @@ pub fn apply_diff(
manifest_p2: Option<&HgNodeHash>,
) -> impl Future<Item = HgNodeHash, Error = Error> + Send {
MemoryRootManifest::new(
ctx.clone(),
repo.clone(),
IncompleteFilenodes::new(),
manifest_p1,
@ -340,21 +355,21 @@ pub fn apply_diff(
.into_iter()
.map(|result| {
let entry = make_entry(&repo, &result);
memory_manifest.change_entry(result.path(), entry)
memory_manifest.change_entry(ctx.clone(), result.path(), entry)
})
.collect();
future::join_all(futures)
.and_then({
let memory_manifest = memory_manifest.clone();
move |_| memory_manifest.resolve_trivial_conflicts()
cloned!(ctx, memory_manifest);
move |_| memory_manifest.resolve_trivial_conflicts(ctx)
})
.and_then(move |_| {
// This will cause tree entries to be written to the blobstore, but
// those entries will be redirected to memory because of
// repo.in_memory_writes().
debug!(logger, "Applying complete: now saving");
memory_manifest.save()
memory_manifest.save(ctx)
})
.map(|m| m.get_hash().into_nodehash())
}

View File

@ -199,7 +199,7 @@ where
let visit_fut = shared
.repo
.get_changeset_by_changesetid(&changeset_id)
.get_changeset_by_changesetid(ctx.clone(), &changeset_id)
.and_then({
cloned!(ctx, shared.visitor, shared.repo);
move |changeset| visitor.visit(ctx, logger, repo, changeset, follow_remaining)

View File

@ -54,7 +54,7 @@ mod test {
.map(|heads| heads.into_iter().map(HgChangesetId::new));
let verify = BonsaiMFVerify {
ctx,
ctx: ctx.clone(),
logger,
repo,
follow_limit: 1024,
@ -68,13 +68,13 @@ mod test {
.and_then(|heads| verify.verify(heads).collect());
tokio::spawn(
results
.and_then(|results| {
let diffs = results.into_iter().filter_map(|(res, meta)| {
.and_then(move |results| {
let diffs = results.into_iter().filter_map(move |(res, meta)| {
match res {
BonsaiMFVerifyResult::Invalid(difference) => {
let cs_id = meta.changeset_id;
Some(difference
.changes()
.changes(ctx.clone())
.collect()
.map(move |changes| (cs_id, changes)))
}

View File

@ -12,6 +12,7 @@ extern crate tokio;
extern crate futures_ext;
extern crate blobstore;
extern crate context;
extern crate mononoke_types;
use std::fmt;
@ -26,6 +27,7 @@ use tokio::prelude::*;
use tokio::timer::Delay;
use blobstore::Blobstore;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
/// A blobstore that imposes a delay on all its operations, where the delay is generated by a
@ -75,27 +77,27 @@ impl<F> Blobstore for DelayBlob<F>
where
F: FnMut(()) -> Duration + 'static + Send + Sync,
{
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let sleep = self.sleep(self.get_roundtrips);
let get = self.blobstore.get(key);
let get = self.blobstore.get(ctx, key);
sleep.and_then(move |_| get).boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let sleep = self.sleep(self.put_roundtrips);
let put = self.blobstore.put(key, value);
let put = self.blobstore.put(ctx, key, value);
sleep.and_then(move |_| put).boxify()
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
let sleep = self.sleep(self.is_present_roundtrips);
let is_present = self.blobstore.is_present(key);
let is_present = self.blobstore.is_present(ctx, key);
sleep.and_then(move |_| is_present).boxify()
}
fn assert_present(&self, key: String) -> BoxFuture<(), Error> {
fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> {
let sleep = self.sleep(self.assert_present_roundtrips);
let assert_present = self.blobstore.assert_present(key);
let assert_present = self.blobstore.assert_present(ctx, key);
sleep.and_then(move |_| assert_present).boxify()
}
}
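
For orientation, a minimal usage sketch of this wrapper under the new context-threading API. DelayBlob's constructor is not shown in this hunk, so `DelayBlob::new(inner, delay_fn)` below is an assumed shape inferred from the fields used above, not a confirmed signature.

// Hedged sketch: DelayBlob::new is an assumption; only the Blobstore calls
// shown in this diff are confirmed.
fn delay_example() {
    let ctx = CoreContext::test_mock();
    let inner: Box<Blobstore> = Box::new(EagerMemblob::new());
    // Every simulated roundtrip sleeps for a fixed 10ms.
    let store = DelayBlob::new(inner, |()| std::time::Duration::from_millis(10));
    store
        .put(
            ctx.clone(),
            "key".to_string(),
            BlobstoreBytes::from_bytes("value"),
        )
        .and_then(move |()| store.get(ctx, "key".to_string()))
        .wait()
        .expect("roundtrip failed");
}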

View File

@ -14,6 +14,7 @@ extern crate url;
extern crate futures_ext;
extern crate blobstore;
extern crate context;
extern crate mononoke_types;
use std::fs::{create_dir_all, File};
@ -28,6 +29,7 @@ use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use futures_ext::{BoxFuture, FutureExt};
use blobstore::Blobstore;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
const PREFIX: &str = "blob";
@ -63,7 +65,7 @@ impl Fileblob {
}
impl Blobstore for Fileblob {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let p = self.path(&key);
poll_fn(move || {
@ -81,7 +83,7 @@ impl Blobstore for Fileblob {
.boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let p = self.path(&key);
poll_fn::<_, Error, _>(move || {

View File

@ -7,6 +7,7 @@
extern crate blobstore;
extern crate bytes;
extern crate cloned;
extern crate context;
extern crate failure_ext as failure;
extern crate futures;
extern crate futures_ext;
@ -38,6 +39,7 @@ use rand::prelude::*;
use twox_hash::{XxHash, XxHash32};
use blobstore::Blobstore;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
// UID and GID we're using for file ownership and permissions checking.
@ -218,7 +220,7 @@ impl fmt::Debug for Glusterblob {
impl Blobstore for Glusterblob {
/// Fetch the value associated with `key`, or None if no value is present
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let path = self.keydir(&*key);
let datapath = path.join(Self::keyfile(&*key));
let metapath = path.join(Self::metafile(&*key));
@ -297,7 +299,7 @@ impl Blobstore for Glusterblob {
/// Associate `value` with `key` for future gets; if `put` is called with different `value`s
/// for the same key, the implementation may return any `value` it's been given in response
/// to a `get` for that `key`.
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.create_keydir(&*key)
.and_then({
cloned!(self.ctxt);
@ -390,7 +392,7 @@ impl Blobstore for Glusterblob {
/// implementation just calls `get`, and discards the return value; this can be overridden to
/// avoid transferring data. In the absence of concurrent `put` calls, this must return
/// `false` if `get` would return `None`, and `true` if `get` would return `Some(_)`.
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
fn is_present(&self, _ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
let path = self.keydir(&*key);
let datapath = path.join(Self::keyfile(&*key));
let metapath = path.join(Self::metafile(&*key));

View File

@ -13,6 +13,7 @@ extern crate futures_ext;
extern crate rocksdb;
extern crate blobstore;
extern crate context;
extern crate mononoke_types;
use std::path::Path;
@ -24,6 +25,7 @@ use futures_ext::{BoxFuture, FutureExt};
use rocksdb::{Db, ReadOptions, WriteOptions};
use blobstore::Blobstore;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
pub type Result<T> = std::result::Result<T, Error>;
@ -84,13 +86,13 @@ impl Future for PutBlob {
}
impl Blobstore for Rocksblob where {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let db = self.db.clone();
GetBlob(db, key).boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let db = self.db.clone();
PutBlob(db, key, value).boxify()

View File

@ -9,6 +9,7 @@ use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
use stats::DynamicTimeseries;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
use {Blobstore, CacheBlobstoreExt};
@ -54,11 +55,11 @@ impl<T: Blobstore> CountedBlobstore<T> {
}
impl<T: Blobstore> Blobstore for CountedBlobstore<T> {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let name = self.name;
STATS::get.add_value(1, (name,));
self.blobstore
.get(key)
.get(ctx, key)
.then(move |res| {
match res {
Ok(Some(_)) => STATS::get_hit.add_value(1, (name,)),
@ -70,11 +71,11 @@ impl<T: Blobstore> Blobstore for CountedBlobstore<T> {
.boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let name = self.name;
STATS::put.add_value(1, (name,));
self.blobstore
.put(key, value)
.put(ctx, key, value)
.then(move |res| {
match res {
Ok(()) => STATS::put_ok.add_value(1, (name,)),
@ -85,11 +86,11 @@ impl<T: Blobstore> Blobstore for CountedBlobstore<T> {
.boxify()
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
let name = self.name;
STATS::is_present.add_value(1, (name,));
self.blobstore
.is_present(key)
.is_present(ctx, key)
.then(move |res| {
match res {
Ok(true) => STATS::is_present_hit.add_value(1, (name,)),
@ -101,11 +102,11 @@ impl<T: Blobstore> Blobstore for CountedBlobstore<T> {
.boxify()
}
fn assert_present(&self, key: String) -> BoxFuture<(), Error> {
fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> {
let name = self.name;
STATS::assert_present.add_value(1, (name,));
self.blobstore
.assert_present(key)
.assert_present(ctx, key)
.then(move |res| {
match res {
Ok(()) => STATS::assert_present_ok.add_value(1, (name,)),
@ -119,8 +120,12 @@ impl<T: Blobstore> Blobstore for CountedBlobstore<T> {
impl<T: CacheBlobstoreExt> CacheBlobstoreExt for CountedBlobstore<T> {
#[inline]
fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_inner().get_no_cache_fill(key)
fn get_no_cache_fill(
&self,
ctx: CoreContext,
key: String,
) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_inner().get_no_cache_fill(ctx, key)
}
#[inline]

View File

@ -15,6 +15,7 @@ extern crate tokio;
extern crate tokio_timer;
extern crate cachelib;
extern crate context;
extern crate fbwhoami;
extern crate futures_ext;
extern crate memcache;
@ -31,6 +32,7 @@ use failure::Error;
use futures::{future, Future};
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
mod cachelib_cache;
@ -87,22 +89,22 @@ pub use errors::ErrorKind;
/// uses of Blobstores
pub trait Blobstore: fmt::Debug + Send + Sync + 'static {
/// Fetch the value associated with `key`, or None if no value is present
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>;
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>;
/// Associate `value` with `key` for future gets; if `put` is called with different `value`s
/// for the same key, the implementation may return any `value` it's been given in response
/// to a `get` for that `key`.
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error>;
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error>;
/// Check that `get` will return a value for a given `key`, and not None. The provided
/// implementation just calls `get`, and discards the return value; this can be overridden to
/// avoid transferring data. In the absence of concurrent `put` calls, this must return
/// `false` if `get` would return `None`, and `true` if `get` would return `Some(_)`.
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
self.get(key).map(|opt| opt.is_some()).boxify()
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
self.get(ctx, key).map(|opt| opt.is_some()).boxify()
}
/// Errors if a given `key` is not present in the blob store. Useful to abort a chained
/// future computation early if it cannot succeed unless the `key` is present
fn assert_present(&self, key: String) -> BoxFuture<(), Error> {
self.is_present(key.clone())
fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> {
self.is_present(ctx, key.clone())
.and_then(|present| {
if present {
future::ok(())
@ -115,31 +117,31 @@ pub trait Blobstore: fmt::Debug + Send + Sync + 'static {
}
impl Blobstore for Arc<Blobstore> {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_ref().get(key)
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_ref().get(ctx, key)
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.as_ref().put(key, value)
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.as_ref().put(ctx, key, value)
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
self.as_ref().is_present(key)
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
self.as_ref().is_present(ctx, key)
}
fn assert_present(&self, key: String) -> BoxFuture<(), Error> {
self.as_ref().assert_present(key)
fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> {
self.as_ref().assert_present(ctx, key)
}
}
impl Blobstore for Box<Blobstore> {
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_ref().get(key)
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.as_ref().get(ctx, key)
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.as_ref().put(key, value)
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.as_ref().put(ctx, key, value)
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
self.as_ref().is_present(key)
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
self.as_ref().is_present(ctx, key)
}
fn assert_present(&self, key: String) -> BoxFuture<(), Error> {
self.as_ref().assert_present(key)
fn assert_present(&self, ctx: CoreContext, key: String) -> BoxFuture<(), Error> {
self.as_ref().assert_present(ctx, key)
}
}
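
To make the new calling convention concrete, here is a minimal sketch of a pass-through store written against the trait exactly as declared above; PassthroughBlob is a hypothetical type, not part of this change.

// Hypothetical wrapper implementing the revised trait; is_present and
// assert_present fall back to the default methods shown above.
#[derive(Debug)]
struct PassthroughBlob<T: Blobstore + Clone> {
    inner: T,
}

impl<T: Blobstore + Clone> Blobstore for PassthroughBlob<T> {
    fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
        // The context now rides along with every request, so the inner store
        // can attribute and trace it.
        self.inner.get(ctx, key)
    }

    fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
        self.inner.put(ctx, key, value)
    }
}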

View File

@ -12,6 +12,7 @@ use futures::{future, Future, IntoFuture, future::Either};
use futures_ext::{BoxFuture, FutureExt};
use tokio;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
use Blobstore;
@ -21,7 +22,11 @@ use Blobstore;
///
/// This is primarily used by the admin command to manually check memcache.
pub trait CacheBlobstoreExt: Blobstore {
fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>;
fn get_no_cache_fill(
&self,
ctx: CoreContext,
key: String,
) -> BoxFuture<Option<BlobstoreBytes>, Error>;
fn get_cache_only(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error>;
}
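
A small sketch of how the two probes differ once a context is required; `store` is any hypothetical CacheBlobstoreExt implementor, and note that get_cache_only keeps its context-free signature in this change.

// Sketch only: `store` is assumed to implement CacheBlobstoreExt.
fn probe_cache<T: CacheBlobstoreExt>(store: &T, ctx: CoreContext, key: String) {
    // Reads through to the backing store but does not populate the cache.
    let _backing = store.get_no_cache_fill(ctx, key.clone()).wait();
    // Consults only the cache layer; never touches the backing store.
    let _cached = store.get_cache_only(key).wait();
}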
@ -224,12 +229,12 @@ where
L: LeaseOps + Clone,
T: Blobstore + Clone,
{
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let cache_get = self.cache_get(&key);
let cache_put = self.cache_put_closure(&key);
let blobstore_get = future::lazy({
let blobstore = self.blobstore.clone();
move || blobstore.get(key)
move || blobstore.get(ctx, key)
});
cache_get
@ -245,7 +250,7 @@ where
.boxify()
}
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let can_put = self.take_put_lease(&key);
let cache_put = self.cache_put(&key, value.clone())
.join(future::lazy({
@ -261,7 +266,7 @@ where
let key = key.clone();
move || {
blobstore
.put(key.clone(), value)
.put(ctx, key.clone(), value)
.or_else(move |r| lease.release_lease(&key, false).then(|_| Err(r)))
}
});
@ -277,11 +282,11 @@ where
.boxify()
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
let cache_check = self.cache_is_present(&key);
let blobstore_check = future::lazy({
let blobstore = self.blobstore.clone();
move || blobstore.is_present(key)
move || blobstore.is_present(ctx, key)
});
cache_check
@ -302,9 +307,13 @@ where
L: LeaseOps + Clone,
T: Blobstore + Clone,
{
fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get_no_cache_fill(
&self,
ctx: CoreContext,
key: String,
) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let cache_get = self.cache_get(&key);
let blobstore_get = self.blobstore.get(key);
let blobstore_get = self.blobstore.get(ctx, key);
cache_get
.and_then(move |blob| {

View File

@ -10,6 +10,7 @@ use futures::future::Either;
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
use {Blobstore, EagerMemblob};
@ -32,45 +33,45 @@ impl<T: Blobstore + Clone> MemWritesBlobstore<T> {
}
impl<T: Blobstore + Clone> Blobstore for MemWritesBlobstore<T> {
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
// Don't write the key if it's already present.
self.is_present(key.clone())
self.is_present(ctx.clone(), key.clone())
.and_then({
let memblob = self.memblob.clone();
move |is_present| {
if is_present {
Either::A(Ok(()).into_future())
} else {
Either::B(memblob.put(key, value))
Either::B(memblob.put(ctx, key, value))
}
}
})
.boxify()
}
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.memblob
.get(key.clone())
.get(ctx.clone(), key.clone())
.and_then({
let inner = self.inner.clone();
move |val| match val {
Some(val) => Either::A(Ok(Some(val)).into_future()),
None => Either::B(inner.get(key)),
None => Either::B(inner.get(ctx, key)),
}
})
.boxify()
}
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
self.memblob
.is_present(key.clone())
.is_present(ctx.clone(), key.clone())
.and_then({
let inner = self.inner.clone();
move |is_present| {
if is_present {
Either::A(Ok(true).into_future())
} else {
Either::B(inner.is_present(key))
Either::B(inner.is_present(ctx, key))
}
}
})
@ -86,24 +87,29 @@ mod test {
#[test]
fn basic_read() {
let ctx = CoreContext::test_mock();
let inner = EagerMemblob::new();
let foo_key = "foo".to_string();
inner
.put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar"))
.put(
ctx.clone(),
foo_key.clone(),
BlobstoreBytes::from_bytes("foobar"),
)
.wait()
.expect("initial put should work");
let outer = MemWritesBlobstore::new(inner.clone());
assert!(
outer
.is_present(foo_key.clone())
.is_present(ctx.clone(), foo_key.clone())
.wait()
.expect("is_present to inner should work")
);
assert_eq!(
outer
.get(foo_key.clone())
.get(ctx, foo_key.clone())
.wait()
.expect("get to inner should work")
.expect("value should be present")
@ -114,18 +120,23 @@ mod test {
#[test]
fn redirect_writes() {
let ctx = CoreContext::test_mock();
let inner = EagerMemblob::new();
let foo_key = "foo".to_string();
let outer = MemWritesBlobstore::new(inner.clone());
outer
.put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar"))
.put(
ctx.clone(),
foo_key.clone(),
BlobstoreBytes::from_bytes("foobar"),
)
.wait()
.expect("put should work");
assert!(
!inner
.is_present(foo_key.clone())
.is_present(ctx.clone(), foo_key.clone())
.wait()
.expect("is_present on inner should work"),
"foo should not be present in inner",
@ -133,7 +144,7 @@ mod test {
assert!(
outer
.is_present(foo_key.clone())
.is_present(ctx.clone(), foo_key.clone())
.wait()
.expect("is_present on outer should work"),
"foo should be present in outer",
@ -141,7 +152,7 @@ mod test {
assert_eq!(
outer
.get(foo_key.clone())
.get(ctx, foo_key.clone())
.wait()
.expect("get to outer should work")
.expect("value should be present")
@ -152,21 +163,30 @@ mod test {
#[test]
fn present_in_inner() {
let ctx = CoreContext::test_mock();
let inner = EagerMemblob::new();
let foo_key = "foo".to_string();
inner
.put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar"))
.put(
ctx.clone(),
foo_key.clone(),
BlobstoreBytes::from_bytes("foobar"),
)
.wait()
.expect("initial put should work");
let outer = MemWritesBlobstore::new(inner.clone());
outer
.put(foo_key.clone(), BlobstoreBytes::from_bytes("foobar"))
.put(
ctx.clone(),
foo_key.clone(),
BlobstoreBytes::from_bytes("foobar"),
)
.wait()
.expect("put should work");
assert!(
outer
.is_present(foo_key.clone())
.is_present(ctx.clone(), foo_key.clone())
.wait()
.expect("is_present on outer should work"),
"foo should be present in outer",
@ -174,12 +194,16 @@ mod test {
// Change the value in inner.
inner
.put(foo_key.clone(), BlobstoreBytes::from_bytes("bazquux"))
.put(
ctx.clone(),
foo_key.clone(),
BlobstoreBytes::from_bytes("bazquux"),
)
.wait()
.expect("second put should work");
assert_eq!(
outer
.get(foo_key.clone())
.get(ctx, foo_key.clone())
.wait()
.expect("get to outer should work")
.expect("value should be present")

View File

@ -12,6 +12,7 @@ use failure::Error;
use futures::future::{lazy, IntoFuture};
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
use Blobstore;
@ -47,14 +48,14 @@ impl LazyMemblob {
}
impl Blobstore for EagerMemblob {
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let mut inner = self.hash.lock().expect("lock poison");
inner.insert(key, value);
Ok(()).into_future().boxify()
}
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let inner = self.hash.lock().expect("lock poison");
Ok(inner.get(&key).map(Clone::clone)).into_future().boxify()
@ -62,7 +63,7 @@ impl Blobstore for EagerMemblob {
}
impl Blobstore for LazyMemblob {
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
fn put(&self, _ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
let hash = self.hash.clone();
lazy(move || {
@ -73,7 +74,7 @@ impl Blobstore for LazyMemblob {
}).boxify()
}
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
fn get(&self, _ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
let hash = self.hash.clone();
lazy(move || {

View File

@ -9,6 +9,7 @@ use inlinable_string::InlinableString;
use futures_ext::BoxFuture;
use context::CoreContext;
use mononoke_types::BlobstoreBytes;
use {Blobstore, CacheBlobstoreExt};
@ -40,8 +41,12 @@ impl<T: Blobstore + Clone> PrefixBlobstore<T> {
impl<T: CacheBlobstoreExt + Clone> CacheBlobstoreExt for PrefixBlobstore<T> {
#[inline]
fn get_no_cache_fill(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.blobstore.get_no_cache_fill(self.prepend(key))
fn get_no_cache_fill(
&self,
ctx: CoreContext,
key: String,
) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.blobstore.get_no_cache_fill(ctx, self.prepend(key))
}
#[inline]
@ -52,18 +57,18 @@ impl<T: CacheBlobstoreExt + Clone> CacheBlobstoreExt for PrefixBlobstore<T> {
impl<T: Blobstore + Clone> Blobstore for PrefixBlobstore<T> {
#[inline]
fn get(&self, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.blobstore.get(self.prepend(key))
fn get(&self, ctx: CoreContext, key: String) -> BoxFuture<Option<BlobstoreBytes>, Error> {
self.blobstore.get(ctx, self.prepend(key))
}
#[inline]
fn put(&self, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.blobstore.put(self.prepend(key), value)
fn put(&self, ctx: CoreContext, key: String, value: BlobstoreBytes) -> BoxFuture<(), Error> {
self.blobstore.put(ctx, self.prepend(key), value)
}
#[inline]
fn is_present(&self, key: String) -> BoxFuture<bool, Error> {
self.blobstore.is_present(self.prepend(key))
fn is_present(&self, ctx: CoreContext, key: String) -> BoxFuture<bool, Error> {
self.blobstore.is_present(ctx, self.prepend(key))
}
}
@ -78,6 +83,7 @@ mod test {
#[test]
fn test_prefix() {
let ctx = CoreContext::test_mock();
let base = EagerMemblob::new();
let prefixed = PrefixBlobstore::new(base.clone(), "prefix123-");
let unprefixed_key = "foobar".to_string();
@ -86,6 +92,7 @@ mod test {
// This is EagerMemblob (immediate future completion) so calling wait() is fine.
prefixed
.put(
ctx.clone(),
unprefixed_key.clone(),
BlobstoreBytes::from_bytes("test foobar"),
)
@ -95,7 +102,7 @@ mod test {
// Test that both the prefixed and the unprefixed stores can access the key.
assert_eq!(
prefixed
.get(unprefixed_key.clone())
.get(ctx.clone(), unprefixed_key.clone())
.wait()
.expect("get should succeed")
.expect("value should be present")
@ -103,7 +110,7 @@ mod test {
Bytes::from("test foobar"),
);
assert_eq!(
base.get(prefixed_key.clone())
base.get(ctx.clone(), prefixed_key.clone())
.wait()
.expect("get should succeed")
.expect("value should be present")
@ -114,12 +121,12 @@ mod test {
// Test that is_present works for both the prefixed and unprefixed stores.
assert!(
prefixed
.is_present(unprefixed_key.clone())
.is_present(ctx.clone(), unprefixed_key.clone())
.wait()
.expect("is_present should succeed")
);
assert!(
base.is_present(prefixed_key.clone())
base.is_present(ctx.clone(), prefixed_key.clone())
.wait()
.expect("is_present should succeed")
);

View File

@ -18,6 +18,7 @@ extern crate tempdir;
extern crate tokio;
extern crate blobstore;
extern crate context;
extern crate fileblob;
extern crate glusterblob;
extern crate mononoke_types;
@ -33,6 +34,7 @@ use tempdir::TempDir;
use tokio::{prelude::*, runtime::Runtime};
use blobstore::{Blobstore, EagerMemblob};
use context::CoreContext;
use fileblob::Fileblob;
use glusterblob::Glusterblob;
use mononoke_types::BlobstoreBytes;
@ -45,6 +47,7 @@ where
B::Future: Send + 'static,
Error: From<B::Error>,
{
let ctx = CoreContext::test_mock();
let blobstore = blobstore.into_future().map_err(|err| err.into());
let foo = "foo".to_string();
@ -52,8 +55,12 @@ where
let fut = future::lazy(|| {
blobstore.and_then(|blobstore| {
blobstore
.put(foo.clone(), BlobstoreBytes::from_bytes(&b"bar"[..]))
.and_then(move |_| blobstore.get(foo))
.put(
ctx.clone(),
foo.clone(),
BlobstoreBytes::from_bytes(&b"bar"[..]),
)
.and_then(move |_| blobstore.get(ctx, foo))
})
});
@ -73,10 +80,12 @@ where
B::Future: Send + 'static,
Error: From<B::Error>,
{
let ctx = CoreContext::test_mock();
let blobstore = blobstore.into_future().map_err(|err| err.into());
let fut =
future::lazy(move || blobstore.and_then(|blobstore| blobstore.get("missing".to_string())));
let fut = future::lazy(move || {
blobstore.and_then(|blobstore| blobstore.get(ctx, "missing".to_string()))
});
let mut runtime = Runtime::new().expect("runtime creation failed");
let out = runtime.block_on(fut).expect("get failed");
@ -91,6 +100,7 @@ where
B::Future: Send + 'static,
Error: From<B::Error>,
{
let ctx = CoreContext::test_mock();
let blobstore = Box::new(blobstore.into_future().map_err(|err| err.into()));
let foo = "foo".to_string();
@ -98,8 +108,12 @@ where
let fut = future::lazy(|| {
blobstore.and_then(|blobstore| {
blobstore
.put(foo.clone(), BlobstoreBytes::from_bytes(&b"bar"[..]))
.and_then(move |_| blobstore.get(foo))
.put(
ctx.clone(),
foo.clone(),
BlobstoreBytes::from_bytes(&b"bar"[..]),
)
.and_then(move |_| blobstore.get(ctx, foo))
})
});
let mut runtime = Runtime::new().expect("runtime creation failed");

View File

@ -9,6 +9,7 @@ use std::collections::{btree_map, BTreeMap, HashMap};
use failure::Error;
use futures::{future, stream, Future, Stream};
use context::CoreContext;
use mercurial_types::{Entry, HgEntryId, Type, manifest::Content};
use mononoke_types::{FileType, MPathElement};
@ -72,11 +73,14 @@ impl CompositeEntry {
self.trees.contains_key(hash)
}
pub fn manifest(&self) -> impl Future<Item = CompositeManifest, Error = Error> + Send {
pub fn manifest(
&self,
ctx: CoreContext,
) -> impl Future<Item = CompositeManifest, Error = Error> + Send {
// Manifests can only exist for tree entries. If self.trees is empty then an empty
// composite manifest will be returned. This is by design.
let mf_futs = self.trees.values().map(|entry| {
entry.get_content().map({
entry.get_content(ctx.clone()).map({
move |content| match content {
Content::Tree(mf) => mf,
_other => unreachable!("tree content must be a manifest"),

View File

@ -13,6 +13,7 @@ use failure::Error;
use futures::{stream, Future, Stream, future::{self, Either}};
use itertools::{EitherOrBoth, Itertools};
use context::CoreContext;
use futures_ext::{select_all, BoxStream, StreamExt};
use mercurial_types::{Entry, HgEntryId, Manifest, Type};
use mercurial_types::manifest::{Content, EmptyManifest};
@ -25,6 +26,7 @@ use composite::CompositeEntry;
///
/// Items may be returned in arbitrary order.
pub fn bonsai_diff(
ctx: CoreContext,
root_entry: Box<Entry + Sync>,
p1_entry: Option<Box<Entry + Sync>>,
p2_entry: Option<Box<Entry + Sync>>,
@ -37,7 +39,7 @@ pub fn bonsai_diff(
composite_entry.add_parent(entry);
}
WorkingEntry::new(root_entry).bonsai_diff_tree(None, composite_entry)
WorkingEntry::new(root_entry).bonsai_diff_tree(ctx, None, composite_entry)
}
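
A usage sketch matching the signature above: collect the diff results and inspect them. The root and parent entries are assumed to come from manifests fetched elsewhere; only the call shape is taken from this diff.

// Sketch: `root` and `p1` are root manifest entries obtained elsewhere.
fn diff_example(ctx: CoreContext, root: Box<Entry + Sync>, p1: Box<Entry + Sync>) {
    let results: Vec<BonsaiDiffResult> = bonsai_diff(ctx, root, Some(p1), None)
        .collect()
        .wait()
        .expect("computing diff failed");
    // Results arrive in arbitrary order, as documented above.
    for result in &results {
        println!("{:?}", result);
    }
}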
#[derive(Clone, Debug, Eq, PartialEq)]
@ -146,10 +148,13 @@ impl WorkingEntry {
}
#[inline]
fn manifest(&self) -> impl Future<Item = Box<Manifest + Sync + 'static>, Error = Error> + Send {
fn manifest(
&self,
ctx: CoreContext,
) -> impl Future<Item = Box<Manifest + Sync + 'static>, Error = Error> + Send {
match self {
WorkingEntry::Tree(entry) => {
Either::A(entry.get_content().map(|content| match content {
Either::A(entry.get_content(ctx).map(|content| match content {
Content::Tree(mf) => mf,
_ => unreachable!("tree entries can only return manifests"),
}))
@ -161,6 +166,7 @@ impl WorkingEntry {
/// The path here corresponds to the path associated with this working entry.
fn bonsai_diff(
self,
ctx: CoreContext,
path: MPath,
composite_entry: CompositeEntry,
) -> impl Stream<Item = BonsaiDiffResult, Error = Error> + Send {
@ -171,7 +177,7 @@ impl WorkingEntry {
stream::empty().boxify()
}
Some(BonsaiDiffResult::Deleted(..)) | None => {
self.bonsai_diff_tree(Some(path), composite_entry)
self.bonsai_diff_tree(ctx, Some(path), composite_entry)
}
};
let file_stream = stream::iter_ok(file_result);
@ -227,6 +233,7 @@ impl WorkingEntry {
/// methods.
fn bonsai_diff_tree(
self,
ctx: CoreContext,
path: Option<MPath>,
composite_entry: CompositeEntry,
) -> BoxStream<BonsaiDiffResult, Error> {
@ -248,9 +255,9 @@ impl WorkingEntry {
}
}
let working_mf_fut = self.manifest();
let working_mf_fut = self.manifest(ctx.clone());
composite_entry
.manifest()
.manifest(ctx.clone())
.join(working_mf_fut)
.map(move |(composite_mf, working_mf)| {
let sub_streams = composite_mf
@ -261,12 +268,12 @@ impl WorkingEntry {
.expect("manifest entries should have names");
cname.cmp(wname)
})
.map(|entry_pair| {
.map(move |entry_pair| {
match entry_pair {
EitherOrBoth::Left((name, centry)) => {
// This entry was removed from the working set.
let sub_path = MPath::join_opt_element(path.as_ref(), &name);
WorkingEntry::absent().bonsai_diff(sub_path, centry)
WorkingEntry::absent().bonsai_diff(ctx.clone(), sub_path, centry)
}
EitherOrBoth::Right(wentry) => {
// This entry was added to the working set.
@ -276,14 +283,17 @@ impl WorkingEntry {
.expect("manifest entries should have names");
MPath::join_opt_element(path.as_ref(), name)
};
WorkingEntry::new(wentry)
.bonsai_diff(sub_path, CompositeEntry::new())
WorkingEntry::new(wentry).bonsai_diff(
ctx.clone(),
sub_path,
CompositeEntry::new(),
)
}
EitherOrBoth::Both((name, centry), wentry) => {
// This entry is present in both the working set and at least one of
// the parents.
let sub_path = MPath::join_opt_element(path.as_ref(), &name);
WorkingEntry::new(wentry).bonsai_diff(sub_path, centry)
WorkingEntry::new(wentry).bonsai_diff(ctx.clone(), sub_path, centry)
}
}
});

View File

@ -12,6 +12,7 @@ extern crate failure_ext as failure;
extern crate futures;
extern crate itertools;
extern crate context;
extern crate futures_ext;
extern crate mercurial_types;
extern crate mononoke_types;

View File

@ -13,6 +13,7 @@ extern crate pretty_assertions;
extern crate async_unit;
extern crate bonsai_utils;
extern crate context;
extern crate mercurial_types;
extern crate mercurial_types_mocks;
extern crate mononoke_types;
@ -24,6 +25,7 @@ use futures::{Future, Stream};
use async_unit::tokio_unit_test;
use bonsai_utils::{bonsai_diff, BonsaiDiffResult};
use context::CoreContext;
use mercurial_types::{Entry, HgEntryId};
use mercurial_types_mocks::manifest::{MockEntry, MockManifest};
use mercurial_types_mocks::nodehash::*;
@ -34,10 +36,11 @@ use fixtures::ManifestFixture;
#[test]
fn diff_basic() {
tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let parent_entry = root_entry(&fixtures::BASIC1);
let working_entry = root_entry(&fixtures::BASIC2);
let diff = compute_diff(working_entry, Some(parent_entry), None);
let diff = compute_diff(ctx.clone(), working_entry, Some(parent_entry), None);
let expected_diff = vec![
deleted("dir1/file-to-dir"),
// dir1/file-to-dir/foobar *is* a result, because it has changed and its parent is
@ -58,7 +61,7 @@ fn diff_basic() {
let parent2 = root_entry(&fixtures::BASIC1);
let working_entry = root_entry(&fixtures::BASIC2);
let diff = compute_diff(working_entry, Some(parent1), Some(parent2));
let diff = compute_diff(ctx.clone(), working_entry, Some(parent1), Some(parent2));
assert_eq!(diff, expected_diff);
})
}
@ -66,10 +69,11 @@ fn diff_basic() {
#[test]
fn diff_truncate() {
tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let parent_entry = root_entry(&fixtures::TRUNCATE1);
let working_entry = root_entry(&fixtures::TRUNCATE2);
let diff = bonsai_diff(working_entry, Some(parent_entry), None);
let diff = bonsai_diff(ctx, working_entry, Some(parent_entry), None);
let paths = diff.collect().wait().expect("computing diff failed");
assert_eq!(paths, vec![]);
})
@ -78,11 +82,12 @@ fn diff_truncate() {
#[test]
fn diff_merge1() {
tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let parent1 = root_entry(&fixtures::BASIC1);
let parent2 = root_entry(&fixtures::BASIC2);
let working_entry = root_entry(&fixtures::BASIC2);
let diff = compute_diff(working_entry, Some(parent1), Some(parent2));
let diff = compute_diff(ctx.clone(), working_entry, Some(parent1), Some(parent2));
// Compare this result to expected_diff in diff_basic.
let expected_diff = vec![
@ -114,11 +119,12 @@ fn root_entry(mf: &ManifestFixture) -> Box<Entry + Sync> {
}
fn compute_diff(
ctx: CoreContext,
working_entry: Box<Entry + Sync>,
p1_entry: Option<Box<Entry + Sync>>,
p2_entry: Option<Box<Entry + Sync>>,
) -> Vec<BonsaiDiffResult> {
let diff_stream = bonsai_diff(working_entry, p1_entry, p2_entry);
let diff_stream = bonsai_diff(ctx, working_entry, p1_entry, p2_entry);
let mut paths = diff_stream.collect().wait().expect("computing diff failed");
paths.sort_unstable();

View File

@ -9,6 +9,7 @@ use std::mem;
use std::sync::Arc;
use bytes::Bytes;
use context::CoreContext;
use failure::Compat;
use futures::{Future, IntoFuture, Stream};
use futures::future::Shared;
@ -60,7 +61,7 @@ impl UploadableHgBlob for Filelog {
Shared<BoxFuture<(HgBlobEntry, RepoPath), Compat<Error>>>,
);
fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> {
fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> {
let node_key = self.node_key;
let path = match &node_key.path {
RepoPath::FilePath(path) => path.clone(),
@ -83,7 +84,7 @@ impl UploadableHgBlob for Filelog {
path,
};
let (cbinfo, fut) = upload.upload(repo)?;
let (cbinfo, fut) = upload.upload(ctx, repo)?;
Ok((
node_key,
(cbinfo, fut.map_err(Error::compat).boxify().shared()),
@ -91,7 +92,11 @@ impl UploadableHgBlob for Filelog {
}
}
pub fn convert_to_revlog_filelog<S>(repo: Arc<BlobRepo>, deltaed: S) -> BoxStream<Filelog, Error>
pub fn convert_to_revlog_filelog<S>(
ctx: CoreContext,
repo: Arc<BlobRepo>,
deltaed: S,
) -> BoxStream<Filelog, Error>
where
S: Stream<Item = FilelogDeltaed, Error = Error> + Send + 'static,
{
@ -109,15 +114,15 @@ where
} = chunk;
delta_cache
.decode(node.clone(), base.into_option(), delta)
.decode(ctx.clone(), node.clone(), base.into_option(), delta)
.and_then({
cloned!(node, path, repo);
cloned!(ctx, node, path, repo);
move |data| {
parse_rev_flags(flags_value)
.into_future()
.and_then(move |flags| {
get_filelog_data(repo, data, flags).map(move |file_log_data| {
Filelog {
get_filelog_data(ctx.clone(), repo, data, flags).map(
move |file_log_data| Filelog {
node_key: HgNodeKey {
path: RepoPath::FilePath(path),
hash: node,
@ -127,8 +132,8 @@ where
linknode,
data: file_log_data,
flags,
}
})
},
)
})
}
})
@ -145,6 +150,7 @@ where
}
fn generate_lfs_meta_data(
ctx: CoreContext,
repo: Arc<BlobRepo>,
data: Bytes,
) -> impl Future<Item = ContentBlobMeta, Error = Error> {
@ -154,7 +160,7 @@ fn generate_lfs_meta_data(
.into_future()
.and_then(move |lfs_content| {
(
repo.get_file_content_id_by_alias(lfs_content.oid()),
repo.get_file_content_id_by_alias(ctx, lfs_content.oid()),
Ok(lfs_content.copy_from()),
)
})
@ -165,12 +171,13 @@ fn generate_lfs_meta_data(
}
fn get_filelog_data(
ctx: CoreContext,
repo: Arc<BlobRepo>,
data: Bytes,
flags: RevFlags,
) -> impl Future<Item = FilelogData, Error = Error> {
if flags.contains(RevFlags::REVIDX_EXTSTORED) {
generate_lfs_meta_data(repo, data)
generate_lfs_meta_data(ctx, repo, data)
.map(|cbmeta| FilelogData::LfsMetaData(cbmeta))
.left_future()
} else {
@ -193,6 +200,7 @@ impl DeltaCache {
fn decode(
&mut self,
ctx: CoreContext,
node: HgNodeHash,
base: Option<HgNodeHash>,
delta: Delta,
@ -225,7 +233,7 @@ impl DeltaCache {
})
.boxify(),
None => self.repo
.get_raw_hg_content(&base)
.get_raw_hg_content(ctx, &base)
.and_then(move |blob| {
let bytes = blob.into_inner();
delta::apply(bytes.as_ref(), &delta)
@ -365,12 +373,13 @@ mod tests {
}
}
fn check_conversion<I, J>(inp: I, exp: J)
fn check_conversion<I, J>(ctx: CoreContext, inp: I, exp: J)
where
I: IntoIterator<Item = FilelogDeltaed>,
J: IntoIterator<Item = Filelog>,
{
let result = convert_to_revlog_filelog(
ctx,
Arc::new(BlobRepo::new_memblob_empty(None, None).unwrap()),
iter_ok(inp.into_iter().collect::<Vec<_>>()),
).collect()
@ -456,6 +465,7 @@ mod tests {
#[test]
fn two_fulltext_files() {
let ctx = CoreContext::test_mock();
let f1 = Filelog {
node_key: HgNodeKey {
path: RepoPath::FilePath(MPath::new(b"test").unwrap()),
@ -481,12 +491,13 @@ mod tests {
};
check_conversion(
ctx,
vec![filelog_to_deltaed(&f1), filelog_to_deltaed(&f2)],
vec![f1, f2],
);
}
fn files_check_order(correct_order: bool) {
fn files_check_order(ctx: CoreContext, correct_order: bool) {
let f1 = Filelog {
node_key: HgNodeKey {
path: RepoPath::FilePath(MPath::new(b"test").unwrap()),
@ -524,6 +535,7 @@ mod tests {
};
let result = convert_to_revlog_filelog(
ctx,
Arc::new(BlobRepo::new_memblob_empty(None, None).unwrap()),
iter_ok(inp),
).collect()
@ -543,12 +555,12 @@ mod tests {
#[test]
fn files_order_correct() {
files_check_order(true);
files_check_order(CoreContext::test_mock(), true);
}
#[test]
fn files_order_incorrect() {
files_check_order(false);
files_check_order(CoreContext::test_mock(), false);
}
quickcheck! {
@ -558,7 +570,9 @@ mod tests {
}
fn correct_conversion_single(f: Filelog) -> bool {
let ctx = CoreContext::test_mock();
check_conversion(
ctx,
vec![filelog_to_deltaed(&f)],
vec![f],
);
@ -567,6 +581,7 @@ mod tests {
}
fn correct_conversion_delta_against_first(f: Filelog, fs: Vec<Filelog>) -> bool {
let ctx = CoreContext::test_mock();
let mut hash_gen = NodeHashGen::new();
let mut f = f.clone();
@ -586,12 +601,13 @@ mod tests {
deltas.push(delta);
}
check_conversion(deltas, vec![f].into_iter().chain(fs));
check_conversion(ctx, deltas, vec![f].into_iter().chain(fs));
true
}
fn correct_conversion_delta_against_next(fs: Vec<Filelog>) -> bool {
let ctx = CoreContext::test_mock();
let mut hash_gen = NodeHashGen::new();
let mut fs = fs.clone();
@ -617,7 +633,7 @@ mod tests {
deltas
};
check_conversion(deltas, fs);
check_conversion(ctx, deltas, fs);
true
}

View File

@ -89,10 +89,10 @@ pub fn create_getbundle_response(
.get_hg_from_bonsai_changeset(ctx.clone(), bonsai)
.map(|cs| cs.into_nodehash())
.and_then({
cloned!(blobrepo);
cloned!(ctx, blobrepo);
move |node| {
blobrepo
.get_changeset_by_changesetid(&HgChangesetId::new(node))
.get_changeset_by_changesetid(ctx, &HgChangesetId::new(node))
.map(move |cs| (node, cs))
}
})

View File

@ -231,8 +231,8 @@ fn fetch_bonsai_changesets(
move |bcs_cs| bcs_cs.ok_or(ErrorKind::BonsaiNotFoundForHgChangeset(hg_cs).into())
})
.and_then({
cloned!(repo);
move |bcs_id| repo.get_bonsai_changeset(bcs_id).from_err()
cloned!(ctx, repo);
move |bcs_id| repo.get_bonsai_changeset(ctx, bcs_id).from_err()
})
.with_context(move |_| format!("While fetching initial bonsai changesets"))
.map_err(Error::from)
@ -285,7 +285,7 @@ fn find_closest_root(
roots: Vec<ChangesetId>,
) -> impl Future<Item = ChangesetId, Error = PushrebaseError> {
let roots: HashSet<_> = roots.into_iter().collect();
get_bookmark_value(ctx, repo, &bookmark)
get_bookmark_value(ctx.clone(), repo, &bookmark)
.from_err()
.and_then({
cloned!(repo);
@ -306,7 +306,7 @@ fn find_closest_root(
if roots.contains(&id) {
ok(Loop::Break(id)).left_future()
} else {
repo.get_bonsai_changeset(id)
repo.get_bonsai_changeset(ctx.clone(), id)
.map(move |bcs| {
queue.extend(bcs.parents());
Loop::Continue((queue, depth + 1))
@ -329,12 +329,12 @@ fn find_changed_files_between_manfiests(
descendant: ChangesetId,
) -> impl Future<Item = Vec<MPath>, Error = PushrebaseError> {
let id_to_manifest = {
cloned!(repo);
cloned!(ctx, repo);
move |bcs_id| {
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.and_then({
cloned!(repo);
move |cs_id| repo.get_changeset_by_changesetid(&cs_id)
cloned!(ctx, repo);
move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id)
})
.map({
cloned!(repo);
@ -345,14 +345,17 @@ fn find_changed_files_between_manfiests(
(id_to_manifest(descendant), id_to_manifest(ancestor))
.into_future()
.and_then(|(d_mf, a_mf)| {
bonsai_diff(d_mf, Some(a_mf), None)
.map(|diff| match diff {
BonsaiDiffResult::Changed(path, ..)
| BonsaiDiffResult::ChangedReusedId(path, ..)
| BonsaiDiffResult::Deleted(path) => path,
})
.collect()
.and_then({
cloned!(ctx);
move |(d_mf, a_mf)| {
bonsai_diff(ctx, d_mf, Some(a_mf), None)
.map(|diff| match diff {
BonsaiDiffResult::Changed(path, ..)
| BonsaiDiffResult::ChangedReusedId(path, ..)
| BonsaiDiffResult::Deleted(path) => path,
})
.collect()
}
})
.from_err()
}
@ -365,8 +368,8 @@ fn fetch_bonsai_range(
descendant: ChangesetId,
) -> impl Future<Item = Vec<BonsaiChangeset>, Error = PushrebaseError> {
cloned!(repo);
RangeNodeStream::new(ctx, &repo, ancestor, descendant)
.map(move |id| repo.get_bonsai_changeset(id))
RangeNodeStream::new(ctx.clone(), &repo, ancestor, descendant)
.map(move |id| repo.get_bonsai_changeset(ctx.clone(), id))
.buffered(100)
.collect()
.from_err()
@ -382,9 +385,9 @@ fn find_changed_files(
cloned!(repo);
RangeNodeStream::new(ctx.clone(), &repo, ancestor, descendant)
.map({
cloned!(repo);
cloned!(ctx, repo);
move |bcs_id| {
repo.get_bonsai_changeset(bcs_id)
repo.get_bonsai_changeset(ctx.clone(), bcs_id)
.map(move |bcs| (bcs_id, bcs))
}
})
@ -602,10 +605,10 @@ fn find_rebased_set(
root: ChangesetId,
head: ChangesetId,
) -> impl Future<Item = Vec<BonsaiChangeset>, Error = PushrebaseError> {
RangeNodeStream::new(ctx, &repo, root, head)
RangeNodeStream::new(ctx.clone(), &repo, root, head)
.map({
cloned!(repo);
move |bcs_id| repo.get_bonsai_changeset(bcs_id)
move |bcs_id| repo.get_bonsai_changeset(ctx.clone(), bcs_id)
})
.buffered(100)
.collect()
@ -679,7 +682,11 @@ mod tests {
ctx.clone(),
repo.clone(),
parents,
store_files(btreemap!{"file" => Some("content")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"file" => Some("content")},
repo.clone(),
),
);
let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()
@ -714,13 +721,21 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![p],
store_files(btreemap!{"file" => Some("content")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"file" => Some("content")},
repo.clone(),
),
);
let bcs_id_2 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_1],
store_files(btreemap!{"file2" => Some("content")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"file2" => Some("content")},
repo.clone(),
),
);
assert_eq!(
@ -770,10 +785,15 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![p],
store_files(btreemap!{"file" => Some("content")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"file" => Some("content")},
repo.clone(),
),
);
let rename = store_rename(
ctx.clone(),
(MPath::new("file").unwrap(), bcs_id_1),
"file_renamed",
"content",
@ -855,19 +875,23 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![root0],
store_files(btreemap!{"f0" => Some("f0"), "files" => None}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"f0" => Some("f0"), "files" => None},
repo.clone(),
),
);
let bcs_id_2 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_1, root1],
store_files(btreemap!{"f1" => Some("f1")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"f1" => Some("f1")}, repo.clone()),
);
let bcs_id_3 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_2],
store_files(btreemap!{"f2" => Some("f2")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"f2" => Some("f2")}, repo.clone()),
);
let book = Bookmark::new("master").unwrap();
@ -961,19 +985,23 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![root],
store_files(btreemap!{"f0" => Some("f0")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"f0" => Some("f0")}, repo.clone()),
);
let bcs_id_2 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_1],
store_files(btreemap!{"9/file" => Some("file")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{"9/file" => Some("file")},
repo.clone(),
),
);
let bcs_id_3 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_2],
store_files(btreemap!{"f1" => Some("f1")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"f1" => Some("f1")}, repo.clone()),
);
let book = Bookmark::new("master").unwrap();
@ -1033,19 +1061,19 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![root],
store_files(btreemap!{"FILE" => Some("file")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"FILE" => Some("file")}, repo.clone()),
);
let bcs_id_2 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_1],
store_files(btreemap!{"FILE" => None}, repo.clone()),
store_files(ctx.clone(), btreemap!{"FILE" => None}, repo.clone()),
);
let bcs_id_3 = create_commit(
ctx.clone(),
repo.clone(),
vec![bcs_id_2],
store_files(btreemap!{"file" => Some("file")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"file" => Some("file")}, repo.clone()),
);
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id_1)
@ -1090,6 +1118,7 @@ mod tests {
repo.clone(),
vec![root],
store_files(
ctx.clone(),
btreemap!{"DIR/a" => Some("a"), "DIR/b" => Some("b")},
repo.clone(),
),
@ -1099,6 +1128,7 @@ mod tests {
repo.clone(),
vec![bcs_id_1],
store_files(
ctx.clone(),
btreemap!{"dir/a" => Some("a"), "DIR/a" => None, "DIR/b" => None},
repo.clone(),
),
@ -1148,6 +1178,7 @@ mod tests {
repo.clone(),
vec![head],
store_files(
ctx.clone(),
btreemap!{file.as_ref() => Some(content.as_ref())},
repo.clone(),
),
@ -1183,7 +1214,7 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![root],
store_files(btreemap!{"file" => Some("data")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"file" => Some("data")}, repo.clone()),
);
let hgcss = vec![
repo_arc
@ -1235,7 +1266,7 @@ mod tests {
ctx.clone(),
repo.clone(),
vec![root],
store_files(btreemap!{"file" => Some("data")}, repo.clone()),
store_files(ctx.clone(), btreemap!{"file" => Some("data")}, repo.clone()),
);
let hgcss = vec![
repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs)
@ -1272,15 +1303,16 @@ mod tests {
rewritedates: true,
..Default::default()
};
let bcs_rewrite_date = do_pushrebase(ctx, Arc::new(repo.clone()), config, book, hgcss)
.wait()
.expect("push-rebase failed");
let bcs_rewrite_date =
do_pushrebase(ctx.clone(), Arc::new(repo.clone()), config, book, hgcss)
.wait()
.expect("push-rebase failed");
let bcs = repo.get_bonsai_changeset(bcs).wait().unwrap();
let bcs_keep_date = repo.get_bonsai_changeset(bcs_keep_date.head)
let bcs = repo.get_bonsai_changeset(ctx.clone(), bcs).wait().unwrap();
let bcs_keep_date = repo.get_bonsai_changeset(ctx.clone(), bcs_keep_date.head)
.wait()
.unwrap();
let bcs_rewrite_date = repo.get_bonsai_changeset(bcs_rewrite_date.head)
let bcs_rewrite_date = repo.get_bonsai_changeset(ctx.clone(), bcs_rewrite_date.head)
.wait()
.unwrap();
@ -1306,6 +1338,7 @@ mod tests {
repo.clone(),
vec![root],
store_files(
ctx.clone(),
btreemap!{"Dir1/file_1_in_dir1" => Some("data")},
repo.clone(),
),
@ -1372,8 +1405,10 @@ mod tests {
let root_hg =
&HgChangesetId::from_str("2d7d4ba9ce0a6ffd222de7785b249ead9c51c536").unwrap();
let root_cs = repo.get_changeset_by_changesetid(&root_hg).wait().unwrap();
let root_1_id = repo.find_file_in_manifest(&path_1, *root_cs.manifestid())
let root_cs = repo.get_changeset_by_changesetid(ctx.clone(), &root_hg)
.wait()
.unwrap();
let root_1_id = repo.find_file_in_manifest(ctx.clone(), &path_1, *root_cs.manifestid())
.wait()
.unwrap()
.unwrap();
@ -1383,7 +1418,7 @@ mod tests {
.wait()
.unwrap()
.unwrap();
let root_bcs = repo.get_bonsai_changeset(root).wait().unwrap();
let root_bcs = repo.get_bonsai_changeset(ctx.clone(), root).wait().unwrap();
let file_1 = root_bcs
.file_changes()
.find(|(path, _)| path == &&path_1)
@ -1427,7 +1462,9 @@ mod tests {
hgcss,
).wait()
.expect("pushrebase failed");
let result_bcs = repo.get_bonsai_changeset(result.head).wait().unwrap();
let result_bcs = repo.get_bonsai_changeset(ctx.clone(), result.head)
.wait()
.unwrap();
let file_1_result = result_bcs
.file_changes()
.find(|(path, _)| path == &&path_1)
@ -1436,16 +1473,17 @@ mod tests {
.unwrap();
assert_eq!(file_1_result, &file_1_exec);
let result_hg = repo.get_hg_from_bonsai_changeset(ctx, result.head)
let result_hg = repo.get_hg_from_bonsai_changeset(ctx.clone(), result.head)
.wait()
.unwrap();
let result_cs = repo.get_changeset_by_changesetid(&result_hg)
let result_cs = repo.get_changeset_by_changesetid(ctx.clone(), &result_hg)
.wait()
.unwrap();
let result_1_id = repo.find_file_in_manifest(&path_1, *result_cs.manifestid())
.wait()
.unwrap()
.unwrap();
let result_1_id =
repo.find_file_in_manifest(ctx.clone(), &path_1, *result_cs.manifestid())
.wait()
.unwrap()
.unwrap();
// `result_1_id` should be equal to `root_1_id`, because executable flag
// is not a part of file envelope
@ -1482,7 +1520,11 @@ mod tests {
ctx.clone(),
repo.clone(),
parents.clone(),
store_files(btreemap!{ f.as_ref() => Some("content")}, repo.clone()),
store_files(
ctx.clone(),
btreemap!{ f.as_ref() => Some("content")},
repo.clone(),
),
);
let hg_cs = repo.get_hg_from_bonsai_changeset(ctx.clone(), bcs_id)
.wait()

View File

@ -81,7 +81,7 @@ fn resolve_push(
bundle2: BoxStream<Bundle2Item, Error>,
) -> BoxFuture<Bytes, Error> {
resolver
.maybe_resolve_changegroup(bundle2)
.maybe_resolve_changegroup(ctx.clone(), bundle2)
.and_then({
cloned!(resolver);
move |(cg_push, bundle2)| {
@ -103,11 +103,11 @@ fn resolve_push(
}
})
.and_then({
cloned!(resolver);
cloned!(ctx, resolver);
move |(cg_push, bookmark_push, bundle2)| {
if let Some(cg_push) = cg_push {
resolver
.resolve_b2xtreegroup2(bundle2)
.resolve_b2xtreegroup2(ctx, bundle2)
.map(|(manifests, bundle2)| {
(Some((cg_push, manifests)), bookmark_push, bundle2)
})
@ -201,18 +201,18 @@ fn resolve_pushrebase(
resolver
.maybe_resolve_pushvars(bundle2)
.and_then({
cloned!(resolver);
cloned!(ctx, resolver);
move |(maybe_pushvars, bundle2)| {
resolver
.resolve_b2xtreegroup2(bundle2)
.resolve_b2xtreegroup2(ctx, bundle2)
.map(move |(manifests, bundle2)| (manifests, maybe_pushvars, bundle2))
}
})
.and_then({
cloned!(resolver);
cloned!(ctx, resolver);
move |(manifests, maybe_pushvars, bundle2)| {
resolver
.maybe_resolve_changegroup(bundle2)
.maybe_resolve_changegroup(ctx, bundle2)
.map(move |(cg_push, bundle2)| (cg_push, manifests, maybe_pushvars, bundle2))
}
})
@ -270,7 +270,7 @@ fn resolve_pushrebase(
cloned!(ctx, resolver);
move |(changesets, bookmark_pushes, maybe_pushvars, onto)| {
resolver
.run_hooks(changesets.clone(), maybe_pushvars, &onto)
.run_hooks(ctx.clone(), changesets.clone(), maybe_pushvars, &onto)
.map_err(|err| match err {
RunHooksError::Failures((cs_hook_failures, file_hook_failures)) => {
let mut err_msgs = vec![];
@ -483,6 +483,7 @@ impl Bundle2Resolver {
/// their upload should be used for uploading changesets
fn maybe_resolve_changegroup(
&self,
ctx: CoreContext,
bundle2: BoxStream<Bundle2Item, Error>,
) -> BoxFuture<(Option<ChangegroupPush>, BoxStream<Bundle2Item, Error>), Error> {
let repo = self.repo.clone();
@ -498,10 +499,11 @@ impl Bundle2Resolver {
let (c, f) = split_changegroup(parts);
convert_to_revlog_changesets(c)
.collect()
.and_then(|changesets| {
.and_then(move |changesets| {
upload_hg_blobs(
ctx.clone(),
repo.clone(),
convert_to_revlog_filelog(repo, f),
convert_to_revlog_filelog(ctx.clone(), repo, f),
UploadBlobsType::EnsureNoDuplicates,
).map(move |upload_map| {
let mut filelogs = HashMap::new();
@ -591,6 +593,7 @@ impl Bundle2Resolver {
/// their upload as well as their parsed content should be used for uploading changesets.
fn resolve_b2xtreegroup2(
&self,
ctx: CoreContext,
bundle2: BoxStream<Bundle2Item, Error>,
) -> BoxFuture<(Manifests, BoxStream<Bundle2Item, Error>), Error> {
let repo = self.repo.clone();
@ -600,6 +603,7 @@ impl Bundle2Resolver {
Some(Bundle2Item::B2xTreegroup2(_, parts))
| Some(Bundle2Item::B2xRebasePack(_, parts)) => {
upload_hg_blobs(
ctx,
repo,
TreemanifestBundle2Parser::new(parts),
UploadBlobsType::IgnoreDuplicates,
@ -943,6 +947,7 @@ impl Bundle2Resolver {
fn run_hooks(
&self,
ctx: CoreContext,
changesets: Changesets,
pushvars: Option<HashMap<String, Bytes>>,
onto_bookmark: &Bookmark,
@ -953,11 +958,13 @@ impl Bundle2Resolver {
futs.push(
self.hook_manager
.run_changeset_hooks_for_bookmark(
ctx.clone(),
hg_cs_id.clone(),
onto_bookmark,
pushvars.clone(),
)
.join(self.hook_manager.run_file_hooks_for_bookmark(
ctx.clone(),
hg_cs_id,
onto_bookmark,
pushvars.clone(),

View File

@ -11,6 +11,7 @@ use futures::Stream;
use futures_ext::{BoxFuture, FutureExt};
use blobrepo::BlobRepo;
use context::CoreContext;
use mercurial_types::HgNodeKey;
use errors::*;
@ -19,14 +20,14 @@ use errors::*;
pub trait UploadableHgBlob {
type Value: Send + 'static;
fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>;
fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>;
}
/// Represents data that is Thrift-encoded and can be uploaded to the blobstore.
pub trait UploadableBlob {
type Value: Send + 'static;
fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>;
fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)>;
}
#[derive(PartialEq, Eq)]
@ -37,6 +38,7 @@ pub enum UploadBlobsType {
use self::UploadBlobsType::*;
pub fn upload_hg_blobs<S, B>(
ctx: CoreContext,
repo: Arc<BlobRepo>,
blobs: S,
ubtype: UploadBlobsType,
@ -47,7 +49,7 @@ where
{
blobs
.fold(HashMap::new(), move |mut map, item| {
let (key, value) = item.upload(&repo)?;
let (key, value) = item.upload(ctx.clone(), &repo)?;
ensure_msg!(
map.insert(key.clone(), value).is_none() || ubtype == IgnoreDuplicates,
"HgBlob {:?} already provided before",

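A hedged sketch of driving an upload with the new entry point; the stream bounds and the shape of the returned map are assumptions based on the fold above, not signatures confirmed by this hunk.

// Sketch: the bounds on S and B and the return handling are assumptions.
fn upload_example<S, B>(ctx: CoreContext, repo: Arc<BlobRepo>, blobs: S)
where
    S: Stream<Item = B, Error = Error> + Send + 'static,
    B: UploadableHgBlob,
{
    let fut = upload_hg_blobs(ctx, repo, blobs, UploadBlobsType::EnsureNoDuplicates);
    // Resolves to a map from HgNodeKey to each blob's upload value.
    let _uploaded = fut.wait();
}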
View File

@ -8,6 +8,7 @@ use std::fmt::Debug;
use std::mem;
use bytes::Bytes;
use context::CoreContext;
use failure::Compat;
use futures::{Future, Poll, Stream};
use futures::future::Shared;
@ -91,7 +92,7 @@ impl UploadableHgBlob for TreemanifestEntry {
Shared<BoxFuture<(HgBlobEntry, RepoPath), Compat<Error>>>,
);
fn upload(self, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> {
fn upload(self, ctx: CoreContext, repo: &BlobRepo) -> Result<(HgNodeKey, Self::Value)> {
let node_key = self.node_key;
let manifest_content = self.manifest_content;
let p1 = self.p1;
@ -110,7 +111,7 @@ impl UploadableHgBlob for TreemanifestEntry {
p2: self.p2,
path: node_key.path.clone(),
};
upload.upload(repo).map(move |(_node, value)| {
upload.upload(ctx, repo).map(move |(_node, value)| {
(
node_key,
(

View File

@ -55,7 +55,7 @@ fn blobstore_and_filenodes_warmup(
) -> BoxFuture<(), Error> {
// TODO(stash): Arbitrary number. Tweak somehow?
let buffer_size = 100;
repo.get_changeset_by_changesetid(&revision)
repo.get_changeset_by_changesetid(ctx.clone(), &revision)
.map({
let repo = repo.clone();
move |cs| repo.get_root_entry(&cs.manifestid())
@ -65,7 +65,7 @@ fn blobstore_and_filenodes_warmup(
info!(logger, "starting precaching");
let rootpath = None;
let mut i = 0;
recursive_entry_stream(rootpath, root_entry)
recursive_entry_stream(ctx.clone(), rootpath, root_entry)
.filter(|&(ref _path, ref entry)| entry.get_type() == Type::Tree)
.map(move |(path, entry)| {
let hash = entry.get_hash();

View File

@ -23,6 +23,7 @@ use cachelib;
use slog_glog_fmt::default_drain as glog_drain;
use changesets::{SqlChangesets, SqlConstructors};
use context::CoreContext;
use hooks::HookManager;
use mercurial_types::RepositoryId;
use metaconfig::{ManifoldArgs, RepoConfigs, RepoReadOnly, RepoType};
@ -246,14 +247,22 @@ pub fn open_sql_changesets(matches: &ArgMatches) -> Result<SqlChangesets> {
/// Create a new `MononokeRepo` -- for local instances, expect its contents to be empty.
#[inline]
pub fn create_repo<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result<MononokeRepo> {
open_repo_internal(logger, matches, true)
pub fn create_repo<'a>(
ctx: CoreContext,
logger: &Logger,
matches: &ArgMatches<'a>,
) -> Result<MononokeRepo> {
open_repo_internal(ctx, logger, matches, true)
}
/// Open an existing `BlobRepo` -- for local instances, expect contents to already be there.
#[inline]
pub fn open_repo<'a>(logger: &Logger, matches: &ArgMatches<'a>) -> Result<MononokeRepo> {
open_repo_internal(logger, matches, false)
pub fn open_repo<'a>(
ctx: CoreContext,
logger: &Logger,
matches: &ArgMatches<'a>,
) -> Result<MononokeRepo> {
open_repo_internal(ctx, logger, matches, false)
}
pub fn setup_repo_dir<P: AsRef<Path>>(data_dir: P, create: bool) -> Result<()> {
@ -451,6 +460,7 @@ fn find_repo_type<'a>(matches: &ArgMatches<'a>) -> Result<(String, RepoType)> {
}
fn open_repo_internal<'a>(
ctx: CoreContext,
logger: &Logger,
matches: &ArgMatches<'a>,
create: bool,
@ -481,7 +491,7 @@ fn open_repo_internal<'a>(
let blobrepo = open_blobrepo(logger.clone(), repotype.clone(), repo_id, myrouter_port)?;
let hook_manager =
HookManager::new_with_blobrepo(Default::default(), blobrepo.clone(), logger.clone());
HookManager::new_with_blobrepo(ctx, Default::default(), blobrepo.clone(), logger);
// TODO fixup imports
Ok(MononokeRepo::new(
blobrepo,

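With open_repo_internal now threading the context into HookManager::new_with_blobrepo, both public entry points grow a leading ctx argument and just forward it. A compressed sketch with stand-in types:

```rust
#[derive(Clone)]
struct CoreContext;
struct Logger;
struct MononokeRepo;

fn open_repo_internal(
    _ctx: CoreContext,
    _logger: &Logger,
    _create: bool,
) -> Result<MononokeRepo, String> {
    // ... open the blobrepo, then build the HookManager with the context ...
    Ok(MononokeRepo)
}

fn create_repo(ctx: CoreContext, logger: &Logger) -> Result<MononokeRepo, String> {
    open_repo_internal(ctx, logger, true)
}

fn open_repo(ctx: CoreContext, logger: &Logger) -> Result<MononokeRepo, String> {
    open_repo_internal(ctx, logger, false)
}

fn main() {
    let ctx = CoreContext;
    create_repo(ctx.clone(), &Logger).unwrap();
    open_repo(ctx, &Logger).unwrap();
}
```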
View File

@ -147,6 +147,7 @@ fn parse_changeset(revlog_repo: RevlogRepo, csid: HgChangesetId) -> ParseChanges
}
fn upload_entry(
ctx: CoreContext,
blobrepo: &BlobRepo,
entry: RevlogEntry,
path: Option<MPath>,
@ -183,7 +184,7 @@ fn upload_entry(
p2: p2.cloned(),
path: RepoPath::DirectoryPath(path),
};
let (_, upload_fut) = try_boxfuture!(upload.upload(&blobrepo));
let (_, upload_fut) = try_boxfuture!(upload.upload(ctx, &blobrepo));
upload_fut
}
Type::File(ft) => {
@ -195,7 +196,7 @@ fn upload_entry(
p2: p2.cloned(),
path,
};
let (_, upload_fut) = try_boxfuture!(upload.upload(&blobrepo));
let (_, upload_fut) = try_boxfuture!(upload.upload(ctx, &blobrepo));
upload_fut
}
}
@ -243,8 +244,7 @@ impl UploadChangesets {
changesets
.and_then({
let revlogrepo = revlogrepo.clone();
let blobrepo = blobrepo.clone();
cloned!(ctx, revlogrepo, blobrepo);
move |csid| {
let ParseChangeset {
revlogcs,
@ -253,7 +253,7 @@ impl UploadChangesets {
} = parse_changeset(revlogrepo.clone(), HgChangesetId::new(csid));
let rootmf = rootmf.map({
let blobrepo = blobrepo.clone();
cloned!(ctx, blobrepo);
move |rootmf| {
match rootmf {
None => future::ok(None).boxify(),
@ -271,7 +271,7 @@ impl UploadChangesets {
path: RepoPath::root(),
};
upload
.upload(&blobrepo)
.upload(ctx, &blobrepo)
.into_future()
.and_then(|(_, entry)| entry)
.map(Some)
@ -282,8 +282,8 @@ impl UploadChangesets {
});
let entries = entries.map({
let blobrepo = blobrepo.clone();
move |(path, entry)| upload_entry(&blobrepo, entry, path)
cloned!(ctx, blobrepo);
move |(path, entry)| upload_entry(ctx.clone(), &blobrepo, entry, path)
});
revlogcs

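This file also switches from hand-written `let blobrepo = blobrepo.clone();` blocks to the cloned! macro, which shadows each listed binding with a clone just before a move closure captures it. A minimal re-creation of the macro for illustration (the real one lives in the `cloned` crate and also handles forms like `self.repo`):

```rust
macro_rules! cloned {
    ($($name:ident),+) => {
        $( let $name = $name.clone(); )+
    };
}

#[derive(Clone, Debug)]
struct CoreContext(u32);

fn main() {
    let ctx = CoreContext(7);
    let closure = {
        cloned!(ctx); // expands to: let ctx = ctx.clone();
        move || ctx.0
    };
    assert_eq!(closure(), 7);
    // The original binding is still live; the closure captured the clone.
    println!("still have {:?}", ctx);
}
```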
View File

@ -186,6 +186,7 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> {
}
fn fetch_content_from_manifest(
ctx: CoreContext,
logger: Logger,
mf: Box<Manifest + Sync>,
element: MPathElement,
@ -198,7 +199,7 @@ fn fetch_content_from_manifest(
element,
entry.get_hash()
);
entry.get_content()
entry.get_content(ctx)
}
None => try_boxfuture!(Err(format_err!("failed to lookup element {:?}", element))),
}
@ -228,38 +229,38 @@ fn fetch_content(
path: &str,
) -> BoxFuture<Content, Error> {
let path = try_boxfuture!(MPath::new(path));
let resolved_cs_id = resolve_hg_rev(ctx, repo, rev);
let resolved_cs_id = resolve_hg_rev(ctx.clone(), repo, rev);
let mf = resolved_cs_id
.and_then({
cloned!(repo);
move |cs_id| repo.get_changeset_by_changesetid(&cs_id)
cloned!(ctx, repo);
move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id)
})
.map(|cs| cs.manifestid().clone())
.and_then({
cloned!(repo);
move |root_mf_id| repo.get_manifest_by_nodeid(&root_mf_id)
cloned!(ctx, repo);
move |root_mf_id| repo.get_manifest_by_nodeid(ctx, &root_mf_id)
});
let all_but_last = iter_ok::<_, Error>(path.clone().into_iter().rev().skip(1).rev());
let folded: BoxFuture<_, Error> = mf.and_then({
cloned!(logger);
cloned!(ctx, logger);
move |mf| {
all_but_last.fold(mf, move |mf, element| {
fetch_content_from_manifest(logger.clone(), mf, element).and_then(|content| {
match content {
fetch_content_from_manifest(ctx.clone(), logger.clone(), mf, element).and_then(
|content| match content {
Content::Tree(mf) => Ok(mf),
content => Err(format_err!("expected tree entry, found {:?}", content)),
}
})
},
)
})
}
}).boxify();
let basename = path.basename().clone();
folded
.and_then(move |mf| fetch_content_from_manifest(logger.clone(), mf, basename))
.and_then(move |mf| fetch_content_from_manifest(ctx, logger.clone(), mf, basename))
.boxify()
}
@ -272,7 +273,7 @@ pub fn fetch_bonsai_changeset(
hg_changeset_id
.and_then({
let repo = repo.clone();
cloned!(ctx, repo);
move |hg_cs| repo.get_bonsai_from_hg(ctx, &hg_cs)
})
.and_then({
@ -280,12 +281,13 @@ pub fn fetch_bonsai_changeset(
move |maybe_bonsai| maybe_bonsai.ok_or(err_msg(format!("bonsai not found for {}", rev)))
})
.and_then({
cloned!(repo);
move |bonsai| repo.get_bonsai_changeset(bonsai)
cloned!(ctx, repo);
move |bonsai| repo.get_bonsai_changeset(ctx, bonsai)
})
}
fn get_cache<B: CacheBlobstoreExt>(
ctx: CoreContext,
blobstore: &B,
key: String,
mode: String,
@ -293,9 +295,9 @@ fn get_cache<B: CacheBlobstoreExt>(
if mode == "cache-only" {
blobstore.get_cache_only(key)
} else if mode == "no-fill" {
blobstore.get_no_cache_fill(key)
blobstore.get_no_cache_fill(ctx, key)
} else {
blobstore.get(key)
blobstore.get(ctx, key)
}
}
@ -331,11 +333,13 @@ fn slice_to_str(slice: &[u8]) -> String {
}
fn hg_manifest_diff(
ctx: CoreContext,
repo: BlobRepo,
left: &HgManifestId,
right: &HgManifestId,
) -> impl Future<Item = Option<ChangesetAttrDiff>, Error = Error> {
bonsai_diff(
ctx,
repo.get_root_entry(left),
Some(repo.get_root_entry(right)),
None,
@ -366,13 +370,14 @@ fn hg_manifest_diff(
}
fn hg_changeset_diff(
ctx: CoreContext,
repo: BlobRepo,
left_id: &HgChangesetId,
right_id: &HgChangesetId,
) -> impl Future<Item = ChangesetDiff, Error = Error> {
(
repo.get_changeset_by_changesetid(left_id),
repo.get_changeset_by_changesetid(right_id),
repo.get_changeset_by_changesetid(ctx.clone(), left_id),
repo.get_changeset_by_changesetid(ctx.clone(), right_id),
).into_future()
.and_then({
cloned!(repo, left_id, right_id);
@ -418,10 +423,12 @@ fn hg_changeset_diff(
))
}
hg_manifest_diff(repo, left.manifestid(), right.manifestid()).map(move |mdiff| {
diff.diff.extend(mdiff);
diff
})
hg_manifest_diff(ctx, repo, left.manifestid(), right.manifestid()).map(
move |mdiff| {
diff.diff.extend(mdiff);
diff
},
)
}
})
}
@ -441,24 +448,27 @@ fn build_skiplist_index<S: ToString>(
repo.get_bonsai_heads_maybe_stale(ctx.clone())
.collect()
.and_then(move |heads| {
loop_fn(
(heads.into_iter(), skiplist_index),
move |(mut heads, skiplist_index)| match heads.next() {
Some(head) => {
let f = skiplist_index.add_node(
ctx.clone(),
cs_fetcher.clone(),
head,
max_index_depth,
);
.and_then({
cloned!(ctx);
move |heads| {
loop_fn(
(heads.into_iter(), skiplist_index),
move |(mut heads, skiplist_index)| match heads.next() {
Some(head) => {
let f = skiplist_index.add_node(
ctx.clone(),
cs_fetcher.clone(),
head,
max_index_depth,
);
f.map(move |()| Loop::Continue((heads, skiplist_index)))
.boxify()
}
None => ok(Loop::Break(skiplist_index)).boxify(),
},
)
f.map(move |()| Loop::Continue((heads, skiplist_index)))
.boxify()
}
None => ok(Loop::Break(skiplist_index)).boxify(),
},
)
}
})
.inspect(|skiplist_index| {
println!(
@ -484,20 +494,24 @@ fn build_skiplist_index<S: ToString>(
compact_protocol::serialize(&thrift_merge_graph)
})
.and_then(move |bytes| {
debug!(logger, "storing {} bytes", bytes.len());
blobstore.put(key, BlobstoreBytes::from_bytes(bytes))
.and_then({
cloned!(ctx);
move |bytes| {
debug!(logger, "storing {} bytes", bytes.len());
blobstore.put(ctx, key, BlobstoreBytes::from_bytes(bytes))
}
})
.boxify()
}
fn read_skiplist_index<S: ToString>(
ctx: CoreContext,
repo: BlobRepo,
key: S,
logger: Logger,
) -> BoxFuture<(), Error> {
repo.get_blobstore()
.get(key.to_string())
.get(ctx, key.to_string())
.and_then(move |maybebytes| {
match maybebytes {
Some(bytes) => {
@ -525,6 +539,7 @@ fn main() -> Result<()> {
let future = match matches.subcommand() {
(BLOBSTORE_FETCH, Some(sub_m)) => {
let ctx = CoreContext::test_mock();
let key = sub_m.value_of("KEY").unwrap().to_string();
let decode_as = sub_m.value_of("decode-as").map(|val| val.to_string());
let use_memcache = sub_m.value_of("use-memcache").map(|val| val.to_string());
@ -536,9 +551,9 @@ fn main() -> Result<()> {
match (use_memcache, no_prefix) {
(None, false) => {
let blobstore = PrefixBlobstore::new(blobstore, repo_id.prefix());
blobstore.get(key.clone()).boxify()
blobstore.get(ctx, key.clone()).boxify()
}
(None, true) => blobstore.get(key.clone()).boxify(),
(None, true) => blobstore.get(ctx, key.clone()).boxify(),
(Some(mode), false) => {
let blobstore = new_memcache_blobstore(
blobstore,
@ -546,7 +561,7 @@ fn main() -> Result<()> {
manifold_args.bucket.as_ref(),
).unwrap();
let blobstore = PrefixBlobstore::new(blobstore, repo_id.prefix());
get_cache(&blobstore, key.clone(), mode)
get_cache(ctx.clone(), &blobstore, key.clone(), mode)
}
(Some(mode), true) => {
let blobstore = new_memcache_blobstore(
@ -554,7 +569,7 @@ fn main() -> Result<()> {
"manifold",
manifold_args.bucket.as_ref(),
).unwrap();
get_cache(&blobstore, key.clone(), mode)
get_cache(ctx.clone(), &blobstore, key.clone(), mode)
}
}.map(move |value| {
println!("{:?}", value);
@ -588,7 +603,7 @@ fn main() -> Result<()> {
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let repo = args::open_repo(&logger, &matches)?;
let repo = args::open_repo(ctx.clone(), &logger, &matches)?;
fetch_bonsai_changeset(ctx, rev, repo.blobrepo())
.map(|bcs| {
println!("{:?}", bcs);
@ -604,7 +619,7 @@ fn main() -> Result<()> {
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let repo = args::open_repo(&logger, &matches)?;
let repo = args::open_repo(ctx.clone(), &logger, &matches)?;
fetch_content(ctx, logger.clone(), repo.blobrepo(), rev, path)
.and_then(|content| {
match content {
@ -652,16 +667,19 @@ fn main() -> Result<()> {
config_repo::handle_command(sub_m)
}
(BOOKMARKS, Some(sub_m)) => {
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?;
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
args::init_cachelib(&matches);
let repo = args::open_repo(ctx.clone(), &logger, &matches)?;
bookmarks_manager::handle_command(ctx, &repo.blobrepo(), sub_m, logger)
}
(HG_CHANGESET, Some(sub_m)) => match sub_m.subcommand() {
(HG_CHANGESET_DIFF, Some(sub_m)) => {
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
let left_cs = sub_m
.value_of("LEFT_CS")
.ok_or(format_err!("LEFT_CS argument expected"))
@ -672,12 +690,14 @@ fn main() -> Result<()> {
.and_then(HgChangesetId::from_str);
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?.blobrepo().clone();
let repo = args::open_repo(ctx.clone(), &logger, &matches)?
.blobrepo()
.clone();
(left_cs, right_cs)
.into_future()
.and_then(move |(left_cs, right_cs)| {
hg_changeset_diff(repo, &left_cs, &right_cs)
hg_changeset_diff(ctx, repo, &left_cs, &right_cs)
})
.and_then(|diff| {
serde_json::to_writer(io::stdout(), &diff)
@ -696,8 +716,13 @@ fn main() -> Result<()> {
.ok_or(format_err!("STOP_CS argument expected"))
.and_then(HgChangesetId::from_str);
// TODO(T37478150, luk) This is not a test case, fix it up in future diffs
let ctx = CoreContext::test_mock();
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?.blobrepo().clone();
let repo = args::open_repo(ctx.clone(), &logger, &matches)?
.blobrepo()
.clone();
let ctx = CoreContext::test_mock();
(start_cs, stop_cs)
@ -746,16 +771,22 @@ fn main() -> Result<()> {
(SKIPLIST, Some(sub_m)) => match sub_m.subcommand() {
(SKIPLIST_BUILD, Some(sub_m)) => {
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?.blobrepo().clone();
let ctx = CoreContext::test_mock();
let repo = args::open_repo(ctx.clone(), &logger, &matches)?
.blobrepo()
.clone();
build_skiplist_index(ctx, repo, sub_m.value_of("BLOBSTORE_KEY").unwrap(), logger)
}
(SKIPLIST_READ, Some(sub_m)) => {
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?.blobrepo().clone();
let ctx = CoreContext::test_mock();
let repo = args::open_repo(ctx.clone(), &logger, &matches)?
.blobrepo()
.clone();
read_skiplist_index(
ctx.clone(),
repo,
sub_m
.value_of("BLOBSTORE_KEY")

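In the admin tool, get_cache picks one of three read paths; after this change the two paths that can touch the blobstore take the context, while the pure in-memory cache probe still does not. A self-contained sketch with stand-in types:

```rust
#[derive(Clone)]
struct CoreContext;

trait CacheBlobstoreExt {
    fn get_cache_only(&self, key: String) -> Option<Vec<u8>>;
    fn get_no_cache_fill(&self, ctx: CoreContext, key: String) -> Option<Vec<u8>>;
    fn get(&self, ctx: CoreContext, key: String) -> Option<Vec<u8>>;
}

fn get_cache<B: CacheBlobstoreExt>(
    ctx: CoreContext,
    blobstore: &B,
    key: String,
    mode: String,
) -> Option<Vec<u8>> {
    if mode == "cache-only" {
        blobstore.get_cache_only(key) // never reaches the blobstore: no ctx needed
    } else if mode == "no-fill" {
        blobstore.get_no_cache_fill(ctx, key)
    } else {
        blobstore.get(ctx, key)
    }
}

struct FakeStore;

impl CacheBlobstoreExt for FakeStore {
    fn get_cache_only(&self, _key: String) -> Option<Vec<u8>> {
        None // simulate a cache miss
    }
    fn get_no_cache_fill(&self, _ctx: CoreContext, key: String) -> Option<Vec<u8>> {
        Some(key.into_bytes())
    }
    fn get(&self, _ctx: CoreContext, key: String) -> Option<Vec<u8>> {
        Some(key.into_bytes())
    }
}

fn main() {
    let v = get_cache(CoreContext, &FakeStore, "key0".to_string(), "no-fill".to_string());
    assert_eq!(v, Some(b"key0".to_vec()));
}
```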
View File

@ -12,6 +12,7 @@ extern crate clap;
#[macro_use]
extern crate cloned;
extern crate cmdlib;
extern crate context;
extern crate failure_ext as failure;
extern crate futures;
extern crate futures_ext;
@ -35,6 +36,7 @@ use blobrepo::BlobRepo;
use blobrepo::alias::get_sha256;
use changesets::SqlChangesets;
use cmdlib::args;
use context::CoreContext;
use mercurial_types::RepositoryId;
use mononoke_types::{ChangesetId, ContentId, FileChange, hash::Sha256};
@ -79,6 +81,7 @@ impl AliasVerification {
fn get_file_changes_vector(
&self,
ctx: CoreContext,
bcs_id: ChangesetId,
) -> BoxFuture<Vec<Option<FileChange>>, Error> {
let cs_cnt = self.cs_processed.fetch_add(1, Ordering::Relaxed);
@ -88,7 +91,7 @@ impl AliasVerification {
}
self.blobrepo
.get_bonsai_changeset(bcs_id)
.get_bonsai_changeset(ctx, bcs_id)
.map(|bcs| {
let file_changes: Vec<_> = bcs.file_changes()
.map(|(_, file_change)| file_change.cloned())
@ -119,6 +122,7 @@ impl AliasVerification {
fn process_missing_alias_blob(
&self,
ctx: CoreContext,
alias: Sha256,
content_id: ContentId,
) -> impl Future<Item = (), Error = Error> {
@ -133,19 +137,20 @@ impl AliasVerification {
match mode {
Mode::Verify => Ok(()).into_future().left_future(),
Mode::Generate => blobrepo
.upload_alias_to_file_content_id(alias, content_id)
.upload_alias_to_file_content_id(ctx, alias, content_id)
.right_future(),
}
}
fn process_alias(
&self,
ctx: CoreContext,
alias: Sha256,
content_id: ContentId,
) -> impl Future<Item = (), Error = Error> {
let av = self.clone();
self.blobrepo
.get_file_content_id_by_alias(alias)
.get_file_content_id_by_alias(ctx.clone(), alias)
.then(move |result| match result {
Ok(content_id_from_blobstore) => {
av.check_alias_blob(alias, content_id, content_id_from_blobstore)
@ -153,7 +158,7 @@ impl AliasVerification {
}
Err(_) => {
// the blob with alias is not found
av.process_missing_alias_blob(alias, content_id)
av.process_missing_alias_blob(ctx, alias, content_id)
.right_future()
}
})
@ -161,14 +166,15 @@ impl AliasVerification {
pub fn process_file_content(
&self,
ctx: CoreContext,
content_id: ContentId,
) -> impl Future<Item = (), Error = Error> {
let repo = self.blobrepo.clone();
let av = self.clone();
repo.get_file_content_by_content_id(content_id)
repo.get_file_content_by_content_id(ctx.clone(), content_id)
.map(|content| get_sha256(&content.into_bytes()))
.and_then(move |alias| av.process_alias(alias, content_id))
.and_then(move |alias| av.process_alias(ctx, alias, content_id))
}
fn print_report(&self, partial: bool) {
@ -183,7 +189,12 @@ impl AliasVerification {
);
}
fn get_bounded(&self, min_id: u64, max_id: u64) -> impl Future<Item = (), Error = Error> {
fn get_bounded(
&self,
ctx: CoreContext,
min_id: u64,
max_id: u64,
) -> impl Future<Item = (), Error = Error> {
let av = self.clone();
let av_for_process = self.clone();
let av_for_report = self.clone();
@ -196,7 +207,10 @@ impl AliasVerification {
// stream of cs_id
.get_list_bs_cs_id_in_range(self.repoid, min_id, max_id)
// future of vectors of file changes
.map(move |bcs_id| av.get_file_changes_vector(bcs_id))
.map({
cloned!(ctx);
move |bcs_id| av.get_file_changes_vector(ctx.clone(), bcs_id)
})
.buffer_unordered(1000)
// Stream of file_changes
.map( move |file_changes_vec| {
@ -212,7 +226,7 @@ impl AliasVerification {
if let Some(file_change) = file_change {
let content_id = file_change.content_id().clone();
av_for_process
.process_file_content(content_id)
.process_file_content(ctx.clone(), content_id)
.left_future()
} else {
Ok(()).into_future().right_future()
@ -225,6 +239,7 @@ impl AliasVerification {
pub fn verify_all(
&self,
ctx: CoreContext,
step: u64,
min_cs_db_id: u64,
) -> impl Future<Item = (), Error = Error> {
@ -247,7 +262,7 @@ impl AliasVerification {
stream::iter_ok(bounds.into_iter())
})
.flatten_stream()
.and_then(move |(min_val, max_val)| av.get_bounded(min_val, max_val))
.and_then(move |(min_val, max_val)| av.get_bounded(ctx.clone(), min_val, max_val))
.for_each(|()| Ok(()))
.map(move |()| av_for_report.print_report(false))
}
@ -290,10 +305,11 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> {
fn main() -> Result<()> {
let matches = setup_app().get_matches();
let ctx = CoreContext::test_mock();
let logger = args::get_logger(&matches);
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?;
let repo = args::open_repo(ctx.clone(), &logger, &matches)?;
let blobrepo = Arc::new(repo.blobrepo().clone());
let sqlchangesets = Arc::new(args::open_sql_changesets(&matches)?);
@ -316,7 +332,7 @@ fn main() -> Result<()> {
let repoid = args::get_repo_id(&matches);
let aliasimport = AliasVerification::new(logger, blobrepo, repoid, sqlchangesets, mode)
.verify_all(step, min_cs_db_id);
.verify_all(ctx, step, min_cs_db_id);
let mut runtime = tokio::runtime::Runtime::new()?;
let result = runtime.block_on(aliasimport);

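Like the other binaries in this diff, aliasverify has no real session yet, so it builds a placeholder context with CoreContext::test_mock() (flagged with TODOs elsewhere) and clones it into each stage. The shape of that pattern, with a stand-in context:

```rust
#[derive(Clone)]
struct CoreContext;

impl CoreContext {
    // Placeholder constructor; production callers are expected to pass a
    // real per-request context instead (hence the TODOs in the diff).
    fn test_mock() -> Self {
        CoreContext
    }
}

fn open_repo(_ctx: CoreContext) {}
fn verify_all(_ctx: CoreContext) {}

fn main() {
    let ctx = CoreContext::test_mock();
    open_repo(ctx.clone()); // clone for the intermediate use
    verify_all(ctx); // last use moves the original
}
```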
View File

@ -58,10 +58,11 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> {
fn main() -> Result<()> {
let matches = setup_app().get_matches();
let ctx = CoreContext::test_mock();
let logger = args::get_logger(&matches);
args::init_cachelib(&matches);
let repo = args::create_repo(&logger, &matches)?;
let repo = args::create_repo(ctx, &logger, &matches)?;
let blobrepo = Arc::new(repo.blobrepo().clone());
let revlogrepo_path = matches

View File

@ -7,6 +7,8 @@
#![deny(warnings)]
extern crate clap;
#[macro_use]
extern crate cloned;
extern crate failure_ext as failure;
extern crate futures;
#[macro_use]
@ -86,7 +88,10 @@ fn main() -> Result<()> {
let matches = setup_app().get_matches();
let logger = args::get_logger(&matches);
args::init_cachelib(&matches);
let repo = args::open_repo(&logger, &matches)?;
// TODO(luk): This is not a test use case, fix it in next diffs
let ctx = CoreContext::test_mock();
let repo = args::open_repo(ctx.clone(), &logger, &matches)?;
let config = config::get_config(&matches).expect("getting configuration failed");
let start_points = get_start_points(&matches);
@ -102,9 +107,6 @@ fn main() -> Result<()> {
// matter much.
let (end_sender, end_receiver) = ::std::sync::mpsc::channel();
// TODO(luk): This is not a test use case, fix it in next diffs
let ctx = CoreContext::test_mock();
// The future::lazy is to ensure that bonsai_verify (which calls tokio::spawn) is called after
// tokio::run, not before.
let verify_fut = future::lazy({
@ -115,7 +117,7 @@ fn main() -> Result<()> {
let ignored = ignored.clone();
move || {
let bonsai_verify = BonsaiMFVerify {
ctx: ctx,
ctx: ctx.clone(),
logger: logger.clone(),
repo: repo.blobrepo().clone(),
follow_limit,
@ -126,7 +128,7 @@ fn main() -> Result<()> {
bonsai_verify
.verify(start_points)
.and_then({
let logger = logger.clone();
cloned!(ctx, logger);
move |(result, meta)| {
let logger = logger.new(o!["changeset_id" => format!("{}", meta.changeset_id)]);
@ -175,7 +177,7 @@ fn main() -> Result<()> {
if print_changes {
let logger = logger.clone();
let diff_fut = difference
.changes()
.changes(ctx.clone())
.map(move |changed_entry| {
info!(
logger,

View File

@ -18,6 +18,7 @@ extern crate blobstore;
extern crate cachelib;
extern crate clap;
extern crate cmdlib;
extern crate context;
extern crate failure_ext as failure;
extern crate futures;
#[macro_use]
@ -42,6 +43,7 @@ extern crate tempdir;
use blobrepo::BlobRepo;
use bookmarks::Bookmark;
use clap::{App, ArgMatches};
use context::CoreContext;
use failure::{Error, Result};
use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
@ -77,6 +79,7 @@ fn run_hook(
cmdlib::args::init_cachelib(&matches);
let ctx = CoreContext::test_mock();
let logger = {
let level = if matches.is_present("debug") {
Level::Debug
@ -109,7 +112,8 @@ fn run_hook(
println!("Hook file is {} revision is {:?}", hook_file, revstr);
println!("Hook code is {}", code);
println!("==============================");
let mut hook_manager = HookManager::new_with_blobrepo(Default::default(), repo.clone(), logger);
let mut hook_manager =
HookManager::new_with_blobrepo(ctx.clone(), Default::default(), repo.clone(), logger);
let hook = LuaHook {
name: String::from("testhook"),
code,
@ -124,7 +128,7 @@ fn run_hook(
let id = try_boxfuture!(HgChangesetId::from_str(revstr));
if file_hook {
hook_manager
.run_file_hooks_for_bookmark(id, &bookmark, None)
.run_file_hooks_for_bookmark(ctx, id, &bookmark, None)
.map(|executions| {
for execution in executions.iter() {
if let (_, HookExecution::Rejected(_)) = execution {
@ -137,7 +141,7 @@ fn run_hook(
.boxify()
} else {
hook_manager
.run_changeset_hooks_for_bookmark(id, &bookmark, None)
.run_changeset_hooks_for_bookmark(ctx, id, &bookmark, None)
.map(|executions| executions.get(0).unwrap().1.clone())
.boxify()
}

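The hook-testing tool now passes the context into whichever hook family the flag selects; the clone made for the HookManager constructor leaves the original free to move into the run call. A stand-in sketch of the dispatch:

```rust
#[derive(Clone)]
struct CoreContext;

struct HookManager;

impl HookManager {
    fn new(_ctx: CoreContext) -> Self {
        HookManager
    }
    fn run_file_hooks(&self, _ctx: CoreContext) -> bool {
        true
    }
    fn run_changeset_hooks(&self, _ctx: CoreContext) -> bool {
        true
    }
}

fn run_hook(file_hook: bool) -> bool {
    let ctx = CoreContext;
    let manager = HookManager::new(ctx.clone());
    if file_hook {
        manager.run_file_hooks(ctx)
    } else {
        manager.run_changeset_hooks(ctx)
    }
}

fn main() {
    assert!(run_hook(true));
    assert!(run_hook(false));
}
```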
View File

@ -549,6 +549,7 @@ pub trait HgCommands {
mod test {
use super::*;
use context::CoreContext;
use futures::{future, stream};
use hooks::{InMemoryChangesetStore, InMemoryFileContentStore};
use slog::{Discard, Drain};
@ -678,10 +679,12 @@ mod test {
}
fn create_hook_manager() -> Arc<HookManager> {
let ctx = CoreContext::test_mock();
let changeset_store = InMemoryChangesetStore::new();
let content_store = InMemoryFileContentStore::new();
let logger = Logger::root(Discard {}.ignore_res(), o!());
Arc::new(HookManager::new(
ctx,
"some_repo".into(),
Box::new(changeset_store),
Arc::new(content_store),

View File

@ -31,6 +31,8 @@ extern crate maplit;
#[macro_use]
extern crate nom;
#[cfg(test)]
extern crate context;
extern crate futures_ext;
extern crate mercurial;
extern crate mercurial_bundles;

View File

@ -47,6 +47,7 @@ impl Tailer {
let content_store = BlobRepoFileContentStore::new((*repo).clone());
let mut hook_manager = HookManager::new(
ctx.clone(),
repo_name,
Box::new(changeset_store),
Arc::new(content_store),
@ -230,15 +231,15 @@ fn run_hooks_for_changeset(
cs: ChangesetId,
logger: Logger,
) -> impl Future<Item = (HgChangesetId, HookResults), Error = Error> {
repo.get_hg_from_bonsai_changeset(ctx, cs)
repo.get_hg_from_bonsai_changeset(ctx.clone(), cs)
.and_then(move |hg_cs| {
debug!(logger, "Running file hooks for changeset {:?}", hg_cs);
hm.run_file_hooks_for_bookmark(hg_cs.clone(), &bm, None)
hm.run_file_hooks_for_bookmark(ctx.clone(), hg_cs.clone(), &bm, None)
.map(move |res| (hg_cs, res))
.and_then(move |(hg_cs, file_res)| {
let hg_cs = hg_cs.clone();
debug!(logger, "Running changeset hooks for changeset {:?}", hg_cs);
hm.run_changeset_hooks_for_bookmark(hg_cs.clone(), &bm, None)
hm.run_changeset_hooks_for_bookmark(ctx, hg_cs.clone(), &bm, None)
.map(move |res| {
let hook_results = HookResults {
file_hooks_results: file_res,

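run_hooks_for_changeset chains three context consumers: the bonsai-to-hg lookup, the file hooks, and finally the changeset hooks. The first two get ctx.clone() and the last call moves the original, which is the recurring discipline throughout this diff. Sketched with futures 0.1 and hypothetical stand-in functions:

```rust
extern crate futures; // futures = "0.1"

use futures::Future;
use futures::future::{ok, FutureResult};

#[derive(Clone)]
struct CoreContext;

fn get_hg_changeset(_ctx: CoreContext) -> FutureResult<u32, ()> {
    ok(42)
}

fn run_file_hooks(_ctx: CoreContext, hg_cs: u32) -> FutureResult<u32, ()> {
    ok(hg_cs)
}

fn run_changeset_hooks(_ctx: CoreContext, hg_cs: u32) -> FutureResult<u32, ()> {
    ok(hg_cs)
}

fn main() {
    let ctx = CoreContext;
    let fut = get_hg_changeset(ctx.clone())
        .and_then({
            let ctx = ctx.clone();
            move |hg_cs| run_file_hooks(ctx, hg_cs)
        })
        // Last consumer: move the original context instead of cloning again.
        .and_then(move |hg_cs| run_changeset_hooks(ctx, hg_cs));
    assert_eq!(fut.wait(), Ok(42));
}
```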
View File

@ -84,6 +84,7 @@ mod test {
use super::ErrorKind;
use super::super::*;
use async_unit;
use context::CoreContext;
use fixtures::many_files_dirs;
use metaconfig::repoconfig::{BookmarkParams, HookParams, RepoReadOnly, RepoType};
use slog::{Discard, Drain};
@ -231,9 +232,10 @@ mod test {
}
fn hook_manager_blobrepo() -> HookManager {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let logger = Logger::root(Discard {}.ignore_res(), o!());
HookManager::new_with_blobrepo(Default::default(), repo, logger)
HookManager::new_with_blobrepo(ctx, Default::default(), repo, logger)
}
}

View File

@ -51,6 +51,7 @@ extern crate regex;
extern crate slog;
extern crate tempdir;
extern crate context;
extern crate srclient;
extern crate thrift;
@ -65,6 +66,7 @@ use asyncmemo::{Asyncmemo, Filler, Weight};
use blobrepo::{BlobRepo, HgBlobChangeset};
use bookmarks::Bookmark;
use bytes::Bytes;
use context::CoreContext;
pub use errors::*;
use failure::{Error, FutureFailureErrorExt};
use futures::{failed, finished, Future, IntoFuture, Stream};
@ -102,6 +104,7 @@ pub struct HookManager {
impl HookManager {
pub fn new(
ctx: CoreContext,
repo_name: String,
changeset_store: Box<ChangesetStore>,
content_store: Arc<FileContentStore>,
@ -112,6 +115,7 @@ impl HookManager {
let file_hooks = Arc::new(Mutex::new(HashMap::new()));
let filler = HookCacheFiller {
ctx,
file_hooks: file_hooks.clone(),
repo_name: repo_name.clone(),
};
@ -145,11 +149,13 @@ impl HookManager {
}
pub fn new_with_blobrepo(
ctx: CoreContext,
hook_manager_params: HookManagerParams,
blobrepo: BlobRepo,
logger: Logger,
) -> HookManager {
HookManager::new(
ctx,
format!("repo-{:?}", blobrepo.get_repoid()),
Box::new(BlobRepoChangesetStore::new(blobrepo.clone())),
Arc::new(BlobRepoFileContentStore::new(blobrepo.clone())),
@ -202,6 +208,7 @@ impl HookManager {
pub fn run_changeset_hooks_for_bookmark(
&self,
ctx: CoreContext,
changeset_id: HgChangesetId,
bookmark: &Bookmark,
maybe_pushvars: Option<HashMap<String, Bytes>>,
@ -213,7 +220,7 @@ impl HookManager {
.into_iter()
.filter(|name| self.changeset_hooks.contains_key(name))
.collect();
self.run_changeset_hooks_for_changeset_id(changeset_id, hooks, maybe_pushvars)
self.run_changeset_hooks_for_changeset_id(ctx, changeset_id, hooks, maybe_pushvars)
}
None => return finished(Vec::new()).boxify(),
}
@ -221,6 +228,7 @@ impl HookManager {
fn run_changeset_hooks_for_changeset_id(
&self,
ctx: CoreContext,
changeset_id: HgChangesetId,
hooks: Vec<String>,
maybe_pushvars: Option<HashMap<String, Bytes>>,
@ -236,7 +244,7 @@ impl HookManager {
.collect();
let hooks = try_boxfuture!(hooks);
let repo_name = self.repo_name.clone();
self.get_hook_changeset(changeset_id)
self.get_hook_changeset(ctx.clone(), changeset_id)
.and_then({
move |hcs| {
let hooks = HookManager::filter_bypassed_hooks(
@ -246,6 +254,7 @@ impl HookManager {
);
HookManager::run_changeset_hooks_for_changeset(
ctx,
repo_name,
hcs.clone(),
hooks.clone(),
@ -269,6 +278,7 @@ impl HookManager {
}
fn run_changeset_hooks_for_changeset(
ctx: CoreContext,
repo_name: String,
changeset: HookChangeset,
hooks: Vec<(String, Arc<Hook<HookChangeset>>)>,
@ -278,18 +288,19 @@ impl HookManager {
.map(move |(hook_name, hook)| {
let hook_context: HookContext<HookChangeset> =
HookContext::new(hook_name.clone(), repo_name.clone(), changeset.clone());
HookManager::run_changeset_hook(hook.clone(), hook_context)
HookManager::run_changeset_hook(ctx.clone(), hook.clone(), hook_context)
})
.collect();
futures::future::join_all(v).boxify()
}
fn run_changeset_hook(
ctx: CoreContext,
hook: Arc<Hook<HookChangeset>>,
hook_context: HookContext<HookChangeset>,
) -> BoxFuture<(String, HookExecution), Error> {
let hook_name = hook_context.hook_name.clone();
hook.run(hook_context)
hook.run(ctx, hook_context)
.map({
cloned!(hook_name);
move |he| (hook_name, he)
@ -303,6 +314,7 @@ impl HookManager {
pub fn run_file_hooks_for_bookmark(
&self,
ctx: CoreContext,
changeset_id: HgChangesetId,
bookmark: &Bookmark,
maybe_pushvars: Option<HashMap<String, Bytes>>,
@ -321,6 +333,7 @@ impl HookManager {
.filter_map(|name| file_hooks.get(&name).map(|hook| (name, hook.clone())))
.collect();
self.run_file_hooks_for_changeset_id(
ctx,
changeset_id,
hooks,
maybe_pushvars,
@ -333,6 +346,7 @@ impl HookManager {
fn run_file_hooks_for_changeset_id(
&self,
ctx: CoreContext,
changeset_id: HgChangesetId,
hooks: Vec<(String, (Arc<Hook<HookFile>>, Option<HookBypass>))>,
maybe_pushvars: Option<HashMap<String, Bytes>>,
@ -343,7 +357,7 @@ impl HookManager {
"Running file hooks for changeset id {:?}", changeset_id
);
let cache = self.cache.clone();
self.get_hook_changeset(changeset_id)
self.get_hook_changeset(ctx.clone(), changeset_id)
.and_then(move |hcs| {
let hooks = HookManager::filter_bypassed_hooks(
hooks.clone(),
@ -433,11 +447,15 @@ impl HookManager {
.boxify()
}
fn get_hook_changeset(&self, changeset_id: HgChangesetId) -> BoxFuture<HookChangeset, Error> {
fn get_hook_changeset(
&self,
ctx: CoreContext,
changeset_id: HgChangesetId,
) -> BoxFuture<HookChangeset, Error> {
let content_store = self.content_store.clone();
let hg_changeset = self.changeset_store
.get_changeset_by_changesetid(&changeset_id);
let changed_files = self.changeset_store.get_changed_files(&changeset_id);
.get_changeset_by_changesetid(ctx.clone(), &changeset_id);
let changed_files = self.changeset_store.get_changed_files(ctx, &changeset_id);
let reviewers_acl_checker = self.reviewers_acl_checker.clone();
Box::new((hg_changeset, changed_files).into_future().and_then(
move |(changeset, changed_files)| {
@ -512,7 +530,11 @@ pub trait Hook<T>: Send + Sync
where
T: Clone,
{
fn run(&self, hook_context: HookContext<T>) -> BoxFuture<HookExecution, Error>;
fn run(
&self,
ctx: CoreContext,
hook_context: HookContext<T>,
) -> BoxFuture<HookExecution, Error>;
}
/// Represents a changeset - more user friendly than the blob changeset
@ -615,9 +637,9 @@ impl HookFile {
}
}
pub fn contains_string(&self, data: &str) -> BoxFuture<bool, Error> {
pub fn contains_string(&self, ctx: CoreContext, data: &str) -> BoxFuture<bool, Error> {
let data = data.to_string();
self.file_content()
self.file_content(ctx)
.and_then(move |bytes| {
let str_content = str::from_utf8(&bytes)?.to_string();
Ok(str_content.contains(&data))
@ -625,17 +647,17 @@ impl HookFile {
.boxify()
}
pub fn len(&self) -> BoxFuture<u64, Error> {
self.file_content()
pub fn len(&self, ctx: CoreContext) -> BoxFuture<u64, Error> {
self.file_content(ctx)
.and_then(|bytes| Ok(bytes.len() as u64))
.boxify()
}
pub fn file_content(&self) -> BoxFuture<Bytes, Error> {
pub fn file_content(&self, ctx: CoreContext) -> BoxFuture<Bytes, Error> {
let path = try_boxfuture!(MPath::new(self.path.as_bytes()));
let changeset_id = self.changeset_id.clone();
self.content_store
.get_file_content_for_changeset(self.changeset_id, path.clone())
.get_file_content_for_changeset(ctx, self.changeset_id, path.clone())
.and_then(move |opt| {
opt.ok_or(ErrorKind::NoFileContent(changeset_id, path.into()).into())
})
@ -643,11 +665,11 @@ impl HookFile {
.boxify()
}
pub fn file_type(&self) -> BoxFuture<FileType, Error> {
pub fn file_type(&self, ctx: CoreContext) -> BoxFuture<FileType, Error> {
let path = try_boxfuture!(MPath::new(self.path.as_bytes()));
let changeset_id = self.changeset_id.clone();
self.content_store
.get_file_content_for_changeset(self.changeset_id, path.clone())
.get_file_content_for_changeset(ctx, self.changeset_id, path.clone())
.and_then(move |opt| {
opt.ok_or(ErrorKind::NoFileContent(changeset_id, path.into()).into())
})
@ -677,10 +699,10 @@ impl HookChangeset {
}
}
pub fn file_content(&self, path: String) -> BoxFuture<Option<Bytes>, Error> {
pub fn file_content(&self, ctx: CoreContext, path: String) -> BoxFuture<Option<Bytes>, Error> {
let path = try_boxfuture!(MPath::new(path.as_bytes()));
self.content_store
.get_file_content_for_changeset(self.changeset_id, path.clone())
.get_file_content_for_changeset(ctx, self.changeset_id, path.clone())
.map(|opt| opt.map(|(_, bytes)| bytes))
.boxify()
}
@ -726,11 +748,13 @@ impl HookRejectionInfo {
pub trait ChangesetStore: Send + Sync {
fn get_changeset_by_changesetid(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<HgBlobChangeset, Error>;
fn get_changed_files(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<Vec<(String, ChangedFileType)>, Error>;
}
@ -742,37 +766,44 @@ pub struct BlobRepoChangesetStore {
impl ChangesetStore for BlobRepoChangesetStore {
fn get_changeset_by_changesetid(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<HgBlobChangeset, Error> {
self.repo.get_changeset_by_changesetid(changesetid)
self.repo.get_changeset_by_changesetid(ctx, changesetid)
}
fn get_changed_files(
&self,
ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<Vec<(String, ChangedFileType)>, Error> {
cloned!(self.repo);
self.repo
.get_changeset_by_changesetid(changesetid)
.and_then(move |cs| {
let mf_id = cs.manifestid();
let mf = repo.get_manifest_by_nodeid(&mf_id);
let parents = cs.parents();
let (maybe_p1, _) = parents.get_nodes();
// TODO(stash): generate changed file stream correctly for merges
let p_mf = match maybe_p1.cloned() {
Some(p1) => repo.get_changeset_by_changesetid(&HgChangesetId::new(p1))
.and_then({
cloned!(repo);
move |p1| repo.get_manifest_by_nodeid(&p1.manifestid())
})
.left_future(),
None => finished(get_empty_manifest()).right_future(),
};
(mf, p_mf)
.get_changeset_by_changesetid(ctx.clone(), changesetid)
.and_then({
cloned!(ctx);
move |cs| {
let mf_id = cs.manifestid();
let mf = repo.get_manifest_by_nodeid(ctx.clone(), &mf_id);
let parents = cs.parents();
let (maybe_p1, _) = parents.get_nodes();
// TODO(stash): generate changed file stream correctly for merges
let p_mf = match maybe_p1.cloned() {
Some(p1) => {
repo.get_changeset_by_changesetid(ctx.clone(), &HgChangesetId::new(p1))
.and_then({
cloned!(repo);
move |p1| repo.get_manifest_by_nodeid(ctx, &p1.manifestid())
})
.left_future()
}
None => finished(get_empty_manifest()).right_future(),
};
(mf, p_mf)
}
})
.and_then(|(mf, p_mf)| {
manifest_utils::changed_file_stream(&mf, &p_mf, None)
.and_then(move |(mf, p_mf)| {
manifest_utils::changed_file_stream(ctx, &mf, &p_mf, None)
.map(|changed_entry| {
let path = changed_entry
.get_full_path()
@ -799,6 +830,7 @@ pub struct InMemoryChangesetStore {
impl ChangesetStore for InMemoryChangesetStore {
fn get_changeset_by_changesetid(
&self,
_ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<HgBlobChangeset, Error> {
match self.map.get(changesetid) {
@ -811,6 +843,7 @@ impl ChangesetStore for InMemoryChangesetStore {
fn get_changed_files(
&self,
_ctx: CoreContext,
changesetid: &HgChangesetId,
) -> BoxFuture<Vec<(String, ChangedFileType)>, Error> {
match self.map.get(changesetid) {
@ -843,6 +876,7 @@ impl InMemoryChangesetStore {
pub trait FileContentStore: Send + Sync {
fn get_file_content_for_changeset(
&self,
ctx: CoreContext,
changesetid: HgChangesetId,
path: MPath,
) -> BoxFuture<Option<(FileType, Bytes)>, Error>;
@ -856,6 +890,7 @@ pub struct InMemoryFileContentStore {
impl FileContentStore for InMemoryFileContentStore {
fn get_file_content_for_changeset(
&self,
_ctx: CoreContext,
changesetid: HgChangesetId,
path: MPath,
) -> BoxFuture<Option<(FileType, Bytes)>, Error> {
@ -888,18 +923,22 @@ pub struct BlobRepoFileContentStore {
impl FileContentStore for BlobRepoFileContentStore {
fn get_file_content_for_changeset(
&self,
ctx: CoreContext,
changesetid: HgChangesetId,
path: MPath,
) -> BoxFuture<Option<(FileType, Bytes)>, Error> {
let repo = self.repo.clone();
let repo2 = repo.clone();
repo.get_changeset_by_changesetid(&changesetid)
.and_then(move |changeset| {
repo.find_file_in_manifest(&path, changeset.manifestid().clone())
repo.get_changeset_by_changesetid(ctx.clone(), &changesetid)
.and_then({
cloned!(ctx);
move |changeset| {
repo.find_file_in_manifest(ctx, &path, changeset.manifestid().clone())
}
})
.and_then(move |opt| match opt {
Some((file_type, hash)) => repo2
.get_file_content(&hash.into_nodehash())
.get_file_content(ctx, &hash.into_nodehash())
.map(move |content| Some((file_type, content)))
.boxify(),
None => finished(None).boxify(),
@ -922,6 +961,7 @@ impl BlobRepoFileContentStore {
}
struct HookCacheFiller {
ctx: CoreContext,
repo_name: String,
file_hooks: FileHooks,
}
@ -940,7 +980,7 @@ impl Filler for HookCacheFiller {
self.repo_name.clone(),
key.file.clone(),
);
arc_hook.0.run(hook_context)
arc_hook.0.run(self.ctx.clone(), hook_context)
}
None => panic!("Can't find hook {}", key.hook_name), // TODO
}
@ -1033,7 +1073,11 @@ mod test {
}
impl Hook<HookChangeset> for FnChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
_ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
finished((self.f)(context)).boxify()
}
}
@ -1054,7 +1098,11 @@ mod test {
}
impl Hook<HookChangeset> for ContextMatchingChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
_ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
assert_eq!(self.expected_context, context);
Box::new(finished(HookExecution::Accepted))
}
@ -1072,11 +1120,15 @@ mod test {
}
impl Hook<HookChangeset> for ContainsStringMatchingChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
let mut futs = stream::FuturesUnordered::new();
for file in context.data.files {
let fut = match self.expected_content.get(&file.path) {
Some(content) => file.contains_string(&content),
Some(content) => file.contains_string(ctx.clone(), &content),
None => Box::new(finished(false)),
};
futs.push(fut);
@ -1107,13 +1159,17 @@ mod test {
}
impl Hook<HookChangeset> for FileContentMatchingChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
let mut futs = stream::FuturesUnordered::new();
for file in context.data.files {
let fut = match self.expected_content.get(&file.path) {
Some(expected_content) => {
let expected_content = expected_content.clone();
file.file_content()
file.file_content(ctx.clone())
.map(move |content| {
let content = str::from_utf8(&*content).unwrap().to_string();
content.contains(&expected_content)
@ -1150,13 +1206,17 @@ mod test {
}
impl Hook<HookChangeset> for LengthMatchingChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
let mut futs = stream::FuturesUnordered::new();
for file in context.data.files {
let fut = match self.expected_lengths.get(&file.path) {
Some(expected_length) => {
let expected_length = *expected_length;
file.len()
file.len(ctx.clone())
.map(move |length| length == expected_length)
.boxify()
}
@ -1191,11 +1251,15 @@ mod test {
}
impl Hook<HookChangeset> for OtherFileMatchingChangesetHook {
fn run(&self, context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
let expected_content = self.expected_content.clone();
context
.data
.file_content(self.file_path.clone())
.file_content(ctx, self.file_path.clone())
.map(|opt| opt.map(|content| str::from_utf8(&*content).unwrap().to_string()))
.map(move |opt| {
if opt == expected_content {
@ -1230,7 +1294,11 @@ mod test {
}
impl Hook<HookFile> for FnFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
_ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
finished((self.f)(context)).boxify()
}
}
@ -1251,7 +1319,11 @@ mod test {
}
impl Hook<HookFile> for PathMatchingFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
_ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
finished(if self.paths.contains(&context.data.path) {
HookExecution::Accepted
} else {
@ -1270,10 +1342,14 @@ mod test {
}
impl Hook<HookFile> for ContainsStringMatchingFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
context
.data
.contains_string(&self.content)
.contains_string(ctx, &self.content)
.map(|contains| {
if contains {
HookExecution::Accepted
@ -1295,11 +1371,15 @@ mod test {
}
impl Hook<HookFile> for FileContentMatchingFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
let expected_content = self.content.clone();
context
.data
.file_content()
.file_content(ctx)
.map(move |content| {
let content = str::from_utf8(&*content).unwrap().to_string();
if content.contains(&expected_content) {
@ -1322,11 +1402,15 @@ mod test {
}
impl Hook<HookFile> for IsSymLinkMatchingFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
let is_symlink = self.is_symlink;
context
.data
.file_type()
.file_type(ctx)
.map(move |file_type| {
let actual = match file_type {
FileType::Symlink => true,
@ -1352,11 +1436,15 @@ mod test {
}
impl Hook<HookFile> for LengthMatchingFileHook {
fn run(&self, context: HookContext<HookFile>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
ctx: CoreContext,
context: HookContext<HookFile>,
) -> BoxFuture<HookExecution, Error> {
let exp_length = self.length;
context
.data
.len()
.len(ctx)
.map(move |length| {
if length == exp_length {
HookExecution::Accepted
@ -1375,6 +1463,7 @@ mod test {
#[test]
fn test_changeset_hook_accepted() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook()
};
@ -1384,13 +1473,14 @@ mod test {
let expected = hashmap! {
"hook1".to_string() => HookExecution::Accepted
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_rejected() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_rejecting_changeset_hook()
};
@ -1400,13 +1490,14 @@ mod test {
let expected = hashmap! {
"hook1".to_string() => default_rejection()
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_mix() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook(),
"hook2".to_string() => always_rejecting_changeset_hook(),
@ -1421,13 +1512,14 @@ mod test {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => HookExecution::Accepted,
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_context() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let files = vec![
"dir1/subdir1/subsubdir1/file_1".to_string(),
"dir1/subdir1/subsubdir2/file_1".to_string(),
@ -1472,13 +1564,14 @@ mod test {
let expected = hashmap! {
"hook1".to_string() => HookExecution::Accepted
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_contains_string() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hook1_map = hashmap![
"dir1/subdir1/subsubdir1/file_1".to_string() => "elephants".to_string(),
"dir1/subdir1/subsubdir2/file_1".to_string() => "hippopatami".to_string(),
@ -1507,13 +1600,14 @@ mod test {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => default_rejection(),
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_other_file_content() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => other_file_matching_changeset_hook("dir1/subdir1/subsubdir1/file_1".to_string(), Some("elephants".to_string())),
"hook2".to_string() => other_file_matching_changeset_hook("dir1/subdir1/subsubdir1/file_1".to_string(), Some("giraffes".to_string())),
@ -1531,13 +1625,14 @@ mod test {
"hook4".to_string() => HookExecution::Accepted,
"hook5".to_string() => default_rejection(),
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_file_content() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hook1_map = hashmap![
"dir1/subdir1/subsubdir1/file_1".to_string() => "elephants".to_string(),
"dir1/subdir1/subsubdir2/file_1".to_string() => "hippopatami".to_string(),
@ -1566,13 +1661,14 @@ mod test {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => default_rejection(),
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_changeset_hook_lengths() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hook1_map = hashmap![
"dir1/subdir1/subsubdir1/file_1".to_string() => 9,
"dir1/subdir1/subsubdir2/file_1".to_string() => 11,
@ -1603,13 +1699,14 @@ mod test {
"hook2".to_string() => default_rejection(),
"hook3".to_string() => default_rejection(),
};
run_changeset_hooks("bm1", hooks, bookmarks, expected);
run_changeset_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_accepted() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_accepting_file_hook()
};
@ -1623,13 +1720,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
}
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_rejected() {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_rejecting_file_hook()
};
@ -1643,13 +1741,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(),
}
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_mix() {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => always_rejecting_file_hook(),
"hook2".to_string() => always_accepting_file_hook()
@ -1669,13 +1768,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
}
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hooks_paths() {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let matching_paths = hashset![
"dir1/subdir1/subsubdir2/file_1".to_string(),
"dir1/subdir1/subsubdir2/file_2".to_string(),
@ -1693,13 +1793,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
}
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hooks_paths_mix() {
async_unit::tokio_unit_test(move || {
let ctx = CoreContext::test_mock();
let matching_paths1 = hashset![
"dir1/subdir1/subsubdir2/file_1".to_string(),
"dir1/subdir1/subsubdir2/file_2".to_string(),
@ -1724,13 +1825,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(),
}
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_contains_string() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => contains_string_matching_file_hook("elephants".to_string()),
"hook2".to_string() => contains_string_matching_file_hook("hippopatami".to_string()),
@ -1758,13 +1860,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
},
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_file_content() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => file_content_matching_file_hook("elephants".to_string()),
"hook2".to_string() => file_content_matching_file_hook("hippopatami".to_string()),
@ -1792,13 +1895,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
},
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_is_symlink() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => is_symlink_matching_file_hook(true),
"hook2".to_string() => is_symlink_matching_file_hook(false),
@ -1820,13 +1924,14 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => HookExecution::Accepted,
},
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
#[test]
fn test_file_hook_length() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookFile>>> = hashmap! {
"hook1".to_string() => length_matching_file_hook("elephants".len() as u64),
"hook2".to_string() => length_matching_file_hook("hippopatami".len() as u64),
@ -1860,7 +1965,7 @@ mod test {
"dir1/subdir1/subsubdir2/file_2".to_string() => default_rejection(),
},
};
run_file_hooks("bm1", hooks, bookmarks, expected);
run_file_hooks(ctx, "bm1", hooks, bookmarks, expected);
});
}
@ -1883,6 +1988,7 @@ mod test {
#[test]
fn test_with_blob_store() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hooks: HashMap<String, Box<Hook<HookChangeset>>> = hashmap! {
"hook1".to_string() => always_accepting_changeset_hook()
};
@ -1892,20 +1998,22 @@ mod test {
let expected = hashmap! {
"hook1".to_string() => HookExecution::Accepted
};
run_changeset_hooks_with_mgr("bm1", hooks, bookmarks, expected, false);
run_changeset_hooks_with_mgr(ctx, "bm1", hooks, bookmarks, expected, false);
});
}
fn run_changeset_hooks(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<Hook<HookChangeset>>>,
bookmarks: HashMap<String, Vec<String>>,
expected: HashMap<String, HookExecution>,
) {
run_changeset_hooks_with_mgr(bookmark_name, hooks, bookmarks, expected, true)
run_changeset_hooks_with_mgr(ctx, bookmark_name, hooks, bookmarks, expected, true)
}
fn run_changeset_hooks_with_mgr(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<Hook<HookChangeset>>>,
bookmarks: HashMap<String, Vec<String>>,
@ -1917,6 +2025,7 @@ mod test {
hook_manager.register_changeset_hook(&hook_name, hook.into(), None);
}
let fut = hook_manager.run_changeset_hooks_for_bookmark(
ctx,
default_changeset_id(),
&Bookmark::new(bookmark_name).unwrap(),
None,
@ -1929,15 +2038,17 @@ mod test {
}
fn run_file_hooks(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<Hook<HookFile>>>,
bookmarks: HashMap<String, Vec<String>>,
expected: HashMap<String, HashMap<String, HookExecution>>,
) {
run_file_hooks_with_mgr(bookmark_name, hooks, bookmarks, expected, true)
run_file_hooks_with_mgr(ctx, bookmark_name, hooks, bookmarks, expected, true)
}
fn run_file_hooks_with_mgr(
ctx: CoreContext,
bookmark_name: &str,
hooks: HashMap<String, Box<Hook<HookFile>>>,
bookmarks: HashMap<String, Vec<String>>,
@ -1950,6 +2061,7 @@ mod test {
}
let fut: BoxFuture<Vec<(FileHookExecutionID, HookExecution)>, Error> = hook_manager
.run_file_hooks_for_bookmark(
ctx,
default_changeset_id(),
&Bookmark::new(bookmark_name).unwrap(),
None,
@ -1989,11 +2101,13 @@ mod test {
}
fn hook_manager_blobrepo() -> HookManager {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
let changeset_store = BlobRepoChangesetStore::new(repo.clone());
let content_store = BlobRepoFileContentStore::new(repo);
let logger = Logger::root(Discard {}.ignore_res(), o!());
HookManager::new(
ctx,
"some_repo".into(),
Box::new(changeset_store),
Arc::new(content_store),
@ -2003,10 +2117,13 @@ mod test {
}
fn hook_manager_inmem() -> HookManager {
let ctx = CoreContext::test_mock();
let repo = many_files_dirs::getrepo(None);
// Load up an in memory store with a single commit from the many_files_dirs store
let cs_id = HgChangesetId::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
let cs = repo.get_changeset_by_changesetid(&cs_id).wait().unwrap();
let cs = repo.get_changeset_by_changesetid(ctx.clone(), &cs_id)
.wait()
.unwrap();
let mut changeset_store = InMemoryChangesetStore::new();
changeset_store.insert(&cs_id, &cs);
let mut content_store = InMemoryFileContentStore::new();
@ -2024,6 +2141,7 @@ mod test {
);
let logger = Logger::root(Discard {}.ignore_res(), o!());
HookManager::new(
ctx,
"some_repo".into(),
Box::new(changeset_store),
Arc::new(content_store),

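The core API change in this file is the Hook trait itself: run now receives the CoreContext alongside the hook context, so every implementation — including the many test hooks above — either threads it into file-content fetches or ignores it as `_ctx`. A compilable miniature of the new shape (stand-in types; the real return type is BoxFuture<HookExecution, Error>):

```rust
extern crate futures; // futures = "0.1"

use futures::Future;
use futures::future::{ok, FutureResult};

#[derive(Clone)]
struct CoreContext;

#[derive(Clone)]
struct HookContext<T: Clone> {
    data: T,
}

#[derive(Debug, PartialEq)]
enum HookExecution {
    Accepted,
}

trait Hook<T: Clone>: Send + Sync {
    fn run(&self, ctx: CoreContext, hook_context: HookContext<T>)
        -> FutureResult<HookExecution, ()>;
}

struct AlwaysAccept;

impl Hook<String> for AlwaysAccept {
    // This hook does not need the context, so it binds it as `_ctx`.
    fn run(&self, _ctx: CoreContext, _hook_context: HookContext<String>)
        -> FutureResult<HookExecution, ()> {
        ok(HookExecution::Accepted)
    }
}

fn main() {
    let hook = AlwaysAccept;
    let hook_context = HookContext { data: "some/file/path".to_string() };
    let result = hook.run(CoreContext, hook_context).wait();
    assert_eq!(result, Ok(HookExecution::Accepted));
}
```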
[File diff suppressed because it is too large]

View File

@ -10,6 +10,7 @@
#![deny(warnings)]
use super::{Hook, HookChangeset, HookContext, HookExecution};
use context::CoreContext;
use failure::Error;
use futures::finished;
use futures_ext::{BoxFuture, FutureExt};
@ -19,7 +20,11 @@ pub struct RustHook {
}
impl Hook<HookChangeset> for RustHook {
fn run(&self, _context: HookContext<HookChangeset>) -> BoxFuture<HookExecution, Error> {
fn run(
&self,
_ctx: CoreContext,
_context: HookContext<HookChangeset>,
) -> BoxFuture<HookExecution, Error> {
finished(HookExecution::Accepted).boxify()
}
}

View File

@ -20,6 +20,7 @@ extern crate maplit;
extern crate tempdir;
extern crate blobrepo;
extern crate context;
extern crate hlua_futures;
extern crate mercurial;
extern crate mercurial_types;
@ -38,6 +39,7 @@ use futures::Future;
use hlua::{AnyLuaValue, Lua, LuaError, PushGuard};
use blobrepo::BlobRepo;
use context::CoreContext;
use hlua_futures::{AnyFuture, LuaCoroutine, LuaCoroutineBuilder};
use mercurial_types::{Changeset, HgNodeHash};
use mercurial_types::nodehash::HgChangesetId;
@ -67,6 +69,7 @@ pub struct HookContext<'hook> {
impl<'hook> HookContext<'hook> {
fn run<'a, 'lua>(
&self,
ctx: CoreContext,
lua: &'a mut Lua<'lua>,
) -> Result<LuaCoroutine<PushGuard<&'a mut Lua<'lua>>, bool>> {
let repo = self.repo.clone();
@ -78,7 +81,7 @@ impl<'hook> HookContext<'hook> {
let changesetid = HgChangesetId::from_ascii_str(&hash)
.with_context(|_| ErrorKind::InvalidHash(name.clone(), hash.into()))?;
let future = repo.get_changeset_by_changesetid(&changesetid)
let future = repo.get_changeset_by_changesetid(ctx.clone(), &changesetid)
.map_err(|err| LuaError::ExecutionError(format!("failed to get author: {}", err)))
.map(|cs| AnyLuaValue::LuaString(String::from_utf8_lossy(cs.user()).into_owned()));
Ok(AnyFuture::new(future))
@ -113,11 +116,12 @@ impl<'lua> HookManager<'lua> {
pub fn run_hook<'hook>(
&mut self,
ctx: CoreContext,
hook: HookContext<'hook>,
) -> Result<LuaCoroutine<PushGuard<&mut Lua<'lua>>, bool>> {
// TODO: with multiple Lua contexts, choose a context to run in. Probably use a queue or
// something.
hook.run(&mut self.lua)
hook.run(ctx, &mut self.lua)
}
}
@ -128,6 +132,7 @@ mod test {
#[test]
fn test_hook() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let hook_info = hashmap! {
"repo" => "fbsource".into(),
"bookmark" => "master".into(),
@ -151,7 +156,7 @@ mod test {
end",
};
let coroutine_fut = hook_manager.run_hook(hook).unwrap();
let coroutine_fut = hook_manager.run_hook(ctx, hook).unwrap();
let result = coroutine_fut.wait();
assert!(result.unwrap());
})

View File

@ -17,6 +17,7 @@ extern crate maplit;
#[cfg(test)]
extern crate async_unit;
extern crate context;
extern crate futures_ext;
extern crate mercurial_types;
extern crate mononoke_types;

View File

@ -13,6 +13,7 @@ use failure::{Error, ResultExt};
use futures::IntoFuture;
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mercurial_types::{Entry, FileType, HgBlob, MPath, MPathElement, Manifest, RepoPath, Type};
use mercurial_types::blobnode::HgParents;
use mercurial_types::manifest::Content;
@ -249,16 +250,16 @@ impl Entry for MockEntry {
fn get_type(&self) -> Type {
self.ty.expect("ty is not set!")
}
fn get_parents(&self) -> BoxFuture<HgParents, Error> {
fn get_parents(&self, _ctx: CoreContext) -> BoxFuture<HgParents, Error> {
unimplemented!();
}
fn get_raw_content(&self) -> BoxFuture<HgBlob, Error> {
fn get_raw_content(&self, _ctx: CoreContext) -> BoxFuture<HgBlob, Error> {
unimplemented!();
}
fn get_content(&self) -> BoxFuture<Content, Error> {
fn get_content(&self, _ctx: CoreContext) -> BoxFuture<Content, Error> {
Ok((self.content_factory)()).into_future().boxify()
}
fn get_size(&self) -> BoxFuture<Option<usize>, Error> {
fn get_size(&self, _ctx: CoreContext) -> BoxFuture<Option<usize>, Error> {
unimplemented!();
}
fn get_hash(&self) -> &HgEntryId {
@ -286,6 +287,7 @@ mod test {
#[test]
fn lookup() {
async_unit::tokio_unit_test(|| {
let ctx = CoreContext::test_mock();
let paths = btreemap! {
"foo/bar1" => (FileType::Regular, "bar1"),
"foo/bar2" => (FileType::Symlink, "bar2"),
@ -304,7 +306,7 @@ mod test {
.lookup(&MPathElement::new(b"foo".to_vec()).unwrap())
.expect("foo should be present");
let foo_content = foo_entry
.get_content()
.get_content(ctx.clone())
.wait()
.expect("content fetch should work");
let foo_manifest = match foo_content {
@ -316,7 +318,7 @@ mod test {
.lookup(&MPathElement::new(b"bar1".to_vec()).unwrap())
.expect("bar1 should be present");
let bar1_content = bar1_entry
.get_content()
.get_content(ctx.clone())
.wait()
.expect("content fetch should work");
match bar1_content {
@ -330,7 +332,7 @@ mod test {
.lookup(&MPathElement::new(b"bar2".to_vec()).unwrap())
.expect("bar2 should be present");
let bar2_content = bar2_entry
.get_content()
.get_content(ctx.clone())
.wait()
.expect("content fetch should work");
match bar2_content {

View File

@ -77,6 +77,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate context;
extern crate futures_ext;
extern crate mercurial_thrift;
extern crate mononoke_types;

View File

@ -9,6 +9,7 @@ use std::iter;
use failure::Error;
use context::CoreContext;
use futures_ext::{BoxFuture, FutureExt};
use mononoke_types::{FileContents, FileType, MPathElement};
@ -176,17 +177,17 @@ pub trait Entry: Send + 'static {
fn get_type(&self) -> Type;
/// Get the parents (in the history graph) of the referred-to object
fn get_parents(&self) -> BoxFuture<HgParents, Error>;
fn get_parents(&self, ctx: CoreContext) -> BoxFuture<HgParents, Error>;
/// Get the raw content of the object as it exists in the blobstore,
/// without any interpretation. This is only really useful for doing a bit-level duplication.
fn get_raw_content(&self) -> BoxFuture<HgBlob, Error>;
fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error>;
/// Get the interpreted content of the object. This will likely require IO
fn get_content(&self) -> BoxFuture<Content, Error>;
fn get_content(&self, ctx: CoreContext) -> BoxFuture<Content, Error>;
/// Get the logical size of the entry. Some entries don't really have a meaningful size.
fn get_size(&self) -> BoxFuture<Option<usize>, Error>;
fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error>;
/// Get the identity of the object this entry refers to.
fn get_hash(&self) -> &HgEntryId;
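All five accessors above now take the CoreContext as their first argument. As a minimal usage sketch (the helper name is hypothetical; it assumes the futures 0.1 combinators and futures_ext::FutureExt used throughout this diff):

use context::CoreContext;
use failure::Error;
use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::Entry;
use mercurial_types::manifest::Content;

// Hypothetical helper, not part of this commit: fetch an entry's logical
// size and interpreted content under the new ctx-taking signatures. The
// context is cloned per call, as call sites throughout this diff do.
fn fetch_size_and_content(
    ctx: CoreContext,
    entry: Box<Entry + Sync>,
) -> BoxFuture<(Option<usize>, Content), Error> {
    entry
        .get_size(ctx.clone())
        .join(entry.get_content(ctx))
        .boxify()
}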
@ -231,20 +232,20 @@ where
self.entry.get_type()
}
fn get_parents(&self) -> BoxFuture<HgParents, Error> {
self.entry.get_parents().boxify()
fn get_parents(&self, ctx: CoreContext) -> BoxFuture<HgParents, Error> {
self.entry.get_parents(ctx).boxify()
}
fn get_raw_content(&self) -> BoxFuture<HgBlob, Error> {
self.entry.get_raw_content().boxify()
fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error> {
self.entry.get_raw_content(ctx).boxify()
}
fn get_content(&self) -> BoxFuture<Content, Error> {
self.entry.get_content().boxify()
fn get_content(&self, ctx: CoreContext) -> BoxFuture<Content, Error> {
self.entry.get_content(ctx).boxify()
}
fn get_size(&self) -> BoxFuture<Option<usize>, Error> {
self.entry.get_size().boxify()
fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error> {
self.entry.get_size(ctx).boxify()
}
fn get_hash(&self) -> &HgEntryId {
@ -261,20 +262,20 @@ impl Entry for Box<Entry + Sync> {
(**self).get_type()
}
fn get_parents(&self) -> BoxFuture<HgParents, Error> {
(**self).get_parents()
fn get_parents(&self, ctx: CoreContext) -> BoxFuture<HgParents, Error> {
(**self).get_parents(ctx)
}
fn get_raw_content(&self) -> BoxFuture<HgBlob, Error> {
(**self).get_raw_content()
fn get_raw_content(&self, ctx: CoreContext) -> BoxFuture<HgBlob, Error> {
(**self).get_raw_content(ctx)
}
fn get_content(&self) -> BoxFuture<Content, Error> {
(**self).get_content()
fn get_content(&self, ctx: CoreContext) -> BoxFuture<Content, Error> {
(**self).get_content(ctx)
}
fn get_size(&self) -> BoxFuture<Option<usize>, Error> {
(**self).get_size()
fn get_size(&self, ctx: CoreContext) -> BoxFuture<Option<usize>, Error> {
(**self).get_size(ctx)
}
fn get_hash(&self) -> &HgEntryId {

View File

@ -10,6 +10,7 @@ use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::sync::{Arc, Mutex};
use context::CoreContext;
use futures::IntoFuture;
use futures::future::{self, Future};
use futures::stream::{empty, once, Stream};
@ -215,6 +216,7 @@ impl Hash for NewEntry {
/// parents simultaneously and produce the intersection result while
/// traversing
pub fn new_entry_intersection_stream<M, P1M, P2M>(
ctx: CoreContext,
root: &M,
p1: Option<&P1M>,
p2: Option<&P2M>,
@ -226,21 +228,21 @@ where
{
if p1.is_none() || p2.is_none() {
let ces = if let Some(p1) = p1 {
changed_entry_stream(root, p1, None)
changed_entry_stream(ctx, root, p1, None)
} else if let Some(p2) = p2 {
changed_entry_stream(root, p2, None)
changed_entry_stream(ctx, root, p2, None)
} else {
changed_entry_stream(root, &EmptyManifest {}, None)
changed_entry_stream(ctx, root, &EmptyManifest {}, None)
};
ces.filter_map(NewEntry::from_changed_entry)
.map(NewEntry::into_tuple)
.boxify()
} else {
let p1 =
changed_entry_stream(root, p1.unwrap(), None).filter_map(NewEntry::from_changed_entry);
let p2 =
changed_entry_stream(root, p2.unwrap(), None).filter_map(NewEntry::from_changed_entry);
let p1 = changed_entry_stream(ctx.clone(), root, p1.unwrap(), None)
.filter_map(NewEntry::from_changed_entry);
let p2 = changed_entry_stream(ctx, root, p2.unwrap(), None)
.filter_map(NewEntry::from_changed_entry);
p2.collect()
.map(move |p2| {
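A usage sketch for the intersection stream (hypothetical helper; assumes Box&lt;Manifest&gt; implements Manifest, which the callers in this diff rely on, and that collecting the stream is acceptable for the manifests at hand):

use context::CoreContext;
use failure::Error;
use futures::{Future, Stream};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::Manifest;

// Hypothetical sketch, not part of this commit: count entries present in
// `root` but in neither parent manifest.
fn count_new_entries(
    ctx: CoreContext,
    root: Box<Manifest>,
    p1: Option<Box<Manifest>>,
    p2: Option<Box<Manifest>>,
) -> BoxFuture<usize, Error> {
    new_entry_intersection_stream(ctx, &root, p1.as_ref(), p2.as_ref())
        .collect()
        .map(|entries| entries.len())
        .boxify()
}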
@ -350,6 +352,7 @@ impl<A: Pruner, B: Pruner> Pruner for CombinatorPruner<A, B> {
/// and Added directory entry. The same *does not* apply for changes between the various
/// file types (Regular, Executable and Symlink): those will only be one Modified entry.
pub fn changed_entry_stream<TM, FM>(
ctx: CoreContext,
to: &TM,
from: &FM,
path: Option<MPath>,
@ -358,10 +361,11 @@ where
TM: Manifest,
FM: Manifest,
{
changed_entry_stream_with_pruner(to, from, path, NoopPruner, None).boxify()
changed_entry_stream_with_pruner(ctx, to, from, path, NoopPruner, None).boxify()
}
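A matching sketch for changed_entry_stream itself (hypothetical helper, same assumptions as above; the CoreContext is now the first argument):

use context::CoreContext;
use failure::Error;
use futures::{Future, Stream};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::Manifest;

// Hypothetical sketch, not part of this commit: count the entries that
// differ between two manifests.
fn count_changed_entries(
    ctx: CoreContext,
    to: Box<Manifest>,
    from: Box<Manifest>,
) -> BoxFuture<usize, Error> {
    changed_entry_stream(ctx, &to, &from, None)
        .collect()
        .map(|entries| entries.len())
        .boxify()
}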
pub fn changed_file_stream<TM, FM>(
ctx: CoreContext,
to: &TM,
from: &FM,
path: Option<MPath>,
@ -370,12 +374,13 @@ where
TM: Manifest,
FM: Manifest,
{
changed_entry_stream_with_pruner(to, from, path, NoopPruner, None)
changed_entry_stream_with_pruner(ctx, to, from, path, NoopPruner, None)
.filter(|changed_entry| !changed_entry.status.is_tree())
.boxify()
}
pub fn changed_entry_stream_with_pruner<TM, FM>(
ctx: CoreContext,
to: &TM,
from: &FM,
path: Option<MPath>,
@ -399,7 +404,13 @@ where
move |entry| pruner.keep(entry)
})
.map(|entry| {
recursive_changed_entry_stream(entry, 1, pruner.clone(), max_depth)
recursive_changed_entry_stream(
ctx.clone(),
entry,
1,
pruner.clone(),
max_depth,
)
}),
)
})
@ -411,6 +422,7 @@ where
/// that differ. If input isn't a tree, then a stream with a single entry is returned, otherwise
/// subtrees are recursively compared.
fn recursive_changed_entry_stream(
ctx: CoreContext,
changed_entry: ChangedEntry,
depth: usize,
pruner: impl Pruner + Send + Clone + 'static,
@ -423,7 +435,10 @@ fn recursive_changed_entry_stream(
let (to_mf, from_mf, path) = match &changed_entry.status {
EntryStatus::Added(entry) => {
let empty_mf: Box<Manifest> = Box::new(EmptyManifest {});
let to_mf = entry.get_content().map(get_tree_content).boxify();
let to_mf = entry
.get_content(ctx.clone())
.map(get_tree_content)
.boxify();
let from_mf = Ok(empty_mf).into_future().boxify();
let dirname = changed_entry.dirname.clone();
@ -435,7 +450,10 @@ fn recursive_changed_entry_stream(
EntryStatus::Deleted(entry) => {
let empty_mf: Box<Manifest> = Box::new(EmptyManifest {});
let to_mf = Ok(empty_mf).into_future().boxify();
let from_mf = entry.get_content().map(get_tree_content).boxify();
let from_mf = entry
.get_content(ctx.clone())
.map(get_tree_content)
.boxify();
let dirname = changed_entry.dirname.clone();
let entry_path = entry.get_name().cloned();
@ -450,8 +468,14 @@ fn recursive_changed_entry_stream(
debug_assert!(to_entry.get_type().is_tree() == from_entry.get_type().is_tree());
debug_assert!(to_entry.get_type().is_tree());
let to_mf = to_entry.get_content().map(get_tree_content).boxify();
let from_mf = from_entry.get_content().map(get_tree_content).boxify();
let to_mf = to_entry
.get_content(ctx.clone())
.map(get_tree_content)
.boxify();
let from_mf = from_entry
.get_content(ctx.clone())
.map(get_tree_content)
.boxify();
let dirname = changed_entry.dirname.clone();
let entry_path = to_entry.get_name().cloned();
@ -474,6 +498,7 @@ fn recursive_changed_entry_stream(
})
.map(|entry| {
recursive_changed_entry_stream(
ctx.clone(),
entry,
depth + 1,
pruner.clone(),
@ -493,28 +518,28 @@ fn recursive_changed_entry_stream(
/// their path from the root of the repo.
/// For a non-tree entry returns a stream with a single (entry, path) pair.
pub fn recursive_entry_stream(
ctx: CoreContext,
rootpath: Option<MPath>,
entry: Box<Entry + Sync>,
) -> BoxStream<(Option<MPath>, Box<Entry + Sync>), Error> {
let subentries = match entry.get_type() {
Type::File(_) => empty().boxify(),
Type::Tree => {
let entry_basename = entry.get_name();
let path = MPath::join_opt(rootpath.as_ref(), entry_basename);
let subentries =
match entry.get_type() {
Type::File(_) => empty().boxify(),
Type::Tree => {
let entry_basename = entry.get_name();
let path = MPath::join_opt(rootpath.as_ref(), entry_basename);
entry
.get_content()
.map(|content| {
select_all(
get_tree_content(content)
.list()
.map(move |entry| recursive_entry_stream(path.clone(), entry)),
)
})
.flatten_stream()
.boxify()
}
};
entry
.get_content(ctx.clone())
.map(|content| {
select_all(get_tree_content(content).list().map(move |entry| {
recursive_entry_stream(ctx.clone(), path.clone(), entry)
}))
})
.flatten_stream()
.boxify()
}
};
once(Ok((rootpath, entry))).chain(subentries).boxify()
}
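A usage sketch for recursive_entry_stream (hypothetical helper; note that get_root_entry itself takes no context, matching its call sites in this diff):

use std::sync::Arc;

use blobrepo::BlobRepo;
use context::CoreContext;
use failure::Error;
use futures::{Future, Stream};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{HgManifestId, MPath};

// Hypothetical sketch, not part of this commit: collect the repo path of
// every entry reachable from a root manifest, as the tests below do.
fn list_all_paths(
    ctx: CoreContext,
    repo: Arc<BlobRepo>,
    manifestid: HgManifestId,
) -> BoxFuture<Vec<Option<MPath>>, Error> {
    let root_entry = repo.get_root_entry(&manifestid);
    recursive_entry_stream(ctx, None, root_entry)
        .map(|(path, _entry)| path)
        .collect()
        .boxify()
}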

View File

@ -9,6 +9,7 @@
extern crate async_unit;
extern crate blobrepo;
extern crate context;
extern crate fixtures;
extern crate futures;
extern crate futures_ext;
@ -24,6 +25,7 @@ use std::str::FromStr;
use std::sync::Arc;
use blobrepo::BlobRepo;
use context::CoreContext;
use fixtures::{linear, many_files_dirs};
use futures::{Future, Stream};
use futures::executor::spawn;
@ -39,12 +41,18 @@ use mercurial_types::nodehash::{HgChangesetId, HgEntryId, HgNodeHash};
use mercurial_types_mocks::manifest::{ContentFactory, MockEntry, MockManifest};
use mercurial_types_mocks::nodehash;
fn get_root_manifest(repo: Arc<BlobRepo>, changesetid: &HgChangesetId) -> Box<Manifest> {
let cs = repo.get_changeset_by_changesetid(changesetid)
fn get_root_manifest(
ctx: CoreContext,
repo: Arc<BlobRepo>,
changesetid: &HgChangesetId,
) -> Box<Manifest> {
let cs = repo.get_changeset_by_changesetid(ctx.clone(), changesetid)
.wait()
.unwrap();
let manifestid = cs.manifestid();
repo.get_manifest_by_nodeid(&manifestid).wait().unwrap()
repo.get_manifest_by_nodeid(ctx, &manifestid)
.wait()
.unwrap()
}
fn get_hash(c: char) -> HgEntryId {
@ -192,12 +200,14 @@ fn test_diff_sorted_vecs_one_empty() {
}
fn find_changed_entry_status_stream(
ctx: CoreContext,
manifest: Box<Manifest>,
basemanifest: Box<Manifest>,
pruner: impl Pruner + Send + Clone + 'static,
max_depth: Option<usize>,
) -> Vec<ChangedEntry> {
let mut stream = spawn(changed_entry_stream_with_pruner(
ctx,
&manifest,
&basemanifest,
None,
@ -274,6 +284,7 @@ fn check_changed_paths(
}
fn do_check_with_pruner(
ctx: CoreContext,
repo: Arc<BlobRepo>,
main_hash: HgNodeHash,
base_hash: HgNodeHash,
@ -284,11 +295,17 @@ fn do_check_with_pruner(
max_depth: Option<usize>,
) {
{
let manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash));
let base_manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash));
let manifest = get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash));
let base_manifest =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash));
let res =
find_changed_entry_status_stream(manifest, base_manifest, pruner.clone(), max_depth);
let res = find_changed_entry_status_stream(
ctx.clone(),
manifest,
base_manifest,
pruner.clone(),
max_depth,
);
check_changed_paths(
res,
@ -301,10 +318,17 @@ fn do_check_with_pruner(
// Vice-versa: compare base_hash to main_hash. Deleted paths become added, added become
// deleted.
{
let manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash));
let base_manifest = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash));
let manifest = get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash));
let base_manifest =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash));
let res = find_changed_entry_status_stream(manifest, base_manifest, pruner, max_depth);
let res = find_changed_entry_status_stream(
ctx.clone(),
manifest,
base_manifest,
pruner,
max_depth,
);
check_changed_paths(
res,
@ -316,6 +340,7 @@ fn do_check_with_pruner(
}
fn do_check(
ctx: CoreContext,
repo: Arc<BlobRepo>,
main_hash: HgNodeHash,
base_hash: HgNodeHash,
@ -324,6 +349,7 @@ fn do_check(
expected_modified: Vec<&str>,
) {
do_check_with_pruner(
ctx,
repo,
main_hash,
base_hash,
@ -338,12 +364,14 @@ fn do_check(
#[test]
fn test_recursive_changed_entry_stream_linear() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(linear::getrepo(None));
let main_hash = HgNodeHash::from_str("79a13814c5ce7330173ec04d279bf95ab3f652fb").unwrap();
let base_hash = HgNodeHash::from_str("a5ffa77602a066db7d5cfb9fb5823a0895717c5a").unwrap();
let expected_modified = vec!["10"];
do_check(
ctx.clone(),
repo,
main_hash,
base_hash,
@ -358,6 +386,7 @@ fn test_recursive_changed_entry_stream_linear() {
#[test]
fn test_recursive_changed_entry_stream_simple() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
let base_hash = HgNodeHash::from_str("5a28e25f924a5d209b82ce0713d8d83e68982bc8").unwrap();
@ -383,7 +412,15 @@ fn test_recursive_changed_entry_stream_simple() {
"dir2",
"dir2/file_1_in_dir2",
];
do_check(repo, main_hash, base_hash, expected_added, vec![], vec![]);
do_check(
ctx.clone(),
repo,
main_hash,
base_hash,
expected_added,
vec![],
vec![],
);
Ok(())
}).expect("test failed")
}
@ -391,6 +428,7 @@ fn test_recursive_changed_entry_stream_simple() {
#[test]
fn test_recursive_entry_stream() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let changesetid = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
@ -403,13 +441,13 @@ fn test_recursive_entry_stream() {
// dir1/subdir1/file_1
// dir2/file_1_in_dir2
let cs = repo.get_changeset_by_changesetid(&HgChangesetId::new(changesetid))
let cs = repo.get_changeset_by_changesetid(ctx.clone(), &HgChangesetId::new(changesetid))
.wait()
.unwrap();
let manifestid = cs.manifestid();
let root_entry = repo.get_root_entry(&manifestid);
let fut = recursive_entry_stream(None, root_entry).collect();
let fut = recursive_entry_stream(ctx.clone(), None, root_entry).collect();
let res = fut.wait().unwrap();
let mut actual = hashset![];
@ -432,14 +470,14 @@ fn test_recursive_entry_stream() {
assert_eq!(actual, expected);
let root_mf = repo.get_manifest_by_nodeid(&manifestid)
let root_mf = repo.get_manifest_by_nodeid(ctx.clone(), &manifestid)
.wait()
.unwrap();
let path_element = MPathElement::new(Vec::from("dir1".as_bytes())).unwrap();
let subentry = root_mf.lookup(&path_element).unwrap();
let res = recursive_entry_stream(None, subentry)
let res = recursive_entry_stream(ctx.clone(), None, subentry)
.collect()
.wait()
.unwrap();
@ -465,6 +503,7 @@ fn test_recursive_entry_stream() {
#[test]
fn test_recursive_changed_entry_stream_changed_dirs() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
let base_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
@ -482,6 +521,7 @@ fn test_recursive_changed_entry_stream_changed_dirs() {
];
let expected_modified = vec!["dir1", "dir1/subdir1"];
do_check(
ctx.clone(),
repo,
main_hash,
base_hash,
@ -496,6 +536,7 @@ fn test_recursive_changed_entry_stream_changed_dirs() {
#[test]
fn test_recursive_changed_entry_stream_dirs_replaced_with_file() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = HgNodeHash::from_str("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480").unwrap();
let base_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
@ -523,6 +564,7 @@ fn test_recursive_changed_entry_stream_dirs_replaced_with_file() {
"dir1/subdir1/subsubdir2/file_2",
];
do_check(
ctx.clone(),
repo,
main_hash,
base_hash,
@ -537,6 +579,7 @@ fn test_recursive_changed_entry_stream_dirs_replaced_with_file() {
#[test]
fn test_depth_parameter() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
let base_hash = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
@ -554,6 +597,7 @@ fn test_depth_parameter() {
];
let expected_modified = vec!["dir1", "dir1/subdir1"];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -567,6 +611,7 @@ fn test_depth_parameter() {
let expected_added = vec!["dir1/subdir1/subsubdir1", "dir1/subdir1/subsubdir2"];
let expected_modified = vec!["dir1", "dir1/subdir1"];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -580,6 +625,7 @@ fn test_depth_parameter() {
let expected_added = vec![];
let expected_modified = vec!["dir1", "dir1/subdir1"];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -593,6 +639,7 @@ fn test_depth_parameter() {
let expected_added = vec![];
let expected_modified = vec!["dir1"];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -606,6 +653,7 @@ fn test_depth_parameter() {
let expected_added = vec![];
let expected_modified = vec![];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -636,6 +684,7 @@ where
#[test]
fn test_recursive_changed_entry_prune() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = HgNodeHash::from_str("0c59c8d0da93cbf9d7f4b888f28823ffb2e3e480").unwrap();
let base_hash = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
@ -652,6 +701,7 @@ fn test_recursive_changed_entry_prune() {
let expected_added = vec!["dir1"];
let expected_deleted = vec!["dir1", "dir1/file_1_in_dir1", "dir1/file_2_in_dir1"];
do_check_with_pruner(
ctx.clone(),
repo.clone(),
main_hash,
base_hash,
@ -685,6 +735,7 @@ fn test_recursive_changed_entry_prune() {
"dir1/subdir1/subsubdir2/file_1",
];
do_check_with_pruner(
ctx.clone(),
repo,
main_hash,
base_hash,
@ -712,6 +763,7 @@ fn test_recursive_changed_entry_prune() {
#[test]
fn test_recursive_changed_entry_prune_visited() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash_1 = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
let main_hash_2 = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
@ -737,28 +789,38 @@ fn test_recursive_changed_entry_prune_visited() {
// A dir1/subdir1/subsubdir2/file_1
// A dir1/subdir1/subsubdir2/file_2
let manifest_1 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_1));
let manifest_2 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_2));
let basemanifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash));
let manifest_1 =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_1));
let manifest_2 =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_2));
let basemanifest =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash));
let pruner = VisitedPruner::new();
let first_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_1,
&basemanifest,
None,
pruner.clone(),
None,
);
let second_stream =
changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, pruner, None);
let second_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_2,
&basemanifest,
None,
pruner,
None,
);
let mut res = spawn(select_all(vec![first_stream, second_stream]).collect());
let res = res.wait_future().unwrap();
let unique_len = res.len();
assert_eq!(unique_len, 15);
let first_stream = changed_entry_stream(&manifest_1, &basemanifest, None);
let second_stream = changed_entry_stream(&manifest_2, &basemanifest, None);
let first_stream = changed_entry_stream(ctx.clone(), &manifest_1, &basemanifest, None);
let second_stream = changed_entry_stream(ctx.clone(), &manifest_2, &basemanifest, None);
let mut res = spawn(select_all(vec![first_stream, second_stream]).collect());
let res = res.wait_future().unwrap();
// Make sure that more entries are produced without VisitedPruner i.e. some entries are
@ -772,6 +834,7 @@ fn test_recursive_changed_entry_prune_visited() {
#[test]
fn test_recursive_changed_entry_prune_visited_no_files() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash_1 = HgNodeHash::from_str("2f866e7e549760934e31bf0420a873f65100ad63").unwrap();
let main_hash_2 = HgNodeHash::from_str("d261bc7900818dea7c86935b3fb17a33b2e3a6b4").unwrap();
@ -797,29 +860,51 @@ fn test_recursive_changed_entry_prune_visited_no_files() {
// A dir1/subdir1/subsubdir2/file_1
// A dir1/subdir1/subsubdir2/file_2
let manifest_1 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_1));
let manifest_2 = get_root_manifest(repo.clone(), &HgChangesetId::new(main_hash_2));
let basemanifest = get_root_manifest(repo.clone(), &HgChangesetId::new(base_hash));
let manifest_1 =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_1));
let manifest_2 =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(main_hash_2));
let basemanifest =
get_root_manifest(ctx.clone(), repo.clone(), &HgChangesetId::new(base_hash));
let pruner = CombinatorPruner::new(FilePruner, VisitedPruner::new());
let first_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_1,
&basemanifest,
None,
pruner.clone(),
None,
);
let second_stream =
changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, pruner, None);
let second_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_2,
&basemanifest,
None,
pruner,
None,
);
let mut res = spawn(select_all(vec![first_stream, second_stream]).collect());
let res = res.wait_future().unwrap();
let unique_len = res.len();
assert_eq!(unique_len, 7);
let first_stream =
changed_entry_stream_with_pruner(&manifest_1, &basemanifest, None, FilePruner, None);
let second_stream =
changed_entry_stream_with_pruner(&manifest_2, &basemanifest, None, FilePruner, None);
let first_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_1,
&basemanifest,
None,
FilePruner,
None,
);
let second_stream = changed_entry_stream_with_pruner(
ctx.clone(),
&manifest_2,
&basemanifest,
None,
FilePruner,
None,
);
let mut res = spawn(select_all(vec![first_stream, second_stream]).collect());
let res = res.wait_future().unwrap();
// Make sure that more entries are produced without VisitedPruner i.e. some entries are
@ -833,6 +918,7 @@ fn test_recursive_changed_entry_prune_visited_no_files() {
#[test]
fn test_visited_pruner_different_files_same_hash() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let paths = btreemap! {
"foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)),
"foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)),
@ -841,8 +927,14 @@ fn test_visited_pruner_different_files_same_hash() {
MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid");
let pruner = VisitedPruner::new();
let stream =
changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None);
let stream = changed_entry_stream_with_pruner(
ctx.clone(),
&root_manifest,
&EmptyManifest {},
None,
pruner,
None,
);
let mut res = spawn(stream.collect());
let res = res.wait_future().unwrap();
@ -854,6 +946,7 @@ fn test_visited_pruner_different_files_same_hash() {
#[test]
fn test_file_pruner() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let paths = btreemap! {
"foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)),
"foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)),
@ -862,8 +955,14 @@ fn test_file_pruner() {
MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid");
let pruner = FilePruner;
let stream =
changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None);
let stream = changed_entry_stream_with_pruner(
ctx.clone(),
&root_manifest,
&EmptyManifest {},
None,
pruner,
None,
);
let mut res = spawn(stream.collect());
let res = res.wait_future().unwrap();
@ -875,6 +974,7 @@ fn test_file_pruner() {
#[test]
fn test_deleted_pruner() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let ctx = CoreContext::test_mock();
let paths = btreemap! {
"foo1" => (FileType::Regular, "content", HgEntryId::new(NULL_HASH)),
"foo2" => (FileType::Symlink, "content", HgEntryId::new(NULL_HASH)),
@ -883,8 +983,14 @@ fn test_deleted_pruner() {
MockManifest::from_path_hashes(paths, BTreeMap::new()).expect("manifest is valid");
let pruner = DeletedPruner;
let stream =
changed_entry_stream_with_pruner(&root_manifest, &EmptyManifest {}, None, pruner, None);
let stream = changed_entry_stream_with_pruner(
ctx.clone(),
&root_manifest,
&EmptyManifest {},
None,
pruner,
None,
);
let mut res = spawn(stream.collect());
let res = res.wait_future().unwrap();
@ -895,8 +1001,14 @@ fn test_deleted_pruner() {
);
let pruner = DeletedPruner;
let stream =
changed_entry_stream_with_pruner(&EmptyManifest {}, &root_manifest, None, pruner, None);
let stream = changed_entry_stream_with_pruner(
ctx.clone(),
&EmptyManifest {},
&root_manifest,
None,
pruner,
None,
);
let mut res = spawn(stream.collect());
let res = res.wait_future().unwrap();

View File

@ -36,15 +36,16 @@ use mononoke_types::MPath;
use errors::ErrorKind;
pub fn get_content_by_path(
ctx: CoreContext,
repo: Arc<BlobRepo>,
changesetid: HgChangesetId,
path: Option<MPath>,
) -> impl Future<Item = Content, Error = Error> {
repo.get_changeset_by_changesetid(&changesetid)
repo.get_changeset_by_changesetid(ctx.clone(), &changesetid)
.from_err()
.and_then({
let path = path.clone();
move |changeset| repo.find_path_in_manifest(path, *changeset.manifestid())
cloned!(ctx, path);
move |changeset| repo.find_path_in_manifest(ctx, path, *changeset.manifestid())
})
.and_then(|content| {
content.ok_or_else(move || {
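A caller sketch for get_content_by_path (hypothetical helper; assumes the file-like Content variants carry FileContents, as the match arms elsewhere in this commit suggest):

use std::sync::Arc;

use blobrepo::BlobRepo;
use context::CoreContext;
use failure::{err_msg, Error};
use futures::Future;
use mercurial_types::manifest::Content;
use mercurial_types::nodehash::HgChangesetId;
use mononoke_types::{FileContents, MPath};

// Hypothetical caller, not part of this commit: resolve a path at a
// changeset and keep only file-like contents.
fn read_file_at(
    ctx: CoreContext,
    repo: Arc<BlobRepo>,
    changesetid: HgChangesetId,
    path: MPath,
) -> impl Future<Item = FileContents, Error = Error> {
    get_content_by_path(ctx, repo, changesetid, Some(path)).and_then(|content| match content {
        Content::File(contents)
        | Content::Executable(contents)
        | Content::Symlink(contents) => Ok(contents),
        Content::Tree(_) => Err(err_msg("expected a file, got a tree")),
    })
}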

View File

@ -349,6 +349,7 @@ impl RepoClient {
let visited_pruner = VisitedPruner::new();
select_all(params.mfnodes.iter().map(|manifest_id| {
get_changed_manifests_stream(
self.ctx.clone(),
self.repo.blobrepo(),
&manifest_id,
&basemfnode,
@ -361,6 +362,7 @@ impl RepoClient {
} else {
match params.mfnodes.get(0) {
Some(mfnode) => get_changed_manifests_stream(
self.ctx.clone(),
self.repo.blobrepo(),
&mfnode,
&basemfnode,
@ -429,6 +431,7 @@ impl HgCommands for RepoClient {
info!(self.logger(), "between pairs {:?}", pairs);
struct ParentStream<CS> {
ctx: CoreContext,
repo: MononokeRepo,
n: HgNodeHash,
bottom: HgNodeHash,
@ -436,8 +439,14 @@ impl HgCommands for RepoClient {
};
impl<CS> ParentStream<CS> {
fn new(repo: &MononokeRepo, top: HgNodeHash, bottom: HgNodeHash) -> Self {
fn new(
ctx: CoreContext,
repo: &MononokeRepo,
top: HgNodeHash,
bottom: HgNodeHash,
) -> Self {
ParentStream {
ctx,
repo: repo.clone(),
n: top,
bottom: bottom,
@ -456,11 +465,10 @@ impl HgCommands for RepoClient {
}
self.wait_cs = self.wait_cs.take().or_else(|| {
Some(
self.repo
.blobrepo()
.get_changeset_by_changesetid(&HgChangesetId::new(self.n)),
)
Some(self.repo.blobrepo().get_changeset_by_changesetid(
self.ctx.clone(),
&HgChangesetId::new(self.n),
))
});
let cs = try_ready!(self.wait_cs.as_mut().unwrap().poll());
self.wait_cs = None; // got it
@ -476,11 +484,11 @@ impl HgCommands for RepoClient {
// TODO(jsgf): do pairs in parallel?
// TODO: directly return stream of streams
let repo = self.repo.clone();
cloned!(self.ctx, self.repo);
stream::iter_ok(pairs.into_iter())
.and_then(move |(top, bottom)| {
let mut f = 1;
ParentStream::new(&repo, top, bottom)
ParentStream::new(ctx.clone(), &repo, top, bottom)
.enumerate()
.filter(move |&(i, _)| {
if i == f {
@ -867,7 +875,7 @@ impl HgCommands for RepoClient {
fetcher,
repoid,
}) => fetcher
.fetch_changelog(*repoid, blobstore.clone())
.fetch_changelog(self.ctx.clone(), *repoid, blobstore.clone())
.right_future(),
};
@ -922,6 +930,7 @@ impl HgCommands for RepoClient {
}
fn get_changed_manifests_stream(
ctx: CoreContext,
repo: &BlobRepo,
mfid: &HgNodeHash,
basemfid: &HgNodeHash,
@ -931,12 +940,17 @@ fn get_changed_manifests_stream(
trace: TraceContext,
) -> BoxStream<(Box<Entry + Sync>, Option<MPath>), Error> {
let mfid = HgManifestId::new(*mfid);
let manifest = repo.get_manifest_by_nodeid(&mfid)
.traced(&trace, "fetch rootmf", trace_args!());
let manifest = repo.get_manifest_by_nodeid(ctx.clone(), &mfid).traced(
&trace,
"fetch rootmf",
trace_args!(),
);
let basemfid = HgManifestId::new(*basemfid);
let basemanifest =
repo.get_manifest_by_nodeid(&basemfid)
.traced(&trace, "fetch baserootmf", trace_args!());
let basemanifest = repo.get_manifest_by_nodeid(ctx.clone(), &basemfid).traced(
&trace,
"fetch baserootmf",
trace_args!(),
);
let root_entry_stream = stream::once(Ok((repo.get_root_entry(&mfid), rootpath.clone())));
@ -947,9 +961,16 @@ fn get_changed_manifests_stream(
let changed_entries = manifest
.join(basemanifest)
.map({
let rootpath = rootpath.clone();
cloned!(ctx, rootpath);
move |(mf, basemf)| {
changed_entry_stream_with_pruner(&mf, &basemf, rootpath, pruner, Some(max_depth))
changed_entry_stream_with_pruner(
ctx,
&mf,
&basemf,
rootpath,
pruner,
Some(max_depth),
)
}
})
.flatten_stream();
@ -995,7 +1016,7 @@ fn fetch_treepack_part_input(
let node = entry.get_hash().clone();
let path = repo_path.clone();
let parents = entry.get_parents().traced(
let parents = entry.get_parents(ctx.clone()).traced(
&trace,
"fetching parents",
trace_args!(
@ -1004,18 +1025,19 @@ fn fetch_treepack_part_input(
),
);
let linknode_fut = repo.get_linknode(ctx, &repo_path, &entry.get_hash().into_nodehash())
.traced(
&trace,
"fetching linknode",
trace_args!(
let linknode_fut =
repo.get_linknode(ctx.clone(), &repo_path, &entry.get_hash().into_nodehash())
.traced(
&trace,
"fetching linknode",
trace_args!(
"node" => node.to_string(),
"path" => path.to_string()
),
);
);
let content_fut = entry
.get_raw_content()
.get_raw_content(ctx.clone())
.map(|blob| blob.into_inner())
.traced(
&trace,
@ -1028,8 +1050,8 @@ fn fetch_treepack_part_input(
let validate_content = if validate_content {
entry
.get_raw_content()
.join(entry.get_parents())
.get_raw_content(ctx.clone())
.join(entry.get_parents(ctx))
.and_then(move |(content, parents)| {
let (p1, p2) = parents.get_nodes();
let actual = node.into_nodehash();

View File

@ -45,7 +45,7 @@ pub fn create_remotefilelog_blob(
let trace_args = trace_args!("node" => node.to_string(), "path" => path.to_string());
// raw_content includes copy information
let raw_content_bytes = repo.get_file_size(&HgFileNodeId::new(node))
let raw_content_bytes = repo.get_file_size(ctx.clone(), &HgFileNodeId::new(node))
.map({
move |file_size| match lfs_params.threshold {
Some(threshold) => (file_size <= threshold, file_size),
None => (true, file_size),
}
}
})
.and_then({
cloned!(repo);
cloned!(ctx, repo);
move |(direct_fetching_file, file_size)| {
if direct_fetching_file {
(
repo.get_file_content(&node).left_future(),
repo.get_file_content(ctx, &node).left_future(),
Ok(RevFlags::REVIDX_DEFAULT_FLAGS).into_future(),
)
} else {
// pass content id to prevent envelope fetching
cloned!(repo);
(
repo.get_file_content_id(&HgFileNodeId::new(node))
repo.get_file_content_id(ctx.clone(), &HgFileNodeId::new(node))
.and_then(move |content_id| {
repo.generate_lfs_file(content_id, file_size)
repo.generate_lfs_file(ctx, content_id, file_size)
})
.right_future(),
Ok(RevFlags::REVIDX_EXTSTORED).into_future(),
@ -184,7 +184,7 @@ fn validate_content(
actual: HgNodeHash,
mut scuba_logger: ScubaSampleBuilder,
) -> impl Future<Item = (), Error = Error> {
let file_content = repo.get_file_content(&actual);
let file_content = repo.get_file_content(ctx.clone(), &actual);
let repopath = RepoPath::FilePath(path.clone());
let filenode = repo.get_filenode(ctx, &repopath, &actual);
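For reference, the threshold rule used by create_remotefilelog_blob above, as a standalone sketch (LfsParams is reduced to a bare Option&lt;u64&gt; here):

// Files at or below the configured threshold are served inline with
// default rev flags; larger files are rewritten as LFS pointers and
// flagged REVIDX_EXTSTORED. With no threshold, everything is inline.
fn serve_inline(file_size: u64, threshold: Option<u64>) -> bool {
    match threshold {
        Some(threshold) => file_size <= threshold,
        None => true,
    }
}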

View File

@ -13,6 +13,7 @@ use futures_ext::{BoxFuture, FutureExt};
use sql::Connection;
use blobstore::Blobstore;
use context::CoreContext;
use mercurial_types::RepositoryId;
use mononoke_types::BlobstoreBytes;
@ -62,6 +63,7 @@ impl SqlStreamingChunksFetcher {
pub fn fetch_changelog(
&self,
ctx: CoreContext,
repo_id: RepositoryId,
blobstore: impl Blobstore + Clone,
) -> BoxFuture<RevlogStreamingChunks, Error> {
@ -75,7 +77,7 @@ impl SqlStreamingChunksFetcher {
let data_blob_key = String::from_utf8_lossy(&data_blob_name).into_owned();
res.data_blobs.push(
blobstore
.get(data_blob_key.clone())
.get(ctx.clone(), data_blob_key.clone())
.and_then(|data| {
data.ok_or(
ErrorKind::MissingStreamingBlob(data_blob_key).into(),
@ -87,7 +89,7 @@ impl SqlStreamingChunksFetcher {
let idx_blob_key = String::from_utf8_lossy(&idx_blob_name).into_owned();
res.index_blobs.push(
blobstore
.get(idx_blob_key.clone())
.get(ctx.clone(), idx_blob_key.clone())
.and_then(|data| {
data.ok_or(ErrorKind::MissingStreamingBlob(idx_blob_key).into())
})
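A minimal sketch of the new Blobstore::get calling convention used above (hypothetical helper; assumes get yields Option&lt;BlobstoreBytes&gt; and reuses this crate's ErrorKind):

use blobstore::Blobstore;
use context::CoreContext;
use errors::ErrorKind;
use failure::Error;
use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
use mononoke_types::BlobstoreBytes;

// Hypothetical helper, not part of this commit: fetch a required blob with
// the ctx-taking Blobstore::get, turning a missing key into an error.
fn fetch_required_blob(
    ctx: CoreContext,
    blobstore: impl Blobstore + Clone,
    key: String,
) -> BoxFuture<BlobstoreBytes, Error> {
    blobstore
        .get(ctx, key.clone())
        .and_then(move |maybe| maybe.ok_or(ErrorKind::MissingStreamingBlob(key).into()))
        .boxify()
}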

View File

@ -55,7 +55,7 @@ fn make_pending(
Box::new(
{
let repo = repo.clone();
repo.get_bonsai_changeset(child.hash)
repo.get_bonsai_changeset(ctx.clone(), child.hash)
.map(move |cs| {
let parents: Vec<_> = cs.parents().cloned().collect();
(child, parents)

View File

@ -54,6 +54,8 @@ pub fn repo_handlers(
root_log,
"Start warming for repo {}, type {:?}", reponame, config.repotype
);
// TODO(T37478150, luk): this is not a test use case, need to address this later
let ctx = CoreContext::test_mock();
let ensure_myrouter_ready = match config.get_db_address() {
None => future::ok(()).left_future(),
Some(db_address) => {
@ -82,8 +84,12 @@ pub fn repo_handlers(
None => Default::default(),
};
let mut hook_manager =
HookManager::new_with_blobrepo(hook_manager_params, blobrepo.clone(), logger);
let mut hook_manager = HookManager::new_with_blobrepo(
ctx.clone(),
hook_manager_params,
blobrepo.clone(),
logger,
);
info!(root_log, "Loading hooks");
try_boxfuture!(load_hooks(&mut hook_manager, config.clone()));
@ -118,7 +124,7 @@ pub fn repo_handlers(
Some(skiplist_index_blobstore_key) => {
let blobstore = repo.blobrepo().get_blobstore();
blobstore
.get(skiplist_index_blobstore_key)
.get(ctx.clone(), skiplist_index_blobstore_key)
.and_then(|maybebytes| {
let map = match maybebytes {
Some(bytes) => {
@ -136,9 +142,7 @@ pub fn repo_handlers(
// TODO (T32873881): Arc<BlobRepo> should become BlobRepo
let initial_warmup = ensure_myrouter_ready.and_then({
cloned!(reponame, listen_log);
// TODO(T37478150, luk): this is not a test use case, need to address this later
let ctx = CoreContext::test_mock();
cloned!(ctx, reponame, listen_log);
let blobrepo = repo.blobrepo().clone();
move |()| {
cache_warmup(ctx, Arc::new(blobrepo), config.cache_warmup, listen_log)

View File

@ -33,6 +33,7 @@ use mononoke_types::{BonsaiChangesetMut, DateTime, FileChange, FileContents, Fil
use slog::Logger;
fn store_files(
ctx: CoreContext,
files: BTreeMap<&str, Option<&str>>,
repo: BlobRepo,
) -> BTreeMap<MPath, Option<FileChange>> {
@ -44,7 +45,7 @@ fn store_files(
Some(content) => {
let size = content.len();
let content = FileContents::Bytes(Bytes::from(content));
let content_id = repo.unittest_store(content).wait().unwrap();
let content_id = repo.unittest_store(ctx.clone(), content).wait().unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, size as u64, None);
res.insert(path, Some(file_change));
@ -63,7 +64,7 @@ fn create_bonsai_changeset_from_test_data(
commit_metadata: BTreeMap<&str, &str>,
) {
let ctx = CoreContext::test_mock();
let file_changes = store_files(files, blobrepo.clone());
let file_changes = store_files(ctx.clone(), files, blobrepo.clone());
let date: Vec<_> = commit_metadata
.get("author_date")
.unwrap()

View File

@ -24,6 +24,7 @@ use mononoke_types::{BonsaiChangesetMut, ChangesetId, DateTime, FileChange, File
use std::collections::BTreeMap;
pub fn store_files(
ctx: CoreContext,
files: BTreeMap<&str, Option<&str>>,
repo: BlobRepo,
) -> BTreeMap<MPath, Option<FileChange>> {
@ -35,7 +36,7 @@ pub fn store_files(
Some(content) => {
let size = content.len();
let content = FileContents::Bytes(Bytes::from(content));
let content_id = repo.unittest_store(content).wait().unwrap();
let content_id = repo.unittest_store(ctx.clone(), content).wait().unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, size as u64, None);
res.insert(path, Some(file_change));
@ -49,6 +50,7 @@ pub fn store_files(
}
pub fn store_rename(
ctx: CoreContext,
copy_src: (MPath, ChangesetId),
path: &str,
content: &str,
@ -57,7 +59,7 @@ pub fn store_rename(
let path = MPath::new(path).unwrap();
let size = content.len();
let content = FileContents::Bytes(Bytes::from(content));
let content_id = repo.unittest_store(content).wait().unwrap();
let content_id = repo.unittest_store(ctx, content).wait().unwrap();
let file_change = FileChange::new(content_id, FileType::Regular, size as u64, Some(copy_src));
(path, Some(file_change))