rustfmt affected files

Summary: Format files affected by the next commit in a stack

Reviewed By: StanislavGlebik

Differential Revision: D13650639

fbshipit-source-id: d4e37acd2bcd29b291968a529543c202f6944e1a
Pavel Aslanov 2019-01-14 09:29:33 -08:00 committed by Facebook Github Bot
parent 5dfab1ed2b
commit 2efacff245
24 changed files with 649 additions and 469 deletions
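
The summary above describes reformatting only the files that a later commit in the stack touches. As a rough illustration (not part of this commit), the sketch below assumes `hg` and `rustfmt` are available on PATH and simply reformats the Rust files reported as changed by the working-directory commit; the exact workflow used internally may differ.

// Hypothetical sketch: run rustfmt over the .rs files touched by the
// current commit. Commands and flags are standard Mercurial/rustfmt CLI,
// not taken from the Mononoke repository itself.
use std::process::Command;

fn main() {
    // `hg status --change . --no-status` lists the paths modified by the
    // working-directory commit, one per line, without status prefixes.
    let out = Command::new("hg")
        .args(&["status", "--change", ".", "--no-status"])
        .output()
        .expect("failed to run hg");
    let files = String::from_utf8_lossy(&out.stdout);

    for path in files.lines().filter(|p| p.ends_with(".rs")) {
        // Rewrite each affected file in place using rustfmt's defaults.
        let status = Command::new("rustfmt")
            .arg(path)
            .status()
            .expect("failed to run rustfmt");
        if !status.success() {
            eprintln!("rustfmt failed on {}", path);
        }
    }
}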

View File

@ -8,20 +8,21 @@ use std::collections::BTreeMap;
use blobstore::Blobstore;
use failure::prelude::*;
use futures::{IntoFuture, Stream};
use futures::future::{join_all, Future};
use futures::{IntoFuture, Stream};
use futures_ext::FutureExt;
use bonsai_utils;
use context::CoreContext;
use mercurial_types::{Changeset, HgFileNodeId, HgManifestId, HgNodeHash, MPath};
use mononoke_types::{BlobstoreValue, BonsaiChangeset, BonsaiChangesetMut, ChangesetId, FileChange,
MononokeId};
use mononoke_types::{
BlobstoreValue, BonsaiChangeset, BonsaiChangesetMut, ChangesetId, FileChange, MononokeId,
};
use repo::RepoBlobstore;
use errors::*;
use BlobRepo;
use HgBlobChangeset;
use errors::*;
/// Creates bonsai changeset from already created HgBlobChangeset.
pub fn create_bonsai_changeset_object(
@ -64,7 +65,8 @@ pub fn create_bonsai_changeset_object(
message,
extra,
file_changes,
}.freeze()
}
.freeze()
}
})
}
@ -194,7 +196,8 @@ fn get_copy_info(
_ => None,
})
}
})).map(move |res| (res, repopath))
}))
.map(move |res| (res, repopath))
})
.and_then(move |(copied_from_bonsai_commits, repopath)| {
let copied_from: Vec<_> = copied_from_bonsai_commits
@ -210,7 +213,8 @@ fn get_copy_info(
from_node: nodehash,
to_path: repopath.clone(),
to_node: copyfromnode,
}.into()),
}
.into()),
}
})
.boxify()

View File

@ -17,9 +17,11 @@ use context::CoreContext;
use mercurial;
use mercurial::changeset::Extra;
use mercurial::revlogrepo::RevlogChangeset;
use mercurial_types::{Changeset, HgBlobNode, HgChangesetEnvelope, HgChangesetEnvelopeMut,
HgNodeHash, HgParents, MPath};
use mercurial_types::nodehash::{HgChangesetId, HgManifestId, NULL_HASH};
use mercurial_types::{
Changeset, HgBlobNode, HgChangesetEnvelope, HgChangesetEnvelopeMut, HgNodeHash, HgParents,
MPath,
};
use mononoke_types::DateTime;
use errors::*;

View File

@ -17,7 +17,7 @@ use futures_stats::Timed;
use std::any::Any;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::{Arc, Mutex, atomic::AtomicUsize, atomic::Ordering};
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc, Mutex};
use std::time::Duration;
/// Trait that knows how to fetch DAG info about commits. Primary user is revsets
@ -180,7 +180,8 @@ impl CachingChangesetFetcher {
fn fill_cache(&self, ctx: CoreContext, gen_num: u64) -> impl Future<Item = (), Error = Error> {
let blobstore_cache_key = self.get_blobstore_cache_key(gen_num);
if !self.already_fetched_blobs
if !self
.already_fetched_blobs
.lock()
.unwrap()
.contains(&blobstore_cache_key)
@ -233,28 +234,29 @@ impl CachingChangesetFetcher {
self.cache_misses.fetch_add(1, Ordering::Relaxed);
self.changesets.get(ctx.clone(), repo_id, cs_id)
}
}).and_then(move |maybe_cs| maybe_cs.ok_or_else(|| err_msg(format!("{} not found", cs_id))))
.and_then({
let cs_fetcher = self.clone();
move |cs| {
if cs_fetcher.too_many_cache_misses() {
cs_fetcher
.fill_cache(ctx, cs.gen)
.map(|()| cs)
.left_future()
} else {
future::ok(cs).right_future()
}
}
})
.timed(move |stats, _| {
let mut maxlatency_value = max_request_latency.lock().expect("poisoned lock");
if stats.completion_time > *maxlatency_value {
*maxlatency_value = stats.completion_time
})
.and_then(move |maybe_cs| maybe_cs.ok_or_else(|| err_msg(format!("{} not found", cs_id))))
.and_then({
let cs_fetcher = self.clone();
move |cs| {
if cs_fetcher.too_many_cache_misses() {
cs_fetcher
.fill_cache(ctx, cs.gen)
.map(|()| cs)
.left_future()
} else {
future::ok(cs).right_future()
}
}
})
.timed(move |stats, _| {
let mut maxlatency_value = max_request_latency.lock().expect("poisoned lock");
if stats.completion_time > *maxlatency_value {
*maxlatency_value = stats.completion_time
}
Ok(())
})
Ok(())
})
}
}
@ -313,8 +315,9 @@ mod tests {
use cachelib::{get_or_create_pool, init_cache_once, LruCacheConfig, LruCachePool};
use changesets::{serialize_cs_entries, ChangesetEntry, ChangesetInsert};
use mononoke_types::BlobstoreBytes;
use mononoke_types_mocks::changesetid::{FIVES_CSID, FOURS_CSID, ONES_CSID, THREES_CSID,
TWOS_CSID};
use mononoke_types_mocks::changesetid::{
FIVES_CSID, FOURS_CSID, ONES_CSID, THREES_CSID, TWOS_CSID,
};
use mononoke_types_mocks::repo::REPO_ZERO;
use std::collections::HashMap;
use std::sync::Mutex;
@ -499,10 +502,7 @@ mod tests {
let blobstore_get_counter = Arc::new(AtomicUsize::new(0));
let blobstore = Arc::new(TestBlobstore::new(blobstore_get_counter.clone()));
let cs_fetcher = CachingChangesetFetcher::new_with_opts(
cs,
REPO_ZERO,
cache_pool,
blobstore,
cs, REPO_ZERO, cache_pool, blobstore,
0, /* will try to go to blobstore on every fetch */
2, /* 0, 2, 4 etc gen numbers might have a cache entry */
);
@ -580,10 +580,7 @@ mod tests {
let blobstore = Arc::new(blobstore);
let cs_fetcher = CachingChangesetFetcher::new_with_opts(
cs,
REPO_ZERO,
cache_pool,
blobstore,
cs, REPO_ZERO, cache_pool, blobstore,
0, /* will try to go to blobstore on every fetch */
2, /* 0, 2, 4 etc gen numbers might have a cache entry */
);

View File

@ -9,9 +9,10 @@ use std::fmt;
use ascii::AsciiString;
use bincode;
use mercurial_types::{HgBlob, HgChangesetId, HgFileNodeId, HgManifestId, HgNodeHash, HgParents,
MPath, RepoPath, Type};
use mononoke_types::{ChangesetId, ContentId, hash::Sha256};
use mercurial_types::{
HgBlob, HgChangesetId, HgFileNodeId, HgManifestId, HgNodeHash, HgParents, MPath, RepoPath, Type,
};
use mononoke_types::{hash::Sha256, ChangesetId, ContentId};
use HgBlobChangeset;
@ -42,66 +43,117 @@ impl fmt::Display for StateOpenError {
#[derive(Debug, Fail)]
pub enum ErrorKind {
#[fail(display = "Missing typed key entry for key: {}", _0)] MissingTypedKeyEntry(String),
#[fail(display = "Missing typed key entry for key: {}", _0)]
MissingTypedKeyEntry(String),
// TODO(anastasiyaz): Use general Alias Key instead of Sha256
#[fail(display = "Incorrect content of alias blob: {}", _0)] IncorrectAliasBlobContent(Sha256),
#[fail(display = "Error while opening state for {}", _0)] StateOpen(StateOpenError),
#[fail(display = "Changeset id {} is missing", _0)] ChangesetMissing(HgChangesetId),
#[fail(display = "Error while deserializing changeset retrieved from key '{}'", _0)]
#[fail(display = "Incorrect content of alias blob: {}", _0)]
IncorrectAliasBlobContent(Sha256),
#[fail(display = "Error while opening state for {}", _0)]
StateOpen(StateOpenError),
#[fail(display = "Changeset id {} is missing", _0)]
ChangesetMissing(HgChangesetId),
#[fail(
display = "Error while deserializing changeset retrieved from key '{}'",
_0
)]
ChangesetDeserializeFailed(String),
#[fail(display = "Error while deserializing manifest retrieved from key '{}'", _0)]
#[fail(
display = "Error while deserializing manifest retrieved from key '{}'",
_0
)]
ManifestDeserializeFailed(String),
#[fail(display = "Error while deserializing file node retrieved from key '{}'", _0)]
#[fail(
display = "Error while deserializing file node retrieved from key '{}'",
_0
)]
FileNodeDeserializeFailed(String),
#[fail(display = "Manifest id {} is missing", _0)] ManifestMissing(HgManifestId),
#[fail(display = "Node id {} is missing", _0)] NodeMissing(HgNodeHash),
#[fail(display = "Manifest id {} is missing", _0)]
ManifestMissing(HgManifestId),
#[fail(display = "Node id {} is missing", _0)]
NodeMissing(HgNodeHash),
#[fail(display = "Mercurial content missing for node {} (type {})", _0, _1)]
HgContentMissing(HgNodeHash, Type),
#[fail(display = "Content missing nodeid {}", _0)] ContentMissing(HgNodeHash),
#[fail(display = "Error while deserializing file contents retrieved from key '{}'", _0)]
#[fail(display = "Content missing nodeid {}", _0)]
ContentMissing(HgNodeHash),
#[fail(
display = "Error while deserializing file contents retrieved from key '{}'",
_0
)]
FileContentsDeserializeFailed(String),
#[fail(display = "Content blob missing for id: {}", _0)] ContentBlobMissing(ContentId),
#[fail(display = "Uploaded blob is incomplete {:?}", _0)] BadUploadBlob(HgBlob),
#[fail(display = "HgParents are not in blob store {:?}", _0)] ParentsUnknown(HgParents),
#[fail(display = "Content blob missing for id: {}", _0)]
ContentBlobMissing(ContentId),
#[fail(display = "Uploaded blob is incomplete {:?}", _0)]
BadUploadBlob(HgBlob),
#[fail(display = "HgParents are not in blob store {:?}", _0)]
ParentsUnknown(HgParents),
#[fail(display = "Serialization of node failed {} ({})", _0, _1)]
SerializationFailed(HgNodeHash, bincode::Error),
#[fail(display = "Root manifest is not a manifest (type {})", _0)] BadRootManifest(Type),
#[fail(display = "Root manifest is not a manifest (type {})", _0)]
BadRootManifest(Type),
#[fail(display = "Manifest type {} does not match uploaded type {}", _0, _1)]
ManifestTypeMismatch(Type, Type),
#[fail(display = "Node generation failed for unknown reason")] NodeGenerationFailed,
#[fail(display = "Path {} appears multiple times in manifests", _0)] DuplicateEntry(RepoPath),
#[fail(display = "Duplicate manifest hash {}", _0)] DuplicateManifest(HgNodeHash),
#[fail(display = "Missing entries in new changeset {}", _0)] MissingEntries(HgNodeHash),
#[fail(display = "Filenode is missing: {} {}", _0, _1)] MissingFilenode(RepoPath, HgFileNodeId),
#[fail(display = "Some manifests do not exist")] MissingManifests,
#[fail(display = "Node generation failed for unknown reason")]
NodeGenerationFailed,
#[fail(display = "Path {} appears multiple times in manifests", _0)]
DuplicateEntry(RepoPath),
#[fail(display = "Duplicate manifest hash {}", _0)]
DuplicateManifest(HgNodeHash),
#[fail(display = "Missing entries in new changeset {}", _0)]
MissingEntries(HgNodeHash),
#[fail(display = "Filenode is missing: {} {}", _0, _1)]
MissingFilenode(RepoPath, HgFileNodeId),
#[fail(display = "Some manifests do not exist")]
MissingManifests,
#[fail(display = "Expected {} to be a manifest, found a {} instead", _0, _1)]
NotAManifest(HgNodeHash, Type),
#[fail(display = "Inconsistent node hash for entry: path {}, provided: {}, computed: {}", _0,
_1, _2)]
#[fail(
display = "Inconsistent node hash for entry: path {}, provided: {}, computed: {}",
_0, _1, _2
)]
InconsistentEntryHash(RepoPath, HgNodeHash, HgNodeHash),
#[fail(display = "Inconsistent node hash for changeset: provided: {}, \
computed: {} for blob: {:#?}",
_0, _1, _2)]
#[fail(
display = "Inconsistent node hash for changeset: provided: {}, \
computed: {} for blob: {:#?}",
_0, _1, _2
)]
InconsistentChangesetHash(HgNodeHash, HgNodeHash, HgBlobChangeset),
#[fail(display = "Bookmark {} does not exist", _0)] BookmarkNotFound(AsciiString),
#[fail(display = "Bookmark {} does not exist", _0)]
BookmarkNotFound(AsciiString),
#[fail(display = "Unresolved conflicts when converting BonsaiChangeset to Manifest")]
UnresolvedConflicts,
#[fail(display = "Manifest without parents did not get changed by a BonsaiChangeset")]
UnchangedManifest,
#[fail(display = "Trying to merge a manifest with two existing parents p1 {} and p2 {}", _0,
_1)]
#[fail(
display = "Trying to merge a manifest with two existing parents p1 {} and p2 {}",
_0, _1
)]
ManifestAlreadyAMerge(HgNodeHash, HgNodeHash),
#[fail(display = "Path not found: {}", _0)] PathNotFound(MPath),
#[fail(display = "Remove called on non-directory")] NotADirectory,
#[fail(display = "Empty file path")] EmptyFilePath,
#[fail(display = "Memory manifest conflict can not contain single entry")] SingleEntryConflict,
#[fail(display = "Cannot find cache pool {}", _0)] MissingCachePool(String),
#[fail(display = "Bonsai cs {} not found", _0)] BonsaiNotFound(ChangesetId),
#[fail(display = "Path not found: {}", _0)]
PathNotFound(MPath),
#[fail(display = "Remove called on non-directory")]
NotADirectory,
#[fail(display = "Empty file path")]
EmptyFilePath,
#[fail(display = "Memory manifest conflict can not contain single entry")]
SingleEntryConflict,
#[fail(display = "Cannot find cache pool {}", _0)]
MissingCachePool(String),
#[fail(display = "Bonsai cs {} not found", _0)]
BonsaiNotFound(ChangesetId),
#[fail(display = "Bonsai changeset not found for hg changeset {}", _0)]
BonsaiMappingNotFound(HgChangesetId),
#[fail(display = "Root path wasn't expected at this context")] UnexpectedRootPath,
#[fail(display = "Incorrect copy info: not found a file version {} {} the file {} {} was copied from",
from_path, from_node, to_path, to_node)]
IncorrectCopyInfo{from_path: MPath, from_node: HgNodeHash, to_path: MPath, to_node: HgNodeHash},
#[fail(display = "Case conflict in a commit")] CaseConflict(MPath),
#[fail(display = "Root path wasn't expected at this context")]
UnexpectedRootPath,
#[fail(
display = "Incorrect copy info: not found a file version {} {} the file {} {} was copied from",
from_path, from_node, to_path, to_node
)]
IncorrectCopyInfo {
from_path: MPath,
from_node: HgNodeHash,
to_path: MPath,
to_node: HgNodeHash,
},
#[fail(display = "Case conflict in a commit")]
CaseConflict(MPath),
}

View File

@ -13,11 +13,13 @@ use futures_ext::{BoxFuture, FutureExt};
use super::alias::get_sha256;
use mercurial::file;
use mercurial_types::{FileType, HgBlob, HgFileEnvelope, HgFileNodeId, HgManifestId, HgNodeHash,
HgParents, MPath, MPathElement};
use mercurial_types::manifest::{Content, Entry, Manifest, Type};
use mercurial_types::nodehash::HgEntryId;
use mononoke_types::{ContentId, FileContents, MononokeId, hash::Sha256};
use mercurial_types::{
FileType, HgBlob, HgFileEnvelope, HgFileNodeId, HgManifestId, HgNodeHash, HgParents, MPath,
MPathElement,
};
use mononoke_types::{hash::Sha256, ContentId, FileContents, MononokeId};
use blobstore::Blobstore;
use context::CoreContext;
@ -204,7 +206,8 @@ pub(crate) fn get_rename_from_envelope(
envelope.metadata,
envelope.p1.as_ref(),
envelope.p2.as_ref(),
).copied_from()
)
.copied_from()
}
impl HgBlobEntry {

View File

@ -97,9 +97,10 @@ pub use changeset::{HgBlobChangeset, HgChangesetContent};
pub use changeset_fetcher::ChangesetFetcher;
pub use file::HgBlobEntry;
pub use manifest::BlobManifest;
pub use repo::{save_bonsai_changesets, BlobRepo, ChangesetMetadata, ContentBlobInfo,
ContentBlobMeta, CreateChangeset, UploadHgFileContents, UploadHgFileEntry,
UploadHgNodeHash, UploadHgTreeEntry};
pub use repo::{
save_bonsai_changesets, BlobRepo, ChangesetMetadata, ContentBlobInfo, ContentBlobMeta,
CreateChangeset, UploadHgFileContents, UploadHgFileEntry, UploadHgNodeHash, UploadHgTreeEntry,
};
pub use repo_commit::ChangesetHandle;
// TODO: This is exported for testing - is this the right place for it?
pub use repo_commit::compute_changed_files;

View File

@ -14,8 +14,8 @@ use futures::future::{Future, IntoFuture};
use futures_ext::{BoxFuture, FutureExt};
use context::CoreContext;
use mercurial_types::{Entry, FileType, HgBlob, HgManifestEnvelope, MPathElement, Manifest, Type};
use mercurial_types::nodehash::{HgEntryId, HgManifestId, HgNodeHash, NULL_HASH};
use mercurial_types::{Entry, FileType, HgBlob, HgManifestEnvelope, MPathElement, Manifest, Type};
use blobstore::Blobstore;
@ -166,8 +166,9 @@ impl BlobManifest {
p2: None,
computed_node_id: NULL_HASH,
content: ManifestContent::new_empty(),
})).into_future()
.boxify()
}))
.into_future()
.boxify()
} else {
fetch_manifest_envelope_opt(ctx, &blobstore, manifestid.into_nodehash())
.and_then({
@ -231,7 +232,8 @@ impl Manifest for BlobManifest {
path.clone(),
d.entryid().into_nodehash(),
d.flag(),
).boxed()
)
.boxed()
}
})
}
@ -245,7 +247,8 @@ impl Manifest for BlobManifest {
path,
d.entryid().into_nodehash(),
d.flag(),
).boxed()
)
.boxed()
}
});
Box::new(list_iter)
@ -276,10 +279,7 @@ impl Details {
}
};
Ok(Details {
entryid: entryid,
flag: flag,
})
Ok(Details { entryid, flag })
}
pub fn entryid(&self) -> &HgEntryId {

View File

@ -19,17 +19,19 @@ use futures_ext::{BoxFuture, FutureExt};
use slog::Logger;
use context::CoreContext;
use mercurial_types::{Entry, HgFileNodeId, HgManifestId, HgNodeHash, MPath, MPathElement,
Manifest, RepoPath, Type};
use mercurial_types::manifest::Content;
use mercurial_types::{
Entry, HgFileNodeId, HgManifestId, HgNodeHash, MPath, MPathElement, Manifest, RepoPath, Type,
};
use mononoke_types::{FileContents, FileType};
use file::HgBlobEntry;
use repo::{RepoBlobstore, UploadHgFileContents, UploadHgFileEntry, UploadHgNodeHash,
UploadHgTreeEntry};
use repo::{
RepoBlobstore, UploadHgFileContents, UploadHgFileEntry, UploadHgNodeHash, UploadHgTreeEntry,
};
use super::BlobRepo;
use super::utils::{IncompleteFilenodeInfo, IncompleteFilenodes};
use super::BlobRepo;
use errors::*;
use manifest::BlobManifest;
@ -244,7 +246,8 @@ impl MemoryManifestEntry {
"\0{}{}\n",
entry.get_hash().into_nodehash(),
entry.get_type().manifest_suffix(),
).expect("Writing to memory failed!");
)
.expect("Writing to memory failed!");
});
let upload_manifest = UploadHgTreeEntry {
@ -261,7 +264,7 @@ impl MemoryManifestEntry {
.flatten()
.map(move |(entry, path)| {
incomplete_filenodes.add(IncompleteFilenodeInfo {
path: path,
path,
filenode: HgFileNodeId::new(
entry.get_hash().into_nodehash(),
),
@ -352,9 +355,9 @@ impl MemoryManifestEntry {
.expect("Unnamed entry in a manifest")
.clone();
let memory_entry = match entry.get_type() {
Type::Tree => Self::convert_treenode(&entry
.get_hash()
.into_nodehash()),
Type::Tree => Self::convert_treenode(
&entry.get_hash().into_nodehash(),
),
_ => MemoryManifestEntry::Blob(HgBlobEntry::new(
blobstore.clone(),
name.clone(),
@ -462,13 +465,15 @@ impl MemoryManifestEntry {
) -> BoxFuture<Self, Error> {
use self::MemoryManifestEntry::*;
if self.is_modified() {
return self.save(
ctx.clone(),
&blobstore,
&logger,
&incomplete_filenodes,
repo_path.clone(),
).map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash()))
return self
.save(
ctx.clone(),
&blobstore,
&logger,
&incomplete_filenodes,
repo_path.clone(),
)
.map(|entry| Self::convert_treenode(&entry.get_hash().into_nodehash()))
.and_then(move |saved| {
saved.merge_with_conflicts(
ctx,
@ -555,7 +560,9 @@ impl MemoryManifestEntry {
let my_changes = my_changes.lock().expect("lock poisoned");
let other_changes = other_changes.lock().expect("lock poisoned");
// Two identical manifests, neither one modified
if my_id.is_some() && my_id == other_id && my_changes.is_empty()
if my_id.is_some()
&& my_id == other_id
&& my_changes.is_empty()
&& other_changes.is_empty()
{
future::ok(self.clone()).boxify()
@ -583,7 +590,8 @@ impl MemoryManifestEntry {
.boxify()
}
}
}.boxify()
}
.boxify()
}
// Only for use in find_mut_helper
@ -596,10 +604,7 @@ impl MemoryManifestEntry {
match entry {
MemoryManifestEntry::MemTree {
base_manifest_id, ..
} if !modified =>
{
*base_manifest_id
}
} if !modified => *base_manifest_id,
MemoryManifestEntry::Blob(blob) if blob.get_type() == Type::Tree => {
Some(blob.get_hash().into_nodehash())
}
@ -712,18 +717,16 @@ impl MemoryManifestEntry {
} else {
future::ok(()).boxify()
}
}.and_then({
}
.and_then({
cloned!(ctx);
move |_| {
let mut changes = entry_changes.lock().expect("lock poisoned");
Self::find_mut_helper(&mut changes, element).find_mut(
ctx,
path,
blobstore,
)
Self::find_mut_helper(&mut changes, element)
.find_mut(ctx, path, blobstore)
}
})
.boxify()
.boxify()
}
_ => future::ok(None).boxify(),
}
@ -779,29 +782,29 @@ impl MemoryManifestEntry {
entries: Vec<HgBlobEntry>,
) -> impl Future<Item = Option<(FileType, FileContents)>, Error = Error> + Send {
if let Some(Type::File(file_type)) = entries.first().map(move |e| e.get_type()) {
let fut = future::join_all(
entries.into_iter().map(move |e| e.get_content(ctx.clone())),
).map(move |content| {
let mut iter = content.iter();
if let Some(first) = iter.next() {
if iter.all(|other| match (first, other) {
(Content::File(c0), Content::File(c1))
| (Content::Executable(c0), Content::Executable(c1))
| (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1,
_ => false,
}) {
return match first {
Content::Executable(file_content)
| Content::File(file_content)
| Content::Symlink(file_content) => {
Some((file_type, file_content.clone()))
}
_ => unreachable!(),
let fut =
future::join_all(entries.into_iter().map(move |e| e.get_content(ctx.clone())))
.map(move |content| {
let mut iter = content.iter();
if let Some(first) = iter.next() {
if iter.all(|other| match (first, other) {
(Content::File(c0), Content::File(c1))
| (Content::Executable(c0), Content::Executable(c1))
| (Content::Symlink(c0), Content::Symlink(c1)) => c0 == c1,
_ => false,
}) {
return match first {
Content::Executable(file_content)
| Content::File(file_content)
| Content::Symlink(file_content) => {
Some((file_type, file_content.clone()))
}
_ => unreachable!(),
};
};
};
};
};
None
});
None
});
Either::A(fut)
} else {
Either::B(future::ok(None))
@ -830,16 +833,16 @@ impl MemoryManifestEntry {
let upload_entry = UploadHgFileEntry {
upload_node_id: UploadHgNodeHash::Generate,
contents: UploadHgFileContents::RawBytes(file_content.into_bytes()),
file_type: file_type,
file_type,
p1: p1.clone(),
p2: p2.clone(),
path: path,
path,
};
let (_, upload_future) = try_boxfuture!(upload_entry.upload(ctx, &repo));
upload_future
.map(move |(entry, path)| {
incomplete_filenodes.add(IncompleteFilenodeInfo {
path: path,
path,
filenode: HgFileNodeId::new(entry.get_hash().into_nodehash()),
p1: p1.map(|h| HgFileNodeId::new(h)),
p2: p2.map(|h| HgFileNodeId::new(h)),
@ -876,7 +879,8 @@ impl MemoryManifestEntry {
child,
repo.clone(),
incomplete_filenodes.clone(),
).map({
)
.map({
let name = name.clone();
move |v| v.map(|v| (name, v))
})
@ -971,12 +975,14 @@ impl MemoryRootManifest {
repo,
incomplete_filenodes,
MemoryManifestEntry::empty_tree(),
)).boxify(),
))
.boxify(),
(Some(p), None) | (None, Some(p)) => future::ok(Self::create(
repo,
incomplete_filenodes,
MemoryManifestEntry::convert_treenode(p),
)).boxify(),
))
.boxify(),
(Some(p1), Some(p2)) => Self::create_conflict(
ctx,
repo,

View File

@ -14,11 +14,11 @@ use std::time::Duration;
use std::usize;
use bytes::Bytes;
use failure::{Error, FutureFailureErrorExt, FutureFailureExt, Result, prelude::*};
use futures::IntoFuture;
use failure::{prelude::*, Error, FutureFailureErrorExt, FutureFailureExt, Result};
use futures::future::{self, loop_fn, ok, Either, Future, Loop};
use futures::stream::{FuturesUnordered, Stream};
use futures::sync::oneshot;
use futures::IntoFuture;
use futures_ext::{BoxFuture, BoxStream, FutureExt, StreamExt};
use futures_stats::{FutureStats, Timed};
use scribe::ScribeClient;
@ -33,12 +33,16 @@ use super::alias::{get_content_id_alias_key, get_sha256_alias, get_sha256_alias_
use super::changeset::HgChangesetContent;
use super::changeset_fetcher::{CachingChangesetFetcher, ChangesetFetcher, SimpleChangesetFetcher};
use super::utils::{sort_topological, IncompleteFilenodeInfo, IncompleteFilenodes};
use blobstore::{new_cachelib_blobstore, new_memcache_blobstore, Blobstore, EagerMemblob,
MemWritesBlobstore, PrefixBlobstore};
use blobstore::{
new_cachelib_blobstore, new_memcache_blobstore, Blobstore, EagerMemblob, MemWritesBlobstore,
PrefixBlobstore,
};
use blobstore_sync_queue::{BlobstoreSyncQueue, SqlBlobstoreSyncQueue};
use bonsai_generation::{create_bonsai_changeset_object, save_bonsai_changeset_object};
use bonsai_hg_mapping::{BonsaiHgMapping, BonsaiHgMappingEntry, BonsaiOrHgChangesetIds,
CachingBonsaiHgMapping, SqlBonsaiHgMapping};
use bonsai_hg_mapping::{
BonsaiHgMapping, BonsaiHgMappingEntry, BonsaiOrHgChangesetIds, CachingBonsaiHgMapping,
SqlBonsaiHgMapping,
};
use bookmarks::{self, Bookmark, BookmarkPrefix, Bookmarks};
use cachelib;
use changesets::{CachingChangests, ChangesetEntry, ChangesetInsert, Changesets, SqlChangesets};
@ -50,28 +54,33 @@ use fileblob::Fileblob;
use filenodes::{CachingFilenodes, FilenodeInfo, Filenodes};
use manifoldblob::ThriftManifoldBlob;
use mercurial::file::File;
use mercurial_types::{Changeset, Entry, HgBlob, HgBlobNode, HgChangesetId, HgFileEnvelopeMut,
HgFileNodeId, HgManifestEnvelopeMut, HgManifestId, HgNodeHash, HgParents,
Manifest, RepoPath, Type};
use mercurial_types::manifest::Content;
use mercurial_types::{
Changeset, Entry, HgBlob, HgBlobNode, HgChangesetId, HgFileEnvelopeMut, HgFileNodeId,
HgManifestEnvelopeMut, HgManifestId, HgNodeHash, HgParents, Manifest, RepoPath, Type,
};
use metaconfig::RemoteBlobstoreArgs;
use mononoke_types::{Blob, BlobstoreBytes, BlobstoreValue, BonsaiChangeset, ChangesetId,
ContentId, DateTime, FileChange, FileContents, FileType, Generation, MPath,
MPathElement, MononokeId, RepositoryId, hash::Blake2, hash::Sha256};
use mononoke_types::{
hash::Blake2, hash::Sha256, Blob, BlobstoreBytes, BlobstoreValue, BonsaiChangeset, ChangesetId,
ContentId, DateTime, FileChange, FileContents, FileType, Generation, MPath, MPathElement,
MononokeId, RepositoryId,
};
use multiplexedblob::MultiplexedBlobstore;
use rocksblob::Rocksblob;
use rocksdb;
use sqlfilenodes::{SqlConstructors, SqlFilenodes};
use BlobManifest;
use HgBlobChangeset;
use errors::*;
use file::{fetch_file_content_from_blobstore, fetch_file_content_id_from_blobstore,
fetch_file_contents, fetch_file_size_from_blobstore, fetch_raw_filenode_bytes,
fetch_rename_from_blobstore, HgBlobEntry, fetch_file_content_sha256_from_blobstore};
use file::{
fetch_file_content_from_blobstore, fetch_file_content_id_from_blobstore,
fetch_file_content_sha256_from_blobstore, fetch_file_contents, fetch_file_size_from_blobstore,
fetch_raw_filenode_bytes, fetch_rename_from_blobstore, HgBlobEntry,
};
use memory_manifest::MemoryRootManifest;
use post_commit::{self, PostCommitQueue};
use repo_commit::*;
use BlobManifest;
use HgBlobChangeset;
define_stats! {
prefix = "mononoke.blobrepo";
@ -272,12 +281,18 @@ impl BlobRepo {
logger.unwrap_or(Logger::root(Discard {}.ignore_res(), o!())),
Arc::new(SqlBookmarks::with_sqlite_in_memory()?),
blobstore.unwrap_or_else(|| Arc::new(EagerMemblob::new())),
Arc::new(SqlFilenodes::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::Filenodes))?),
Arc::new(SqlChangesets::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::Changesets))?),
Arc::new(SqlBonsaiHgMapping::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::BonsaiHgMapping))?),
Arc::new(
SqlFilenodes::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::Filenodes))?,
),
Arc::new(
SqlChangesets::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::Changesets))?,
),
Arc::new(
SqlBonsaiHgMapping::with_sqlite_in_memory()
.chain_err(ErrorKind::StateOpen(StateOpenError::BonsaiHgMapping))?,
),
RepositoryId::new(0),
Arc::new(post_commit::Discard::new()),
))
@ -748,7 +763,8 @@ impl BlobRepo {
self.get_bonsai_from_hg(ctx.clone(), &changesetid)
.and_then(move |maybebonsai| match maybebonsai {
Some(bonsai) => repo.changesets
Some(bonsai) => repo
.changesets
.get(ctx, repoid, bonsai)
.map(|res| res.is_some())
.left_future(),
@ -893,7 +909,8 @@ impl BlobRepo {
let repo = self.clone();
move |cs_opt| match cs_opt {
None => future::ok(None).left_future(),
Some(cs) => repo.get_hg_from_bonsai_changeset(ctx, cs)
Some(cs) => repo
.get_hg_from_bonsai_changeset(ctx, cs)
.map(|cs| Some(cs))
.right_future(),
}
@ -1097,7 +1114,8 @@ impl BlobRepo {
self.get_bonsai_from_hg(ctx.clone(), &cs)
.and_then(move |maybebonsai| match maybebonsai {
Some(bonsai) => repo.changesets
Some(bonsai) => repo
.changesets
.get(ctx, repoid, bonsai)
.map(|res| res.map(|res| Generation::new(res.gen)))
.left_future(),
@ -1251,13 +1269,15 @@ impl BlobRepo {
.right_future()
}
_ => future::ok(None).left_future(),
}.and_then({
}
.and_then({
let repo = self.clone();
let change = change.cloned();
let path = path.clone();
move |maybe_entry| match maybe_entry {
None => repo.store_file_change(ctx, p1, p2, &path, change.as_ref())
None => repo
.store_file_change(ctx, p1, p2, &path, change.as_ref())
.right_future(),
_ => future::ok(maybe_entry).left_future(),
}
@ -1279,7 +1299,8 @@ impl BlobRepo {
Some(change) => {
let copy_from_fut = match change.copy_from() {
None => future::ok(None).left_future(),
Some((path, bcs_id)) => self.get_hg_from_bonsai_changeset(ctx.clone(), *bcs_id)
Some((path, bcs_id)) => self
.get_hg_from_bonsai_changeset(ctx.clone(), *bcs_id)
.and_then({
cloned!(ctx, repo);
move |cs_id| repo.get_changeset_by_changesetid(ctx, &cs_id)
@ -1403,11 +1424,13 @@ impl BlobRepo {
// this has been deleted and it's no longer a conflict.
let mut check_futs = vec![];
for fullpath in potential_conflicts {
let check_fut = repo.find_path_in_manifest(
ctx.clone(),
fullpath,
child_mf_id.clone(),
).map(|content| content.is_some());
let check_fut = repo
.find_path_in_manifest(
ctx.clone(),
fullpath,
child_mf_id.clone(),
)
.map(|content| content.is_some());
check_futs.push(check_fut);
}
@ -1477,11 +1500,13 @@ impl BlobRepo {
None => None,
Some(Content::Tree(manifest)) => match manifest.lookup(&basename) {
None => None,
Some(entry) => if let Type::File(t) = entry.get_type() {
Some((t, HgFileNodeId::new(entry.get_hash().into_nodehash())))
} else {
None
},
Some(entry) => {
if let Type::File(t) = entry.get_type() {
Some((t, HgFileNodeId::new(entry.get_hash().into_nodehash())))
} else {
None
}
}
},
Some(_) => None,
}
@ -1503,7 +1528,8 @@ impl BlobRepo {
IncompleteFilenodes::new(),
p1.as_ref(),
p2.as_ref(),
).and_then({
)
.and_then({
let repo = self.clone();
let manifest_p1 = manifest_p1.cloned();
let manifest_p2 = manifest_p2.cloned();
@ -1573,7 +1599,7 @@ impl BlobRepo {
})
}
})
.boxify()
.boxify()
}
pub fn get_hg_from_bonsai_changeset(
@ -1888,7 +1914,8 @@ impl UploadHgFileContents {
},
};
let upload_fut = repo.upload_blob(ctx, contents_blob, alias_key)
let upload_fut = repo
.upload_blob(ctx, contents_blob, alias_key)
.map(|_content_id| ())
.timed({
let logger = repo.logger.clone();
@ -1992,7 +2019,8 @@ impl UploadHgFileEntry {
RepoPath::FilePath(path),
node_id,
computed_node_id,
).into(),
)
.into(),
));
}
node_id
@ -2139,9 +2167,11 @@ pub fn save_bonsai_changesets(
move |_| {
repo.get_generation_number_by_bonsai(ctx, pc.get_changeset_id())
.map(move |gen| {
pc.complete(gen.expect(
"Just inserted changeset has no generation number",
))
pc.complete(
gen.expect(
"Just inserted changeset has no generation number",
),
)
})
}
}),
@ -2156,14 +2186,16 @@ pub fn save_bonsai_changesets(
loop_fn(
bonsai_complete_futs.into_iter(),
move |mut futs| match futs.next() {
Some(fut) => fut.and_then({
cloned!(postcommit_queue);
move |pc| {
postcommit_queue
.queue_commit(pc)
.map(|_| Loop::Continue(futs))
}
}).left_future(),
Some(fut) => fut
.and_then({
cloned!(postcommit_queue);
move |pc| {
postcommit_queue
.queue_commit(pc)
.map(|_| Loop::Continue(futs))
}
})
.left_future(),
None => ok(Loop::Break(())).right_future(),
},
)
@ -2214,8 +2246,9 @@ impl CreateChangeset {
&entry_processor,
self.root_manifest,
self.sub_entries,
).context("While processing entries")
.traced_with_id(&ctx.trace(), "uploading entries", trace_args!(), event_id);
)
.context("While processing entries")
.traced_with_id(&ctx.trace(), "uploading entries", trace_args!(), event_id);
let parents_complete = extract_parents_complete(&self.p1, &self.p2);
let parents_data = handle_parents(scuba_logger.clone(), self.p1, self.p2)
@ -2252,7 +2285,8 @@ impl CreateChangeset {
ctx.clone(),
repo.clone(),
&parent_manifest_hashes,
).and_then({
)
.and_then({
cloned!(ctx);
move |(p1_manifest, p2_manifest)| {
compute_changed_files(
@ -2263,7 +2297,7 @@ impl CreateChangeset {
)
}
})
.boxify()
.boxify()
};
let p1_mf = parent_manifest_hashes.get(0).cloned();
@ -2273,7 +2307,8 @@ impl CreateChangeset {
repo.clone(),
root_hash.clone(),
p1_mf,
).left_future()
)
.left_future()
} else {
future::ok(()).right_future()
};
@ -2293,7 +2328,8 @@ impl CreateChangeset {
parent_manifest_hashes,
bonsai_parents,
repo.clone(),
).map(|bonsai_cs| (hg_cs, bonsai_cs))
)
.map(|bonsai_cs| (hg_cs, bonsai_cs))
}
});
@ -2302,63 +2338,69 @@ impl CreateChangeset {
.and_then({
cloned!(ctx);
move |(blobcs, bonsai_cs)| {
let fut: BoxFuture<
(HgBlobChangeset, BonsaiChangeset),
Error,
> = (move || {
let bonsai_blob = bonsai_cs.clone().into_blob();
let bcs_id = bonsai_blob.id().clone();
let fut: BoxFuture<(HgBlobChangeset, BonsaiChangeset), Error> =
(move || {
let bonsai_blob = bonsai_cs.clone().into_blob();
let bcs_id = bonsai_blob.id().clone();
let cs_id = blobcs.get_changeset_id().into_nodehash();
let manifest_id = *blobcs.manifestid();
let cs_id = blobcs.get_changeset_id().into_nodehash();
let manifest_id = *blobcs.manifestid();
if let Some(expected_nodeid) = expected_nodeid {
if cs_id != expected_nodeid {
return future::err(
ErrorKind::InconsistentChangesetHash(
expected_nodeid,
cs_id,
blobcs,
).into(),
).boxify();
if let Some(expected_nodeid) = expected_nodeid {
if cs_id != expected_nodeid {
return future::err(
ErrorKind::InconsistentChangesetHash(
expected_nodeid,
cs_id,
blobcs,
)
.into(),
)
.boxify();
}
}
}
scuba_logger
.add("changeset_id", format!("{}", cs_id))
.log_with_msg("Changeset uuid to hash mapping", None);
// NOTE(luk): an attempt was made in D8187210 to split the
// upload_entries signal into upload_entries and
// processed_entries and to signal_parent_ready after
// upload_entries, so that one doesn't need to wait for the
// entries to be processed. There were no performance gains
// from that experiment
//
// We deliberately eat this error - this is only so that
// another changeset can start verifying data in the blob
// store while we verify this one
let _ =
signal_parent_ready.send((bcs_id, cs_id, manifest_id));
scuba_logger
.add("changeset_id", format!("{}", cs_id))
.log_with_msg(
"Changeset uuid to hash mapping",
None,
);
// NOTE(luk): an attempt was made in D8187210 to split the
// upload_entries signal into upload_entries and
// processed_entries and to signal_parent_ready after
// upload_entries, so that one doesn't need to wait for the
// entries to be processed. There were no performance gains
// from that experiment
//
// We deliberately eat this error - this is only so that
// another changeset can start verifying data in the blob
// store while we verify this one
let _ = signal_parent_ready.send((
bcs_id,
cs_id,
manifest_id,
));
let bonsai_cs_fut = save_bonsai_changeset_object(
ctx.clone(),
blobstore.clone(),
bonsai_cs.clone(),
);
let bonsai_cs_fut = save_bonsai_changeset_object(
ctx.clone(),
blobstore.clone(),
bonsai_cs.clone(),
);
blobcs
.save(ctx.clone(), blobstore)
.join(bonsai_cs_fut)
.context("While writing to blobstore")
.join(
entry_processor
.finalize(ctx, filenodes, cs_id)
.context("While finalizing processing"),
)
.from_err()
.map(move |_| (blobcs, bonsai_cs))
.boxify()
})();
blobcs
.save(ctx.clone(), blobstore)
.join(bonsai_cs_fut)
.context("While writing to blobstore")
.join(
entry_processor
.finalize(ctx, filenodes, cs_id)
.context("While finalizing processing"),
)
.from_err()
.map(move |_| (blobcs, bonsai_cs))
.boxify()
})();
fut.context(
"While creating and verifying Changeset for blobstore",

View File

@ -8,12 +8,13 @@ use std::collections::{HashMap, HashSet};
use std::mem;
use std::sync::{Arc, Mutex};
use failure::{err_msg, Compat, Error, FutureFailureErrorExt, Result, StreamFailureErrorExt,
prelude::*};
use futures::IntoFuture;
use failure::{
err_msg, prelude::*, Compat, Error, FutureFailureErrorExt, Result, StreamFailureErrorExt,
};
use futures::future::{self, Future, Shared, SharedError, SharedItem};
use futures::stream::{self, Stream};
use futures::sync::oneshot;
use futures::IntoFuture;
use futures_ext::{BoxFuture, BoxStream, FutureExt, StreamExt};
use futures_stats::Timed;
use scuba_ext::{ScubaSampleBuilder, ScubaSampleBuilderExt};
@ -23,19 +24,21 @@ use blobstore::Blobstore;
use context::CoreContext;
use filenodes::{FilenodeInfo, Filenodes};
use mercurial::file;
use mercurial_types::{Changeset, Entry, HgChangesetId, HgEntryId, HgNodeHash, HgNodeKey,
HgParents, MPath, Manifest, RepoPath, NULL_HASH};
use mercurial_types::manifest::{self, Content};
use mercurial_types::manifest_utils::{changed_entry_stream, EntryStatus};
use mercurial_types::nodehash::{HgFileNodeId, HgManifestId};
use mercurial_types::{
Changeset, Entry, HgChangesetId, HgEntryId, HgNodeHash, HgNodeKey, HgParents, MPath, Manifest,
RepoPath, NULL_HASH,
};
use mononoke_types::{self, BonsaiChangeset, ChangesetId, RepositoryId};
use BlobRepo;
use HgBlobChangeset;
use changeset::HgChangesetContent;
use errors::*;
use file::HgBlobEntry;
use repo::{ChangesetMetadata, RepoBlobstore};
use BlobRepo;
use HgBlobChangeset;
define_stats! {
prefix = "mononoke.blobrepo_commit";
@ -79,7 +82,8 @@ impl ChangesetHandle {
}
pub fn ready_cs_handle(ctx: CoreContext, repo: Arc<BlobRepo>, hg_cs: HgChangesetId) -> Self {
let bonsai_cs = repo.get_bonsai_from_hg(ctx.clone(), &hg_cs)
let bonsai_cs = repo
.get_bonsai_from_hg(ctx.clone(), &hg_cs)
.and_then(move |bonsai_id| {
bonsai_id.ok_or(ErrorKind::BonsaiMappingNotFound(hg_cs).into())
})
@ -94,7 +98,8 @@ impl ChangesetHandle {
let fut = bonsai_cs.join(cs);
Self {
can_be_parent: can_be_parent.shared(),
completion_future: fut.map_err(Error::compat)
completion_future: fut
.map_err(Error::compat)
.inspect(move |(bonsai_cs, hg_cs)| {
let _ = trigger.send((
bonsai_cs.get_changeset_id(),
@ -186,7 +191,8 @@ impl UploadEntries {
None => {
return future::err(err_msg(
"internal error: unexpected empty MPath",
)).boxify()
))
.boxify();
}
};
let path = match entry.get_type() {
@ -241,7 +247,8 @@ impl UploadEntries {
if entry.get_type() != manifest::Type::Tree {
return future::err(
ErrorKind::NotAManifest(entry.get_hash().into_nodehash(), entry.get_type()).into(),
).boxify();
)
.boxify();
}
{
let mut inner = self.inner.lock().expect("Lock poisoned");
@ -437,7 +444,8 @@ impl UploadEntries {
}
})
},
)).boxify();
))
.boxify();
filenodes
.add_filenodes(ctx, filenodeinfos, &inner.repoid)
@ -557,13 +565,14 @@ pub fn compute_changed_files(
.collect::<HashSet<MPath>>()
})
.boxify(),
}.map(|files| {
}
.map(|files| {
let mut files: Vec<MPath> = files.into_iter().collect();
files.sort_unstable_by(mercurial_mpath_comparator);
files
})
.boxify()
.boxify()
}
fn compute_added_files(
@ -588,7 +597,8 @@ fn compute_added_files(
}
}
_ => None,
}).collect()
})
.collect()
}
/// Checks if new commit (or to be precise, it's manifest) introduces any new case conflicts
@ -624,12 +634,15 @@ pub fn check_case_conflicts(
Some(parent_root_mf) => {
let mut case_conflict_checks = stream::FuturesUnordered::new();
for f in added_files {
case_conflict_checks.push(repo.check_case_conflict_in_manifest(
ctx.clone(),
&parent_root_mf,
&child_root_mf,
f.clone(),
).map(move |add_conflict| (add_conflict, f)));
case_conflict_checks.push(
repo.check_case_conflict_in_manifest(
ctx.clone(),
&parent_root_mf,
&child_root_mf,
f.clone(),
)
.map(move |add_conflict| (add_conflict, f)),
);
}
case_conflict_checks
@ -698,8 +711,10 @@ pub fn process_entries(
None => future::ok((
manifest::EmptyManifest.boxed(),
HgManifestId::new(NULL_HASH),
)).boxify(),
Some(root_hash) => repo.get_manifest_by_nodeid(ctx, &HgManifestId::new(root_hash))
))
.boxify(),
Some(root_hash) => repo
.get_manifest_by_nodeid(ctx, &HgManifestId::new(root_hash))
.context("While fetching root manifest")
.from_err()
.map(move |m| (m, HgManifestId::new(root_hash)))
@ -722,16 +737,19 @@ pub fn extract_parents_complete(
) -> BoxFuture<SharedItem<()>, SharedError<Compat<Error>>> {
match (p1.as_ref(), p2.as_ref()) {
(None, None) => future::ok(()).shared().boxify(),
(Some(p), None) | (None, Some(p)) => p.completion_future
(Some(p), None) | (None, Some(p)) => p
.completion_future
.clone()
.and_then(|_| future::ok(()).shared())
.boxify(),
(Some(p1), Some(p2)) => p1.completion_future
(Some(p1), Some(p2)) => p1
.completion_future
.clone()
.join(p2.completion_future.clone())
.and_then(|_| future::ok(()).shared())
.boxify(),
}.boxify()
}
.boxify()
}
pub fn handle_parents(

View File

@ -75,11 +75,12 @@ impl IncompleteFilenodes {
let filenodes = {
let mut filenodes = self.filenodes.lock().expect("lock poisoned");
mem::replace(&mut *filenodes, Vec::new())
}.into_iter()
.map({
cloned!(cs_id);
move |node_info| node_info.with_linknode(cs_id)
});
}
.into_iter()
.map({
cloned!(cs_id);
move |node_info| node_info.with_linknode(cs_id)
});
repo.get_filenodes()
.add_filenodes(ctx, stream::iter_ok(filenodes).boxify(), &repo.get_repoid())
.map(move |_| cs_id)
@ -105,7 +106,8 @@ where
let mut marks = HashMap::new();
let mut stack = Vec::new();
let mut output = Vec::new();
for node in dag.iter()
for node in dag
.iter()
.flat_map(|(n, ns)| iter::once(n).chain(ns))
.collect::<HashSet<_>>()
{
@ -146,19 +148,19 @@ mod test {
#[test]
fn sort_topological_test() {
let res = sort_topological(&hashmap!{1 => vec![2]});
let res = sort_topological(&hashmap! {1 => vec![2]});
assert_eq!(Some(vec![1, 2]), res);
let res = sort_topological(&hashmap!{1 => vec![1]});
let res = sort_topological(&hashmap! {1 => vec![1]});
assert_eq!(None, res);
let res = sort_topological(&hashmap!{1 => vec![2], 2 => vec![3]});
let res = sort_topological(&hashmap! {1 => vec![2], 2 => vec![3]});
assert_eq!(Some(vec![1, 2, 3]), res);
let res = sort_topological(&hashmap!{1 => vec![2, 3], 2 => vec![3]});
let res = sort_topological(&hashmap! {1 => vec![2, 3], 2 => vec![3]});
assert_eq!(Some(vec![1, 2, 3]), res);
let res = sort_topological(&hashmap!{1 => vec![2, 3], 2 => vec![4], 3 => vec![4]});
let res = sort_topological(&hashmap! {1 => vec![2, 3], 2 => vec![4], 3 => vec![4]});
assert!(Some(vec![1, 2, 3, 4]) == res || Some(vec![1, 3, 2, 4]) == res);
}
}

View File

@ -17,11 +17,11 @@ use futures_ext::FutureExt;
use panichandler::{self, Fate};
use scuba_ext::ScubaSampleBuilder;
use slog::{Drain, Logger};
use sloggers::Build;
use sloggers::terminal::{Destination, TerminalLoggerBuilder};
use sloggers::types::{Format, Severity, SourceLocation};
use sloggers::Build;
use tracing::TraceContext;
use upload_trace::{UploadTrace, manifold_thrift::thrift::RequestContext};
use upload_trace::{manifold_thrift::thrift::RequestContext, UploadTrace};
use uuid::Uuid;
use cachelib;
@ -156,19 +156,21 @@ impl MononokeApp {
app = add_cachelib_args(app, self.hide_advanced_args);
if self.local_instances {
app = app.arg(
Arg::with_name("blobstore")
.long("blobstore")
.value_name("TYPE")
.possible_values(&["files", "rocksdb", "manifold"])
.default_value("manifold")
.help("blobstore type"),
).arg(
Arg::with_name("data-dir")
.long("data-dir")
.value_name("DIR")
.help("local data directory (used for local blobstores)"),
);
app = app
.arg(
Arg::with_name("blobstore")
.long("blobstore")
.value_name("TYPE")
.possible_values(&["files", "rocksdb", "manifold"])
.default_value("manifold")
.help("blobstore type"),
)
.arg(
Arg::with_name("data-dir")
.long("data-dir")
.value_name("DIR")
.help("local data directory (used for local blobstores)"),
);
}
app
@ -462,19 +464,23 @@ pub fn init_cachelib<'a>(matches: &ArgMatches<'a>) {
cachelib::get_or_create_pool(
"blobstore-presence",
get_usize(matches, "presence-cache-size", available_space / 20),
).unwrap();
)
.unwrap();
cachelib::get_or_create_pool(
"changesets",
get_usize(matches, "changesets-cache-size", available_space / 20),
).unwrap();
)
.unwrap();
cachelib::get_or_create_pool(
"filenodes",
get_usize(matches, "filenodes-cache-size", available_space / 20),
).unwrap();
)
.unwrap();
cachelib::get_or_create_pool(
"bonsai_hg_mapping",
get_usize(matches, "idmapping-cache-size", available_space / 20),
).unwrap();
)
.unwrap();
cachelib::get_or_create_pool(
"blobstore-blobs",
get_usize(
@ -482,7 +488,8 @@ pub fn init_cachelib<'a>(matches: &ArgMatches<'a>) {
"blob-cache-size",
cachelib::get_available_space().unwrap(),
),
).unwrap();
)
.unwrap();
}
fn find_repo_type<'a>(matches: &ArgMatches<'a>) -> Result<(String, RepoType)> {

View File

@ -92,7 +92,8 @@ fn handle_get<'a>(
let json_flag: bool = args.is_present("json");
match changeset_type {
"hg" => repo.get_bookmark(ctx, &bookmark)
"hg" => repo
.get_bookmark(ctx, &bookmark)
.and_then(move |cs| {
let changeset_id_str = cs.expect("bookmark could not be found").to_string();
let output = format_output(json_flag, changeset_id_str, "hg");

View File

@ -54,9 +54,11 @@ use futures::prelude::*;
use futures::stream::iter_ok;
use futures_ext::{BoxFuture, FutureExt};
use manifoldblob::ManifoldBlob;
use mercurial_types::{Changeset, HgChangesetEnvelope, HgChangesetId, HgFileEnvelope,
HgManifestEnvelope, HgManifestId, MPath, MPathElement, Manifest};
use mercurial_types::manifest::Content;
use mercurial_types::{
Changeset, HgChangesetEnvelope, HgChangesetId, HgFileEnvelope, HgManifestEnvelope,
HgManifestId, MPath, MPathElement, Manifest,
};
use metaconfig::RemoteBlobstoreArgs;
use mononoke_types::{BlobstoreBytes, BlobstoreValue, BonsaiChangeset, ChangesetId, FileContents};
use reachabilityindex::{deserialize_skiplist_map, SkiplistIndex, SkiplistNodeType};
@ -261,19 +263,21 @@ fn fetch_content(
let all_but_last = iter_ok::<_, Error>(path.clone().into_iter().rev().skip(1).rev());
let folded: BoxFuture<_, Error> = mf.and_then({
cloned!(ctx, logger);
move |mf| {
all_but_last.fold(mf, move |mf, element| {
fetch_content_from_manifest(ctx.clone(), logger.clone(), mf, element).and_then(
|content| match content {
Content::Tree(mf) => Ok(mf),
content => Err(format_err!("expected tree entry, found {:?}", content)),
},
)
})
}
}).boxify();
let folded: BoxFuture<_, Error> = mf
.and_then({
cloned!(ctx, logger);
move |mf| {
all_but_last.fold(mf, move |mf, element| {
fetch_content_from_manifest(ctx.clone(), logger.clone(), mf, element).and_then(
|content| match content {
Content::Tree(mf) => Ok(mf),
content => Err(format_err!("expected tree entry, found {:?}", content)),
},
)
})
}
})
.boxify();
let basename = path.basename().clone();
folded
@ -327,11 +331,16 @@ struct ChangesetDiff {
#[derive(Serialize)]
enum ChangesetAttrDiff {
#[serde(rename = "user")] User(String, String),
#[serde(rename = "comments")] Comments(String, String),
#[serde(rename = "manifest")] Manifest(ManifestDiff),
#[serde(rename = "files")] Files(Vec<String>, Vec<String>),
#[serde(rename = "extra")] Extra(BTreeMap<String, String>, BTreeMap<String, String>),
#[serde(rename = "user")]
User(String, String),
#[serde(rename = "comments")]
Comments(String, String),
#[serde(rename = "manifest")]
Manifest(ManifestDiff),
#[serde(rename = "files")]
Files(Vec<String>, Vec<String>),
#[serde(rename = "extra")]
Extra(BTreeMap<String, String>, BTreeMap<String, String>),
}
#[derive(Serialize)]
@ -360,30 +369,31 @@ fn hg_manifest_diff(
repo.get_root_entry(left),
Some(repo.get_root_entry(right)),
None,
).collect()
.map(|diffs| {
let diff = diffs.into_iter().fold(
ManifestDiff {
modified: Vec::new(),
deleted: Vec::new(),
},
|mut mdiff, diff| {
match diff {
BonsaiDiffResult::Changed(path, ..)
| BonsaiDiffResult::ChangedReusedId(path, ..) => {
mdiff.modified.push(mpath_to_str(path))
}
BonsaiDiffResult::Deleted(path) => mdiff.deleted.push(mpath_to_str(path)),
};
mdiff
},
);
if diff.modified.is_empty() && diff.deleted.is_empty() {
None
} else {
Some(ChangesetAttrDiff::Manifest(diff))
}
})
)
.collect()
.map(|diffs| {
let diff = diffs.into_iter().fold(
ManifestDiff {
modified: Vec::new(),
deleted: Vec::new(),
},
|mut mdiff, diff| {
match diff {
BonsaiDiffResult::Changed(path, ..)
| BonsaiDiffResult::ChangedReusedId(path, ..) => {
mdiff.modified.push(mpath_to_str(path))
}
BonsaiDiffResult::Deleted(path) => mdiff.deleted.push(mpath_to_str(path)),
};
mdiff
},
);
if diff.modified.is_empty() && diff.deleted.is_empty() {
None
} else {
Some(ChangesetAttrDiff::Manifest(diff))
}
})
}
fn hg_changeset_diff(
@ -395,7 +405,8 @@ fn hg_changeset_diff(
(
repo.get_changeset_by_changesetid(ctx.clone(), left_id),
repo.get_changeset_by_changesetid(ctx.clone(), right_id),
).into_future()
)
.into_future()
.and_then({
cloned!(repo, left_id, right_id);
move |(left, right)| {
@ -581,7 +592,8 @@ fn main() -> Result<()> {
blobstore,
"manifold",
manifold_args.bucket.as_ref(),
).unwrap();
)
.unwrap();
let blobstore = PrefixBlobstore::new(blobstore, repo_id.prefix());
get_cache(ctx.clone(), &blobstore, key.clone(), mode)
}
@ -590,10 +602,12 @@ fn main() -> Result<()> {
blobstore,
"manifold",
manifold_args.bucket.as_ref(),
).unwrap();
)
.unwrap();
get_cache(ctx.clone(), &blobstore, key.clone(), mode)
}
}.map(move |value| {
}
.map(move |value| {
println!("{:?}", value);
if let Some(value) = value {
let decode_as = decode_as.as_ref().and_then(|val| {
@ -615,7 +629,7 @@ fn main() -> Result<()> {
}
}
})
.boxify()
.boxify()
}
(BONSAI_FETCH, Some(sub_m)) => {
let rev = sub_m.value_of("HG_CHANGESET_OR_BOOKMARK").unwrap();
@ -668,7 +682,8 @@ fn main() -> Result<()> {
for entry in entries {
let mut basename = String::from_utf8_lossy(
entry.get_name().expect("empty basename found").as_bytes(),
).to_string();
)
.to_string();
for _ in basename.len()..longest_len {
basename.push(' ');
}
@ -769,10 +784,11 @@ fn main() -> Result<()> {
&Arc::new(repo.clone()),
start_cs,
stop_cs,
).map(move |cs| repo.get_hg_from_bonsai_changeset(ctx.clone(), cs))
.buffered(100)
.map(|cs| cs.to_hex().to_string())
.collect()
)
.map(move |cs| repo.get_hg_from_bonsai_changeset(ctx.clone(), cs))
.buffered(100)
.map(|cs| cs.to_hex().to_string())
.collect()
}
})
.and_then(|css| {
@ -839,28 +855,30 @@ fn main() -> Result<()> {
ctx,
&HgChangesetId::from_str(source_hash)
.expect("source hash is not valid hg changeset id"),
).and_then(move |maybebonsai| {
match maybebonsai {
Some(bonsai) => {
println!("{}", bonsai);
}
None => {
panic!("no matching mononoke id found");
}
)
.and_then(move |maybebonsai| {
match maybebonsai {
Some(bonsai) => {
println!("{}", bonsai);
}
Ok(())
})
.boxify()
None => {
panic!("no matching mononoke id found");
}
}
Ok(())
})
.boxify()
} else {
repo.get_hg_from_bonsai_changeset(
ctx,
ChangesetId::from_str(source_hash)
.expect("source hash is not valid mononoke id"),
).and_then(move |mercurial| {
println!("{}", mercurial);
Ok(())
})
.boxify()
)
.and_then(move |mercurial| {
println!("{}", mercurial);
Ok(())
})
.boxify()
}
}
_ => {

View File

@ -22,7 +22,10 @@ extern crate slog;
extern crate tokio;
use std::cmp;
use std::sync::{Arc, atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}};
use std::sync::{
atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT},
Arc,
};
use clap::{App, Arg};
use failure::{Error, Result};
@ -31,12 +34,12 @@ use futures_ext::{BoxFuture, FutureExt};
use slog::Logger;
use tokio::prelude::stream::iter_ok;
use blobrepo::BlobRepo;
use blobrepo::alias::get_sha256;
use blobrepo::BlobRepo;
use changesets::SqlChangesets;
use cmdlib::args;
use context::CoreContext;
use mononoke_types::{ChangesetId, ContentId, FileChange, RepositoryId, hash::Sha256};
use mononoke_types::{hash::Sha256, ChangesetId, ContentId, FileChange, RepositoryId};
#[derive(Debug, Clone)]
enum Mode {
@ -91,7 +94,8 @@ impl AliasVerification {
self.blobrepo
.get_bonsai_changeset(ctx, bcs_id)
.map(|bcs| {
let file_changes: Vec<_> = bcs.file_changes()
let file_changes: Vec<_> = bcs
.file_changes()
.map(|(_, file_change)| file_change.cloned())
.collect();
file_changes
@ -150,10 +154,9 @@ impl AliasVerification {
self.blobrepo
.get_file_content_id_by_alias(ctx.clone(), alias)
.then(move |result| match result {
Ok(content_id_from_blobstore) => {
av.check_alias_blob(alias, content_id, content_id_from_blobstore)
.left_future()
}
Ok(content_id_from_blobstore) => av
.check_alias_blob(alias, content_id, content_id_from_blobstore)
.left_future(),
Err(_) => {
// the blob with alias is not found
av.process_missing_alias_blob(ctx, alias, content_id)

View File

@ -53,7 +53,8 @@ fn setup_app<'a, 'b>() -> App<'a, 'b> {
.arg(
Arg::from_usage(
"--commits-limit [LIMIT] 'import only LIMIT first commits from revlog repo'",
).conflicts_with("changeset"),
)
.conflicts_with("changeset"),
)
}
@ -99,16 +100,17 @@ fn main() -> Result<()> {
skip,
commits_limit,
no_bookmark,
}.import()
.traced(ctx.trace(), "blobimport", trace_args!())
.map_err({
cloned!(ctx);
move |err| {
error!(ctx.logger(), "error while blobimporting"; SlogKVError(err));
::std::process::exit(1);
}
})
.then(move |result| args::upload_and_show_trace(ctx).then(move |_| result));
}
.import()
.traced(ctx.trace(), "blobimport", trace_args!())
.map_err({
cloned!(ctx);
move |err| {
error!(ctx.logger(), "error while blobimporting"; SlogKVError(err));
::std::process::exit(1);
}
})
.then(move |result| args::upload_and_show_trace(ctx).then(move |_| result));
let mut runtime = tokio::runtime::Runtime::new()?;
let result = runtime.block_on(blobimport);

View File

@ -38,8 +38,10 @@ pub struct Config {
impl Config {
fn new(config_serde: ConfigSerde) -> Result<Self> {
let broken_merges_before = match config_serde.broken_merges_before {
Some(dt) => Some(DateTime::from_rfc3339(&dt.to_string())
.context("error while parsing broken_merges_before")?),
Some(dt) => Some(
DateTime::from_rfc3339(&dt.to_string())
.context("error while parsing broken_merges_before")?,
),
None => None,
};

View File

@ -31,7 +31,10 @@ mod config;
use std::process;
use std::result;
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use clap::{App, Arg, ArgMatches};
use failure::DisplayChain;
@ -221,12 +224,7 @@ fn main() -> Result<()> {
let end_points: Vec<_> = end_receiver.into_iter().collect();
process::exit(summarize(
logger,
end_points,
valid,
invalid,
errors,
ignored,
logger, end_points, valid, invalid, errors, ignored,
));
}

View File

@ -53,8 +53,8 @@ use context::CoreContext;
use failure::{Error, Result};
use futures::Future;
use futures_ext::{BoxFuture, FutureExt};
use hooks::{HookExecution, HookManager};
use hooks::lua_hook::LuaHook;
use hooks::{HookExecution, HookManager};
use mercurial_types::HgChangesetId;
use mononoke_types::RepositoryId;
use slog::{Drain, Level, Logger};
@ -77,7 +77,8 @@ fn run_hook(
"-p, --myrouter-port=[PORT] 'port for local myrouter instance'\n",
)),
false, /* hide_advanced_args */
).get_matches_from(args);
)
.get_matches_from(args);
cmdlib::args::init_cachelib(&matches);
@ -170,7 +171,8 @@ fn create_blobrepo(logger: &Logger, matches: &ArgMatches) -> BlobRepo {
filenode_shards,
RepositoryId::new(0),
myrouter_port,
).expect("failed to create blobrepo instance")
)
.expect("failed to create blobrepo instance")
}
// It all starts here

View File

@ -48,9 +48,9 @@ use clap::{App, ArgMatches};
use context::CoreContext;
use failure::Error;
use failure::Result;
use futures::Stream;
use futures::future::Future;
use futures::stream::repeat;
use futures::Stream;
use futures_ext::{BoxFuture, FutureExt};
use hooks::{ChangesetHookExecutionID, FileHookExecutionID, HookExecution};
use manifold::{ManifoldHttpClient, RequestContext};
@ -132,8 +132,7 @@ fn main() -> Result<()> {
Some(init_rev) => {
info!(
logger.clone(),
"Initial revision specified as argument {}",
init_rev
"Initial revision specified as argument {}", init_rev
);
let hash = HgNodeHash::from_str(init_rev)?;
let bytes = hash.as_bytes().into();
@ -156,14 +155,16 @@ fn main() -> Result<()> {
.map_err(|err| format_err!("Tokio timer error {:?}", err))
})
})
}).left_future()
})
.left_future()
} else {
let limit = cmdlib::args::get_u64(&matches, "limit", 1000);
let logger = logger.clone();
fut.then(move |_| {
let fut = tailer.run_with_limit(limit);
process_hook_results(fut, logger)
}).right_future()
})
.right_future()
};
tokio::run(fut.map(|_| ()).map_err(move |err| {
@ -213,7 +214,8 @@ fn process_hook_results(
info!(logger, "==== Changeset hooks stat: {} ====", cs_hooks_stat);
()
}).boxify()
})
.boxify()
}
struct HookExecutionStat {
@ -310,5 +312,6 @@ fn get_config<'a>(matches: &ArgMatches<'a>) -> Result<RepoConfigs> {
#[derive(Debug, Fail)]
pub enum ErrorKind {
#[fail(display = "No such repo '{}'", _0)] NoSuchRepo(String),
#[fail(display = "No such repo '{}'", _0)]
NoSuchRepo(String),
}

View File

@ -14,7 +14,9 @@ use failure::Error;
use failure::Result;
use futures::{Future, Stream};
use futures_ext::{spawn_future, BoxFuture, FutureExt};
use hooks::{BlobRepoChangesetStore, BlobRepoFileContentStore, HookManager, hook_loader::load_hooks};
use hooks::{
hook_loader::load_hooks, BlobRepoChangesetStore, BlobRepoFileContentStore, HookManager,
};
use manifold::{ManifoldHttpClient, PayloadRange};
use mercurial_types::{HgChangesetId, HgNodeHash};
use metaconfig::repoconfig::RepoConfig;
@ -129,7 +131,8 @@ impl Tailer {
let bm = self.bookmark.clone();
let hm = self.hook_manager.clone();
let bm_rev = self.repo
let bm_rev = self
.repo
.get_bookmark(ctx.clone(), &bm)
.and_then({
cloned!(bm);
@ -253,7 +256,10 @@ fn run_hooks_for_changeset(
#[derive(Debug, Fail)]
pub enum ErrorKind {
#[fail(display = "No such bookmark '{}'", _0)] NoSuchBookmark(Bookmark),
#[fail(display = "Cannot find last revision in blobstore")] NoLastRevision,
#[fail(display = "Cannot find bonsai for {}", _0)] BonsaiNotFound(HgChangesetId),
#[fail(display = "No such bookmark '{}'", _0)]
NoSuchBookmark(Bookmark),
#[fail(display = "Cannot find last revision in blobstore")]
NoLastRevision,
#[fail(display = "Cannot find bonsai for {}", _0)]
BonsaiNotFound(HgChangesetId),
}

View File

@ -10,7 +10,10 @@
use bookmarks::Bookmark;
use errors::*;
use failure::ResultExt;
use sql::mysql_async::{FromValueError, Value, prelude::{ConvIr, FromValue}};
use sql::mysql_async::{
prelude::{ConvIr, FromValue},
FromValueError, Value,
};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read};
@ -318,7 +321,8 @@ impl RepoConfigs {
let e: Error = ErrorKind::InvalidFileStructure(format!(
"invalid repo path {:?}",
repo_config_path
)).into();
))
.into();
e
})?;
let reponame = reponame.to_string();
@ -328,7 +332,8 @@ impl RepoConfigs {
return Err(ErrorKind::InvalidFileStructure(format!(
"expected file server.toml in {}",
repo_config_path.to_string_lossy()
)).into());
))
.into());
}
fn read_file(path: &Path) -> Result<Vec<u8>> {
@ -465,7 +470,8 @@ impl RepoConfigs {
if blobstores.insert(blobstore.blobstore_id, args).is_some() {
return Err(ErrorKind::InvalidConfig(
"blobstore identifiers are not unique".into(),
).into());
)
.into());
}
}
@ -528,7 +534,8 @@ impl RepoConfigs {
hooks_opt = None;
}
let pushrebase = this.pushrebase
let pushrebase = this
.pushrebase
.map(|raw| {
let default = PushrebaseParams::default();
PushrebaseParams {
@ -642,16 +649,21 @@ struct RawRemoteBlobstoreConfig {
/// Types of repositories supported
#[derive(Clone, Debug, Deserialize)]
enum RawRepoType {
#[serde(rename = "blob:files")] Files,
#[serde(rename = "blob:rocks")] BlobRocks,
#[serde(rename = "blob:remote")] BlobRemote,
#[serde(rename = "blob:testdelay")] TestBlobDelayRocks,
#[serde(rename = "blob:files")]
Files,
#[serde(rename = "blob:rocks")]
BlobRocks,
#[serde(rename = "blob:remote")]
BlobRemote,
#[serde(rename = "blob:testdelay")]
TestBlobDelayRocks,
}
/// Types of blobstores supported
#[derive(Clone, Debug, Deserialize)]
enum RawBlobstoreType {
#[serde(rename = "manifold")] Manifold,
#[serde(rename = "manifold")]
Manifold,
}
#[derive(Clone, Debug, Deserialize)]
@ -795,16 +807,14 @@ mod test {
weightlimit: 4321,
disable_acl_checker: false,
}),
bookmarks: Some(vec![
BookmarkParams {
bookmark: Bookmark::new("master").unwrap(),
hooks: Some(vec![
"hook1".to_string(),
"hook2".to_string(),
"rust:rusthook".to_string(),
]),
},
]),
bookmarks: Some(vec![BookmarkParams {
bookmark: Bookmark::new("master").unwrap(),
hooks: Some(vec![
"hook1".to_string(),
"hook2".to_string(),
"rust:rusthook".to_string(),
]),
}]),
hooks: Some(vec![
HookParams {
name: "hook1".to_string(),

View File

@ -19,8 +19,8 @@ use scribe_cxx::ScribeCxxClient;
use blobrepo::BlobRepo;
use blobstore::{Blobstore, PrefixBlobstore};
use hooks::HookManager;
use metaconfig::{LfsParams, PushrebaseParams};
use metaconfig::repoconfig::{RepoReadOnly, RepoType};
use metaconfig::{LfsParams, PushrebaseParams};
use mononoke_types::RepositoryId;
use errors::*;
@ -157,10 +157,7 @@ pub fn open_blobrepo(
Duration::new(seconds, nanos)
};
BlobRepo::new_rocksdb_delayed(
logger,
&path,
repoid,
delay_gen,
logger, &path, repoid, delay_gen,
// Roundtrips to the server - i.e. how many delays to apply
2, // get
3, // put

View File

@ -8,7 +8,10 @@ use std::collections::HashMap;
use std::sync::Arc;
use failure::prelude::*;
use futures::{Future, future::{self, ok}};
use futures::{
future::{self, ok},
Future,
};
use futures_ext::{BoxFuture, FutureExt};
use slog::Logger;
use sql::myrouter;
@ -16,7 +19,7 @@ use sql::myrouter;
use blobstore::Blobstore;
use cache_warmup::cache_warmup;
use context::CoreContext;
use hooks::{HookManager, hook_loader::load_hooks};
use hooks::{hook_loader::load_hooks, HookManager};
use metaconfig::repoconfig::{RepoConfig, RepoType};
use mononoke_types::RepositoryId;
use phases::{CachingHintPhases, HintPhases, Phases, SqlConstructors, SqlPhases};
@ -158,7 +161,7 @@ pub fn repo_handlers(
});
ready_handle
.wait_for(initial_warmup.and_then(|()| skip_index))
.wait_for(initial_warmup.and_then(|()| skip_index))
.map({
cloned!(root_log);
move |skip_index| {
@ -178,7 +181,8 @@ pub fn repo_handlers(
RepoType::BlobRemote { ref db_address, .. } => {
let storage = Arc::new(SqlPhases::with_myrouter(
&db_address,
myrouter_port.expect("myrouter_port not provided for BlobRemote repo"),
myrouter_port
.expect("myrouter_port not provided for BlobRemote repo"),
));
Arc::new(CachingHintPhases::new(storage, skip_index.clone()))
}