mononoke: do not store bytes in pull_default bookmarks cache

Summary:
The overall goal of this stack is to add WarmBookmarksCache support to
repo_client to make Mononoke more resilient to lands of very large commits.

This diff just does a small refactoring that makes introducing
WarmBookmarksCache easier. In particular, I'd later like the
cached_pull_default_bookmarks_maybe_stale cache to store not only PullDefault
bookmarks but also Publishing bookmarks, so that both the listkeys() and
heads() methods can be served from it. In order to do that we need to store
not only the bookmark name but also the bookmark kind (i.e. whether it is
Publishing or PullDefault).

To do that, let's store the actual Bookmark and HgChangesetId objects instead
of raw bytes.
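
To make the shape of the change concrete, here is a minimal, self-contained
Rust sketch. The Bookmark, BookmarkKind, and HgChangesetId types below are
simplified, hypothetical stand-ins for the real Mononoke types (which carry
more structure), so treat this as an illustration of the refactoring rather
than the actual implementation: the cache holds typed values, and the
conversion to raw listkeys-style bytes happens only at the read site.

    // Simplified, hypothetical stand-ins for the real Mononoke types, used only
    // to illustrate the shape of the refactoring described above.
    use std::collections::HashMap;

    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    enum BookmarkKind {
        PullDefault,
        Publishing,
    }

    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct Bookmark {
        name: String,
        kind: BookmarkKind,
    }

    #[derive(Clone, Debug)]
    struct HgChangesetId([u8; 20]);

    impl HgChangesetId {
        fn to_hex(&self) -> String {
            self.0.iter().map(|b| format!("{:02x}", b)).collect()
        }
    }

    // Before: the cache mapped raw bookmark-name bytes to raw hex-hash bytes,
    // so the bookmark kind was already lost by the time entries were cached.
    type OldCache = HashMap<Vec<u8>, Vec<u8>>;

    // After: the cache keeps the typed objects, so the kind is still available
    // and the same cache can later serve both listkeys() and heads().
    type NewCache = HashMap<Bookmark, HgChangesetId>;

    // The conversion to the wire format now happens only where the
    // listkeys-style response is produced, instead of at cache-fill time.
    fn to_wire(cache: &NewCache) -> OldCache {
        cache
            .iter()
            .map(|(book, cs)| (book.name.clone().into_bytes(), cs.to_hex().into_bytes()))
            .collect()
    }

    fn main() {
        let mut cache = NewCache::new();
        cache.insert(
            Bookmark {
                name: "master".to_string(),
                kind: BookmarkKind::PullDefault,
            },
            HgChangesetId([0x42; 20]),
        );
        let wire = to_wire(&cache);
        assert_eq!(wire[&b"master".to_vec()], "42".repeat(20).into_bytes());
        println!("converted {} bookmark(s) to wire format", wire.len());
    }

The diff below does exactly this move in the existing futures-0.1 code: the
raw-bytes conversion is taken out of get_pull_default_bookmarks_maybe_stale_raw()
and pushed into the method that serves the cached value.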

Reviewed By: farnz

Differential Revision: D22816710

fbshipit-source-id: 6ec3af8fe365d767689e8f6552f9af24cbcd0cb9
Stanislau Hlebik 2020-07-30 03:33:14 -07:00 committed by Facebook GitHub Bot
parent d1322c621d
commit 6941d0cfe9


@@ -362,7 +362,7 @@ pub struct RepoClient {
     // The client then gets a bookmark that points to a commit it does not yet have, and ignores it.
     // We currently fix it by caching bookmarks at the beginning of discovery.
     // TODO: T45411456 Fix this by teaching the client to expect extra commits to correspond to the bookmarks.
-    cached_pull_default_bookmarks_maybe_stale: Arc<Mutex<Option<HashMap<Vec<u8>, Vec<u8>>>>>,
+    cached_pull_default_bookmarks_maybe_stale: Arc<Mutex<Option<HashMap<Bookmark, HgChangesetId>>>>,
     wireproto_logging: Arc<WireprotoLogging>,
     maybe_push_redirector_args: Option<PushRedirectorArgs>,
     force_lfs: Arc<AtomicBool>,
@@ -372,12 +372,8 @@ pub struct RepoClient {
 fn get_pull_default_bookmarks_maybe_stale_raw(
     ctx: CoreContext,
     repo: BlobRepo,
-) -> impl Future<Item = HashMap<Vec<u8>, Vec<u8>>, Error = Error> {
+) -> impl Future<Item = HashMap<Bookmark, HgChangesetId>, Error = Error> {
     repo.get_pull_default_bookmarks_maybe_stale(ctx)
-        .map(|(book, cs): (Bookmark, HgChangesetId)| {
-            let hash: Vec<u8> = cs.into_nodehash().to_hex().into();
-            (book.into_name().into_byte_vec(), hash)
-        })
         .fold(HashMap::new(), |mut map, item| {
             map.insert(item.0, item.1);
             let ret: Result<_, Error> = Ok(map);
@@ -388,8 +384,8 @@ fn get_pull_default_bookmarks_maybe_stale_raw(
 }

 fn update_pull_default_bookmarks_maybe_stale_cache_raw(
-    cache: Arc<Mutex<Option<HashMap<Vec<u8>, Vec<u8>>>>>,
-    bookmarks: HashMap<Vec<u8>, Vec<u8>>,
+    cache: Arc<Mutex<Option<HashMap<Bookmark, HgChangesetId>>>>,
+    bookmarks: HashMap<Bookmark, HgChangesetId>,
 ) {
     let mut maybe_cache = cache.lock().expect("lock poisoned");
     *maybe_cache = Some(bookmarks);
@@ -397,7 +393,7 @@ fn update_pull_default_bookmarks_maybe_stale_cache_raw(

 fn update_pull_default_bookmarks_maybe_stale_cache(
     ctx: CoreContext,
-    cache: Arc<Mutex<Option<HashMap<Vec<u8>, Vec<u8>>>>>,
+    cache: Arc<Mutex<Option<HashMap<Bookmark, HgChangesetId>>>>,
     repo: BlobRepo,
 ) -> impl Future<Item = (), Error = Error> {
     get_pull_default_bookmarks_maybe_stale_raw(ctx, repo)
@@ -406,9 +402,9 @@ fn update_pull_default_bookmarks_maybe_stale_cache(

 fn get_pull_default_bookmarks_maybe_stale_updating_cache(
     ctx: CoreContext,
-    cache: Arc<Mutex<Option<HashMap<Vec<u8>, Vec<u8>>>>>,
+    cache: Arc<Mutex<Option<HashMap<Bookmark, HgChangesetId>>>>,
     repo: BlobRepo,
-) -> impl Future<Item = HashMap<Vec<u8>, Vec<u8>>, Error = Error> {
+) -> impl Future<Item = HashMap<Bookmark, HgChangesetId>, Error = Error> {
     get_pull_default_bookmarks_maybe_stale_raw(ctx, repo).inspect(move |bookmarks| {
         update_pull_default_bookmarks_maybe_stale_cache_raw(cache, bookmarks.clone())
     })
@@ -501,6 +497,15 @@ impl RepoClient {
                 .left_future(),
             Some(bookmarks) => future_old::ok(bookmarks).right_future(),
         }
+        .map(|bookmarks| {
+            bookmarks
+                .into_iter()
+                .map(|(book, cs)| {
+                    let hash: Vec<u8> = cs.into_nodehash().to_hex().into();
+                    (book.into_name().into_byte_vec(), hash)
+                })
+                .collect()
+        })
     }

     fn create_bundle(&self, ctx: CoreContext, args: GetbundleArgs) -> BoxStream<BytesOld, Error> {