repo_import: switch to using a facet container instead of BlobRepo

Summary: Use a facet container for `repo_import`. This will allow us to continue calling `bookmarks_movement` code when we add dependencies on `RepoCrossRepo`, which is not part of `BlobRepo`.

Reviewed By: mitrandir77

Differential Revision: D34222497

fbshipit-source-id: 8f321f4c11348782738336ae4ff157c0c6c658cf
Mark Juggurnauth-Thomas 2022-02-17 08:48:35 -08:00 committed by Facebook GitHub Bot
parent c0aa44d589
commit 436e1fec54
5 changed files with 194 additions and 96 deletions
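
For context, a minimal sketch of the facet-container pattern this change adopts. The `resolve_bookmark` helper below is hypothetical and only illustrates the idea (the real container is the `Repo` struct added in this diff): instead of requiring a whole `BlobRepo`, a function can bound its `repo` parameter by the generated `*Ref` facet traits it actually uses, which is what lets `repo_import` keep calling `bookmarks_movement` once that code grows a dependency on `RepoCrossRepo`.

    use anyhow::Error;
    use bookmarks::{BookmarkName, BookmarksRef};
    use context::CoreContext;
    use mononoke_types::ChangesetId;
    use repo_identity::RepoIdentityRef;
    use slog::info;

    // Hypothetical helper, not part of this diff: it accepts any repo object
    // that provides the Bookmarks and RepoIdentity facets, such as the `Repo`
    // container defined in this change.
    async fn resolve_bookmark(
        ctx: &CoreContext,
        repo: &(impl BookmarksRef + RepoIdentityRef),
        name: &BookmarkName,
    ) -> Result<Option<ChangesetId>, Error> {
        // `bookmarks()` and `repo_identity()` are accessors generated by
        // `#[facet::container]` for the delegated and direct facets.
        let maybe_cs_id = repo.bookmarks().get(ctx.clone(), name).await?;
        if maybe_cs_id.is_none() {
            info!(
                ctx.logger(),
                "bookmark {} not found in repo {}",
                name,
                repo.repo_identity().name()
            );
        }
        Ok(maybe_cs_id)
    }

Because the container delegates most facets to its inner `BlobRepo` and exposes `as_blob_repo()`, call sites that still need a `BlobRepo` (pushrebase, gitimport, derived data utils) keep working unchanged, as the diff below shows.
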


@ -13,14 +13,20 @@ backsyncer = { version = "0.1.0", path = "../commit_rewriting/backsyncer" }
blobrepo = { version = "0.1.0", path = "../blobrepo" }
blobrepo_hg = { version = "0.1.0", path = "../blobrepo/blobrepo_hg" }
blobstore = { version = "0.1.0", path = "../blobstore" }
bonsai_git_mapping = { version = "0.1.0", path = "../bonsai_git_mapping" }
bonsai_globalrev_mapping = { version = "0.1.0", path = "../bonsai_globalrev_mapping" }
bonsai_hg_mapping = { version = "0.1.0", path = "../bonsai_hg_mapping" }
bookmarks = { version = "0.1.0", path = "../bookmarks" }
bookmarks_movement = { version = "0.1.0", path = "../bookmarks/bookmarks_movement" }
borrowed = { version = "0.1.0", git = "https://github.com/facebookexperimental/rust-shed.git", branch = "main" }
changeset_fetcher = { version = "0.1.0", path = "../blobrepo/changeset_fetcher" }
changesets = { version = "0.1.0", path = "../changesets" }
clap = "2.33"
cmdlib = { version = "0.1.0", path = "../cmdlib" }
context = { version = "0.1.0", path = "../server/context" }
cross_repo_sync = { version = "0.1.0", path = "../commit_rewriting/cross_repo_sync" }
derived_data_utils = { version = "0.1.0", path = "../derived_data/utils" }
facet = { version = "0.1.0", git = "https://github.com/facebookexperimental/rust-shed.git", branch = "main" }
fbinit = { version = "0.1.0", git = "https://github.com/facebookexperimental/rust-shed.git", branch = "main" }
futures = { version = "0.3.13", features = ["async-await", "compat"] }
import_tools = { version = "0.1.0", path = "../git/import_tools" }
@ -34,7 +40,13 @@ mononoke_hg_sync_job_helper_lib = { version = "0.1.0", path = "../mononoke_hg_sy
mononoke_types = { version = "0.1.0", path = "../mononoke_types" }
movers = { version = "0.1.0", path = "../commit_rewriting/movers" }
mutable_counters = { version = "0.1.0", path = "../mutable_counters" }
phases = { version = "0.1.0", path = "../phases" }
pushrebase = { version = "0.1.0", path = "../pushrebase" }
pushrebase_mutation_mapping = { version = "0.1.0", path = "../pushrebase_mutation_mapping" }
repo_blobstore = { version = "0.1.0", path = "../blobrepo/repo_blobstore" }
repo_cross_repo = { version = "0.1.0", path = "../repo_attributes/repo_cross_repo" }
repo_derived_data = { version = "0.1.0", path = "../repo_attributes/repo_derived_data" }
repo_identity = { version = "0.1.0", path = "../repo_attributes/repo_identity" }
segmented_changelog = { version = "0.1.0", path = "../segmented_changelog" }
serde = { version = "1.0.126", features = ["derive", "rc"] }
serde_json = { version = "1.0.64", features = ["float_roundtrip", "unbounded_depth"] }


@ -8,10 +8,10 @@
#![type_length_limit = "4522397"]
use anyhow::{format_err, Context, Error};
use backsyncer::{backsync_latest, open_backsyncer_dbs, BacksyncLimit, TargetRepoDbs};
use blobrepo::{save_bonsai_changesets, BlobRepo};
use blobrepo::{save_bonsai_changesets, AsBlobRepo};
use blobrepo_hg::BlobRepoHg;
use blobstore::Loadable;
use bookmarks::{BookmarkName, BookmarkUpdateReason};
use bookmarks::{BookmarkName, BookmarkUpdateReason, BookmarksRef};
use borrowed::borrowed;
use clap::ArgMatches;
use cmdlib::args::{self, MononokeMatches};
@ -43,6 +43,7 @@ use mononoke_types::{BonsaiChangeset, BonsaiChangesetMut, ChangesetId, DateTime}
use movers::{DefaultAction, Mover};
use mutable_counters::SqlMutableCounters;
use pushrebase::do_pushrebase_bonsai;
use repo_blobstore::RepoBlobstoreRef;
use segmented_changelog::{seedheads_from_config, SeedHead, SegmentedChangelogTailer};
use serde::{Deserialize, Serialize};
use serde_json;
@ -60,12 +61,14 @@ use tokio::{
use topo_sort::sort_topological;
mod cli;
mod repo;
mod tests;
use crate::cli::{
setup_app, setup_import_args, ARG_BOOKMARK_SUFFIX, ARG_DEST_BOOKMARK, ARG_PHAB_CHECK_DISABLED,
CHECK_ADDITIONAL_SETUP_STEPS, IMPORT, RECOVER_PROCESS, SAVED_RECOVERY_FILE_PATH,
};
use crate::repo::Repo;
#[derive(Deserialize, Clone, Debug)]
struct GraphqlQueryObj {
@ -110,7 +113,7 @@ struct SmallRepoBackSyncVars {
large_to_small_syncer: CommitSyncer<SqlSyncedCommitMapping>,
target_repo_dbs: TargetRepoDbs,
small_repo_bookmark: BookmarkName,
small_repo: BlobRepo,
small_repo: Repo,
maybe_call_sign: Option<String>,
version: CommitSyncConfigVersion,
}
@ -159,7 +162,7 @@ pub struct RecoveryFields {
async fn rewrite_file_paths(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
mover: &Mover,
gitimport_bcs_ids: &[ChangesetId],
) -> Result<Vec<ChangesetId>, Error> {
@ -168,7 +171,7 @@ async fn rewrite_file_paths(
let len = gitimport_bcs_ids.len();
let gitimport_changesets = stream::iter(gitimport_bcs_ids.iter().map(|bcs_id| async move {
let bcs = bcs_id.load(ctx, &repo.get_blobstore()).await?;
let bcs = bcs_id.load(ctx, repo.repo_blobstore()).await?;
Result::<_, Error>::Ok(bcs)
}))
.buffered(len)
@ -182,7 +185,7 @@ async fn rewrite_file_paths(
bcs.clone().into_mut(),
&remapped_parents,
mover.clone(),
repo.clone(),
repo.as_blob_repo().clone(),
CommitRewrittenToEmpty::Discard,
)
.await?;
@ -227,7 +230,7 @@ async fn find_mapping_version(
async fn back_sync_commits_to_small_repo(
ctx: &CoreContext,
small_repo: &BlobRepo,
small_repo: &Repo,
large_to_small_syncer: &CommitSyncer<SqlSyncedCommitMapping>,
bcs_ids: &[ChangesetId],
version: &CommitSyncConfigVersion,
@ -323,14 +326,17 @@ async fn wait_until_backsynced_and_return_version(
async fn derive_bonsais_single_repo(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
bcs_ids: &[ChangesetId],
) -> Result<(), Error> {
let derived_data_types = &repo.get_active_derived_data_types_config().types;
let derived_data_types = &repo
.as_blob_repo()
.get_active_derived_data_types_config()
.types;
let derived_utils: Vec<_> = derived_data_types
.iter()
.map(|ty| derived_data_utils(ctx.fb, repo, ty))
.map(|ty| derived_data_utils(ctx.fb, repo.as_blob_repo(), ty))
.collect::<Result<_, _>>()?;
stream::iter(derived_utils)
@ -338,7 +344,7 @@ async fn derive_bonsais_single_repo(
.try_for_each_concurrent(derived_data_types.len(), |derived_util| async move {
for csid in bcs_ids {
derived_util
.derive(ctx.clone(), repo.clone(), csid.clone())
.derive(ctx.clone(), repo.as_blob_repo().clone(), csid.clone())
.map_ok(|_| ())
.await?;
}
@ -349,7 +355,7 @@ async fn derive_bonsais_single_repo(
async fn move_bookmark(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
shifted_bcs_ids: &[ChangesetId],
bookmark: &BookmarkName,
checker_flags: &CheckerFlags,
@ -372,7 +378,7 @@ async fn move_bookmark(
}
};
let maybe_old_csid = repo.get_bonsai_bookmark(ctx.clone(), bookmark).await?;
let maybe_old_csid = repo.bookmarks().get(ctx.clone(), bookmark).await?;
/* If the bookmark already exists, we should continue moving the
bookmark from the last commit it points to */
@ -381,7 +387,7 @@ async fn move_bookmark(
None => first_csid,
};
let mut transaction = repo.update_bookmark_transaction(ctx.clone());
let mut transaction = repo.bookmarks().create_transaction(ctx.clone());
if maybe_old_csid.is_none() {
transaction.create(
&bookmark,
@ -407,7 +413,7 @@ async fn move_bookmark(
.chunks(batch_size)
.into_iter()
{
transaction = repo.update_bookmark_transaction(ctx.clone());
transaction = repo.bookmarks().create_transaction(ctx.clone());
let (shifted_index, curr_csid) = match chunk.last() {
Some(tuple) => tuple,
None => {
@ -434,6 +440,7 @@ async fn move_bookmark(
let check_repo = async move {
let hg_csid = repo
.as_blob_repo()
.get_hg_from_bonsai_changeset(ctx.clone(), curr_csid.clone())
.await?;
check_dependent_systems(
@ -465,7 +472,8 @@ async fn move_bookmark(
.await?;
let small_repo_cs_id = small_repo_back_sync_vars
.small_repo
.get_bonsai_bookmark(ctx.clone(), &small_repo_back_sync_vars.small_repo_bookmark)
.bookmarks()
.get(ctx.clone(), &small_repo_back_sync_vars.small_repo_bookmark)
.await?
.ok_or_else(|| {
format_err!(
@ -476,6 +484,7 @@ async fn move_bookmark(
let small_repo_hg_csid = small_repo_back_sync_vars
.small_repo
.as_blob_repo()
.get_hg_from_bonsai_changeset(ctx.clone(), small_repo_cs_id)
.await?;
@ -507,7 +516,7 @@ async fn move_bookmark(
async fn merge_imported_commit(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
imported_cs_id: ChangesetId,
dest_bookmark: &BookmarkName,
changeset_args: ChangesetArgs,
@ -516,7 +525,7 @@ async fn merge_imported_commit(
ctx.logger(),
"Merging the imported commits into given bookmark, {}", dest_bookmark
);
let master_cs_id = match repo.get_bonsai_bookmark(ctx.clone(), dest_bookmark).await? {
let master_cs_id = match repo.bookmarks().get(ctx.clone(), dest_bookmark).await? {
Some(id) => id,
None => {
return Err(format_err!(
@ -580,14 +589,14 @@ async fn merge_imported_commit(
async fn push_merge_commit(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
merged_cs_id: ChangesetId,
bookmark_to_merge_into: &BookmarkName,
repo_config: &RepoConfig,
) -> Result<ChangesetId, Error> {
info!(ctx.logger(), "Running pushrebase");
let merged_cs = merged_cs_id.load(ctx, repo.blobstore()).await?;
let merged_cs = merged_cs_id.load(ctx, repo.repo_blobstore()).await?;
let pushrebase_flags = repo_config.pushrebase.flags;
let bookmark_attrs = BookmarkAttrs::new(ctx.fb, repo_config.bookmarks.clone()).await?;
let pushrebase_hooks = bookmarks_movement::get_pushrebase_hooks(
@ -600,7 +609,7 @@ async fn push_merge_commit(
let pushrebase_res = do_pushrebase_bonsai(
ctx,
repo,
repo.as_blob_repo(),
&pushrebase_flags,
bookmark_to_merge_into,
&hashset![merged_cs],
@ -619,16 +628,17 @@ async fn push_merge_commit(
async fn get_leaf_entries(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
cs_id: ChangesetId,
) -> Result<HashSet<MPath>, Error> {
let hg_cs_id = repo
.as_blob_repo()
.get_hg_from_bonsai_changeset(ctx.clone(), cs_id)
.await?;
let hg_cs = hg_cs_id.load(ctx, &repo.get_blobstore()).await?;
let hg_cs = hg_cs_id.load(ctx, repo.repo_blobstore()).await?;
hg_cs
.manifestid()
.list_leaf_entries(ctx.clone(), repo.get_blobstore())
.list_leaf_entries(ctx.clone(), repo.repo_blobstore().clone())
.map_ok(|(path, (_file_type, _filenode_id))| path)
.try_collect::<HashSet<_>>()
.await
@ -636,7 +646,7 @@ async fn get_leaf_entries(
async fn check_dependent_systems(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
checker_flags: &CheckerFlags,
hg_csid: HgChangesetId,
sleep_time: u64,
@ -661,7 +671,8 @@ async fn check_dependent_systems(
}
if !passed_hg_sync_check {
wait_for_latest_log_id_to_be_synced(ctx, repo, mutable_counters, sleep_time).await?;
wait_for_latest_log_id_to_be_synced(ctx, repo.as_blob_repo(), mutable_counters, sleep_time)
.await?;
}
Ok(())
@ -758,11 +769,11 @@ fn get_importing_bookmark(bookmark_suffix: &str) -> Result<BookmarkName, Error>
// Note: pushredirection only works from small repo to large repo.
async fn get_large_repo_config_if_pushredirected<'a>(
repo: &BlobRepo,
repo: &Repo,
live_commit_sync_config: &CfgrLiveCommitSyncConfig,
repos: &HashMap<String, RepoConfig>,
) -> Result<Option<RepoConfig>, Error> {
let repo_id = repo.get_repoid();
let repo_id = repo.repo_id();
let enabled = live_commit_sync_config.push_redirector_enabled_for_public(repo_id);
if enabled {
@ -846,18 +857,18 @@ where
async fn get_pushredirected_vars(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
repo_import_setting: &RepoImportSetting,
large_repo_config: &RepoConfig,
matches: &MononokeMatches<'_>,
live_commit_sync_config: CfgrLiveCommitSyncConfig,
) -> Result<(BlobRepo, RepoImportSetting, Syncers<SqlSyncedCommitMapping>), Error> {
) -> Result<(Repo, RepoImportSetting, Syncers<SqlSyncedCommitMapping>), Error> {
let caching = matches.caching();
let x_repo_syncer_lease = create_commit_syncer_lease(ctx.fb, caching)?;
let config_store = matches.config_store();
let large_repo_id = large_repo_config.repoid;
let large_repo: BlobRepo =
let large_repo: Repo =
args::open_repo_with_repo_id(ctx.fb, &ctx.logger(), large_repo_id, &matches).await?;
let common_commit_sync_config = live_commit_sync_config.get_common_config(large_repo_id)?;
@ -871,8 +882,8 @@ async fn get_pushredirected_vars(
let mapping = args::open_sql::<SqlSyncedCommitMapping>(ctx.fb, config_store, &matches)?;
let syncers = create_commit_syncers(
ctx,
repo.clone(),
large_repo.clone(),
repo.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
Arc::new(live_commit_sync_config),
x_repo_syncer_lease,
@ -911,7 +922,7 @@ async fn fetch_recovery_state(
async fn repo_import(
ctx: CoreContext,
mut repo: BlobRepo,
mut repo: Repo,
recovery_fields: &mut RecoveryFields,
matches: &MononokeMatches<'_>,
) -> Result<(), Error> {
@ -936,8 +947,7 @@ async fn repo_import(
importing_bookmark,
dest_bookmark,
};
let (_, mut repo_config) =
args::get_config_by_repoid(config_store, &matches, repo.get_repoid())?;
let (_, mut repo_config) = args::get_config_by_repoid(config_store, matches, repo.repo_id())?;
let mut call_sign = repo_config.phabricator_callsign.clone();
if !recovery_fields.phab_check_disabled && call_sign.is_none() {
return Err(format_err!(
@ -979,7 +989,7 @@ async fn repo_import(
.await?;
let target_repo_dbs = open_backsyncer_dbs(
ctx.clone(),
repo.clone(),
repo.as_blob_repo().clone(),
repo_config.storage_config.metadata,
mysql_options.clone(),
*readonly_storage,
@ -1049,7 +1059,8 @@ async fn repo_import(
let prefs = GitimportPreferences::default();
let target = FullRepoImport {};
info!(ctx.logger(), "Started importing git commits to Mononoke");
let import_map = import_tools::gitimport(&ctx, &repo, &path, &target, prefs).await?;
let import_map =
import_tools::gitimport(&ctx, repo.as_blob_repo(), path, &target, prefs).await?;
info!(ctx.logger(), "Added commits to Mononoke");
let bonsai_values: Vec<(ChangesetId, BonsaiChangeset)> =
@ -1201,7 +1212,7 @@ async fn repo_import(
async fn tail_segmented_changelog(
ctx: &CoreContext,
blobrepo: &BlobRepo,
repo: &Repo,
imported_cs_id: &ChangesetId,
storage_config_metadata: &MetadataDatabaseConfig,
mysql_options: &MysqlOptions,
@ -1212,7 +1223,7 @@ async fn tail_segmented_changelog(
let segmented_changelog_tailer = SegmentedChangelogTailer::build_from(
ctx,
blobrepo,
repo.as_blob_repo(),
storage_config_metadata,
mysql_options,
seed_heads,
@ -1221,7 +1232,7 @@ async fn tail_segmented_changelog(
)
.await?;
let repo_id = blobrepo.get_repoid();
let repo_id = repo.repo_id();
info!(
ctx.logger(),
@ -1241,7 +1252,7 @@ async fn tail_segmented_changelog(
async fn check_additional_setup_steps(
ctx: CoreContext,
repo: BlobRepo,
repo: Repo,
sub_arg_matches: &ArgMatches<'_>,
matches: &MononokeMatches<'_>,
) -> Result<(), Error> {
@ -1288,7 +1299,7 @@ async fn check_additional_setup_steps(
importing_bookmark,
dest_bookmark,
};
let (_, repo_config) = args::get_config_by_repoid(config_store, &matches, repo.get_repoid())?;
let (_, repo_config) = args::get_config_by_repoid(config_store, matches, repo.repo_id())?;
let call_sign = repo_config.phabricator_callsign;
let phab_check_disabled = sub_arg_matches.is_present(ARG_PHAB_CHECK_DISABLED);


@ -0,0 +1,60 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use blobrepo::{AsBlobRepo, BlobRepo};
use bonsai_git_mapping::BonsaiGitMapping;
use bonsai_globalrev_mapping::BonsaiGlobalrevMapping;
use bonsai_hg_mapping::BonsaiHgMapping;
use bookmarks::{BookmarkUpdateLog, Bookmarks};
use changeset_fetcher::ChangesetFetcher;
use changesets::Changesets;
use mononoke_types::RepositoryId;
use phases::Phases;
use pushrebase_mutation_mapping::PushrebaseMutationMapping;
use repo_blobstore::RepoBlobstore;
use repo_cross_repo::RepoCrossRepo;
use repo_derived_data::RepoDerivedData;
use repo_identity::{RepoIdentity, RepoIdentityRef};
#[facet::container]
#[derive(Clone)]
pub struct Repo {
#[delegate(
RepoBlobstore,
RepoDerivedData,
RepoIdentity,
dyn BonsaiGitMapping,
dyn BonsaiGlobalrevMapping,
dyn BonsaiHgMapping,
dyn Bookmarks,
dyn BookmarkUpdateLog,
dyn ChangesetFetcher,
dyn Changesets,
dyn Phases,
dyn PushrebaseMutationMapping,
)]
blob_repo: BlobRepo,
#[facet]
repo_cross_repo: RepoCrossRepo,
}
impl Repo {
pub fn repo_id(&self) -> RepositoryId {
self.repo_identity().id()
}
pub fn name(&self) -> &str {
self.repo_identity().name()
}
}
impl AsBlobRepo for Repo {
fn as_blob_repo(&self) -> &BlobRepo {
&self.blob_repo
}
}


@ -11,13 +11,15 @@ mod tests {
back_sync_commits_to_small_repo, check_dependent_systems, derive_bonsais_single_repo,
find_mapping_version, get_large_repo_config_if_pushredirected, get_large_repo_setting,
merge_imported_commit, move_bookmark, push_merge_commit, rewrite_file_paths, ChangesetArgs,
CheckerFlags, ImportStage, RecoveryFields, RepoImportSetting,
CheckerFlags, ImportStage, RecoveryFields, Repo, RepoImportSetting,
};
use anyhow::Result;
use ascii::AsciiString;
use blobrepo::BlobRepo;
use blobrepo::AsBlobRepo;
use blobstore::Loadable;
use bookmarks::{BookmarkName, BookmarkUpdateReason, Freshness};
use bookmarks::{
BookmarkName, BookmarkUpdateLogRef, BookmarkUpdateReason, BookmarksRef, Freshness,
};
use cacheblob::InProcessLease;
use cached_config::{ConfigStore, ModificationTime, TestSource};
use context::CoreContext;
@ -47,6 +49,7 @@ mod tests {
use mononoke_types_mocks::changesetid::{ONES_CSID as MON_CSID, TWOS_CSID};
use movers::{DefaultAction, Mover};
use mutable_counters::{MutableCounters, SqlMutableCounters};
use repo_blobstore::RepoBlobstoreRef;
use sql_construct::SqlConstruct;
use std::collections::HashMap;
use std::str::FromStr;
@ -67,8 +70,8 @@ mod tests {
MPath::new(s).unwrap()
}
fn create_repo(id: i32) -> Result<BlobRepo> {
let repo: BlobRepo = TestRepoFactory::new()?
fn create_repo(id: i32) -> Result<Repo> {
let repo: Repo = TestRepoFactory::new()?
.with_config_override(|config| {
config
.derived_data_config
@ -116,7 +119,7 @@ mod tests {
#[fbinit::test]
async fn test_move_bookmark(fb: FacebookInit) -> Result<()> {
let ctx = CoreContext::test_mock(fb);
let blob_repo = test_repo_factory::build_empty()?;
let repo: Repo = test_repo_factory::build_empty()?;
let mut recovery_fields = create_mock_recovery_fields();
let call_sign = Some("FBS".to_string());
let checker_flags = CheckerFlags {
@ -127,7 +130,7 @@ mod tests {
let mutable_counters = SqlMutableCounters::with_sqlite_in_memory().unwrap();
let changesets = create_from_dag(
&ctx,
&blob_repo,
repo.as_blob_repo(),
r##"
A-B-C-D-E-F-G
"##,
@ -138,7 +141,7 @@ mod tests {
let importing_bookmark = BookmarkName::new("repo_import_test_repo")?;
move_bookmark(
&ctx,
&blob_repo,
&repo,
&bcs_ids,
&importing_bookmark,
&checker_flags,
@ -149,7 +152,7 @@ mod tests {
)
.await?;
// Check the bookmark moves created BookmarkLogUpdate entries
let entries = blob_repo
let entries = repo
.bookmark_update_log()
.list_bookmark_log_entries(
ctx.clone(),
@ -178,7 +181,7 @@ mod tests {
#[fbinit::test]
async fn test_move_bookmark_with_existing_bookmark(fb: FacebookInit) -> Result<()> {
let ctx = CoreContext::test_mock(fb);
let blob_repo = test_repo_factory::build_empty()?;
let repo: Repo = test_repo_factory::build_empty()?;
let mut recovery_fields = create_mock_recovery_fields();
let checker_flags = CheckerFlags {
phab_check_disabled: true,
@ -188,7 +191,7 @@ mod tests {
let mutable_counters = SqlMutableCounters::with_sqlite_in_memory().unwrap();
let changesets = create_from_dag(
&ctx,
&blob_repo,
repo.as_blob_repo(),
r##"
A-B-C-D-E-F-G
"##,
@ -197,7 +200,7 @@ mod tests {
let bcs_ids: Vec<ChangesetId> = changesets.values().copied().collect();
let importing_bookmark = BookmarkName::new("repo_import_test_repo")?;
let mut txn = blob_repo.update_bookmark_transaction(ctx.clone());
let mut txn = repo.bookmarks().create_transaction(ctx.clone());
txn.create(
&importing_bookmark,
bcs_ids.first().unwrap().clone(),
@ -207,7 +210,7 @@ mod tests {
txn.commit().await.unwrap();
move_bookmark(
&ctx,
&blob_repo,
&repo,
&bcs_ids,
&importing_bookmark,
&checker_flags,
@ -218,7 +221,7 @@ mod tests {
)
.await?;
// Check the bookmark moves created BookmarkLogUpdate entries
let entries = blob_repo
let entries = repo
.bookmark_update_log()
.list_bookmark_log_entries(
ctx.clone(),
@ -255,7 +258,7 @@ mod tests {
#[fbinit::test]
async fn test_hg_sync_check(fb: FacebookInit) -> Result<()> {
let ctx = CoreContext::test_mock(fb);
let repo: BlobRepo = test_repo_factory::build_empty()?;
let repo: Repo = test_repo_factory::build_empty()?;
let checker_flags = CheckerFlags {
phab_check_disabled: true,
x_repo_check_disabled: true,
@ -264,7 +267,7 @@ mod tests {
let call_sign = None;
let sleep_time = 1;
let mutable_counters = SqlMutableCounters::with_sqlite_in_memory().unwrap();
let repo_id = repo.get_repoid();
let repo_id = repo.repo_id();
let bookmark = create_bookmark_name("book");
assert!(
@ -281,7 +284,7 @@ mod tests {
.is_err()
);
let mut txn = repo.update_bookmark_transaction(ctx.clone());
let mut txn = repo.bookmarks().create_transaction(ctx.clone());
txn.create(&bookmark, MON_CSID, BookmarkUpdateReason::TestMove, None)?;
txn.commit().await.unwrap();
assert!(
@ -314,7 +317,7 @@ mod tests {
)
.await?;
let mut txn = repo.update_bookmark_transaction(ctx.clone());
let mut txn = repo.bookmarks().create_transaction(ctx.clone());
txn.update(
&bookmark,
TWOS_CSID,
@ -363,16 +366,18 @@ mod tests {
let ctx = CoreContext::test_mock(fb);
let repo = create_repo(1)?;
let master_cs_id = CreateCommitContext::new_root(&ctx, &repo)
let master_cs_id = CreateCommitContext::new_root(&ctx, repo.as_blob_repo())
.add_file("a", "a")
.commit()
.await?;
let imported_cs_id = CreateCommitContext::new_root(&ctx, &repo)
let imported_cs_id = CreateCommitContext::new_root(&ctx, repo.as_blob_repo())
.add_file("b", "b")
.commit()
.await?;
let dest_bookmark = bookmark(&ctx, &repo, "master").set_to(master_cs_id).await?;
let dest_bookmark = bookmark(&ctx, repo.as_blob_repo(), "master")
.set_to(master_cs_id)
.await?;
let changeset_args = ChangesetArgs {
author: "user".to_string(),
@ -392,7 +397,7 @@ mod tests {
let pushed_cs_id =
push_merge_commit(&ctx, &repo, merged_cs_id, &dest_bookmark, &repo_config).await?;
let pushed_cs = pushed_cs_id.load(&ctx, repo.blobstore()).await?;
let pushed_cs = pushed_cs_id.load(&ctx, repo.repo_blobstore()).await?;
assert_eq!(
Globalrev::new(START_COMMIT_GLOBALREV),
@ -632,8 +637,8 @@ mod tests {
let live_commit_sync_config = get_large_repo_live_commit_sync_config();
let syncers_1 = create_commit_syncers(
&ctx,
small_repo_1.clone(),
large_repo.clone(),
small_repo_1.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
live_commit_sync_config.clone(),
Arc::new(InProcessLease::new()),
@ -658,8 +663,8 @@ mod tests {
let syncers_2 = create_commit_syncers(
&ctx,
small_repo_2.clone(),
large_repo.clone(),
small_repo_2.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
live_commit_sync_config,
Arc::new(InProcessLease::new()),
@ -699,7 +704,7 @@ mod tests {
let small_repo = create_repo(1)?;
let changesets = create_from_dag(
&ctx,
&large_repo,
large_repo.as_blob_repo(),
r##"
A-B
"##,
@ -711,8 +716,8 @@ mod tests {
let mapping = SqlSyncedCommitMapping::with_sqlite_in_memory().unwrap();
let syncers = create_commit_syncers(
&ctx,
small_repo.clone(),
large_repo.clone(),
small_repo.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
live_commit_sync_config,
Arc::new(InProcessLease::new()),
@ -748,7 +753,7 @@ mod tests {
rewrite_file_paths(&ctx, &large_repo, &combined_mover, &cs_ids).await?;
let large_repo_cs_a = &shifted_bcs_ids[0]
.load(&ctx, &large_repo.get_blobstore())
.load(&ctx, large_repo.repo_blobstore())
.await?;
let large_repo_cs_a_mpaths = get_file_changes_mpaths(&large_repo_cs_a);
assert_eq!(
@ -757,7 +762,7 @@ mod tests {
);
let large_repo_cs_b = &shifted_bcs_ids[1]
.load(&ctx, &large_repo.get_blobstore())
.load(&ctx, large_repo.repo_blobstore())
.await?;
let large_repo_cs_b_mpaths = get_file_changes_mpaths(&large_repo_cs_b);
assert_eq!(vec![mp("random_dir/B")], large_repo_cs_b_mpaths);
@ -772,13 +777,13 @@ mod tests {
.await?;
let small_repo_cs_a = &synced_bcs_ids[0]
.load(&ctx, &small_repo.get_blobstore())
.load(&ctx, small_repo.repo_blobstore())
.await?;
let small_repo_cs_a_mpaths = get_file_changes_mpaths(&small_repo_cs_a);
assert_eq!(vec![mp("dest_path_prefix/A")], small_repo_cs_a_mpaths);
let small_repo_cs_b = &synced_bcs_ids[1]
.load(&ctx, &small_repo.get_blobstore())
.load(&ctx, small_repo.repo_blobstore())
.await?;
let small_repo_cs_b_mpaths = get_file_changes_mpaths(&small_repo_cs_b);
assert_eq!(vec![mp("dest_path_prefix/B")], small_repo_cs_b_mpaths);
@ -788,15 +793,16 @@ mod tests {
async fn check_no_pending_commits(
ctx: &CoreContext,
repo: &BlobRepo,
repo: &Repo,
cs_ids: &[ChangesetId],
) -> Result<()> {
let derived_data_types = &repo.get_active_derived_data_types_config().types;
let blob_repo = repo.as_blob_repo();
let derived_data_types = &blob_repo.get_active_derived_data_types_config().types;
for derived_data_type in derived_data_types {
let derived_utils = derived_data_utils(ctx.fb, repo, derived_data_type)?;
let derived_utils = derived_data_utils(ctx.fb, blob_repo, derived_data_type)?;
let pending = derived_utils
.pending(ctx.clone(), repo.clone(), cs_ids.to_vec())
.pending(ctx.clone(), repo.as_blob_repo().clone(), cs_ids.to_vec())
.await?;
assert!(pending.is_empty());
}
@ -815,7 +821,7 @@ mod tests {
let repo_0_commits = create_from_dag(
&ctx,
&repo_0,
repo_0.as_blob_repo(),
r##"
A-B
"##,
@ -827,7 +833,7 @@ mod tests {
let repo_1 = create_repo(1)?;
let repo_1_commits = create_from_dag(
&ctx,
&repo_1,
repo_1.as_blob_repo(),
r##"
C-D
"##,
@ -856,7 +862,7 @@ mod tests {
let small_repo = create_repo(1)?;
let changesets = create_from_dag(
&ctx,
&large_repo,
large_repo.as_blob_repo(),
r##"
A-B
"##,
@ -869,8 +875,8 @@ mod tests {
let mapping = SqlSyncedCommitMapping::with_sqlite_in_memory()?;
let syncers = create_commit_syncers(
&ctx,
small_repo.clone(),
large_repo.clone(),
small_repo.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
live_commit_sync_config,
Arc::new(InProcessLease::new()),
@ -927,17 +933,17 @@ mod tests {
let large_repo = create_repo(0)?;
let small_repo = create_repo(1)?;
let root = CreateCommitContext::new_root(&ctx, &large_repo)
let root = CreateCommitContext::new_root(&ctx, large_repo.as_blob_repo())
.add_file("random_dir/B/file", "text")
.commit()
.await?;
let first_commit = CreateCommitContext::new(&ctx, &large_repo, vec![root])
let first_commit = CreateCommitContext::new(&ctx, large_repo.as_blob_repo(), vec![root])
.add_file("large_repo/justfile", "justtext")
.commit()
.await?;
bookmark(&ctx, &large_repo, "before_mapping_change")
bookmark(&ctx, large_repo.as_blob_repo(), "before_mapping_change")
.set_to(first_commit)
.await?;
@ -945,8 +951,8 @@ mod tests {
let mapping = SqlSyncedCommitMapping::with_sqlite_in_memory()?;
let syncers = create_commit_syncers(
&ctx,
small_repo.clone(),
large_repo.clone(),
small_repo.as_blob_repo().clone(),
large_repo.as_blob_repo().clone(),
mapping.clone(),
live_commit_sync_config,
Arc::new(InProcessLease::new()),
@ -963,7 +969,8 @@ mod tests {
)
.await?;
let wc = list_working_copy_utf8(&ctx, &small_repo, small_repo_cs_ids[0]).await?;
let wc =
list_working_copy_utf8(&ctx, small_repo.as_blob_repo(), small_repo_cs_ids[0]).await?;
assert_eq!(
wc,
hashmap! {
@ -971,7 +978,8 @@ mod tests {
}
);
let wc = list_working_copy_utf8(&ctx, &small_repo, small_repo_cs_ids[1]).await?;
let wc =
list_working_copy_utf8(&ctx, small_repo.as_blob_repo(), small_repo_cs_ids[1]).await?;
assert_eq!(
wc,
hashmap! {
@ -981,10 +989,11 @@ mod tests {
);
// Change mapping
let change_mapping_cs_id = CreateCommitContext::new(&ctx, &large_repo, vec![first_commit])
.commit()
.await?;
bookmark(&ctx, &large_repo, "after_mapping_change")
let change_mapping_cs_id =
CreateCommitContext::new(&ctx, large_repo.as_blob_repo(), vec![first_commit])
.commit()
.await?;
bookmark(&ctx, large_repo.as_blob_repo(), "after_mapping_change")
.set_to(change_mapping_cs_id)
.await?;


@ -63,6 +63,8 @@
> --dest-bookmark master_bookmark
* using repo "repo" repoid RepositoryId(0) (glob)
*Reloading redacted config from configerator* (glob)
*] Initializing CfgrLiveCommitSyncConfig, repo: * (glob)
*] Done initializing CfgrLiveCommitSyncConfig, repo: * (glob)
* The importing bookmark name is: repo_import_new_repo. * (glob)
* The destination bookmark name is: master_bookmark. * (glob)
* Initializing CfgrLiveCommitSyncConfig (glob)
@ -89,6 +91,8 @@
> --dest-bookmark master_bookmark
* using repo "repo" repoid RepositoryId(0) (glob)
*Reloading redacted config from configerator* (glob)
*] Initializing CfgrLiveCommitSyncConfig, repo: * (glob)
*] Done initializing CfgrLiveCommitSyncConfig, repo: * (glob)
* The importing bookmark name is: repo_import_new_repo. * (glob)
* The destination bookmark name is: master_bookmark. * (glob)
* Initializing CfgrLiveCommitSyncConfig (glob)
@ -122,6 +126,8 @@
> --recovery-file-path "$GIT_REPO/recovery_file.json"
* using repo "repo" repoid RepositoryId(0) (glob)
*Reloading redacted config from configerator* (glob)
*] Initializing CfgrLiveCommitSyncConfig, repo: * (glob)
*] Done initializing CfgrLiveCommitSyncConfig, repo: * (glob)
* Initializing CfgrLiveCommitSyncConfig (glob)
* Done initializing CfgrLiveCommitSyncConfig (glob)
* Started importing git commits to Mononoke (glob)