mononoke: Remove lfs rollout tier

Summary:
This config option was used to slowly roll out LFS for a repo.
However, it is no longer used and can therefore be removed.

Reviewed By: StanislavGlebik

Differential Revision: D30511880

fbshipit-source-id: 59fe5925cc203aa609488fdf8ea29e9ff65ee862
This commit is contained in:
Harvey Hunt 2021-08-26 09:21:09 -07:00 committed by Facebook GitHub Bot
parent 0d7889f233
commit 14941cc830
8 changed files with 13 additions and 65 deletions

View File

@ -1,4 +1,4 @@
// @generated SignedSource<<3afdac2f49e2538fa0443f1e63f7dc05>>
// @generated SignedSource<<8d64fdd7f37a6b5cec6bda28f391f7a5>>
// DO NOT EDIT THIS FILE MANUALLY!
// This file is a mechanical copy of the version in the configerator repo. To
// modify it, edit the copy in the configerator repo instead and copy it over by
@ -470,9 +470,7 @@ struct RawLfsParams {
2: optional i32 rollout_percentage,
// Whether to generate lfs blobs in hg sync job
3: optional bool generate_lfs_blob_in_hg_sync_job,
// If a hostname is in this smc tier then it will get
// lfs pointers regardless of rollout_percentage
4: optional string rollout_smc_tier,
// 4: deleted
}
struct RawBundle2ReplayParams {

View File

@ -312,15 +312,10 @@ async fn bootstrap_repositories<'a>(
let repo = Repo::new(&env, name.clone(), config)
.await
.context("Error opening Repo")?;
let repo = MononokeRepo::new(
fb,
logger.clone(),
Arc::new(repo),
&mysql_options,
readonly_storage.clone(),
)
.await
.context("Error opening MononokeRepo")?;
let repo =
MononokeRepo::new(fb, Arc::new(repo), &mysql_options, readonly_storage.clone())
.await
.context("Error opening MononokeRepo")?;
let warmup = if no_cache_warmup {
None

View File

@ -796,7 +796,6 @@ mod test {
threshold = 1000
rollout_percentage = 56
generate_lfs_blob_in_hg_sync_job = true
rollout_smc_tier = "smc_tier"
[bundle2_replay_params]
preserve_raw_bundle2 = true
@ -1040,7 +1039,6 @@ mod test {
threshold: Some(1000),
rollout_percentage: 56,
generate_lfs_blob_in_hg_sync_job: true,
rollout_smc_tier: Some("smc_tier".to_string()),
},
wireproto_logging: WireprotoLoggingConfig {
scribe_category: Some("category".to_string()),

View File

@ -262,7 +262,6 @@ impl Convert for RawLfsParams {
generate_lfs_blob_in_hg_sync_job: self
.generate_lfs_blob_in_hg_sync_job
.unwrap_or(false),
rollout_smc_tier: self.rollout_smc_tier,
})
}
}

View File

@ -733,8 +733,6 @@ pub struct LfsParams {
pub rollout_percentage: u32,
/// Whether hg sync job should generate lfs blobs
pub generate_lfs_blob_in_hg_sync_job: bool,
/// Hosts in this smc tier will receive lfs pointers regardless of rollout_percentage
pub rollout_smc_tier: Option<String>,
}
/// Id used to discriminate different underlying blobstore instances

View File

@ -30,21 +30,17 @@ use repo_blobstore::RepoBlobstore;
use repo_read_write_status::RepoReadWriteFetcher;
use reverse_filler_queue::ReverseFillerQueue;
use reverse_filler_queue::SqlReverseFillerQueue;
use slog::Logger;
use sql_construct::SqlConstructFromMetadataDatabaseConfig;
use sql_ext::facebook::MysqlOptions;
use std::fmt::{self, Debug};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::{
collections::{hash_map::DefaultHasher, HashSet},
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
use streaming_clone::SqlStreamingChunksFetcher;
use warm_bookmarks_cache::WarmBookmarksCache;
#[cfg(fbcode_build)]
mod facebook;
#[derive(Clone)]
pub struct SqlStreamingCloneConfig {
pub blobstore: RepoBlobstore,
@ -58,8 +54,6 @@ pub struct MononokeRepo {
bookmark_attrs: BookmarkAttrs,
streaming_clone: SqlStreamingCloneConfig,
mutable_counters: Arc<dyn MutableCounters>,
// Hostnames that always get lfs pointers.
lfs_rolled_out_hostnames: Arc<RwLock<HashSet<String>>>,
// Reverse filler queue for recording accepted infinitepush bundles
// This field is `None` if we don't want recording to happen
maybe_reverse_filler_queue: Option<Arc<dyn ReverseFillerQueue>>,
@ -68,7 +62,6 @@ pub struct MononokeRepo {
impl MononokeRepo {
pub async fn new(
fb: FacebookInit,
logger: Logger,
repo: Arc<Repo>,
mysql_options: &MysqlOptions,
readonly_storage: ReadOnlyStorage,
@ -117,7 +110,6 @@ impl MononokeRepo {
Self::new_from_parts(
fb,
logger,
repo,
streaming_clone,
mutable_counters,
@ -128,31 +120,11 @@ impl MononokeRepo {
pub async fn new_from_parts(
fb: FacebookInit,
logger: Logger,
repo: Arc<Repo>,
streaming_clone: SqlStreamingCloneConfig,
mutable_counters: Arc<dyn MutableCounters>,
maybe_reverse_filler_queue: Option<Arc<dyn ReverseFillerQueue>>,
) -> Result<Self, Error> {
let lfs_rolled_out_hostnames = Arc::new(RwLock::new(HashSet::new()));
if let Some(rollout_smc_tier) = repo.config().lfs.rollout_smc_tier.as_ref() {
#[cfg(fbcode_build)]
{
crate::facebook::spawn_smc_tier_fetcher(
fb,
&logger,
lfs_rolled_out_hostnames.clone(),
rollout_smc_tier.clone(),
)
.await;
}
#[cfg(not(fbcode_build))]
{
let _ = (fb, logger, rollout_smc_tier);
}
}
// TODO: Update Metaconfig so we just have this in config:
let bookmark_attrs = BookmarkAttrs::new(fb, repo.config().bookmarks.clone()).await?;
@ -161,7 +133,6 @@ impl MononokeRepo {
streaming_clone,
mutable_counters,
maybe_reverse_filler_queue,
lfs_rolled_out_hostnames,
bookmark_attrs,
})
}
@ -213,14 +184,9 @@ impl MononokeRepo {
let allowed = match client_hostname {
Some(client_hostname) => {
let rolled_out_hostnames = self.lfs_rolled_out_hostnames.read().unwrap();
if rolled_out_hostnames.contains(client_hostname) {
true
} else {
let mut hasher = DefaultHasher::new();
client_hostname.hash(&mut hasher);
hasher.finish() % 100 < percentage.into()
}
let mut hasher = DefaultHasher::new();
client_hostname.hash(&mut hasher);
hasher.finish() % 100 < percentage.into()
}
None => {
// Randomize in case source hostname is not set to avoid

View File

@ -415,7 +415,6 @@ async fn run_and_check_if_lfs(
let blob_repo = repo.blob_repo().clone();
let mononoke_repo = MononokeRepo::new_from_parts(
ctx.fb,
ctx.logger().clone(),
Arc::new(repo),
SqlStreamingCloneConfig {
blobstore: blob_repo.get_blobstore(),

View File

@ -232,13 +232,8 @@ pub async fn repo_handlers<'a>(
WarmBookmarksCache"
);
let mononoke_repo = MononokeRepo::new(
ctx.fb,
ctx.logger().clone(),
repo.clone(),
mysql_options,
readonly_storage,
);
let mononoke_repo =
MononokeRepo::new(ctx.fb, repo.clone(), mysql_options, readonly_storage);
let (mononoke_repo, wireproto_logging, backsyncer_dbs) =
futures::future::try_join3(mononoke_repo, wireproto_logging, backsyncer_dbs).await?;