mononoke: split metaconfig crate

Summary:
Currently, if a crate depends on even a single type from metaconfig, then
in order to compile that crate Buck first compiles the whole metaconfig
crate, including all of the config-parsing logic.

This diff splits metaconfig into two crates. The first holds just the types,
for "external consumption" by other crates; the second holds the parsing logic.

This makes builds faster.
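
For example (a sketch based on the import changes in the diff below), a crate
that only needs config types now depends on the lightweight metaconfig_types
crate:

    use metaconfig_types::RepoConfig;    // just the type definitions

instead of pulling in the parsing crate via the old

    use metaconfig::repoconfig::RepoConfig;

while binaries that actually read the config files depend on metaconfig_parser:

    use metaconfig_parser::RepoConfigs;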

Reviewed By: jsgf, lukaspiatkowski

Differential Revision: D13877592

fbshipit-source-id: f353fb2d1737845bf1fa0de515ff8ef131020063
Stanislau Hlebik 2019-01-31 00:31:11 -08:00 committed by Facebook Github Bot
parent 9f46866c75
commit 4d48415149
34 changed files with 411 additions and 379 deletions


@@ -22,7 +22,7 @@ use tokio::runtime::TaskExecutor;
use tracing::TraceContext;
use uuid::Uuid;
use metaconfig::repoconfig::RepoConfigs;
use metaconfig_parser::RepoConfigs;
use errors::ErrorKind;


@@ -23,8 +23,8 @@ use blobrepo::{get_sha256_alias, get_sha256_alias_key, BlobRepo};
use context::CoreContext;
use mercurial_types::manifest::Content;
use mercurial_types::HgManifestId;
use metaconfig::repoconfig::RepoConfig;
use metaconfig::repoconfig::RepoType::{BlobFiles, BlobRemote, BlobRocks, BlobSqlite};
use metaconfig_types::RepoConfig;
use metaconfig_types::RepoType::{BlobFiles, BlobRemote, BlobRocks, BlobSqlite};
use mononoke_types::{FileContents, RepositoryId};
use reachabilityindex::{GenerationNumberBFS, ReachabilityIndex};


@@ -30,7 +30,7 @@ extern crate futures;
extern crate futures_ext;
extern crate http;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_parser;
extern crate metaconfig_types;
extern crate mononoke_api as api;
extern crate mononoke_types;
extern crate panichandler;
@@ -83,7 +84,7 @@ use slog_glog_fmt::{kv_categorizer, kv_defaults, GlogFormat};
use slog_logview::LogViewDrain;
use tokio::runtime::Runtime;
use metaconfig::RepoConfigs;
use metaconfig_parser::RepoConfigs;
use scuba_ext::ScubaSampleBuilder;
use actor::{


@@ -59,7 +59,7 @@ extern crate manifoldblob;
extern crate maplit;
extern crate mercurial;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
#[cfg(test)]
extern crate mononoke_types_mocks;
@@ -116,7 +116,7 @@ pub mod internal {
use failure::{err_msg, Error};
use futures::{future, Future, IntoFuture};
use futures_ext::FutureExt;
use metaconfig::RepoType;
use metaconfig_types::RepoType;
use mononoke_types::RepositoryId;
use scribe_cxx::ScribeCxxClient;
@@ -126,7 +126,7 @@ pub fn open_blobrepo(
repoid: RepositoryId,
myrouter_port: Option<u16>,
) -> impl Future<Item = BlobRepo, Error = Error> {
use metaconfig::repoconfig::RepoType::*;
use metaconfig_types::RepoType::*;
match repotype {
BlobFiles(ref path) => BlobRepo::new_files(logger, &path, repoid)


@@ -50,7 +50,7 @@ use mercurial_types::{
Changeset, Entry, HgBlob, HgBlobNode, HgChangesetId, HgFileEnvelopeMut, HgFileNodeId,
HgManifestEnvelopeMut, HgManifestId, HgNodeHash, HgParents, Manifest, RepoPath, Type,
};
use metaconfig::RemoteBlobstoreArgs;
use metaconfig_types::RemoteBlobstoreArgs;
use mononoke_types::{
hash::Blake2, hash::Sha256, Blob, BlobstoreBytes, BlobstoreValue, BonsaiChangeset, ChangesetId,
ContentId, DateTime, FileChange, FileContents, FileType, Generation, MPath, MPathElement,


@@ -12,7 +12,7 @@ extern crate futures;
extern crate cloned;
extern crate context;
extern crate futures_ext;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
#[macro_use]
extern crate sql;
@@ -25,7 +25,7 @@ use context::CoreContext;
use failure::{format_err, Error};
use futures::{future, Future, IntoFuture};
use futures_ext::{BoxFuture, FutureExt};
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::{DateTime, RepositoryId, Timestamp};
use sql::Connection;
pub use sql_ext::SqlConstructors;


@@ -11,14 +11,15 @@
extern crate blobstore_sync_queue;
extern crate context;
extern crate futures;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate tokio;
use blobstore_sync_queue::{BlobstoreSyncQueue, BlobstoreSyncQueueEntry, SqlBlobstoreSyncQueue,
SqlConstructors};
use blobstore_sync_queue::{
BlobstoreSyncQueue, BlobstoreSyncQueueEntry, SqlBlobstoreSyncQueue, SqlConstructors,
};
use context::CoreContext;
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::{DateTime, RepositoryId};
#[test]


@@ -25,7 +25,7 @@ use tokio::executor::spawn;
use blobstore::Blobstore;
use context::CoreContext;
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::BlobstoreBytes;
const SLOW_REQUEST_THRESHOLD: Duration = Duration::from_secs(5);


@@ -19,7 +19,7 @@ extern crate tokio;
extern crate blobstore;
extern crate blobstore_sync_queue;
extern crate context;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
#[cfg(test)]


@@ -16,7 +16,7 @@ use scuba::ScubaClient;
use blobstore::Blobstore;
use blobstore_sync_queue::{BlobstoreSyncQueue, BlobstoreSyncQueueEntry};
use context::CoreContext;
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::{BlobstoreBytes, DateTime, RepositoryId};
use crate::base::{ErrorKind, MultiplexedBlobstoreBase, MultiplexedBlobstorePutHandler};


@@ -19,7 +19,7 @@ use futures_ext::{BoxFuture, FutureExt};
use blobstore::Blobstore;
use blobstore_sync_queue::{BlobstoreSyncQueue, SqlBlobstoreSyncQueue, SqlConstructors};
use context::CoreContext;
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::{BlobstoreBytes, RepositoryId};
use crate::base::{MultiplexedBlobstoreBase, MultiplexedBlobstorePutHandler};


@@ -57,7 +57,7 @@ extern crate mercurial_bundles;
extern crate mercurial_types;
#[cfg(test)]
extern crate mercurial_types_mocks;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate phases;


@@ -44,11 +44,11 @@ use bonsai_utils::{bonsai_diff, BonsaiDiffResult};
use bookmarks::Bookmark;
use context::CoreContext;
use errors::*;
use futures::{Future, IntoFuture, Stream};
use futures::future::{err, join_all, loop_fn, ok, Loop};
use futures::{Future, IntoFuture, Stream};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{Changeset, HgChangesetId, MPath};
use metaconfig::PushrebaseParams;
use metaconfig_types::PushrebaseParams;
use mononoke_types::{check_case_conflicts, BonsaiChangeset, ChangesetId, DateTime, FileChange};
use revset::RangeNodeStream;


@@ -31,7 +31,7 @@ use mercurial_bundles::{
use mercurial_types::{
HgChangesetId, HgManifestId, HgNodeHash, HgNodeKey, MPath, RepoPath, NULL_HASH,
};
use metaconfig::{repoconfig::RepoReadOnly, PushrebaseParams};
use metaconfig_types::{PushrebaseParams, RepoReadOnly};
use mononoke_types::ChangesetId;
use pushrebase;
use reachabilityindex::LeastCommonAncestorsHint;


@@ -15,7 +15,7 @@ extern crate slog;
extern crate blobrepo;
extern crate context;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate revset;
use std::sync::Arc;
@@ -25,10 +25,10 @@ use bookmarks::Bookmark;
use context::CoreContext;
use futures::{Future, IntoFuture, Stream};
use futures_ext::{spawn_future, BoxFuture, FutureExt};
use mercurial_types::{Changeset, HgChangesetId, MPath, RepoPath};
use mercurial_types::manifest::{Entry, Type};
use mercurial_types::manifest_utils::recursive_entry_stream;
use metaconfig::CacheWarmupParams;
use mercurial_types::{Changeset, HgChangesetId, MPath, RepoPath};
use metaconfig_types::CacheWarmupParams;
use revset::AncestorsNodeStream;
use slog::Logger;


@@ -30,7 +30,8 @@ use slog_glog_fmt::default_drain as glog_drain;
use blobrepo::{open_blobrepo, BlobRepo};
use changesets::{SqlChangesets, SqlConstructors};
use context::CoreContext;
use metaconfig::{ManifoldArgs, MysqlBlobstoreArgs, RemoteBlobstoreArgs, RepoConfigs, RepoType};
use metaconfig_parser::RepoConfigs;
use metaconfig_types::{ManifoldArgs, MysqlBlobstoreArgs, RemoteBlobstoreArgs, RepoType};
use mononoke_types::RepositoryId;
const CACHE_ARGS: &[(&str, &str)] = &[


@@ -34,7 +34,8 @@ extern crate changesets;
extern crate context;
extern crate mercurial;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_parser;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate panichandler;
extern crate scuba_ext;


@@ -30,7 +30,7 @@ extern crate context;
extern crate futures_ext;
extern crate manifoldblob;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate reachabilityindex;
extern crate revset;
@@ -61,7 +61,7 @@ use mercurial_types::{
Changeset, HgChangesetEnvelope, HgChangesetId, HgFileEnvelope, HgManifestEnvelope,
HgManifestId, MPath, MPathElement, Manifest,
};
use metaconfig::RemoteBlobstoreArgs;
use metaconfig_types::RemoteBlobstoreArgs;
use mononoke_types::{
BlobstoreBytes, BlobstoreValue, BonsaiChangeset, ChangesetId, DateTime, FileChange,
FileContents, Generation, RepositoryId,


@@ -15,7 +15,7 @@ use futures::{
};
use futures_ext::FutureExt;
use itertools::Itertools;
use metaconfig::BlobstoreId;
use metaconfig_types::BlobstoreId;
use mononoke_types::{BlobstoreBytes, DateTime, RepositoryId};
use rate_limiter::RateLimiter;
use slog::Logger;


@@ -25,7 +25,7 @@ extern crate itertools;
extern crate lazy_static;
extern crate manifoldblob;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
#[macro_use]
extern crate slog;
@@ -52,7 +52,7 @@ use futures_ext::{spawn_future, BoxFuture, FutureExt};
use glusterblob::Glusterblob;
use healer::RepoHealer;
use manifoldblob::ThriftManifoldBlob;
use metaconfig::{RemoteBlobstoreArgs, RepoConfig, RepoType};
use metaconfig_types::{RemoteBlobstoreArgs, RepoConfig, RepoType};
use mononoke_types::RepositoryId;
use rate_limiter::RateLimiter;
use slog::Logger;


@@ -25,7 +25,8 @@ extern crate futures_ext;
extern crate hooks;
extern crate manifold;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_parser;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate panichandler;
extern crate repo_client;
@@ -56,7 +57,7 @@ use futures_ext::{try_boxfuture, BoxFuture, FutureExt};
use hooks::{ChangesetHookExecutionID, FileHookExecutionID, HookExecution};
use manifold::{ManifoldHttpClient, RequestContext};
use mercurial_types::HgNodeHash;
use metaconfig::RepoConfigs;
use metaconfig_parser::RepoConfigs;
use mononoke_types::RepositoryId;
use slog::{Drain, Level, Logger};
use slog_glog_fmt::{kv_categorizer, kv_defaults, GlogFormat};


@@ -19,7 +19,7 @@ use hooks::{
};
use manifold::{ManifoldHttpClient, PayloadRange};
use mercurial_types::{HgChangesetId, HgNodeHash};
use metaconfig::repoconfig::RepoConfig;
use metaconfig_types::RepoConfig;
use mononoke_types::ChangesetId;
use revset::AncestorsNodeStream;
use slog::Logger;


@@ -8,13 +8,13 @@
#![deny(warnings)]
use super::{Hook, HookChangeset, HookManager};
use super::lua_hook::LuaHook;
use super::{Hook, HookChangeset, HookManager};
use bookmarks::Bookmark;
use facebook::rust_hooks::ensure_valid_email::EnsureValidEmailHook;
use facebook::rust_hooks::verify_integrity::VerifyIntegrityHook;
use failure::Error;
use metaconfig::repoconfig::{HookType, RepoConfig};
use metaconfig_types::{HookType, RepoConfig};
use std::collections::HashSet;
use std::sync::Arc;
@@ -49,19 +49,21 @@ pub fn load_hooks(hook_manager: &mut HookManager, config: RepoConfig) -> Result<
hook_set.insert(name);
}
match config.bookmarks {
Some(bookmarks) => for bookmark_hook in bookmarks {
let bookmark = bookmark_hook.bookmark;
let hooks = bookmark_hook.hooks;
if let Some(hooks) = hooks {
let bm_hook_set: HashSet<String> = hooks.clone().into_iter().collect();
let diff: HashSet<_> = bm_hook_set.difference(&hook_set).collect();
if diff.len() != 0 {
return Err(ErrorKind::NoSuchBookmarkHook(bookmark).into());
} else {
hook_manager.set_hooks_for_bookmark(bookmark, hooks);
}
};
},
Some(bookmarks) => {
for bookmark_hook in bookmarks {
let bookmark = bookmark_hook.bookmark;
let hooks = bookmark_hook.hooks;
if let Some(hooks) = hooks {
let bm_hook_set: HashSet<String> = hooks.clone().into_iter().collect();
let diff: HashSet<_> = bm_hook_set.difference(&hook_set).collect();
if diff.len() != 0 {
return Err(ErrorKind::NoSuchBookmarkHook(bookmark).into());
} else {
hook_manager.set_hooks_for_bookmark(bookmark, hooks);
}
};
}
}
None => (),
}
Ok(())
@@ -75,18 +77,19 @@ pub enum ErrorKind {
#[fail(display = "Hook(s) referenced in bookmark {} do not exist", _0)]
NoSuchBookmarkHook(Bookmark),
#[fail(display = "invalid rust hook: {}", _0)] InvalidRustHook(String),
#[fail(display = "invalid rust hook: {}", _0)]
InvalidRustHook(String),
}
#[cfg(test)]
mod test {
use super::*;
use super::ErrorKind;
use super::super::*;
use super::ErrorKind;
use super::*;
use async_unit;
use context::CoreContext;
use fixtures::many_files_dirs;
use metaconfig::repoconfig::{BookmarkParams, HookParams, RepoReadOnly, RepoType};
use metaconfig_types::{BookmarkParams, HookParams, RepoReadOnly, RepoType};
use slog::{Discard, Drain};
fn default_repo_config() -> RepoConfig {


@@ -41,7 +41,7 @@ extern crate linked_hash_map;
#[macro_use]
extern crate maplit;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
#[cfg(test)]
#[macro_use]
@@ -55,11 +55,11 @@ extern crate context;
extern crate srclient;
extern crate thrift;
pub mod lua_hook;
pub mod rust_hook;
pub mod hook_loader;
pub mod errors;
mod facebook;
pub mod hook_loader;
pub mod lua_hook;
pub mod rust_hook;
use aclchecker::{AclChecker, Identity};
use asyncmemo::{Asyncmemo, Filler, Weight};
@@ -71,9 +71,12 @@ pub use errors::*;
use failure::{Error, FutureFailureErrorExt};
use futures::{failed, finished, Future, IntoFuture, Stream};
use futures_ext::{BoxFuture, FutureExt};
use mercurial_types::{Changeset, HgChangesetId, HgParents, MPath, manifest::get_empty_manifest,
manifest_utils::{self, EntryStatus}};
use metaconfig::repoconfig::{HookBypass, HookConfig, HookManagerParams};
use mercurial_types::{
manifest::get_empty_manifest,
manifest_utils::{self, EntryStatus},
Changeset, HgChangesetId, HgParents, MPath,
};
use metaconfig_types::{HookBypass, HookConfig, HookManagerParams};
use mononoke_types::{FileContents, FileType};
use slog::Logger;
use std::collections::{HashMap, HashSet};


@@ -19,6 +19,7 @@ extern crate futures;
#[macro_use]
#[cfg(test)]
extern crate maplit;
extern crate metaconfig_types;
extern crate serde;
#[macro_use]
extern crate serde_derive;
@@ -30,9 +31,5 @@ extern crate toml;
pub mod errors;
pub mod repoconfig;
pub use repoconfig::{
BlobstoreId, CacheWarmupParams, LfsParams, ManifoldArgs, MysqlBlobstoreArgs, PushrebaseParams,
RemoteBlobstoreArgs, RepoConfig, RepoConfigs, RepoReadOnly, RepoType,
};
pub use errors::{Error, ErrorKind};
pub use repoconfig::RepoConfigs;


@@ -10,11 +10,10 @@
use bookmarks::Bookmark;
use errors::*;
use failure::ResultExt;
use scuba::ScubaValue;
use sql::mysql_async::{
from_value_opt,
prelude::{ConvIr, FromValue},
FromValueError, Value,
use metaconfig_types::{
BlobstoreId, BookmarkParams, CacheWarmupParams, GlusterArgs, HookBypass, HookConfig,
HookManagerParams, HookParams, HookType, LfsParams, ManifoldArgs, MysqlBlobstoreArgs,
PushrebaseParams, RemoteBlobstoreArgs, RepoConfig, RepoReadOnly, RepoType,
};
use std::collections::HashMap;
use std::fs::File;
@@ -24,304 +23,6 @@ use std::path::{Path, PathBuf};
use std::str;
use toml;
/// Arguments for setting up a Manifold blobstore.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ManifoldArgs {
/// Bucket of the backing Manifold blobstore to connect to
pub bucket: String,
/// Prefix to be prepended to all the keys. In prod it should be ""
pub prefix: String,
}
/// Arguments for setting up a Gluster blobstore
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct GlusterArgs {
/// Gluster tier
pub tier: String,
/// Nfs export name
pub export: String,
/// Content prefix path
pub basepath: String,
}
/// Arguments for setting up a Mysql blobstore.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MysqlBlobstoreArgs {
/// Name of the Mysql shardmap to use
pub shardmap: String,
/// Number of shards in the Mysql shardmap
pub shard_num: NonZeroUsize,
}
/// Configuration of a single repository
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct RepoConfig {
/// If false, this repo config is completely ignored.
pub enabled: bool,
/// Defines the type of repository
pub repotype: RepoType,
/// How large a cache to use (in bytes) for RepoGenCache derived information
pub generation_cache_size: usize,
/// Numerical repo id of the repo.
pub repoid: i32,
/// Scuba table for logging performance of operations
pub scuba_table: Option<String>,
/// Parameters of how to warm up the cache
pub cache_warmup: Option<CacheWarmupParams>,
/// Configuration for bookmarks
pub bookmarks: Option<Vec<BookmarkParams>>,
/// Configuration for hooks
pub hooks: Option<Vec<HookParams>>,
/// Pushrebase configuration options
pub pushrebase: PushrebaseParams,
/// LFS configuration options
pub lfs: LfsParams,
/// Scribe category to log all wireproto requests with full arguments.
/// Used for replay on shadow tier.
pub wireproto_scribe_category: Option<String>,
/// What percent of read request verifies that returned content matches the hash
pub hash_validation_percentage: usize,
/// Should this repo reject write attempts
pub readonly: RepoReadOnly,
/// Params for the hook manager
pub hook_manager_params: Option<HookManagerParams>,
/// Skiplist blobstore key (used to make revset faster)
pub skiplist_index_blobstore_key: Option<String>,
}
impl RepoConfig {
/// Returns a db address that is referenced in this config or None if there is none
pub fn get_db_address(&self) -> Option<&str> {
match self.repotype {
RepoType::BlobRemote { ref db_address, .. } => Some(&db_address),
_ => None,
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// Is the repo read-only?
pub enum RepoReadOnly {
/// This repo is read-only and should not accept pushes or other writes
ReadOnly,
/// This repo should accept writes.
ReadWrite,
}
/// Configuration of warming up the Mononoke cache. This warmup happens on startup
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CacheWarmupParams {
/// Bookmark to warmup cache for at the startup. If not set then the cache will be cold.
pub bookmark: Bookmark,
/// Max number to fetch during commit warmup. If not set in the config, then set to a default
/// value.
pub commit_limit: usize,
}
/// Configuration for the hook manager
#[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
pub struct HookManagerParams {
/// Entry limit for the hook manager result cache
pub entrylimit: usize,
/// Weight limit for the hook manager result cache
pub weightlimit: usize,
/// Whether to disable the acl checker or not (intended for testing purposes)
pub disable_acl_checker: bool,
}
impl Default for HookManagerParams {
fn default() -> Self {
Self {
entrylimit: 1024 * 1024,
weightlimit: 100 * 1024 * 1024, // 100Mb
disable_acl_checker: false,
}
}
}
/// Configuration for a bookmark
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BookmarkParams {
/// The bookmark
pub bookmark: Bookmark,
/// The hooks active for the bookmark
pub hooks: Option<Vec<String>>,
}
/// The type of the hook
#[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
pub enum HookType {
/// A hook that runs on the whole changeset
PerChangeset,
/// A hook that runs on a file in a changeset
PerAddedOrModifiedFile,
}
/// Hook bypass
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HookBypass {
/// Bypass that checks that a string is in the commit message
CommitMessage(String),
/// Bypass that checks that a string is in the commit message
Pushvar {
/// Name of the pushvar
name: String,
/// Value of the pushvar
value: String,
},
}
/// Configs that are being passed to the hook during runtime
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct HookConfig {
/// An optional way to bypass a hook
pub bypass: Option<HookBypass>,
/// Map of config to its value. Values here are strings
pub strings: HashMap<String, String>,
/// Map of config to its value. Values here are integers
pub ints: HashMap<String, i64>,
}
/// Configuration for a hook
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct HookParams {
/// The name of the hook
pub name: String,
/// The type of the hook
pub hook_type: HookType,
/// The code of the hook
pub code: Option<String>,
/// Configs that should be passed to hook
pub config: HookConfig,
}
/// Pushrebase configuration options
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct PushrebaseParams {
/// Update dates of rebased commits
pub rewritedates: bool,
/// How far will we go from bookmark to find rebase root
pub recursion_limit: usize,
}
impl Default for PushrebaseParams {
fn default() -> Self {
PushrebaseParams {
rewritedates: true,
recursion_limit: 16384, // this number is fairly arbitrary
}
}
}
/// LFS configuration options
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LfsParams {
/// threshold in bytes; if None, Lfs is disabled
pub threshold: Option<u64>,
}
impl Default for LfsParams {
fn default() -> Self {
LfsParams { threshold: None }
}
}
/// Remote blobstore arguments
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum RemoteBlobstoreArgs {
/// Manifold arguments
Manifold(ManifoldArgs),
/// Gluster blobstore arguments
Gluster(GlusterArgs),
/// Mysql blobstore arguments
Mysql(MysqlBlobstoreArgs),
/// Multiplexed
Multiplexed {
/// Scuba table for tracking performance of blobstore operations
scuba_table: Option<String>,
/// Multiplexed blobstores
blobstores: HashMap<BlobstoreId, RemoteBlobstoreArgs>,
},
}
impl From<ManifoldArgs> for RemoteBlobstoreArgs {
fn from(manifold_args: ManifoldArgs) -> Self {
RemoteBlobstoreArgs::Manifold(manifold_args)
}
}
impl From<GlusterArgs> for RemoteBlobstoreArgs {
fn from(gluster_args: GlusterArgs) -> Self {
RemoteBlobstoreArgs::Gluster(gluster_args)
}
}
/// Id used to discriminate different underlying blobstore instances
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize)]
pub struct BlobstoreId(u64);
impl BlobstoreId {
/// Construct blobstore from integer
pub fn new(id: u64) -> Self {
BlobstoreId(id)
}
}
impl From<BlobstoreId> for Value {
fn from(id: BlobstoreId) -> Self {
Value::UInt(id.0)
}
}
impl ConvIr<BlobstoreId> for BlobstoreId {
fn new(v: Value) -> std::result::Result<Self, FromValueError> {
Ok(BlobstoreId(from_value_opt(v)?))
}
fn commit(self) -> Self {
self
}
fn rollback(self) -> Value {
self.into()
}
}
impl FromValue for BlobstoreId {
type Intermediate = BlobstoreId;
}
impl From<BlobstoreId> for ScubaValue {
fn from(blobstore_id: BlobstoreId) -> Self {
ScubaValue::from(blobstore_id.0 as i64)
}
}
/// Types of repositories supported
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum RepoType {
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
///
///
/// NOTE: this is read-only and for development/testing only. Production uses will break things.
BlobFiles(PathBuf),
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
/// RocksDb database
BlobRocks(PathBuf),
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
/// Sqlite database
BlobSqlite(PathBuf),
/// Blob repository with path pointing to the directory where a server socket is going to be.
BlobRemote {
/// Remote blobstores arguments
blobstores_args: RemoteBlobstoreArgs,
/// Identifies the SQL database to connect to.
db_address: String,
/// If present, the number of shards to spread filenodes across
filenode_shards: Option<usize>,
},
}
/// Configuration of a metaconfig repository
#[derive(Debug, Eq, PartialEq)]
pub struct MetaConfig {}

metaconfig/types/src/lib.rs (new file, 322 additions)

@@ -0,0 +1,322 @@
// Copyright (c) 2004-present, Facebook, Inc.
// All Rights Reserved.
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
//! Contains structures describing configuration of the entire repo. Those structures are
//! deserialized from TOML files from metaconfig repo
#![deny(missing_docs)]
#![deny(warnings)]
use bookmarks::Bookmark;
use scuba::ScubaValue;
use serde_derive::Deserialize;
use sql::mysql_async::{
from_value_opt,
prelude::{ConvIr, FromValue},
FromValueError, Value,
};
use std::collections::HashMap;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::str;
/// Arguments for setting up a Manifold blobstore.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ManifoldArgs {
/// Bucket of the backing Manifold blobstore to connect to
pub bucket: String,
/// Prefix to be prepended to all the keys. In prod it should be ""
pub prefix: String,
}
/// Arguments for setting up a Gluster blobstore
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct GlusterArgs {
/// Gluster tier
pub tier: String,
/// Nfs export name
pub export: String,
/// Content prefix path
pub basepath: String,
}
/// Arguments for setting up a Mysql blobstore.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MysqlBlobstoreArgs {
/// Name of the Mysql shardmap to use
pub shardmap: String,
/// Number of shards in the Mysql shardmap
pub shard_num: NonZeroUsize,
}
/// Configuration of a single repository
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct RepoConfig {
/// If false, this repo config is completely ignored.
pub enabled: bool,
/// Defines the type of repository
pub repotype: RepoType,
/// How large a cache to use (in bytes) for RepoGenCache derived information
pub generation_cache_size: usize,
/// Numerical repo id of the repo.
pub repoid: i32,
/// Scuba table for logging performance of operations
pub scuba_table: Option<String>,
/// Parameters of how to warm up the cache
pub cache_warmup: Option<CacheWarmupParams>,
/// Configuration for bookmarks
pub bookmarks: Option<Vec<BookmarkParams>>,
/// Configuration for hooks
pub hooks: Option<Vec<HookParams>>,
/// Pushrebase configuration options
pub pushrebase: PushrebaseParams,
/// LFS configuration options
pub lfs: LfsParams,
/// Scribe category to log all wireproto requests with full arguments.
/// Used for replay on shadow tier.
pub wireproto_scribe_category: Option<String>,
/// What percent of read request verifies that returned content matches the hash
pub hash_validation_percentage: usize,
/// Should this repo reject write attempts
pub readonly: RepoReadOnly,
/// Params for the hook manager
pub hook_manager_params: Option<HookManagerParams>,
/// Skiplist blobstore key (used to make revset faster)
pub skiplist_index_blobstore_key: Option<String>,
}
impl RepoConfig {
/// Returns a db address that is referenced in this config or None if there is none
pub fn get_db_address(&self) -> Option<&str> {
match self.repotype {
RepoType::BlobRemote { ref db_address, .. } => Some(&db_address),
_ => None,
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// Is the repo read-only?
pub enum RepoReadOnly {
/// This repo is read-only and should not accept pushes or other writes
ReadOnly,
/// This repo should accept writes.
ReadWrite,
}
/// Configuration of warming up the Mononoke cache. This warmup happens on startup
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CacheWarmupParams {
/// Bookmark to warmup cache for at the startup. If not set then the cache will be cold.
pub bookmark: Bookmark,
/// Max number to fetch during commit warmup. If not set in the config, then set to a default
/// value.
pub commit_limit: usize,
}
/// Configuration for the hook manager
#[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
pub struct HookManagerParams {
/// Entry limit for the hook manager result cache
pub entrylimit: usize,
/// Weight limit for the hook manager result cache
pub weightlimit: usize,
/// Whether to disable the acl checker or not (intended for testing purposes)
pub disable_acl_checker: bool,
}
impl Default for HookManagerParams {
fn default() -> Self {
Self {
entrylimit: 1024 * 1024,
weightlimit: 100 * 1024 * 1024, // 100Mb
disable_acl_checker: false,
}
}
}
/// Configuration for a bookmark
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BookmarkParams {
/// The bookmark
pub bookmark: Bookmark,
/// The hooks active for the bookmark
pub hooks: Option<Vec<String>>,
}
/// The type of the hook
#[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
pub enum HookType {
/// A hook that runs on the whole changeset
PerChangeset,
/// A hook that runs on a file in a changeset
PerAddedOrModifiedFile,
}
/// Hook bypass
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HookBypass {
/// Bypass that checks that a string is in the commit message
CommitMessage(String),
/// Bypass that checks that a string is in the commit message
Pushvar {
/// Name of the pushvar
name: String,
/// Value of the pushvar
value: String,
},
}
/// Configs that are being passed to the hook during runtime
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct HookConfig {
/// An optional way to bypass a hook
pub bypass: Option<HookBypass>,
/// Map of config to its value. Values here are strings
pub strings: HashMap<String, String>,
/// Map of config to its value. Values here are integers
pub ints: HashMap<String, i64>,
}
/// Configuration for a hook
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct HookParams {
/// The name of the hook
pub name: String,
/// The type of the hook
pub hook_type: HookType,
/// The code of the hook
pub code: Option<String>,
/// Configs that should be passed to hook
pub config: HookConfig,
}
/// Pushrebase configuration options
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct PushrebaseParams {
/// Update dates of rebased commits
pub rewritedates: bool,
/// How far will we go from bookmark to find rebase root
pub recursion_limit: usize,
}
impl Default for PushrebaseParams {
fn default() -> Self {
PushrebaseParams {
rewritedates: true,
recursion_limit: 16384, // this number is fairly arbitrary
}
}
}
/// LFS configuration options
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LfsParams {
/// threshold in bytes; if None, Lfs is disabled
pub threshold: Option<u64>,
}
impl Default for LfsParams {
fn default() -> Self {
LfsParams { threshold: None }
}
}
/// Remote blobstore arguments
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum RemoteBlobstoreArgs {
/// Manifold arguments
Manifold(ManifoldArgs),
/// Gluster blobstore arguments
Gluster(GlusterArgs),
/// Mysql blobstore arguments
Mysql(MysqlBlobstoreArgs),
/// Multiplexed
Multiplexed {
/// Scuba table for tracking performance of blobstore operations
scuba_table: Option<String>,
/// Multiplexed blobstores
blobstores: HashMap<BlobstoreId, RemoteBlobstoreArgs>,
},
}
impl From<ManifoldArgs> for RemoteBlobstoreArgs {
fn from(manifold_args: ManifoldArgs) -> Self {
RemoteBlobstoreArgs::Manifold(manifold_args)
}
}
impl From<GlusterArgs> for RemoteBlobstoreArgs {
fn from(gluster_args: GlusterArgs) -> Self {
RemoteBlobstoreArgs::Gluster(gluster_args)
}
}
/// Id used to discriminate different underlying blobstore instances
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize)]
pub struct BlobstoreId(u64);
impl BlobstoreId {
/// Construct blobstore from integer
pub fn new(id: u64) -> Self {
BlobstoreId(id)
}
}
impl From<BlobstoreId> for Value {
fn from(id: BlobstoreId) -> Self {
Value::UInt(id.0)
}
}
impl ConvIr<BlobstoreId> for BlobstoreId {
fn new(v: Value) -> std::result::Result<Self, FromValueError> {
Ok(BlobstoreId(from_value_opt(v)?))
}
fn commit(self) -> Self {
self
}
fn rollback(self) -> Value {
self.into()
}
}
impl FromValue for BlobstoreId {
type Intermediate = BlobstoreId;
}
impl From<BlobstoreId> for ScubaValue {
fn from(blobstore_id: BlobstoreId) -> Self {
ScubaValue::from(blobstore_id.0 as i64)
}
}
/// Types of repositories supported
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum RepoType {
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
///
///
/// NOTE: this is read-only and for development/testing only. Production uses will break things.
BlobFiles(PathBuf),
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
/// RocksDb database
BlobRocks(PathBuf),
/// Blob repository with path pointing to on-disk files with data. The files are stored in a
/// Sqlite database
BlobSqlite(PathBuf),
/// Blob repository with path pointing to the directory where a server socket is going to be.
BlobRemote {
/// Remote blobstores arguments
blobstores_args: RemoteBlobstoreArgs,
/// Identifies the SQL database to connect to.
db_address: String,
/// If present, the number of shards to spread filenodes across
filenode_shards: Option<usize>,
},
}


@@ -22,7 +22,7 @@ use mercurial_types::{
NULL_CSID, NULL_HASH,
};
use metaconfig::LfsParams;
use metaconfig_types::LfsParams;
use tracing::Traced;
use errors::*;

View File

@@ -46,7 +46,7 @@ extern crate hooks;
extern crate mercurial;
extern crate mercurial_bundles;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate phases;
extern crate reachabilityindex;


@@ -9,8 +9,8 @@ use blobstore::{Blobstore, PrefixBlobstore};
use client::streaming_clone::SqlStreamingChunksFetcher;
use errors::*;
use hooks::HookManager;
use metaconfig::repoconfig::RepoReadOnly;
use metaconfig::{LfsParams, PushrebaseParams};
use metaconfig_types::RepoReadOnly;
use metaconfig_types::{LfsParams, PushrebaseParams};
use mononoke_types::RepositoryId;
use std::fmt::{self, Debug};
use std::sync::Arc;


@@ -47,7 +47,7 @@ extern crate uuid;
extern crate cache_warmup;
extern crate hgproto;
extern crate hooks;
extern crate metaconfig;
extern crate metaconfig_types;
extern crate mononoke_types;
extern crate phases;
extern crate reachabilityindex;
@@ -68,7 +68,7 @@ use openssl::ssl::SslAcceptor;
use slog::Logger;
use std::sync::atomic::AtomicBool;
use metaconfig::repoconfig::RepoConfig;
use metaconfig_types::RepoConfig;
use connection_acceptor::connection_acceptor;
use errors::*;


@@ -21,7 +21,7 @@ use blobstore::Blobstore;
use cache_warmup::cache_warmup;
use context::CoreContext;
use hooks::{hook_loader::load_hooks, HookManager};
use metaconfig::repoconfig::{RepoConfig, RepoType};
use metaconfig_types::{RepoConfig, RepoType};
use mononoke_types::RepositoryId;
use phases::{CachingHintPhases, HintPhases, Phases, SqlConstructors, SqlPhases};
use reachabilityindex::{deserialize_skiplist_map, LeastCommonAncestorsHint, SkiplistIndex};


@@ -31,7 +31,8 @@ extern crate cachelib;
extern crate cmdlib;
extern crate context;
extern crate mercurial_types;
extern crate metaconfig;
extern crate metaconfig_parser;
extern crate metaconfig_types;
extern crate panichandler;
extern crate ready_state;
extern crate repo_listener;
@@ -50,7 +51,7 @@ use slog_glog_fmt::{kv_categorizer, kv_defaults, GlogFormat};
use slog_logview::LogViewDrain;
use tokio::runtime::Runtime;
use metaconfig::RepoConfigs;
use metaconfig_parser::RepoConfigs;
mod errors {
pub use failure::{Error, Result};