Mirror of https://github.com/facebook/sapling.git — synced 2024-10-11 01:07:15 +03:00, commit 640f272598.
Summary: Migrate the configuration of sql data managers from the old configuration using `sql_ext::SqlConstructors` to the new configuration using `sql_construct::SqlConstruct`. In the old configuration, sharded filenodes were included in the configuration of remote databases, even when that made no sense: ``` [storage.db.remote] db_address = "main_database" sharded_filenodes = { shard_map = "sharded_database", shard_num = 100 } [storage.blobstore.multiplexed] queue_db = { remote = { db_address = "queue_database", sharded_filenodes = { shard_map = "valid_config_but_meaningless", shard_num = 100 } } } ``` This change separates out: * **DatabaseConfig**, which describes a single local or remote connection to a database, used in configuration like the queue database. * **MetadataDatabaseConfig**, which describes the multiple databases used for repo metadata. **MetadataDatabaseConfig** is either: * **Local**, which is a local sqlite database, the same as for **DatabaseConfig**; or * **Remote**, which contains: * `primary`, the database used for main metadata. * `filenodes`, the database used for filenodes, which may be sharded or unsharded. More fields can be added to **RemoteMetadataDatabaseConfig** when we want to add new databases. New configuration looks like: ``` [storage.metadata.remote] primary = { db_address = "main_database" } filenodes = { sharded = { shard_map = "sharded_database", shard_num = 100 } } [storage.blobstore.multiplexed] queue_db = { remote = { db_address = "queue_database" } } ``` The `sql_construct` crate facilitates this by providing the following traits: * **SqlConstruct** defines the basic rules for construction, and allows construction based on a local sqlite database. * **SqlShardedConstruct** defines the basic rules for construction based on sharded databases. * **FbSqlConstruct** and **FbShardedSqlConstruct** allow construction based on unsharded and sharded remote databases on Facebook infra. 
* **SqlConstructFromDatabaseConfig** allows construction based on the database defined in **DatabaseConfig**. * **SqlConstructFromMetadataDatabaseConfig** allows construction based on the appropriate database defined in **MetadataDatabaseConfig**. * **SqlShardableConstructFromMetadataDatabaseConfig** allows construction based on the appropriate shardable databases defined in **MetadataDatabaseConfig**. Sql database managers should implement: * **SqlConstruct** in order to define how to construct an unsharded instance from a single set of `SqlConnections`. * **SqlShardedConstruct**, if they are shardable, in order to define how to construct a sharded instance. * If the database is part of the repository metadata database config, either of: * **SqlConstructFromMetadataDatabaseConfig** if they are not shardable. By default they will use the primary metadata database, but this can be overridden by implementing `remote_database_config`. * **SqlShardableConstructFromMetadataDatabaseConfig** if they are shardable. They must implement `remote_database_config` to specify where to get the sharded or unsharded configuration from. Reviewed By: StanislavGlebik Differential Revision: D20734883 fbshipit-source-id: bb2f4cb3806edad2bbd54a47558a164e3190c5d1
96 lines
2.7 KiB
Rust
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2.
 */

use std::collections::BTreeMap;

use anyhow::{bail, Result};
use fbinit::FacebookInit;
use itertools::Itertools;

use cmdlib::args;

#[fbinit::main]
|
|
fn main(fb: FacebookInit) -> Result<()> {
|
|
let matches = args::MononokeApp::new("Lint Mononoke config files")
|
|
.with_advanced_args_hidden()
|
|
.build()
|
|
.version("0.0.0")
|
|
.about("Check Mononoke server configs for syntax and sanity.")
|
|
.args_from_usage(
|
|
r#"
|
|
-q --quiet 'Only print errors'
|
|
-v --verbose 'Dump content of configs'
|
|
"#,
|
|
)
|
|
.get_matches();
|
|
|
|
let quiet = matches.is_present("quiet");
|
|
let verbose = matches.is_present("verbose");
|
|
|
|
// Most of the work is done here - this validates that the files are present,
|
|
// are correctly formed, and have the right fields (not too many, not too few).
|
|
let configs = match args::read_configs(fb, &matches) {
|
|
Err(err) => {
|
|
eprintln!("Error loading configs: {:#?}", err);
|
|
return Err(err);
|
|
}
|
|
Ok(configs) => configs,
|
|
};
|
|
|
|
if verbose {
|
|
println!("Configs:\n{:#?}", configs)
|
|
}
|
|
|
|
// Keep track of what repo ids we've seen
|
|
let mut repoids = BTreeMap::<_, Vec<_>>::new();
|
|
// Have we seen something suspect?
|
|
let mut bad = false;
|
|
|
|
for (name, config) in &configs.repos {
|
|
let (isbad, locality) = match (
|
|
config.storage_config.metadata.is_local(),
|
|
config.storage_config.blobstore.is_local(),
|
|
) {
|
|
(true, true) => (false, "local"),
|
|
(false, false) => (false, "remote"),
|
|
(true, false) => (true, "MIXED - local DB, remote blobstore"),
|
|
(false, true) => (true, "MIXED - remote DB, local blobstore"),
|
|
};
|
|
|
|
bad |= isbad;
|
|
|
|
repoids
|
|
.entry(config.repoid)
|
|
.and_modify(|names| names.push(name.as_str()))
|
|
.or_insert(vec![name.as_str()]);
|
|
|
|
if isbad || !quiet {
|
|
println!(
|
|
"Repo {}: {} - enabled: {:?} locality: {}",
|
|
config.repoid, name, config.enabled, locality
|
|
);
|
|
}
|
|
}
|
|
|
|
for (id, names) in repoids {
|
|
assert!(!names.is_empty());
|
|
if names.len() > 1 {
|
|
eprintln!(
|
|
"ERROR: Repo Id {} used for repos: {}",
|
|
id,
|
|
names.into_iter().join(", ")
|
|
);
|
|
bad = true;
|
|
}
|
|
}
|
|
|
|
if bad {
|
|
bail!("Anomaly detected")
|
|
} else {
|
|
Ok(())
|
|
}
|
|
}
|