2019-10-11 23:51:17 +03:00
|
|
|
/*
|
|
|
|
* Copyright (c) Facebook, Inc. and its affiliates.
|
|
|
|
*
|
|
|
|
* This software may be used and distributed according to the terms of the
|
2020-02-11 13:42:43 +03:00
|
|
|
* GNU General Public License version 2.
|
2019-10-11 23:51:17 +03:00
|
|
|
*/
|
2019-02-11 17:13:24 +03:00
|
|
|
|
2019-05-09 19:55:04 +03:00
|
|
|
use std::{sync::Arc, time::Instant};
|
2019-02-11 17:13:24 +03:00
|
|
|
|
2019-12-07 03:26:57 +03:00
|
|
|
use anyhow::{bail, format_err, Error};
|
2019-02-11 17:13:24 +03:00
|
|
|
use clap::Arg;
|
|
|
|
use cloned::cloned;
|
2019-09-14 06:16:08 +03:00
|
|
|
use fbinit::FacebookInit;
|
2019-02-11 17:13:24 +03:00
|
|
|
use futures::{future, stream::Stream, Future, IntoFuture};
|
|
|
|
use futures_ext::FutureExt;
|
2020-01-22 03:10:07 +03:00
|
|
|
use futures_preview::compat::Future01CompatExt;
|
2019-02-11 17:13:24 +03:00
|
|
|
use serde_derive::{Deserialize, Serialize};
|
2020-01-14 01:09:38 +03:00
|
|
|
use tokio_compat::runtime;
|
2019-02-11 17:13:24 +03:00
|
|
|
|
|
|
|
use blobstore::Blobstore;
|
|
|
|
use blobstore_sync_queue::{BlobstoreSyncQueue, BlobstoreSyncQueueEntry, SqlBlobstoreSyncQueue};
|
|
|
|
use cmdlib::args;
|
|
|
|
use context::CoreContext;
|
|
|
|
use manifoldblob::{ManifoldRange, ThriftManifoldBlob};
|
2020-02-10 15:35:56 +03:00
|
|
|
use metaconfig_types::{BlobConfig, BlobstoreId, MetadataDBConfig, MultiplexId, StorageConfig};
|
2019-02-11 17:13:24 +03:00
|
|
|
use mononoke_types::{BlobstoreBytes, DateTime, RepositoryId};
|
2019-12-17 14:55:40 +03:00
|
|
|
use sql_ext::{MysqlOptions, SqlConstructors};
|
2019-02-11 17:13:24 +03:00
|
|
|
|
|
|
|
/// Save manifold continuation token each once per `PRESERVE_STATE_RATIO` entries
const PRESERVE_STATE_RATIO: usize = 10_000;
/// PRESERVE_STATE_RATIO should be divisible by CHUNK_SIZE as otherwise progress
/// reporting will be broken
const CHUNK_SIZE: usize = 5000;
/// Value the processed-entry counter starts from (also used to detect the
/// "counter wrapped around a PRESERVE_STATE_RATIO boundary" condition).
const INIT_COUNT_VALUE: usize = 0;
/// Prefix of the Manifold namespace being enumerated; it is stripped from
/// every key before the key is put on the sync queue.
const FLAT_NAMESPACE_PREFIX: &str = "flat/";
|
|
|
|
|
2019-05-09 19:55:04 +03:00
|
|
|
/// Connection parameters of the source Manifold blobstore, extracted from
/// the multiplexed storage config in `parse_args`.
#[derive(Debug)]
struct ManifoldArgs {
    // Manifold bucket name
    bucket: String,
    // Key prefix within the bucket
    prefix: String,
}
|
|
|
|
|
2019-02-11 17:13:24 +03:00
|
|
|
/// Configuration options
#[derive(Debug)]
struct Config {
    // MySQL address of the blobstore sync queue
    db_address: String,
    // Local myrouter port used to reach the database
    myrouter_port: u16,
    // Source Manifold bucket/prefix to enumerate
    manifold_args: ManifoldArgs,
    // Repository whose keys are enumerated
    repo_id: RepositoryId,
    // Blobstore the entries are read from
    src_blobstore_id: BlobstoreId,
    // Blobstore the healer should copy entries to
    dst_blobstore_id: BlobstoreId,
    // Identity of the multiplexed configuration the queue entries belong to
    multiplex_id: MultiplexId,
    // Optional inclusive lower bound for the enumerated key range
    start_key: Option<String>,
    // Optional upper bound for the enumerated key range
    end_key: Option<String>,
    ctx: CoreContext,
    // Manifold key where iteration state is periodically checkpointed,
    // allowing the run to be resumed
    state_key: Option<String>,
    // If true, enumerate but do not write entries to the queue
    dry_run: bool,
    // Start time, used only for progress-rate reporting
    started_at: Instant,
    readonly_storage: bool,
}
|
|
|
|
|
|
|
|
/// State used to resume iteration in case of restart
#[derive(Debug, Clone)]
struct State {
    // Number of entries processed so far in this run (not persisted;
    // reset to INIT_COUNT_VALUE when state is loaded from Manifold)
    count: usize,
    // The full range this run was started with; used to detect whether a
    // persisted state belongs to the same run parameters
    init_range: Arc<ManifoldRange>,
    // The remaining range, advanced as chunks are processed
    current_range: Arc<ManifoldRange>,
}
|
|
|
|
|
|
|
|
impl State {
|
|
|
|
fn from_init(init_range: Arc<ManifoldRange>) -> Self {
|
|
|
|
Self {
|
2019-07-17 19:31:16 +03:00
|
|
|
count: INIT_COUNT_VALUE,
|
2019-02-11 17:13:24 +03:00
|
|
|
current_range: init_range.clone(),
|
|
|
|
init_range,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-17 19:31:16 +03:00
|
|
|
fn with_current_many(self, current_range: Arc<ManifoldRange>, num: usize) -> Self {
|
2019-02-11 17:13:24 +03:00
|
|
|
let State {
|
|
|
|
count, init_range, ..
|
|
|
|
} = self;
|
|
|
|
Self {
|
2019-07-17 19:31:16 +03:00
|
|
|
count: count + num,
|
2019-02-11 17:13:24 +03:00
|
|
|
init_range,
|
|
|
|
current_range,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// JSON-serializable mirror of `State` used for checkpointing to Manifold.
/// Note the `count` field is deliberately absent: it is transient and is
/// reset when a checkpoint is loaded.
#[derive(Serialize, Deserialize)]
struct StateSerde {
    init_range: ManifoldRange,
    current_range: ManifoldRange,
}
|
|
|
|
|
|
|
|
impl From<StateSerde> for State {
|
|
|
|
fn from(state: StateSerde) -> Self {
|
|
|
|
Self {
|
2019-07-17 19:31:16 +03:00
|
|
|
count: INIT_COUNT_VALUE,
|
2019-02-11 17:13:24 +03:00
|
|
|
init_range: Arc::new(state.init_range),
|
|
|
|
current_range: Arc::new(state.current_range),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<'a> From<&'a State> for StateSerde {
    /// Produce a serializable snapshot of the state by cloning the range
    /// values out of their `Arc`s (the counter is intentionally dropped —
    /// see `StateSerde`).
    fn from(state: &'a State) -> Self {
        Self {
            init_range: state.init_range.as_ref().clone(),
            current_range: state.current_range.as_ref().clone(),
        }
    }
}
|
|
|
|
|
2019-09-14 06:16:08 +03:00
|
|
|
fn parse_args(fb: FacebookInit) -> Result<Config, Error> {
|
2019-10-28 17:57:34 +03:00
|
|
|
let app = args::MononokeApp::new("populate healer queue")
|
|
|
|
.build()
|
|
|
|
.version("0.0.0")
|
|
|
|
.about("Populate blobstore queue from existing manifold bucket")
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("storage-id")
|
|
|
|
.long("storage-id")
|
|
|
|
.short("S")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("STORAGEID")
|
|
|
|
.help("Storage identifier"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("source-blobstore-id")
|
|
|
|
.long("source-blobstore-id")
|
|
|
|
.short("s")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("SOURCE")
|
|
|
|
.help("source blobstore identifier"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("destination-blobstore-id")
|
|
|
|
.long("destination-blobstore-id")
|
|
|
|
.short("d")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("DESTINATION")
|
|
|
|
.help("destination blobstore identifier"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("start-key")
|
|
|
|
.long("start-key")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("START_KEY")
|
|
|
|
.help("if specified iteration will start from this key"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("end-key")
|
|
|
|
.long("end-key")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("END_KEY")
|
|
|
|
.help("if specified iteration will end at this key"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("resume-state-key")
|
|
|
|
.long("resume-state-key")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("STATE_MANIFOLD_KEY")
|
|
|
|
.help(
|
|
|
|
"manifold key which contains current iteration state and can be used to resume",
|
|
|
|
),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::with_name("dry-run")
|
|
|
|
.long("dry-run")
|
|
|
|
.help("do not add entries to a queue"),
|
|
|
|
);
|
2019-02-11 17:13:24 +03:00
|
|
|
|
|
|
|
let matches = app.get_matches();
|
2019-12-16 19:12:13 +03:00
|
|
|
let repo_id = args::get_repo_id(fb, &matches)?;
|
2019-10-13 10:44:52 +03:00
|
|
|
let logger = args::init_logging(fb, &matches);
|
2019-09-14 06:16:08 +03:00
|
|
|
let ctx = CoreContext::new_with_logger(fb, logger.clone());
|
2019-02-11 17:13:24 +03:00
|
|
|
|
2019-08-05 13:46:26 +03:00
|
|
|
let storage_id = matches
|
|
|
|
.value_of("storage-id")
|
2019-12-06 23:51:47 +03:00
|
|
|
.ok_or(Error::msg("`storage-id` argument required"))?;
|
2019-08-05 13:46:26 +03:00
|
|
|
|
2019-12-16 19:12:13 +03:00
|
|
|
let storage_config = args::read_storage_configs(fb, &matches)?
|
2019-08-05 13:46:26 +03:00
|
|
|
.remove(storage_id)
|
2019-12-06 23:51:47 +03:00
|
|
|
.ok_or(Error::msg("Unknown `storage-id`"))?;
|
2019-02-11 17:13:24 +03:00
|
|
|
|
|
|
|
let src_blobstore_id = matches
|
|
|
|
.value_of("source-blobstore-id")
|
2019-12-06 23:51:47 +03:00
|
|
|
.ok_or(Error::msg("`source-blobstore-id` argument is required"))
|
2019-02-11 17:13:24 +03:00
|
|
|
.and_then(|src| src.parse::<u64>().map_err(Error::from))
|
|
|
|
.map(BlobstoreId::new)?;
|
|
|
|
let dst_blobstore_id = matches
|
|
|
|
.value_of("destination-blobstore-id")
|
2019-12-06 23:51:47 +03:00
|
|
|
.ok_or(Error::msg(
|
|
|
|
"`destination-blobstore-id` argument is required",
|
|
|
|
))
|
2019-02-11 17:13:24 +03:00
|
|
|
.and_then(|dst| dst.parse::<u64>().map_err(Error::from))
|
|
|
|
.map(BlobstoreId::new)?;
|
|
|
|
if src_blobstore_id == dst_blobstore_id {
|
2019-12-06 23:51:47 +03:00
|
|
|
bail!("`source-blobstore-id` and `destination-blobstore-id` can not be equal");
|
2019-02-11 17:13:24 +03:00
|
|
|
}
|
|
|
|
|
2020-02-10 15:35:56 +03:00
|
|
|
let (blobstores, multiplex_id, db_address) = match storage_config {
|
2019-05-09 19:55:04 +03:00
|
|
|
StorageConfig {
|
|
|
|
dbconfig: MetadataDBConfig::Mysql { db_address, .. },
|
2020-02-10 15:35:56 +03:00
|
|
|
blobstore:
|
|
|
|
BlobConfig::Multiplexed {
|
|
|
|
blobstores,
|
|
|
|
multiplex_id,
|
|
|
|
..
|
|
|
|
},
|
|
|
|
} => (blobstores, multiplex_id, db_address),
|
2019-05-09 19:55:04 +03:00
|
|
|
storage => return Err(format_err!("unsupported storage: {:?}", storage)),
|
2019-02-11 17:13:24 +03:00
|
|
|
};
|
2019-05-09 19:55:04 +03:00
|
|
|
let manifold_args = blobstores
|
|
|
|
.iter()
|
|
|
|
.filter(|(id, _)| src_blobstore_id == *id)
|
|
|
|
.map(|(_, args)| args)
|
|
|
|
.next()
|
|
|
|
.ok_or(format_err!(
|
|
|
|
"failed to find source blobstore id: {:?}",
|
|
|
|
src_blobstore_id,
|
|
|
|
))
|
|
|
|
.and_then(|args| match args {
|
|
|
|
BlobConfig::Manifold { bucket, prefix } => Ok(ManifoldArgs {
|
|
|
|
bucket: bucket.clone(),
|
|
|
|
prefix: prefix.clone(),
|
2019-02-11 17:13:24 +03:00
|
|
|
}),
|
2019-12-06 23:51:47 +03:00
|
|
|
_ => bail!("source blobstore must be a manifold"),
|
2019-05-09 19:55:04 +03:00
|
|
|
})?;
|
2019-02-11 17:13:24 +03:00
|
|
|
|
2019-12-17 14:55:40 +03:00
|
|
|
let myrouter_port = args::parse_mysql_options(&matches)
|
|
|
|
.myrouter_port
|
|
|
|
.ok_or(Error::msg("myrouter-port must be specified"))?;
|
2019-02-11 17:13:24 +03:00
|
|
|
|
2019-11-13 16:29:49 +03:00
|
|
|
let readonly_storage = args::parse_readonly_storage(&matches);
|
|
|
|
|
2019-02-11 17:13:24 +03:00
|
|
|
Ok(Config {
|
|
|
|
repo_id,
|
|
|
|
db_address: db_address.clone(),
|
|
|
|
myrouter_port,
|
|
|
|
manifold_args,
|
|
|
|
src_blobstore_id,
|
|
|
|
dst_blobstore_id,
|
2020-02-10 15:35:56 +03:00
|
|
|
multiplex_id,
|
2019-02-11 17:13:24 +03:00
|
|
|
start_key: matches.value_of("start-key").map(String::from),
|
|
|
|
end_key: matches.value_of("end-key").map(String::from),
|
|
|
|
state_key: matches.value_of("resume-state-key").map(String::from),
|
mononoke: connect stdlog and slog
Summary:
This wires up the stdlog crate with our slog output. The upshot is that we can
now run binaries with `RUST_LOG` set and expect it to work.
This is nice because many crates use stdlog (e.g. Tokio, Hyper), so this is
convenient to get access to their logging. For example, if you run with
`RUST_LOG=gotham=info,hyper=debug`, then you get debug logs from Hyper and info
logs from Gotham.
The way this works is by registering a stdlog logger that uses the env_logger's
filter (the one that "invented" `RUST_LOG`) to filter logs, and routes them to
slog if they pass. Note that the slog Logger used there doesn't do any
filtering, since we already do it before sending logs there.
One thing to keep in mind is that we should only register the stdlog global
logger once. I've renamed `get_logger` to `init_logging` to make this clearer.
This behavior is similar to what we do with `init_cachelib`. I've updated
callsites accordingly.
Note that we explicitly tell the stdlog framework to ignore anything that we
won't consider for logging. If you don't set `RUST_LOG`, then the default
logging level is `Error`, which means that anything below error that is sent to
stdlog won't even get to out filtering logic (the stdlog macros for logging
check for the global level before actually logging), so this is cheap unless
you do set `RUST_LOG`.
As part of this, I've also updated all our binaries (and therefore, tests) to
use glog for logging. We had been meaning to do this, and it was convenient to
do it here because the other logger factory we were using didn't make it easy
to get a Drain without putting it a Logger.
Reviewed By: ahornby
Differential Revision: D17314200
fbshipit-source-id: 19b5e8edc3bbe7ba02ccec4f1852dc3587373fff
2019-09-12 14:11:54 +03:00
|
|
|
ctx,
|
2019-02-11 17:13:24 +03:00
|
|
|
dry_run: matches.is_present("dry-run"),
|
|
|
|
started_at: Instant::now(),
|
2019-11-13 16:29:49 +03:00
|
|
|
readonly_storage: readonly_storage.0,
|
2019-02-11 17:13:24 +03:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Determine the iteration state to start from: either a checkpoint loaded
/// from Manifold (when `config.state_key` is set and a compatible state is
/// found) or a fresh state built from the configured start/end keys.
fn get_resume_state(
    manifold: &ThriftManifoldBlob,
    config: &Config,
) -> impl Future<Item = State, Error = Error> {
    // Try to load a previously persisted checkpoint. Any failure to
    // deserialize the blob is treated as "no checkpoint" (`.ok()`), so a
    // corrupt state key falls back to a fresh start rather than erroring.
    let resume_state = match &config.state_key {
        Some(state_key) => manifold
            .get(config.ctx.clone(), state_key.clone())
            .map(|data| {
                data.and_then(|data| serde_json::from_slice::<StateSerde>(&*data.into_bytes()).ok())
                    .map(State::from)
            })
            .left_future(),
        None => future::ok(None).right_future(),
    };

    // Build the fresh initial state covering [start, end) within this
    // repo's section of the flat/ namespace. "\x7f" is the default end
    // bound: it sorts after any printable-ASCII key suffix.
    let init_state = {
        let start = format!(
            "{}repo{:04}.{}",
            FLAT_NAMESPACE_PREFIX,
            config.repo_id.id(),
            config.start_key.clone().unwrap_or_else(|| "".to_string())
        );
        let end = format!(
            "{}repo{:04}.{}",
            FLAT_NAMESPACE_PREFIX,
            config.repo_id.id(),
            config.end_key.clone().unwrap_or_else(|| "\x7f".to_string()),
        );
        State::from_init(Arc::new(ManifoldRange::from(start..end)))
    };

    resume_state.map(move |resume_state| match resume_state {
        None => init_state,
        // if initial_state mismatch, start from provided initial state
        Some(ref resume_state) if resume_state.init_range != init_state.init_range => init_state,
        Some(resume_state) => resume_state,
    })
}
|
|
|
|
|
|
|
|
/// Persist the iteration state to Manifold so the run can be resumed.
///
/// Only writes a checkpoint when a `state_key` is configured AND the
/// processed-entry counter has just crossed a `PRESERVE_STATE_RATIO`
/// boundary (CHUNK_SIZE divides PRESERVE_STATE_RATIO, so the modulo check
/// fires exactly at those boundaries); otherwise the state is passed
/// through unchanged. Returns the (possibly just-persisted) state.
fn put_resume_state(
    manifold: &ThriftManifoldBlob,
    config: &Config,
    state: State,
) -> impl Future<Item = State, Error = Error> {
    match &config.state_key {
        Some(state_key) if state.count % PRESERVE_STATE_RATIO == INIT_COUNT_VALUE => {
            let started_at = config.started_at;
            let ctx = config.ctx.clone();
            cloned!(state_key, manifold);
            // Serialize the checkpoint as JSON and write it to Manifold.
            serde_json::to_vec(&StateSerde::from(&state))
                .map(|state_json| BlobstoreBytes::from_bytes(state_json))
                .map_err(Error::from)
                .into_future()
                .and_then(move |state_data| manifold.put(ctx, state_key, state_data))
                .map(move |_| {
                    // Progress reporting is interactive-only: skip it when
                    // stderr is not a terminal (e.g. when logs are captured).
                    if termion::is_tty(&std::io::stderr()) {
                        let elapsed = started_at.elapsed().as_secs() as f64;
                        let count = state.count as f64;
                        eprintln!(
                            "Keys processed: {:.0} speed: {:.2}/s",
                            count,
                            count / elapsed
                        );
                    }
                    state
                })
                .left_future()
        }
        // No state key configured, or not at a checkpoint boundary: no-op.
        _ => future::ok(state).right_future(),
    }
}
|
|
|
|
|
|
|
|
/// Enumerate the source Manifold bucket and add one sync-queue entry per
/// key, so that the blobstore healer will copy each key to the other
/// blobstores of the multiplex.
///
/// Resumes from a persisted checkpoint when available, processes keys in
/// `CHUNK_SIZE` batches, and periodically re-persists the checkpoint via
/// `put_resume_state`. Resolves to the final iteration `State`.
fn populate_healer_queue(
    manifold: ThriftManifoldBlob,
    queue: Arc<dyn BlobstoreSyncQueue>,
    config: Arc<Config>,
) -> impl Future<Item = State, Error = Error> {
    get_resume_state(&manifold, &config).and_then(move |state| {
        manifold
            .enumerate((*state.current_range).clone())
            .and_then(|mut entry| {
                // We are enumerating Manifold's flat/ namespace
                // and all the keys contain the flat/ prefix, which
                // we need to strip
                if !entry.key.starts_with(FLAT_NAMESPACE_PREFIX) {
                    future::err(format_err!(
                        "Key {} is expected to start with {}, but does not",
                        entry.key,
                        FLAT_NAMESPACE_PREFIX
                    ))
                } else {
                    // safe to unwrap here, since we know exactly how the string starts
                    entry.key = entry.key.get(FLAT_NAMESPACE_PREFIX.len()..).unwrap().into();
                    future::ok(entry)
                }
            })
            .chunks(CHUNK_SIZE)
            .fold(state, move |state, entries| {
                // Advance the resume position to the first entry's range and
                // bump the counter by the chunk size.
                let range = entries[0].range.clone();
                let state = state.with_current_many(range, entries.len());
                let src_blobstore_id = config.src_blobstore_id;
                let multiplex_id = config.multiplex_id;

                // In dry-run mode, skip the queue write but still advance
                // (and checkpoint) the iteration state.
                let enqueue = if config.dry_run {
                    future::ok(()).left_future()
                } else {
                    let iterator_box = Box::new(entries.into_iter().map(move |entry| {
                        BlobstoreSyncQueueEntry::new(
                            entry.key,
                            src_blobstore_id,
                            multiplex_id,
                            DateTime::now(),
                        )
                    }));
                    queue
                        .add_many(config.ctx.clone(), iterator_box)
                        .right_future()
                };

                enqueue.and_then({
                    cloned!(manifold, config);
                    move |_| put_resume_state(&manifold, &config, state)
                })
            })
    })
}
|
|
|
|
|
2019-09-14 06:16:08 +03:00
|
|
|
#[fbinit::main]
fn main(fb: FacebookInit) -> Result<(), Error> {
    // Parse CLI arguments + storage config into the runtime configuration.
    let config = Arc::new(parse_args(fb)?);
    // Source blobstore being enumerated.
    let manifold =
        ThriftManifoldBlob::new(fb, config.manifold_args.bucket.clone(), None)?.into_inner();
    // Sync queue the healer consumes, reached over myrouter.
    let queue: Arc<dyn BlobstoreSyncQueue> = Arc::new(SqlBlobstoreSyncQueue::with_myrouter(
        config.db_address.clone(),
        config.myrouter_port,
        MysqlOptions::default().myrouter_read_service_type(),
        config.readonly_storage,
    ));
    // tokio-compat runtime: drives the futures-0.1 pipeline via a
    // std-futures compat wrapper.
    let mut runtime = runtime::Runtime::new()?;
    runtime.block_on_std(populate_healer_queue(manifold, queue, config).compat())?;
    Ok(())
}
|