mononoke: bump the timeouts for getpack

Summary:
There were a few instances of timed-out getpack requests on ovrsource.
Example: https://fburl.com/sandcastle/yylr1w3v
Let's bump the getpack timeout (from 90 minutes to 5 hours) and make all of the repo client timeouts tunable, so they can be adjusted without a code change.

Reviewed By: krallin

Differential Revision: D23900374

fbshipit-source-id: 3ee6e2d4f6b6ed12cd0c1516c686a03c87fa7cb4
Stanislau Hlebik authored on 2020-09-24 06:21:51 -07:00; committed by Facebook GitHub Bot
parent 7026030bb2, commit f6d3fc1fd7
3 changed files with 66 additions and 25 deletions


@@ -201,19 +201,45 @@ where
 }

 lazy_static! {
-    static ref TIMEOUT: Duration = Duration::from_secs(15 * 60);
-    // Bookmarks taking a long time is unexpected and bad - limit them specially
-    static ref BOOKMARKS_TIMEOUT: Duration = Duration::from_secs(3 * 60);
-    // getbundle requests can be very slow for huge commits
-    static ref GETBUNDLE_TIMEOUT: Duration = Duration::from_secs(30 * 60);
-    // clone requests can be rather long. Let's bump the timeout
-    static ref CLONE_TIMEOUT: Duration = Duration::from_secs(4 * 60 * 60);
-    // getfiles requests can be rather long. Let's bump the timeout
-    static ref GETPACK_TIMEOUT: Duration = Duration::from_secs(90 * 60);
     static ref LOAD_LIMIT_TIMEFRAME: Duration = Duration::from_secs(1);
     static ref SLOW_REQUEST_THRESHOLD: Duration = Duration::from_secs(1);
 }

+fn clone_timeout() -> Duration {
+    let timeout = tunables().get_repo_client_clone_timeout_secs();
+    if timeout > 0 {
+        Duration::from_secs(timeout as u64)
+    } else {
+        Duration::from_secs(4 * 60 * 60)
+    }
+}
+
+fn default_timeout() -> Duration {
+    let timeout = tunables().get_repo_client_default_timeout_secs();
+    if timeout > 0 {
+        Duration::from_secs(timeout as u64)
+    } else {
+        Duration::from_secs(15 * 60)
+    }
+}
+
+fn getbundle_timeout() -> Duration {
+    let timeout = tunables().get_repo_client_getbundle_timeout_secs();
+    if timeout > 0 {
+        Duration::from_secs(timeout as u64)
+    } else {
+        Duration::from_secs(30 * 60)
+    }
+}
+
+fn getpack_timeout() -> Duration {
+    let timeout = tunables().get_repo_client_getpack_timeout_secs();
+    if timeout > 0 {
+        Duration::from_secs(timeout as u64)
+    } else {
+        Duration::from_secs(5 * 60 * 60)
+    }
+}
+
 pub(crate) fn process_timeout_error(err: TimeoutError<Error>) -> Error {
     match err.into_inner() {
         Some(err) => err,
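The four helpers above repeat one tunable-or-default shape. A minimal sketch of how that shape could be factored out, using only items this file already imports (tunables(), Duration); the name timeout_or_default is hypothetical and not part of this commit:

    use std::time::Duration;
    use tunables::tunables;

    // Hypothetical helper: a tunable set to a positive value wins; zero
    // (the unset default) or a negative value falls back to the hardcoded default.
    fn timeout_or_default(tunable_secs: i64, default_secs: u64) -> Duration {
        if tunable_secs > 0 {
            Duration::from_secs(tunable_secs as u64)
        } else {
            Duration::from_secs(default_secs)
        }
    }

    fn getpack_timeout() -> Duration {
        // 5 * 60 * 60 is the new 5-hour getpack fallback from this diff.
        timeout_or_default(tunables().get_repo_client_getpack_timeout_secs(), 5 * 60 * 60)
    }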
@@ -738,7 +764,7 @@ impl RepoClient {
         };
         let s = s
             .buffered_weight_limited(params)
-            .whole_stream_timeout(*GETPACK_TIMEOUT)
+            .whole_stream_timeout(getpack_timeout())
             .map_err(process_stream_timeout_error)
             .map({
                 cloned!(ctx);
@@ -1068,7 +1094,7 @@ impl HgCommands for RepoClient {
             }
         })
         .collect()
-        .timeout(*TIMEOUT)
+        .timeout(default_timeout())
         .map_err(process_timeout_error)
         .traced(self.session.trace(), ops::BETWEEN, trace_args!())
         .timed(move |stats, _| {
@@ -1104,7 +1130,7 @@ impl HgCommands for RepoClient {
         }

         future_old::ok(hostname)
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::CLIENTTELEMETRY, trace_args!())
             .timed(move |stats, _| {
@@ -1123,7 +1149,7 @@ impl HgCommands for RepoClient {
         // that points to it.
         self.get_publishing_bookmarks_maybe_stale(ctx)
             .map(|map| map.into_iter().map(|(_, hg_cs_id)| hg_cs_id).collect())
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::HEADS, trace_args!())
             .timed(move |stats, _| {
@@ -1286,7 +1312,7 @@ impl HgCommands for RepoClient {
         }
         .boxed()
         .compat()
-        .timeout(*TIMEOUT)
+        .timeout(default_timeout())
        .map_err(process_timeout_error)
        .traced(self.session.trace(), ops::LOOKUP, trace_args!())
        .timed(move |stats, _| {
@@ -1335,7 +1361,7 @@ impl HgCommands for RepoClient {
                     .map(move |node| found_hg_changesets.contains(&node))
                     .collect::<Vec<_>>()
             })
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::KNOWN, trace_args!())
             .timed(move |stats, known_nodes| {
@@ -1368,7 +1394,7 @@ impl HgCommands for RepoClient {
                     .map(move |node| hg_bcs_mapping.contains_key(&node))
                     .collect::<Vec<_>>()
             })
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::KNOWNNODES, trace_args!())
             .timed(move |stats, known_nodes| {
@@ -1399,7 +1425,7 @@ impl HgCommands for RepoClient {

         let s = self
             .create_bundle(ctx.clone(), args)
-            .whole_stream_timeout(*GETBUNDLE_TIMEOUT)
+            .whole_stream_timeout(getbundle_timeout())
             .map_err(process_stream_timeout_error)
             .traced(self.session.trace(), ops::GETBUNDLE, trace_args!())
             .timed(move |stats, _| {
@@ -1428,7 +1454,7 @@ impl HgCommands for RepoClient {
         res.insert("capabilities".to_string(), caps);

         future_old::ok(res)
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::HELLO, trace_args!())
             .timed(move |stats, _| {
@@ -1518,7 +1544,7 @@ impl HgCommands for RepoClient {
         stream::futures_unordered(queries)
             .concat2()
             .map(|bookmarks| bookmarks.into_iter().collect())
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .traced(self.session.trace(), ops::LISTKEYS, trace_args!())
             .timed(move |stats, _| {
@@ -1727,7 +1753,7 @@ impl HgCommands for RepoClient {
             })
             .map(bytes_ext::copy_from_new)
             .from_err()
-            .timeout(*TIMEOUT)
+            .timeout(default_timeout())
             .map_err(process_timeout_error)
             .inspect(move |_| STATS::push_success.add_value(1, (reponame,)))
             .traced(&trace, ops::UNBUNDLE, trace_args!())
@@ -1774,7 +1800,7 @@ impl HgCommands for RepoClient {

         let s = self
             .gettreepack_untimed(ctx.clone(), params)
-            .whole_stream_timeout(*TIMEOUT)
+            .whole_stream_timeout(default_timeout())
             .map_err(process_stream_timeout_error)
             .traced(self.session.trace(), ops::GETTREEPACK, trace_args!())
             .inspect({
@@ -1929,7 +1955,7 @@ impl HgCommands for RepoClient {
                 }
             })
             .flatten_stream()
-            .whole_stream_timeout(*CLONE_TIMEOUT)
+            .whole_stream_timeout(clone_timeout())
             .map(bytes_ext::copy_from_new)
             .map_err(process_stream_timeout_error)
             .timed({
@@ -1997,7 +2023,7 @@ impl HgCommands for RepoClient {
                 }
             })
             .buffered(100)
-            .whole_stream_timeout(*TIMEOUT)
+            .whole_stream_timeout(default_timeout())
             .map_err(process_stream_timeout_error)
             .inspect({
                 cloned!(ctx);
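With these hunks, every handler in this file computes its timeout per request (default_timeout(), getpack_timeout(), clone_timeout(), getbundle_timeout()) instead of dereferencing a lazy_static fixed at process start, so an updated tunable takes effect on the next request without a restart. A minimal sketch of that per-request pattern in plain modern tokio; the real code uses tokio_old's FutureExt::timeout and the whole_stream_timeout stream combinator seen above:

    use std::time::Duration;
    use tokio::time::timeout;

    // Re-read the configurable timeout on every request and apply it to
    // the request future; on expiry, surface a timeout error instead.
    async fn handle_request() -> Result<String, &'static str> {
        let per_request_timeout = Duration::from_secs(15 * 60); // stand-in for default_timeout()
        let work = async { Ok::<_, &'static str>("response".to_owned()) };
        match timeout(per_request_timeout, work).await {
            Ok(res) => res,
            Err(_elapsed) => Err("request timed out"),
        }
    }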


@@ -5,7 +5,7 @@
  * GNU General Public License version 2.
  */

-use super::{process_timeout_error, BOOKMARKS_TIMEOUT};
+use super::process_timeout_error;
 use anyhow::Error;
 use blobrepo::BlobRepo;
 use blobrepo_hg::{to_hg_bookmark_stream, BlobRepoHg};
@@ -17,6 +17,7 @@ use mercurial_types::HgChangesetId;
 use scuba_ext::ScubaSampleBuilderExt;
 use std::collections::HashMap;
 use std::sync::{Arc, Mutex};
+use std::time::Duration;
 use tokio_old::util::FutureExt as TokioFutureExt;
 use tunables::tunables;
 use warm_bookmarks_cache::WarmBookmarksCache;
@@ -29,6 +30,15 @@ pub struct SessionBookmarkCache {
     maybe_warm_bookmarks_cache: Option<Arc<WarmBookmarksCache>>,
 }

+fn bookmarks_timeout() -> Duration {
+    let timeout = tunables().get_repo_client_bookmarks_timeout_secs();
+    if timeout > 0 {
+        Duration::from_secs(timeout as u64)
+    } else {
+        Duration::from_secs(3 * 60)
+    }
+}
+
 impl SessionBookmarkCache {
     pub fn new(
         repo: BlobRepo,
@@ -133,7 +143,7 @@ impl SessionBookmarkCache {
                 let ret: Result<_, Error> = Ok(map);
                 ret
             })
-            .timeout(*BOOKMARKS_TIMEOUT)
+            .timeout(bookmarks_timeout())
             .map_err(process_timeout_error)
     }
 }
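bookmarks_timeout() keeps the short 3-minute fallback (the removed comment in the first file called slow bookmark fetches unexpected and bad) while making it overridable through repo_client_bookmarks_timeout_secs. The fallback rule is the same in all five helpers; a small test-style sketch of its edge cases, with the helper body copied from the diff:

    use std::time::Duration;

    // Same fallback rule as bookmarks_timeout(): positive tunable values
    // win; zero (unset) or negative values select the hardcoded default.
    fn bookmarks_timeout_from(tunable_secs: i64) -> Duration {
        if tunable_secs > 0 {
            Duration::from_secs(tunable_secs as u64)
        } else {
            Duration::from_secs(3 * 60)
        }
    }

    #[test]
    fn fallback_rule() {
        assert_eq!(bookmarks_timeout_from(0), Duration::from_secs(180));
        assert_eq!(bookmarks_timeout_from(-1), Duration::from_secs(180));
        assert_eq!(bookmarks_timeout_from(600), Duration::from_secs(600));
    }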


@@ -90,6 +90,11 @@ pub struct MononokeTunables {
     getbundle_use_low_gen_optimization: AtomicBool,
     getbundle_low_gen_num_threshold: AtomicI64,
     getbundle_low_gen_optimization_max_traversal_limit: AtomicI64,
+    repo_client_bookmarks_timeout_secs: AtomicI64,
+    repo_client_clone_timeout_secs: AtomicI64,
+    repo_client_default_timeout_secs: AtomicI64,
+    repo_client_getbundle_timeout_secs: AtomicI64,
+    repo_client_getpack_timeout_secs: AtomicI64,
 }

 fn log_tunables(tunables: &TunablesStruct) -> String {
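Each new field is a plain AtomicI64, read by the repo_client helpers through generated get_<field>() accessors (for example get_repo_client_getpack_timeout_secs() in the first file). A minimal illustration of that shape, assuming the accessor is a relaxed atomic load; ExampleTunables is a stand-in, not the actual derive output of MononokeTunables:

    use std::sync::atomic::{AtomicI64, Ordering};

    // Stand-in for the tunables struct: an unset field keeps the
    // AtomicI64 default of 0, which callers treat as "use the fallback".
    #[derive(Default)]
    struct ExampleTunables {
        repo_client_getpack_timeout_secs: AtomicI64,
    }

    impl ExampleTunables {
        fn get_repo_client_getpack_timeout_secs(&self) -> i64 {
            self.repo_client_getpack_timeout_secs.load(Ordering::Relaxed)
        }

        fn set_repo_client_getpack_timeout_secs(&self, secs: i64) {
            self.repo_client_getpack_timeout_secs.store(secs, Ordering::Relaxed);
        }
    }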