mononoke: do not log JSON field extra_context to scuba, log columns instead

Reviewed By: farnz

Differential Revision: D20500084

fbshipit-source-id: 296747780298fe81e4cca4cdb8af05548ba80169
Kostia Balytskyi, 2020-03-20 02:45:11 -07:00 (committed by Facebook GitHub Bot)
parent aab18f1a6f
commit 21f70ac6d1
2 changed files with 22 additions and 36 deletions
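
For quick orientation before the diff: the sketch below shows, in stand-alone Rust, the shape change this commit makes. The `Sample` struct and its `add_column`/`add_extra` methods are hypothetical stand-ins for illustration only, not the real ScubaSampleBuilder API; the point is that the old code packed the numbers into one JSON string under a single "extra_context" field, while the new code writes each number to its own integer column.

// Illustrative sketch only: `Sample` is a stand-in, not the real ScubaSampleBuilder.
use std::collections::HashMap;

#[derive(Default, Debug)]
struct Sample {
    columns: HashMap<String, i64>,   // one scuba column per value (new approach)
    extras: HashMap<String, String>, // JSON blob in a single column (old approach)
}

impl Sample {
    fn add_column(&mut self, name: &str, value: i64) {
        self.columns.insert(name.to_string(), value);
    }

    fn add_extra(&mut self, name: &str, json: String) {
        self.extras.insert(name.to_string(), json);
    }
}

fn main() {
    let (num_known, num_unknown) = (3i64, 2i64);

    // Before: both numbers serialized into one JSON string under "extra_context";
    // the backend cannot compute avg/p50 over values buried inside a string.
    let mut before = Sample::default();
    before.add_extra(
        "extra_context",
        format!(r#"{{"num_known":{},"num_unknown":{}}}"#, num_known, num_unknown),
    );

    // After: one integer column per counter, so the backend can aggregate directly.
    let mut after = Sample::default();
    after.add_column("num_known", num_known);
    after.add_column("num_unknown", num_unknown);

    println!("before: {:?}", before);
    println!("after:  {:?}", after);
}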


@@ -1121,7 +1121,7 @@ impl HgCommands for RepoClient {
     // @wireprotocommand('known', 'nodes *'), but the '*' is ignored
     fn known(&self, nodes: Vec<HgChangesetId>) -> HgCommandRes<Vec<bool>> {
-        let (ctx, mut command_logger) = self.start_command(ops::KNOWN);
+        let (ctx, command_logger) = self.start_command(ops::KNOWN);
         let blobrepo = self.repo.blobrepo().clone();
@@ -1164,12 +1164,12 @@ impl HgCommands for RepoClient {
             .traced(self.session.trace(), ops::KNOWN, trace_args!())
             .timed(move |stats, known_nodes| {
                 if let Ok(known) = known_nodes {
-                    let extra_context = json!({
-                        "num_known": known.len(),
-                        "num_unknown": nodes_len - known.len(),
-                    })
-                    .to_string();
-                    command_logger.add_scuba_extra("extra_context", extra_context);
+                    ctx.perf_counters()
+                        .add_to_counter(PerfCounterType::NumKnown, known.len() as i64);
+                    ctx.perf_counters().add_to_counter(
+                        PerfCounterType::NumUnknown,
+                        (nodes_len - known.len()) as i64,
+                    );
                 }
                 command_logger.without_wireproto().finalize_command(&stats);
                 Ok(())
@@ -1178,7 +1178,7 @@ impl HgCommands for RepoClient {
     }

     fn knownnodes(&self, nodes: Vec<HgChangesetId>) -> HgCommandRes<Vec<bool>> {
-        let (ctx, mut command_logger) = self.start_command(ops::KNOWNNODES);
+        let (ctx, command_logger) = self.start_command(ops::KNOWNNODES);
         let blobrepo = self.repo.blobrepo().clone();
@@ -1198,12 +1198,12 @@ impl HgCommands for RepoClient {
             .traced(self.session.trace(), ops::KNOWNNODES, trace_args!())
             .timed(move |stats, known_nodes| {
                 if let Ok(known) = known_nodes {
-                    let extra_context = json!({
-                        "num_known": known.len(),
-                        "num_unknown": nodes_len - known.len(),
-                    })
-                    .to_string();
-                    command_logger.add_scuba_extra("extra_context", extra_context);
+                    ctx.perf_counters()
+                        .add_to_counter(PerfCounterType::NumKnown, known.len() as i64);
+                    ctx.perf_counters().add_to_counter(
+                        PerfCounterType::NumUnknown,
+                        (nodes_len - known.len()) as i64,
+                    );
                 }
                 command_logger.without_wireproto().finalize_command(&stats);
                 Ok(())


@@ -6,7 +6,6 @@
  */

 use scuba_ext::ScubaSampleBuilder;
-use std::collections::HashMap;
 use std::sync::atomic::{AtomicI64, Ordering};

 macro_rules! define_perf_counters {
@@ -88,11 +87,14 @@ define_perf_counters! {
         SqlWrites,
         SumManifoldPollTime,
         NullLinknode,
+        NumKnown,
+        NumUnknown,
     }
 }

 impl PerfCounterType {
-    pub(crate) fn log_in_separate_column(&self) -> bool {
+    /// Whether to log the zero value of the counter
+    fn always_log(&self) -> bool {
         use PerfCounterType::*;

         match self {
@@ -148,29 +150,13 @@ impl PerfCounters {
     }

     pub fn insert_perf_counters(&self, builder: &mut ScubaSampleBuilder) {
-        let mut extra = HashMap::new();
-
-        // NOTE: we log 0 to separate scuba columns mainly so that we can distinguish
-        // nulls i.e. "not logged" and 0 as in "zero calls to blobstore". Logging 0 allows
-        // counting avg, p50 etc statistic.
-        // However we do not log 0 in extras to save space
+        // NOTE: we log 0 mainly so that we can distinguish
+        // nulls i.e. "not logged" and 0 as in "zero calls to blobstore".
+        // Logging 0 allows counting avg, p50 etc statistic.
         for key in PERF_COUNTERS.iter() {
             let value = self.get_counter(*key);
-            if key.log_in_separate_column() {
+            if value != 0 || key.always_log() {
                 builder.add(key.name(), value);
-            } else {
-                if value != 0 {
-                    extra.insert(key.name(), value);
-                }
             }
         }
-
-        if !extra.is_empty() {
-            if let Ok(extra) = serde_json::to_string(&extra) {
-                // Scuba does not support columns that are too long, we have to trim it
-                let limit = ::std::cmp::min(extra.len(), 1000);
-                builder.add("extra_context", &extra[..limit]);
-            }
-        }
     }
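
As a closing illustration of the new gating rule in insert_perf_counters (a counter value is written to its own column when it is non-zero, or unconditionally when always_log() marks the zero as meaningful), here is a small self-contained sketch. The Counter struct and the names/flags used below are hypothetical stand-ins for PerfCounterType, chosen only to show how an explicit 0 differs from an absent (null) column.

// Toy model of the gating rule; not the real PerfCounterType/ScubaSampleBuilder types.
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Counter {
    name: &'static str,
    always_log: bool, // whether a zero value should still be logged
}

fn insert_perf_counters(
    counters: &[(Counter, i64)],
    columns: &mut HashMap<&'static str, i64>,
) {
    for (key, value) in counters {
        // Mirrors `if value != 0 || key.always_log()` from the diff above:
        // non-zero values always get a column; zeros only for always-logged counters.
        if *value != 0 || key.always_log {
            columns.insert(key.name, *value);
        }
    }
}

fn main() {
    // Hypothetical counters and flag values, purely for illustration.
    let counters = [
        (Counter { name: "ExampleAlwaysLogged", always_log: true }, 0), // logged as an explicit 0
        (Counter { name: "NumKnown", always_log: false }, 7),           // logged as 7
        (Counter { name: "NumUnknown", always_log: false }, 0),         // omitted, reads as null
    ];

    let mut columns = HashMap::new();
    insert_perf_counters(&counters, &mut columns);

    // Only "ExampleAlwaysLogged" and "NumKnown" appear; "NumUnknown" stays null.
    println!("{:?}", columns);
}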