scmstore: add write metrics

Summary: Adds write metrics tracking to filescmstore, replacing the existing LFS pointer write metric (fallback_writeptr) with counters under scmstore.file.write (a minimal sketch of the new counters follows the changed-file summary below).

Reviewed By: DurhamG

Differential Revision: D29996487

fbshipit-source-id: f5be5ccc431a9162c9e737510283a543e5b948a2
Author: Meyer Jacobs, 2021-08-12 19:33:46 -07:00 (committed by Facebook GitHub Bot)
parent c4033dace0
commit 57503f2632
10 changed files with 103 additions and 103 deletions
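For orientation, here is a minimal, self-contained sketch of the counting pattern this change introduces. It is illustrative only: the struct and field names mirror the WriteMetrics type added in the diff below, but this is not the actual scmstore code.

// Illustrative sketch of the write-metrics accounting pattern added below.
// The real WriteMetrics is aggregated per backend (lfs/nonlfs/lfsptr) by
// FileStoreWriteMetrics, which also implements AddAssign.
use std::ops::AddAssign;

#[derive(Clone, Debug, Default)]
struct WriteMetrics {
    items: usize, // writes attempted
    ok: usize,    // writes that succeeded
    err: usize,   // writes that failed
}

impl AddAssign for WriteMetrics {
    fn add_assign(&mut self, rhs: Self) {
        self.items += rhs.items;
        self.ok += rhs.ok;
        self.err += rhs.err;
    }
}

fn main() {
    // write_batch accumulates per-batch metrics locally, then folds them
    // into the store-wide totals with `+=` once the batch completes.
    let mut batch = WriteMetrics::default();
    batch.items += 1;
    batch.ok += 1;

    let mut totals = WriteMetrics::default();
    totals += batch;
    assert_eq!((totals.items, totals.ok, totals.err), (1, 1, 0));
}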


@@ -678,8 +678,6 @@ class remotefileslog(filelog.fileslog):
elif self.filescmstore:
scmstore = self.filescmstore
if scmstore:
writeptr = self.filescmstore.fallback_writeptr_count()
ui.metrics.gauge("fallback_writeptr", writeptr)
metrics = self.filescmstore.getmetrics()
for (metric, value) in metrics:
ui.metrics.gauge(metric, value)


@@ -1380,11 +1380,6 @@ py_class!(pub class filescmstore |py| {
let store = self.store(py);
mutabledeltastore::create_instance(py, store.get_shared_mutable())
}
def fallback_writeptr_count(&self) -> PyResult<PyInt> {
let store = self.store(py);
Ok(store.fallbacks().write_ptr_count().to_py_object(py))
}
});
impl ExtractInnerRef for filescmstore {


@@ -22,7 +22,7 @@ use crate::{
indexedlogdatastore::IndexedLogHgIdDataStore,
indexedlogutil::StoreType,
lfs::{LfsRemote, LfsStore},
scmstore::{file::FileStoreMetrics, ContentStoreFallbacks, FileStore, TreeStore},
scmstore::{file::FileStoreMetrics, FileStore, TreeStore},
util::{
get_cache_path, get_indexedlogdatastore_aux_path, get_indexedlogdatastore_path,
get_local_path, get_repo_name,
@@ -359,7 +359,6 @@ impl<'a> FileStoreBuilder<'a> {
lfs_remote,
contentstore,
fallbacks: Arc::new(ContentStoreFallbacks::new()),
fetch_logger,
metrics: FileStoreMetrics::new(),


@@ -27,7 +27,7 @@ use crate::{
memcache::McData,
scmstore::{
file::{metrics::FileStoreFetchMetrics, LazyFile},
ContentStoreFallbacks, FileAttributes, FileAuxData, FileStore, StoreFile,
FileAttributes, FileAuxData, FileStore, StoreFile,
},
util, ContentHash, ContentStore, EdenApiFileStore, ExtStoredPolicy, MemcacheStore, Metadata,
StoreKey,


@@ -8,10 +8,7 @@
use std::ops::AddAssign;
use std::sync::Arc;
use parking_lot::{Mutex, RwLock};
use tracing::instrument;
use types::Key;
use parking_lot::RwLock;
use crate::indexedlogutil::StoreType;
@@ -176,9 +173,79 @@ impl FileStoreFetchMetrics {
}
}
#[derive(Clone, Debug, Default)]
pub struct WriteMetrics {
/// Number of entities we attempted to write
items: usize,
/// Number of successfully written entities
ok: usize,
/// Number of entities which returned a write error (including batch errors)
err: usize,
}
impl AddAssign for WriteMetrics {
fn add_assign(&mut self, rhs: Self) {
self.items += rhs.items;
self.ok += rhs.ok;
self.err += rhs.err;
}
}
impl WriteMetrics {
pub(crate) fn item(&mut self, keys: usize) {
self.items += keys;
}
pub(crate) fn ok(&mut self, keys: usize) {
self.ok += keys;
}
// TODO(meyer): Add write error tracking.
#[allow(dead_code)]
pub(crate) fn err(&mut self, keys: usize) {
self.err += keys;
}
fn metrics(&self) -> impl Iterator<Item = (&'static str, usize)> {
std::array::IntoIter::new([("items", self.items), ("ok", self.ok), ("err", self.err)])
.filter(|&(_, v)| v != 0)
}
}
#[derive(Clone, Debug, Default)]
pub struct FileStoreWriteMetrics {
/// Writes to the local LFS backend
pub(crate) lfs: WriteMetrics,
/// Writes to the local non-LFS backend
pub(crate) nonlfs: WriteMetrics,
/// LFS Pointer-only writes (supported only through fallback)
pub(crate) lfsptr: WriteMetrics,
}
impl AddAssign for FileStoreWriteMetrics {
fn add_assign(&mut self, rhs: Self) {
self.lfs += rhs.lfs;
self.nonlfs += rhs.nonlfs;
self.lfsptr += rhs.lfsptr;
}
}
impl FileStoreWriteMetrics {
fn metrics(&self) -> impl Iterator<Item = (String, usize)> {
namespaced("lfs", self.lfs.metrics())
.chain(namespaced("nonlfs", self.nonlfs.metrics()))
.chain(namespaced("lfsptr", self.lfsptr.metrics()))
}
}
#[derive(Debug, Default, Clone)]
pub struct FileStoreMetrics {
pub(crate) fetch: FileStoreFetchMetrics,
pub(crate) write: FileStoreWriteMetrics,
}
impl FileStoreMetrics {
@@ -187,33 +254,10 @@ impl FileStoreMetrics {
}
pub fn metrics(&self) -> impl Iterator<Item = (String, usize)> {
namespaced("scmstore.file.fetch", self.fetch.metrics())
}
}
#[derive(Debug, Default)]
pub(crate) struct ContentStoreFallbacksInner {
write_ptr: u64,
}
#[derive(Debug)]
pub struct ContentStoreFallbacks {
inner: Mutex<ContentStoreFallbacksInner>,
}
impl ContentStoreFallbacks {
pub(crate) fn new() -> Self {
ContentStoreFallbacks {
inner: Mutex::new(ContentStoreFallbacksInner::default()),
}
}
#[instrument(level = "warn", skip(self))]
pub(crate) fn write_ptr(&self, _key: &Key) {
self.inner.lock().write_ptr += 1;
}
pub fn write_ptr_count(&self) -> u64 {
self.inner.lock().write_ptr
namespaced(
"scmstore.file",
namespaced("fetch", self.fetch.metrics())
.chain(namespaced("write", self.write.metrics())),
)
}
}
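For reference, a small sketch of how the nesting above is expected to flatten into gauge names. This assumes namespaced() simply joins a prefix onto each metric name with a dot, which matches the scmstore.file.fetch.* names visible in the test output further down; the real helper lives elsewhere in the crate and may differ.

// Assumed behavior of namespaced(): prefix each (name, value) pair, so the
// nested calls above yield names like "scmstore.file.write.lfsptr.items".
fn namespaced<'a>(
    prefix: &'a str,
    metrics: impl Iterator<Item = (String, usize)> + 'a,
) -> impl Iterator<Item = (String, usize)> + 'a {
    metrics.map(move |(name, value)| (format!("{}.{}", prefix, name), value))
}

fn main() {
    let write = vec![
        ("lfsptr.items".to_string(), 1),
        ("lfsptr.ok".to_string(), 1),
    ];
    for (name, value) in namespaced("scmstore.file", namespaced("write", write.into_iter())) {
        // Prints "scmstore.file.write.lfsptr.items 1" and
        // "scmstore.file.write.lfsptr.ok 1"; the Python side forwards these
        // to ui.metrics.gauge().
        println!("{} {}", name, value);
    }
}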


@@ -11,7 +11,7 @@ mod types;
pub use self::{
fetch::FileStoreFetch,
metrics::{ContentStoreFallbacks, FileStoreMetrics},
metrics::{FileStoreMetrics, FileStoreWriteMetrics},
types::{FileAttributes, FileAuxData, StoreFile},
};
@@ -77,7 +77,6 @@ pub struct FileStore {
// Legacy ContentStore fallback
pub(crate) contentstore: Option<Arc<ContentStore>>,
pub(crate) fallbacks: Arc<ContentStoreFallbacks>,
// Aux Data Stores
pub(crate) aux_local: Option<Arc<AuxStore>>,
@@ -185,9 +184,12 @@ impl FileStore {
#[instrument(skip(self, entries))]
pub fn write_batch(&self, entries: impl Iterator<Item = (Key, Bytes, Metadata)>) -> Result<()> {
// TODO(meyer): Track error metrics too and don't fail the whole batch for a single write error.
let mut metrics = FileStoreWriteMetrics::default();
let mut indexedlog_local = self.indexedlog_local.as_ref().map(|l| l.write_lock());
for (key, bytes, meta) in entries {
if meta.is_lfs() {
metrics.lfsptr.item(1);
if !self.allow_write_lfs_ptrs {
ensure!(
std::env::var("TESTTMP").is_ok(),
@@ -199,7 +201,6 @@
let contentstore = self.contentstore.as_ref().ok_or_else(|| {
anyhow!("trying to write LFS pointer but no ContentStore is available")
})?;
self.fallbacks.write_ptr(&key);
let delta = Delta {
data: bytes,
base: None,
@@ -210,6 +211,7 @@
} else {
contentstore.add(&delta, &meta)
}?;
metrics.lfsptr.ok(1);
continue;
}
let hg_blob_len = bytes.len() as u64;
@@ -218,6 +220,7 @@
.lfs_threshold_bytes
.map_or(false, |threshold| hg_blob_len > threshold)
{
metrics.lfs.item(1);
let lfs_local = self.lfs_local.as_ref().ok_or_else(|| {
anyhow!("trying to write LFS file but no local LfsStore is available")
})?;
@@ -227,15 +230,19 @@
// TODO(meyer): Do similar LockGuard impl for LfsStore so we can lock across the batch for both
lfs_local.add_blob(&sha256, lfs_blob)?;
lfs_local.add_pointer(lfs_pointer)?;
metrics.lfs.ok(1);
} else {
metrics.nonlfs.item(1);
let indexedlog_local = indexedlog_local.as_mut().ok_or_else(|| {
anyhow!(
"trying to write non-LFS file but no local non-LFS IndexedLog is available"
)
})?;
indexedlog_local.put_entry(Entry::new(key, bytes, meta))?;
metrics.nonlfs.ok(1);
}
}
self.metrics.write().write += metrics;
Ok(())
}
@@ -262,7 +269,6 @@ impl FileStore {
lfs_remote: None,
contentstore: None,
fallbacks: self.fallbacks.clone(),
fetch_logger: self.fetch_logger.clone(),
metrics: self.metrics.clone(),
@@ -321,10 +327,6 @@ impl FileStore {
result
}
pub fn fallbacks(&self) -> Arc<ContentStoreFallbacks> {
self.fallbacks.clone()
}
pub fn metrics(&self) -> Vec<(String, usize)> {
self.metrics.read().metrics().collect()
}
@@ -351,7 +353,6 @@ impl FileStore {
lfs_remote: None,
contentstore: None,
fallbacks: Arc::new(ContentStoreFallbacks::new()),
fetch_logger: None,
metrics: FileStoreMetrics::new(),
@@ -394,7 +395,6 @@ impl LegacyStore for FileStore {
lfs_remote: None,
contentstore: None,
fallbacks: self.fallbacks.clone(),
fetch_logger: self.fetch_logger.clone(),
metrics: self.metrics.clone(),


@@ -7,7 +7,7 @@
pub use self::{
builder::{FileStoreBuilder, TreeStoreBuilder},
file::{ContentStoreFallbacks, FileAttributes, FileAuxData, FileStore, StoreFile},
file::{FileAttributes, FileAuxData, FileStore, StoreFile},
tree::TreeStore,
util::file_to_async_key_stream,
};


@@ -17,14 +17,12 @@
$ hgcloneshallow ssh://user@dummy/master shallow -q
1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) (?)
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 2,
{ metrics : { ssh : { connections : 2,
getpack : { calls : 1, revs : 1},
read : { bytes : 1484},
write : { bytes : 778}}}}
$ hgcloneshallow ssh://user@dummy/master shallow2 -q
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 859},
write : { bytes : 631}}}}
@@ -52,16 +50,14 @@ the server supports our custom getfiles method.
adding manifests
adding file changes
added 1 changesets with 0 changes to 0 files
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 975},
write : { bytes : 608}}}}
$ hg up
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) (?)
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
getpack : { calls : 1, revs : 1},
read : { bytes : 625},
write : { bytes : 147}}}}
@@ -76,11 +72,9 @@ the server supports our custom getfiles method.
$ cd shallow
$ echo z > z
$ hg commit -qAm z
{ metrics : { fallback : { writeptr : 0}}}
$ echo x >> x
$ echo y >> y
$ hg commit -qAm xxyy
{ metrics : { fallback : { writeptr : 0}}}
$ cd ../shallow2
$ clearcache
$ hg pull ../shallow
@@ -91,8 +85,7 @@ the server supports our custom getfiles method.
adding file changes
4 files fetched over 2 fetches - (4 misses, 0.00% hit ratio) over 0.00s (?)
added 3 changesets with 4 changes to 3 files
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 2,
{ metrics : { ssh : { connections : 2,
getpack : { calls : 3, revs : 3},
read : { bytes : 1403},
write : { bytes : 337}}}}
@@ -100,7 +93,6 @@ the server supports our custom getfiles method.
# pull from shallow to shallow (ssh)
$ hg debugstrip -r d34c38483be9d08f205eaae60c380a29b48e0189
{ metrics : { fallback : { writeptr : 0}}}
$ hg pull ssh://user@dummy/$TESTTMP/shallow --config remotefilelog.cachepath=${CACHEDIR}2
pulling from ssh://user@dummy/$TESTTMP/shallow
searching for changes
@@ -109,28 +101,23 @@ the server supports our custom getfiles method.
adding file changes
2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) (?)
added 3 changesets with 4 changes to 3 files
remote: { metrics : { fallback : { writeptr : 0},
remote: ssh : { connections : 1,
remote: { metrics : { ssh : { connections : 1,
remote: getpack : { calls : 1, revs : 1},
remote: read : { bytes : 625},
remote: write : { bytes : 147}}}}
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 2,
{ metrics : { ssh : { connections : 2,
getpack : { calls : 1, revs : 1},
read : { bytes : 2848},
write : { bytes : 755}}}}
$ hg up
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
{ metrics : { fallback : { writeptr : 0}}}
$ cat z
z
$ hg -R ../shallow debugstrip -qr 'desc(xxyy)'
{ metrics : { fallback : { writeptr : 0}}}
$ hg debugstrip -qr 'desc(xxyy)'
{ metrics : { fallback : { writeptr : 0}}}
$ cd ..
# push from shallow to shallow
@@ -138,7 +125,6 @@ the server supports our custom getfiles method.
$ cd shallow
$ echo a > a
$ hg commit -qAm a
{ metrics : { fallback : { writeptr : 0}}}
$ hg push ssh://user@dummy/$TESTTMP/shallow2
pushing to ssh://user@dummy/$TESTTMP/shallow2
searching for changes
@@ -146,16 +132,13 @@ the server supports our custom getfiles method.
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
remote: { metrics : { fallback : { writeptr : 0}}}
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 612},
write : { bytes : 991}}}}
$ cd ../shallow2
$ hg up
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
{ metrics : { fallback : { writeptr : 0}}}
$ cat a
a
$ cd ..
@@ -170,8 +153,7 @@ the server supports our custom getfiles method.
remote: adding manifests
remote: adding file changes
remote: added 2 changesets with 2 changes to 2 files
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 589},
write : { bytes : 1454}}}}
@@ -190,11 +172,9 @@ the server supports our custom getfiles method.
$ cd ../shallow
$ echo p > p
$ hg commit -qAm p
{ metrics : { fallback : { writeptr : 0}}}
$ hg debugmakepublic .
$ echo d > d
$ hg commit -qAm d
{ metrics : { fallback : { writeptr : 0}}}
$ cd ../shallow2
$ hg pull ../shallow
@@ -204,7 +184,6 @@ the server supports our custom getfiles method.
adding manifests
adding file changes
added 2 changesets with 1 changes to 1 files
{ metrics : { fallback : { writeptr : 0}}}
$ cd ..
@@ -224,21 +203,15 @@ the server supports our custom getfiles method.
$ cd multimf-shallow
$ echo a > a
$ hg commit -qAm a
{ metrics : { fallback : { writeptr : 0}}}
$ echo b > b
$ hg commit -qAm b
{ metrics : { fallback : { writeptr : 0}}}
$ echo c > c
$ hg commit -qAm c1
{ metrics : { fallback : { writeptr : 0}}}
$ hg up -q 'desc(a)'
{ metrics : { fallback : { writeptr : 0}}}
$ echo c > c
$ hg commit -qAm c2
{ metrics : { fallback : { writeptr : 0}}}
$ echo cc > c
$ hg commit -qAm c22
{ metrics : { fallback : { writeptr : 0}}}
$ hg log -G -T '{desc}\n'
@ c22
@@ -254,7 +227,6 @@ the server supports our custom getfiles method.
$ cd ../multimf-shallow2
- initial commit to prevent hg pull from being a clone
$ echo z > z && hg commit -qAm z
{ metrics : { fallback : { writeptr : 0}}}
$ hg pull -f ssh://user@dummy/$TESTTMP/multimf-shallow
pulling from ssh://user@dummy/$TESTTMP/multimf-shallow
searching for changes
@@ -264,15 +236,11 @@ the server supports our custom getfiles method.
adding manifests
adding file changes
added 5 changesets with 4 changes to 3 files
remote: { metrics : { fallback : { writeptr : 0}}}
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 2883},
write : { bytes : 608}}}}
$ hg up -q 'desc(c22)'
{ metrics : { fallback : { writeptr : 0}}}
$ hg log -f -T '{node}\n' c
d8f06a4c6d38c9308d3dcbee10c27ae5a01ab93f
853b3dc5bcf912b088c60ccd3ec60f35e96b92bb
{ metrics : { fallback : { writeptr : 0}}}


@@ -21,8 +21,7 @@
fetching tree '' bc0c2c938b929f98b1c31a8c5994396ebb096bf0
1 trees fetched over * (glob)
1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) (?)
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 2,
{ metrics : { ssh : { connections : 2,
getpack : { calls : 1, revs : 1},
gettreepack : { basemfnodes : 0,
calls : 1,
@@ -52,8 +51,7 @@ Test auto creating trees for merge commit
adding manifests
adding file changes
added 2 changesets with 0 changes to 0 files
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 1097},
write : { bytes : 657}}}}
$ hg manifest -r tip


@@ -24,8 +24,7 @@ Clone it
fetching tree '' a539ce0c1a22b0ecf34498f9f5ce8ea56df9ecb7
1 trees fetched over * (glob)
2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) (?)
{ metrics : { fallback : { writeptr : 0},
scmstore : { file : { fetch : { contentstore : { hits : 2,
{ metrics : { scmstore : { file : { fetch : { contentstore : { hits : 2,
keys : 2,
requests : 1},
indexedlog : { cache : { hits : 8,
@@ -87,8 +86,7 @@ Pull exactly up to d into the client
adding manifests
adding file changes
added 2 changesets with 0 changes to 0 files
{ metrics : { fallback : { writeptr : 0},
ssh : { connections : 1,
{ metrics : { ssh : { connections : 1,
read : { bytes : 1086},
write : { bytes : 680}}}}