mononoke: make get_filenode() return FilenodeResult

Summary:
Similar to get_all_filenodes_maybe_stale(), make this method return
FilenodeResult so that it can report when filenodes are disabled.

Note: this diff adds one TODO in fetch_root_filenode, which will be removed
together with other TODOs in the next diff.
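
For readers skimming the diff below, `FilenodeResult` and its
`do_not_handle_disabled_filenodes()` helper behave roughly as in this sketch,
which is inferred from the call sites in this diff rather than copied from the
actual definition:

    // Sketch only: shape inferred from how FilenodeResult is used in this diff.
    use anyhow::{anyhow, Error};

    pub enum FilenodeResult<T> {
        // Filenodes are enabled and the lookup produced a value (possibly None).
        Present(T),
        // The filenodes_disabled tunable is set; callers decide how to degrade.
        Disabled,
    }

    impl<T> FilenodeResult<T> {
        // Callers that cannot handle the disabled case yet (e.g. fetch_root_filenode
        // and the tests) unwrap with this helper, turning Disabled into an error.
        pub fn do_not_handle_disabled_filenodes(self) -> Result<T, Error> {
            match self {
                FilenodeResult::Present(value) => Ok(value),
                FilenodeResult::Disabled => Err(anyhow!("filenodes are disabled")),
            }
        }
    }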

Reviewed By: ahornby

Differential Revision: D21904399

fbshipit-source-id: 1569579699c02eb07021f8143aa652aa192d23bc
Authored by Stanislau Hlebik on 2020-06-08 05:15:13 -07:00; committed by Facebook GitHub Bot
parent 3df579d412
commit c34cfd9bbf
9 changed files with 111 additions and 30 deletions


@@ -222,7 +222,7 @@ impl<F: Filenodes> Filenodes for DelayedFilenodes<F> {
         path: &RepoPath,
         filenode: HgFileNodeId,
         repo_id: RepositoryId,
-    ) -> BoxFuture<Option<FilenodeInfo>, Error> {
+    ) -> BoxFuture<FilenodeResult<Option<FilenodeInfo>>, Error> {
         delay(
             self.get_dist,
             self.inner.get_filenode(ctx, path, filenode, repo_id),


@@ -532,9 +532,7 @@ impl BlobRepo {
         node: HgFileNodeId,
     ) -> impl Future<Item = FilenodeResult<Option<FilenodeInfo>>, Error = Error> {
         let path = path.clone();
-        self.filenodes
-            .get_filenode(ctx, &path, node, self.repoid)
-            .map(FilenodeResult::Present)
+        self.filenodes.get_filenode(ctx, &path, node, self.repoid)
     }
 
     pub fn get_filenode(


@@ -413,7 +413,9 @@ async fn fetch_root_filenode(
             repo.get_repoid(),
         )
         .compat()
-        .await?;
+        .await?
+        // TODO(stash): do not fail but return FilenodesOnlyPublic::Disabled
+        .do_not_handle_disabled_filenodes()?;
 
     maybe_info
         .map(|info| {


@@ -140,7 +140,7 @@ pub trait Filenodes: Send + Sync {
         path: &RepoPath,
         filenode: HgFileNodeId,
         repo_id: RepositoryId,
-    ) -> BoxFuture<Option<FilenodeInfo>, Error>;
+    ) -> BoxFuture<FilenodeResult<Option<FilenodeInfo>>, Error>;
 
     fn get_all_filenodes_maybe_stale(
         &self,


@@ -89,7 +89,7 @@ impl Filenodes for MicrowaveFilenodes {
         path: &RepoPath,
         filenode_id: HgFileNodeId,
         repo_id: RepositoryId,
-    ) -> BoxFuture<Option<FilenodeInfo>, Error> {
+    ) -> BoxFuture<FilenodeResult<Option<FilenodeInfo>>, Error> {
         cloned!(self.inner, mut self.recorder, path);
 
         // NOTE: Receiving any other repo_id here would be a programming error, so we block it. See
@@ -105,7 +105,8 @@ impl Filenodes for MicrowaveFilenodes {
             let info = inner
                 .get_filenode(ctx, &path, filenode_id, repo_id)
                 .compat()
-                .await?;
+                .await?
+                .do_not_handle_disabled_filenodes()?;
 
             if let Some(ref info) = info {
                 recorder
@@ -116,7 +117,7 @@ impl Filenodes for MicrowaveFilenodes {
                     .await?;
             }
 
-            Ok(info)
+            Ok(FilenodeResult::Present(info))
         }
         .boxed()
         .compat()


@@ -99,7 +99,7 @@ impl Filenodes for NewFilenodes {
         path: &RepoPath,
         filenode_id: HgFileNodeId,
         repo_id: RepositoryId,
-    ) -> BoxFuture<Option<FilenodeInfo>, Error> {
+    ) -> BoxFuture<FilenodeResult<Option<FilenodeInfo>>, Error> {
         cloned!(self.reader, path);
 
         async move {


@@ -146,14 +146,14 @@ impl FilenodesReader {
         repo_id: RepositoryId,
         path: &RepoPath,
         filenode: HgFileNodeId,
-    ) -> Result<Option<FilenodeInfo>, Error> {
+    ) -> Result<FilenodeResult<Option<FilenodeInfo>>, Error> {
         STATS::gets.add_value(1);
 
         let pwh = PathWithHash::from_repo_path(&path);
         let key = filenode_cache_key(repo_id, &pwh, &filenode);
 
         if let Some(cached) = self.local_cache.get(&key) {
-            return Ok(Some(cached.try_into()?));
+            return Ok(FilenodeResult::Present(Some(cached.try_into()?)));
         }
 
         let permit = self.shards.acquire_filenodes(&path, filenode).await;
@@ -162,7 +162,7 @@
         // Now that we acquired the permit, check our cache again, in case the previous permit
         // owner just filed the cache with the filenode we're looking for.
         if let Some(cached) = self.local_cache.get(&key) {
-            return Ok(Some(cached.try_into()?));
+            return Ok(FilenodeResult::Present(Some(cached.try_into()?)));
         }
 
         STATS::get_local_cache_misses.add_value(1);
@@ -170,7 +170,7 @@
         if let Some(info) = enforce_remote_cache_timeout(self.remote_cache.get_filenode(&key)).await
         {
             self.local_cache.fill(&key, &(&info).into());
-            return Ok(Some(info));
+            return Ok(FilenodeResult::Present(Some(info)));
         }
 
         let cache_filler = FilenodeCacheFiller {
@@ -192,10 +192,13 @@
         )
         .await
         {
-            Ok(Some(res)) => {
-                return Ok(Some(res.try_into()?));
+            Ok(FilenodeResult::Disabled) => {
+                return Ok(FilenodeResult::Disabled);
             }
-            Ok(None)
+            Ok(FilenodeResult::Present(Some(res))) => {
+                return Ok(FilenodeResult::Present(Some(res.try_into()?)));
+            }
+            Ok(FilenodeResult::Present(None))
             | Err(ErrorKind::FixedCopyInfoMissing(_))
             | Err(ErrorKind::PathNotFound(_)) => {
                 // If the filenode wasn't found, or its copy info was missing, it might be present
@@ -221,7 +224,13 @@
         )
         .await?;
 
-        return res.map(|r| r.try_into()).transpose();
+        match res {
+            FilenodeResult::Present(res) => {
+                let res = res.map(|res| res.try_into()).transpose()?;
+                Ok(FilenodeResult::Present(res))
+            }
+            FilenodeResult::Disabled => Ok(FilenodeResult::Disabled),
+        }
     }
 
     pub async fn get_all_filenodes_for_path(
@@ -325,13 +334,17 @@ async fn select_filenode_from_sql(
     pwh: &PathWithHash<'_>,
     filenode: HgFileNodeId,
     recorder: &PerfCounterRecorder<'_>,
-) -> Result<Option<CachedFilenode>, ErrorKind> {
+) -> Result<FilenodeResult<Option<CachedFilenode>>, ErrorKind> {
+    if tunables().get_filenodes_disabled() {
+        return Ok(FilenodeResult::Disabled);
+    }
+
     let partial = select_partial_filenode(connections, repo_id, pwh, filenode, recorder).await?;
 
     let partial = match partial {
         Some(partial) => partial,
         None => {
-            return Ok(None);
+            return Ok(FilenodeResult::Present(None));
         }
     };
@@ -342,13 +355,13 @@
     {
         Some(ret) => ret,
         None => {
-            return Ok(None);
+            return Ok(FilenodeResult::Present(None));
         }
     };
 
     filler.fill(ret.clone());
 
-    Ok(Some(ret))
+    Ok(FilenodeResult::Present(Some(ret)))
 }
 
 async fn select_partial_filenode(


@@ -61,14 +61,16 @@ async fn test_filenode_fill(fb: FacebookInit) -> Result<(), Error> {
     // A local miss should fill the remote cache:
     reader
         .get_filenode(&ctx, REPO_ZERO, &path, info.filenode)
-        .await?;
+        .await?
+        .do_not_handle_disabled_filenodes()?;
     wait_for_filenode(&reader.remote_cache, &key).await?;
 
     // A local hit should not fill the remote cache:
     reader.remote_cache = make_test_cache();
     reader
         .get_filenode(&ctx, REPO_ZERO, &path, info.filenode)
-        .await?;
+        .await?
+        .do_not_handle_disabled_filenodes()?;
     let r = wait_for_filenode(&reader.remote_cache, &key).await;
     assert!(r.is_err());


@@ -37,9 +37,13 @@ async fn check_roundtrip(
     payload: PreparedFilenode,
 ) -> Result<(), Error> {
     assert_eq!(
-        reader
-            .get_filenode(&ctx, repo_id, &payload.path, payload.info.filenode)
-            .await?,
+        async {
+            let res = reader
+                .get_filenode(&ctx, repo_id, &payload.path, payload.info.filenode)
+                .await?;
+            res.do_not_handle_disabled_filenodes()
+        }
+        .await?,
         None
     );
@@ -48,9 +52,13 @@
         .await?;
 
     assert_eq!(
-        reader
-            .get_filenode(&ctx, repo_id, &payload.path, payload.info.filenode)
-            .await?,
+        async {
+            let res = reader
+                .get_filenode(&ctx, repo_id, &payload.path, payload.info.filenode)
+                .await?;
+            res.do_not_handle_disabled_filenodes()
+        }
+        .await?,
         Some(payload.info),
     );
@@ -403,6 +411,7 @@ async fn assert_no_filenode(
     repo_id: RepositoryId,
 ) -> Result<(), Error> {
     let res = reader.get_filenode(&ctx, repo_id, path, hash).await?;
+    let res = res.do_not_handle_disabled_filenodes()?;
     assert!(res.is_none());
     Ok(())
 }
@@ -418,6 +427,7 @@
     let res = reader
         .get_filenode(&ctx, repo_id, path, hash)
         .await?
+        .do_not_handle_disabled_filenodes()?
         .ok_or(format_err!("not found: {}", hash))?;
     assert_eq!(res, expected);
     Ok(())
@@ -960,3 +970,58 @@ async fn get_all_filenodes_maybe_stale_with_disabled(fb: FacebookInit) -> Result
     Ok(())
 }
 
+#[fbinit::compat_test]
+async fn test_get_filenode_with_disabled(fb: FacebookInit) -> Result<(), Error> {
+    let ctx = CoreContext::test_mock(fb);
+    let (reader, writer) = build_reader_writer(create_sharded()?);
+    let reader = with_caching(reader);
+    do_add_filenodes(&ctx, &writer, vec![root_first_filenode()], REPO_ZERO).await?;
+
+    let payload_info = root_first_filenode().info;
+
+    let tunables = MononokeTunables::default();
+    tunables.update_bools(&hashmap! {"filenodes_disabled".to_string() => true});
+
+    let res = with_tunables_async(
+        tunables,
+        reader
+            .get_filenode(&ctx, REPO_ZERO, &RepoPath::RootPath, payload_info.filenode)
+            .boxed(),
+    )
+    .await?;
+
+    if let FilenodeResult::Present(_) = res {
+        panic!("expected FilenodeResult::Disabled");
+    }
+
+    assert_filenode(
+        &ctx,
+        &reader,
+        &RepoPath::root(),
+        ONES_FNID,
+        REPO_ZERO,
+        root_first_filenode().info,
+    )
+    .await?;
+
+    // The filenodes are cached now, so even with filenodes_disabled = true
+    // all filenodes should be returned
+    let tunables = MononokeTunables::default();
+    tunables.update_bools(&hashmap! {"filenodes_disabled".to_string() => true});
+
+    with_tunables_async(
+        tunables,
+        assert_filenode(
+            &ctx,
+            &reader,
+            &RepoPath::root(),
+            ONES_FNID,
+            REPO_ZERO,
+            root_first_filenode().info,
+        )
+        .boxed(),
+    )
+    .await?;
+
+    Ok(())
+}