mononoke/newfilenodes: stop using i8 internally for is_tree

Summary: Stop storing is_tree as an i8 on PathWithHash and PathHash; keep it as a bool and convert to the i8 the SQL layer expects via a new sql_is_tree() helper at query call sites. Makes the code a little nicer to work with.

Reviewed By: HarveyHunt

Differential Revision: D20138720

fbshipit-source-id: 19f228782ab3582739e35fddcb2b0bf952110641
Thomas Orozco 2020-02-27 12:15:05 -08:00 (committed by Facebook Github Bot)
parent ed602e6009
commit b7dfbdd09d
4 changed files with 97 additions and 13 deletions
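For orientation before the per-file hunks, here is the shape of the change condensed into one self-contained sketch (the stand-in type definitions are mine; the field and helper are taken from the hunks below):

    // Stand-ins so the sketch compiles on its own; the real newtypes live in
    // mononoke/newfilenodes.
    pub struct PathBytes(pub Vec<u8>);
    pub struct PathHashBytes(pub Vec<u8>);

    // After this diff the struct carries a bool...
    pub struct PathHash {
        pub path_bytes: PathBytes,
        pub is_tree: bool, // previously: pub is_tree: i8
        pub hash: PathHashBytes,
    }

    impl PathHash {
        // ...and the i8 representation the SQL queries expect is derived only
        // at the query boundary.
        pub fn sql_is_tree(&self) -> &'static i8 {
            if self.is_tree {
                &1
            } else {
                &0
            }
        }
    }

PathWithHash gets the same treatment.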


@@ -345,7 +345,14 @@ async fn select_partial_filenode(
     recorder.increment();
     let rows = enforce_sql_timeout(
-        SelectFilenode::query(&connection, &repo_id, &pwh.hash, &pwh.is_tree, &filenode).compat(),
+        SelectFilenode::query(
+            &connection,
+            &repo_id,
+            &pwh.hash,
+            pwh.sql_is_tree(),
+            &filenode,
+        )
+        .compat(),
     )
     .await?;
@@ -399,7 +406,7 @@ async fn select_partial_history(
     recorder.increment();
     let rows = enforce_sql_timeout(
-        SelectAllFilenodes::query(&connection, &repo_id, &pwh.hash, &pwh.is_tree).compat(),
+        SelectAllFilenodes::query(&connection, &repo_id, &pwh.hash, pwh.sql_is_tree()).compat(),
     )
     .await?;
@@ -470,7 +477,7 @@ async fn fill_paths(
                 .get(&from_path_hash)
                 .ok_or_else(|| ErrorKind::PathNotFound(from_path_hash.clone()))?
                 .clone();
-            Some((pwh.is_tree != 0, from_path, from_node))
+            Some((pwh.is_tree, from_path, from_node))
         }
         None => None,
     };


@@ -82,7 +82,7 @@ impl From<PathBytes> for Value {
 pub struct PathWithHash<'a> {
     pub path: &'a RepoPath,
     pub path_bytes: PathBytes,
-    pub is_tree: i8,
+    pub is_tree: bool,
     pub hash: PathHashBytes,
 }
@@ -90,8 +90,6 @@ impl<'a> PathWithHash<'a> {
     pub fn from_repo_path(path: &'a RepoPath) -> Self {
         let (path_bytes, is_tree) = convert_from_repo_path(path);
-        let is_tree = if is_tree { 1 } else { 0 };
-
         let hash = {
             let mut hash_content = hash::Context::new("path".as_bytes());
             hash_content.update(&path_bytes);
@@ -116,6 +114,14 @@ impl<'a> PathWithHash<'a> {
         raw_shard_number % shard_count
     }
+
+    pub fn sql_is_tree(&self) -> &'static i8 {
+        if self.is_tree {
+            &1
+        } else {
+            &0
+        }
+    }
 }
 
 fn convert_from_repo_path(path: &RepoPath) -> (Vec<u8>, bool) {
@@ -128,7 +134,7 @@ fn convert_from_repo_path(path: &RepoPath) -> (Vec<u8>, bool) {
 pub struct PathHash {
     pub path_bytes: PathBytes,
-    pub is_tree: i8,
+    pub is_tree: bool,
     pub hash: PathHashBytes,
 }
@@ -151,6 +157,14 @@ impl PathHash {
     pub fn shard_number(&self, shard_count: usize) -> usize {
         PathWithHash::shard_number_by_hash(&self.hash, shard_count)
     }
+
+    pub fn sql_is_tree(&self) -> &'static i8 {
+        if self.is_tree {
+            &1
+        } else {
+            &0
+        }
+    }
 }
 
 #[derive(Abomonation, Clone)]
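A note on the return type, which is my reading rather than anything stated in the commit: the generated query functions (SelectFilenode::query and friends) take their parameters by reference, so the helper returns &'static i8 — references to the literals 1 and 0 are promoted to 'static — letting call sites pass pwh.sql_is_tree() exactly where &pwh.is_tree used to go, with no temporary binding. A standalone illustration with a stand-in query function:

    // Stand-in for a generated query fn that takes its parameter by reference.
    fn query(is_tree: &i8) {
        println!("is_tree = {}", is_tree);
    }

    struct PathHash {
        is_tree: bool,
    }

    impl PathHash {
        fn sql_is_tree(&self) -> &'static i8 {
            // &1 and &0 borrow promoted constants, hence the 'static lifetime.
            if self.is_tree {
                &1
            } else {
                &0
            }
        }
    }

    fn main() {
        let ph = PathHash { is_tree: true };
        // Without the helper the caller would need: let t = ph.is_tree as i8; query(&t);
        query(ph.sql_is_tree());
    }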


@@ -306,6 +306,19 @@ fn root_merge_filenode() -> PreparedFilenode {
     }
 }
 
+fn dir_a_first_filenode() -> PreparedFilenode {
+    PreparedFilenode {
+        path: RepoPath::dir("a").unwrap(),
+        info: FilenodeInfo {
+            filenode: ONES_FNID,
+            p1: None,
+            p2: None,
+            copyfrom: None,
+            linknode: ONES_CSID,
+        },
+    }
+}
+
 fn file_a_first_filenode() -> PreparedFilenode {
     PreparedFilenode {
         path: RepoPath::file("a").unwrap(),
@@ -792,6 +805,56 @@ macro_rules! filenodes_tests {
                 .await?;
                 Ok(())
             }
+
+            #[fbinit::test]
+            async fn test_mixed_path_insert_and_get(fb: FacebookInit) -> Result<(), Error> {
+                let ctx = CoreContext::test_mock(fb);
+                let (reader, writer) = build_reader_writer($create_db()?);
+                let reader = $enable_caching(reader);
+
+                do_add_filenode(&ctx, &writer, file_a_first_filenode(), REPO_ZERO).await?;
+                do_add_filenode(&ctx, &writer, dir_a_first_filenode(), REPO_ZERO).await?;
+
+                assert_filenode(
+                    &ctx,
+                    &reader,
+                    &RepoPath::file("a").unwrap(),
+                    ONES_FNID,
+                    REPO_ZERO,
+                    file_a_first_filenode().info,
+                )
+                .await?;
+
+                assert_filenode(
+                    &ctx,
+                    &reader,
+                    &RepoPath::dir("a").unwrap(),
+                    ONES_FNID,
+                    REPO_ZERO,
+                    dir_a_first_filenode().info,
+                )
+                .await?;
+
+                assert_all_filenodes(
+                    &ctx,
+                    &reader,
+                    &RepoPath::file("a").unwrap(),
+                    REPO_ZERO,
+                    &vec![file_a_first_filenode().info],
+                )
+                .await?;
+
+                assert_all_filenodes(
+                    &ctx,
+                    &reader,
+                    &RepoPath::dir("a").unwrap(),
+                    REPO_ZERO,
+                    &vec![dir_a_first_filenode().info],
+                )
+                .await?;
+
+                Ok(())
+            }
         }
     };
 }
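Why the new test is a good guard for this refactor (again my inference, not the commit's words): the path hash covers only the path bytes, so the file "a" and the directory "a" produce the same hash and are told apart solely by is_tree — the very value whose representation changed. A minimal sketch of that collision, using a stand-in hash function:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Stand-in for the real path hashing, which likewise feeds only the path
    // bytes (plus a fixed "path" key) into the hasher.
    fn path_hash(path_bytes: &[u8]) -> u64 {
        let mut hasher = DefaultHasher::new();
        path_bytes.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        let file_a = (path_hash(b"a"), false); // is_tree = false
        let dir_a = (path_hash(b"a"), true); // is_tree = true
        assert_eq!(file_a.0, dir_a.0); // same path hash...
        assert_ne!(file_a, dir_a); // ...distinguished only by is_tree
    }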


@@ -144,11 +144,11 @@ async fn insert_filenodes(
     let mut filenode_rows = Vec::new();
     let mut copydata_rows = Vec::new();
 
-    for (pwh, filenode) in filenodes {
+    for (ph, filenode) in filenodes {
         filenode_rows.push((
             &repo_id,
-            &pwh.hash,
-            &pwh.is_tree,
+            &ph.hash,
+            ph.sql_is_tree(),
             &filenode.info.filenode,
             &filenode.info.linknode,
             &filenode.info.p1,
@@ -163,15 +163,15 @@ async fn insert_filenodes(
         if let Some(ref copyinfo) = filenode.info.copyfrom {
             let (ref frompath, ref fromnode) = copyinfo;
             let from_pwh = PathHash::from_repo_path(frompath);
-            if from_pwh.is_tree != pwh.is_tree {
+            if from_pwh.is_tree != ph.is_tree {
                 let e = ErrorKind::InvalidCopy(filenode.path.clone(), frompath.clone());
                 return Err(e.into());
             }
             copydata_rows.push((
                 &repo_id,
-                &pwh.hash,
+                &ph.hash,
                 &filenode.info.filenode,
-                &pwh.is_tree,
+                ph.sql_is_tree(),
                 from_pwh.hash,
                 fromnode,
             ));