From b7dfbdd09d14b37a7e3f5c441cc64892aff7ecf8 Mon Sep 17 00:00:00 2001
From: Thomas Orozco
Date: Thu, 27 Feb 2020 12:15:05 -0800
Subject: [PATCH] mononoke/newfilenodes: stop using i8 internally for is_tree

Summary: Makes the code a little nicer to work with.
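
The shape of the change, as a minimal self-contained sketch (the struct below
is trimmed down for illustration; the real PathWithHash and PathHash also
carry path and hash fields): `is_tree` stays a `bool` everywhere in Rust and
is only lowered to the `i8` column value at the SQL boundary. The query
functions bind parameters by reference, hence `&'static i8`, which works
because integer literals such as `&1` are promoted to `'static` constants.

```rust
// Trimmed-down illustration of the pattern in this diff, not the real type.
struct PathHash {
    is_tree: bool,
}

impl PathHash {
    // Lower the internal bool to the i8 representation the MySQL schema
    // expects. Returning &'static i8 lets query code bind the value by
    // reference without keeping a local temporary alive.
    fn sql_is_tree(&self) -> &'static i8 {
        if self.is_tree {
            &1
        } else {
            &0
        }
    }
}

fn main() {
    let dir = PathHash { is_tree: true };
    let file = PathHash { is_tree: false };
    assert_eq!(*dir.sql_is_tree(), 1);
    assert_eq!(*file.sql_is_tree(), 0);
}
```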

Reviewed By: HarveyHunt

Differential Revision: D20138720

fbshipit-source-id: 19f228782ab3582739e35fddcb2b0bf952110641
---
 eden/mononoke/newfilenodes/src/reader.rs      | 13 +++-
 eden/mononoke/newfilenodes/src/structs.rs     | 22 +++++--
 .../newfilenodes/src/test/test_reader.rs      | 63 +++++++++++++++++++
 eden/mononoke/newfilenodes/src/writer.rs      | 12 ++--
 4 files changed, 97 insertions(+), 13 deletions(-)

diff --git a/eden/mononoke/newfilenodes/src/reader.rs b/eden/mononoke/newfilenodes/src/reader.rs
index dd7e481576..2bf3726268 100644
--- a/eden/mononoke/newfilenodes/src/reader.rs
+++ b/eden/mononoke/newfilenodes/src/reader.rs
@@ -345,7 +345,14 @@ async fn select_partial_filenode(
     recorder.increment();
 
     let rows = enforce_sql_timeout(
-        SelectFilenode::query(&connection, &repo_id, &pwh.hash, &pwh.is_tree, &filenode).compat(),
+        SelectFilenode::query(
+            &connection,
+            &repo_id,
+            &pwh.hash,
+            pwh.sql_is_tree(),
+            &filenode,
+        )
+        .compat(),
     )
     .await?;
 
@@ -399,7 +406,7 @@ async fn select_partial_history(
     recorder.increment();
 
     let rows = enforce_sql_timeout(
-        SelectAllFilenodes::query(&connection, &repo_id, &pwh.hash, &pwh.is_tree).compat(),
+        SelectAllFilenodes::query(&connection, &repo_id, &pwh.hash, pwh.sql_is_tree()).compat(),
     )
     .await?;
 
@@ -470,7 +477,7 @@ async fn fill_paths(
                     .get(&from_path_hash)
                     .ok_or_else(|| ErrorKind::PathNotFound(from_path_hash.clone()))?
                     .clone();
-                Some((pwh.is_tree != 0, from_path, from_node))
+                Some((pwh.is_tree, from_path, from_node))
             }
             None => None,
         };
diff --git a/eden/mononoke/newfilenodes/src/structs.rs b/eden/mononoke/newfilenodes/src/structs.rs
index 9878293cf8..c5ac422f8b 100644
--- a/eden/mononoke/newfilenodes/src/structs.rs
+++ b/eden/mononoke/newfilenodes/src/structs.rs
@@ -82,7 +82,7 @@ impl From<PathBytes> for Value {
 pub struct PathWithHash<'a> {
     pub path: &'a RepoPath,
     pub path_bytes: PathBytes,
-    pub is_tree: i8,
+    pub is_tree: bool,
     pub hash: PathHashBytes,
 }
 
@@ -90,8 +90,6 @@ impl<'a> PathWithHash<'a> {
     pub fn from_repo_path(path: &'a RepoPath) -> Self {
         let (path_bytes, is_tree) = convert_from_repo_path(path);
 
-        let is_tree = if is_tree { 1 } else { 0 };
-
         let hash = {
             let mut hash_content = hash::Context::new("path".as_bytes());
             hash_content.update(&path_bytes);
@@ -116,6 +114,14 @@
 
         raw_shard_number % shard_count
     }
+
+    pub fn sql_is_tree(&self) -> &'static i8 {
+        if self.is_tree {
+            &1
+        } else {
+            &0
+        }
+    }
 }
 
 fn convert_from_repo_path(path: &RepoPath) -> (Vec<u8>, bool) {
@@ -128,7 +134,7 @@ fn convert_from_repo_path(path: &RepoPath) -> (Vec<u8>, bool) {
 
 pub struct PathHash {
     pub path_bytes: PathBytes,
-    pub is_tree: i8,
+    pub is_tree: bool,
     pub hash: PathHashBytes,
 }
 
@@ -151,6 +157,14 @@ impl PathHash {
     pub fn shard_number(&self, shard_count: usize) -> usize {
         PathWithHash::shard_number_by_hash(&self.hash, shard_count)
     }
+
+    pub fn sql_is_tree(&self) -> &'static i8 {
+        if self.is_tree {
+            &1
+        } else {
+            &0
+        }
+    }
 }
 
 #[derive(Abomonation, Clone)]
diff --git a/eden/mononoke/newfilenodes/src/test/test_reader.rs b/eden/mononoke/newfilenodes/src/test/test_reader.rs
index 35f0d5843c..1437558317 100644
--- a/eden/mononoke/newfilenodes/src/test/test_reader.rs
+++ b/eden/mononoke/newfilenodes/src/test/test_reader.rs
@@ -306,6 +306,19 @@ fn root_merge_filenode() -> PreparedFilenode {
     }
 }
 
+fn dir_a_first_filenode() -> PreparedFilenode {
+    PreparedFilenode {
+        path: RepoPath::dir("a").unwrap(),
+        info: FilenodeInfo {
+            filenode: ONES_FNID,
+            p1: None,
+            p2: None,
+            copyfrom: None,
+            linknode: ONES_CSID,
+        },
+    }
+}
+
 fn file_a_first_filenode() -> PreparedFilenode {
     PreparedFilenode {
         path: RepoPath::file("a").unwrap(),
@@ -792,6 +805,56 @@ macro_rules! filenodes_tests {
                .await?;
                Ok(())
            }
+
+            #[fbinit::test]
+            async fn test_mixed_path_insert_and_get(fb: FacebookInit) -> Result<(), Error> {
+                let ctx = CoreContext::test_mock(fb);
+                let (reader, writer) = build_reader_writer($create_db()?);
+                let reader = $enable_caching(reader);
+
+                do_add_filenode(&ctx, &writer, file_a_first_filenode(), REPO_ZERO).await?;
+                do_add_filenode(&ctx, &writer, dir_a_first_filenode(), REPO_ZERO).await?;
+
+                assert_filenode(
+                    &ctx,
+                    &reader,
+                    &RepoPath::file("a").unwrap(),
+                    ONES_FNID,
+                    REPO_ZERO,
+                    file_a_first_filenode().info,
+                )
+                .await?;
+
+                assert_filenode(
+                    &ctx,
+                    &reader,
+                    &RepoPath::dir("a").unwrap(),
+                    ONES_FNID,
+                    REPO_ZERO,
+                    dir_a_first_filenode().info,
+                )
+                .await?;
+
+                assert_all_filenodes(
+                    &ctx,
+                    &reader,
+                    &RepoPath::file("a").unwrap(),
+                    REPO_ZERO,
+                    &vec![file_a_first_filenode().info],
+                )
+                .await?;
+
+                assert_all_filenodes(
+                    &ctx,
+                    &reader,
+                    &RepoPath::dir("a").unwrap(),
+                    REPO_ZERO,
+                    &vec![dir_a_first_filenode().info],
+                )
+                .await?;
+
+                Ok(())
+            }
         }
     };
 }
diff --git a/eden/mononoke/newfilenodes/src/writer.rs b/eden/mononoke/newfilenodes/src/writer.rs
index a49efc5fc4..51a34631d1 100644
--- a/eden/mononoke/newfilenodes/src/writer.rs
+++ b/eden/mononoke/newfilenodes/src/writer.rs
@@ -144,11 +144,11 @@ async fn insert_filenodes(
     let mut filenode_rows = Vec::new();
     let mut copydata_rows = Vec::new();
 
-    for (pwh, filenode) in filenodes {
+    for (ph, filenode) in filenodes {
         filenode_rows.push((
             &repo_id,
-            &pwh.hash,
-            &pwh.is_tree,
+            &ph.hash,
+            ph.sql_is_tree(),
             &filenode.info.filenode,
             &filenode.info.linknode,
             &filenode.info.p1,
@@ -163,15 +163,15 @@
         if let Some(ref copyinfo) = filenode.info.copyfrom {
             let (ref frompath, ref fromnode) = copyinfo;
             let from_pwh = PathHash::from_repo_path(frompath);
-            if from_pwh.is_tree != pwh.is_tree {
+            if from_pwh.is_tree != ph.is_tree {
                 let e = ErrorKind::InvalidCopy(filenode.path.clone(), frompath.clone());
                 return Err(e.into());
             }
             copydata_rows.push((
                 &repo_id,
-                &pwh.hash,
+                &ph.hash,
                 &filenode.info.filenode,
-                &pwh.is_tree,
+                ph.sql_is_tree(),
                 from_pwh.hash,
                 fromnode,
             ));