Mirror of https://github.com/facebook/sapling.git, synced 2024-10-06 14:58:03 +03:00
Prepare for rustfmt 2.0
Summary: Generated by formatting with rustfmt 2.0.0-rc.2 and then a second time with fbsource's current rustfmt (1.4.14). This results in formatting for which rustfmt 1.4 is idempotent but which is closer to the style of rustfmt 2.0, reducing the amount of code that will need to change atomically in that upgrade.

Why now? The 1.x branch is no longer being developed, and fixes like https://github.com/rust-lang/rustfmt/issues/4159 (which we need in fbcode) only land on the 2.0 branch.

Reviewed By: zertosh
Differential Revision: D23568779
fbshipit-source-id: 477200f35b280a4f6471d8e574e37e5f57917baf
Parent: 8a26c3c960
Commit: e62b176170
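The hunks below are almost entirely mechanical, and a few rewrite patterns account for most of them: empty match arms `_ => (),` become `_ => {}`, trailing `return` expressions inside block arms gain a semicolon, redundant semicolons after item declarations inside function bodies are dropped, and overlong argument lists are split one per line. As a condensed illustration, here is a minimal sketch with hypothetical names (not an excerpt from any file in the diff); both versions compile to the same code, only the formatting differs:

    // Before: the style rustfmt 1.4 produced (the "-" lines in the hunks below).
    fn before(n: Option<u8>) -> Option<u8> {
        // Nested item with a redundant trailing semicolon.
        enum Parity {
            Even,
            Odd,
        };
        match n {
            Some(v) => {
                // `return` expression left without a semicolon.
                return Some(v + 1)
            }
            // Empty arm spelled as a unit value.
            None => (),
        }
        None
    }

    // After: the style closer to rustfmt 2.0 (the "+" lines in the hunks below).
    fn after(n: Option<u8>) -> Option<u8> {
        // Redundant semicolon removed.
        enum Parity {
            Even,
            Odd,
        }
        match n {
            Some(v) => {
                // `return` statement terminated with a semicolon.
                return Some(v + 1);
            }
            // Empty arm spelled as an empty block, with no trailing comma.
            None => {}
        }
        None
    }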
@@ -253,7 +253,7 @@ fn map_to_python_err(py: Python, err: ParseError) -> PyErr {
 return PyErr::new::<exceptions::OptionAmbiguous, _>(
 py,
 (msg, option_name, possibilities),
-)
+);
 }
 ParseError::AmbiguousCommand {
 command_name,

@@ -262,10 +262,10 @@ fn map_to_python_err(py: Python, err: ParseError) -> PyErr {
 return PyErr::new::<exceptions::AmbiguousCommand, _>(
 py,
 (msg, command_name, possibilities),
-)
+);
 }
 ParseError::CircularReference { command_name } => {
-return PyErr::new::<exceptions::CircularReference, _>(py, (msg, command_name))
+return PyErr::new::<exceptions::CircularReference, _>(py, (msg, command_name));
 }
 ParseError::MalformedAlias { name, value } => {
 return PyErr::new::<exceptions::MalformedAlias, _>(py, (msg, name, value));

@@ -116,7 +116,7 @@ fn fix_newline(py: Python, hunk: &PyList, a: &PyList, b: &PyList) -> PyResult<us
 Some(b'-') => {
 a.set_item(py, a.len(py) - 1, to_object(py, &last_line[..]));
 }
-_ => (),
+_ => {}
 }
 hunk.set_item(py, hunk_len - 1, to_object(py, &last_line));
 }

@@ -101,10 +101,10 @@ fn register_error_handlers() {
 match e {
 dag::Error::Backend(ref backend_error) => match backend_error.as_ref() {
 dag::errors::BackendError::Io(e) => {
-return Some(cpython_ext::error::translate_io_error(py, &e))
+return Some(cpython_ext::error::translate_io_error(py, &e));
 }
 dag::errors::BackendError::Other(e) => return specific_error_handler(py, e, m),
-_ => (),
+_ => {}
 },
 dag::Error::VertexNotFound(_) | dag::Error::IdNotFound(_) => {
 return Some(PyErr::new::<CommitLookupError, _>(

@@ -112,7 +112,7 @@ fn register_error_handlers() {
 cpython_ext::Str::from(e.to_string()),
 ));
 }
-_ => (),
+_ => {}
 }
 }

@@ -562,7 +562,7 @@ fn file_metadata_to_py_tuple(
 let flag = {
 let mut s = String::new();
 match file_metadata.file_type {
-FileType::Regular => (),
+FileType::Regular => {}
 FileType::Executable => s.push('x'),
 FileType::Symlink => s.push('l'),
 };

@@ -304,7 +304,7 @@ impl<T: RemoteDataStore + ?Sized> RemoteDataStorePyExt for T {
 results.append(py, key_tuple.into_object());
 }
 StoreKey::Content(_, _) => {
-return Err(format_err!("Unsupported key: {:?}", key)).map_pyerr(py)
+return Err(format_err!("Unsupported key: {:?}", key)).map_pyerr(py);
 }
 }
 }

@@ -69,7 +69,7 @@ impl<T: HgIdHistoryStore + ?Sized> HgIdHistoryStorePyExt for T {
 results.append(py, key_tuple.into_object());
 }
 StoreKey::Content(_, _) => {
-return Err(format_err!("Unsupported key: {:?}", key)).map_pyerr(py)
+return Err(format_err!("Unsupported key: {:?}", key)).map_pyerr(py);
 }
 }
 }

@@ -156,7 +156,7 @@ fn update(

 let meta = match state.store.get_meta(StoreKey::hgid(key))? {
 StoreResult::NotFound(key) => {
-return Err(format_err!("Can't find metadata for key: {:?}", key))
+return Err(format_err!("Can't find metadata for key: {:?}", key));
 }
 StoreResult::Found(meta) => meta,
 };

@@ -55,7 +55,7 @@ fn main() {
 full_args = debugpython_args;
 }
 }
-_ => (),
+_ => {}
 }

 #[cfg(feature = "with_chg")]

@@ -46,9 +46,7 @@ pub static STREAM_BUFFER_SIZE: usize = 128;
 ///
 /// If the async computation panics then the panic gets propagated up. At that point the mutex
 /// holding the runtime gets poisoned.
-pub fn block_on_future<F: Future>(f: F) -> F::Output
-where
-{
+pub fn block_on_future<F: Future>(f: F) -> F::Output {
 // Should be replaced with `runtime.handle().block_on` after updating tokio, see T65261126.
 // T73962890 tracks updating this code internally. Externally the issue is tracked under at
 // https://github.com/tokio-rs/tokio/issues/2390

@@ -160,7 +160,7 @@ impl BlackboxOptions {
 push(INDEX_EVENT_TAG_NAME, name.as_bytes());
 }
 }
-_ => (),
+_ => {}
 }
 }
 result

@@ -685,7 +685,15 @@ impl fmt::Display for Event {
 write!(
 f,
 "[network] {:?} finished in {} calls, duration {} ms, latency {} ms, read {} bytes, write {} bytes, session id {}, url {}{}",
-op, calls, duration_ms, latency_ms, read_bytes, write_bytes, session_id, url, result,
+op,
+calls,
+duration_ms,
+latency_ms,
+read_bytes,
+write_bytes,
+session_id,
+url,
+result,
 )?;
 }
 Start {

@@ -29,7 +29,9 @@ pub struct UnknownCommand(pub String);
 pub struct FallbackToPython;

 #[derive(Debug, Error)]
-#[error("'{0}' is not inside a repository, but this command requires a repository!\n(use 'cd' to go to a directory inside a repository and try again)")]
+#[error(
+"'{0}' is not inside a repository, but this command requires a repository!\n(use 'cd' to go to a directory inside a repository and try again)"
+)]
 pub struct RepoRequired(pub String);

 #[derive(Debug, Error)]

@@ -293,7 +293,7 @@ impl ConfigSet {
 };
 return handle_value(this, pair, section, name, location);
 }
-_ => (),
+_ => {}
 }
 }
 unreachable!();

@@ -348,7 +348,7 @@ impl ConfigSet {
 match pair.as_rule() {
 Rule::include => handle_include(this, pair, errors),
 Rule::unset => handle_unset(this, pair, section),
-_ => (),
+_ => {}
 }
 }
 };

@@ -366,7 +366,7 @@ impl ConfigSet {
 Rule::config_item => handle_config_item(self, pair, section.clone()),
 Rule::section => handle_section(pair, &mut section),
 Rule::directive => handle_directive(self, pair, &section, errors),
-Rule::blank_line | Rule::comment_line | Rule::new_line | Rule::EOI => (),
+Rule::blank_line | Rule::comment_line | Rule::new_line | Rule::EOI => {}

 Rule::comment_start
 | Rule::compound

@@ -524,7 +524,7 @@ impl ConfigSet {
 (None, Some(value)) => {
 result.missing.push(((sname.clone(), kname.clone()), value));
 }
-(None, None) => (),
+(None, None) => {}
 };

 if !removed && super_value.is_some() && super_value != last_value {

@@ -364,7 +364,7 @@ impl ConfigSetHgExt for ConfigSet {
 let res = generate_dynamicconfig(repo_path, repo_name, None, user_name);
 if let Err(e) = res {
 match e.downcast_ref::<IOError>() {
-Some(io_error) if io_error.kind() == ErrorKind::PermissionDenied => (),
+Some(io_error) if io_error.kind() == ErrorKind::PermissionDenied => {}
 _ => return Err(e),
 };
 }

@@ -716,7 +716,7 @@ fn parse_list_internal(value: &str) -> Vec<String> {
 enum State {
 Plain,
 Quote,
-};
+}

 let mut offset = 0;
 let mut parts: Vec<String> = vec![String::new()];

@@ -95,7 +95,7 @@ pub(crate) fn beautify(
 }
 }
 Ok(best_branch)
-};
+}

 // Sort heads recursively.
 fn sort(

@@ -135,7 +135,7 @@ pub(crate) fn beautify(
 }

 Ok(())
-};
+}

 let main_branch = main_branch.unwrap_or_else(NameSet::empty);
 let mut heads: Vec<_> = this

@@ -220,7 +220,7 @@ impl IdMap {
 let id = Id(entry.read_u64::<BigEndian>().unwrap());
 return Ok(Some(id));
 }
-None => (),
+None => {}
 Some(Err(err)) => return Err(err.into()),
 }
 }

@@ -98,7 +98,7 @@ impl Iterator for Iter {
 match self.rhs.contains(&name) {
 Err(err) => break Some(Err(err)),
 Ok(true) => continue,
-_ => (),
+_ => {}
 }
 }
 break result;

@@ -229,7 +229,7 @@ impl fmt::Debug for Hints {
 (Some(min), Some(max)) => write!(f, ", {}..={}", min.0, max.0)?,
 (Some(min), None) => write!(f, ", {}..", min.0)?,
 (None, Some(max)) => write!(f, ", ..={}", max.0)?,
-(None, None) => (),
+(None, None) => {}
 }
 write!(f, ")")?;
 Ok(())

@@ -144,7 +144,7 @@ impl fmt::Debug for IdLazySet {
 match (remaining, inner.state) {
 (0, State::Incomplete) => f.write_str(" + ? more")?,
 (n, State::Incomplete) => write!(f, "+ {} + ? more", n)?,
-(0, _) => (),
+(0, _) => {}
 (n, _) => write!(f, " + {} more", n)?,
 }
 f.write_str(">")?;

@@ -94,7 +94,7 @@ impl fmt::Debug for IdStaticSet {
 }))
 .finish()?;
 match spans.len().max(limit) - limit {
-0 => (),
+0 => {}
 1 => write!(f, " + 1 span")?,
 n => write!(f, " + {} spans", n)?,
 }

@@ -78,7 +78,7 @@ impl IntersectionSet {
 (Some(id1), Some(id2), true) => {
 hints.set_min_id(id1.max(id2));
 }
-(None, Some(_), false) | (None, None, _) => (),
+(None, Some(_), false) | (None, None, _) => {}
 }
 match (lhs.hints().max_id(), rhs.hints().max_id(), compatible) {
 (Some(id), None, _) | (Some(id), Some(_), false) | (None, Some(id), true) => {

@@ -87,7 +87,7 @@ impl IntersectionSet {
 (Some(id1), Some(id2), true) => {
 hints.set_max_id(id1.min(id2));
 }
-(None, Some(_), false) | (None, None, _) => (),
+(None, Some(_), false) | (None, None, _) => {}
 }
 Self { lhs, rhs, hints }
 }

@@ -210,7 +210,7 @@ impl Iterator for Iter {
 }
 continue;
 }
-Ok(true) => (),
+Ok(true) => {}
 }
 }
 break result;

@@ -105,7 +105,7 @@ impl fmt::Debug for LazySet {
 match (remaining, inner.state) {
 (0, State::Incomplete) => f.write_str(" + ? more")?,
 (n, State::Incomplete) => write!(f, "+ {} + ? more", n)?,
-(0, _) => (),
+(0, _) => {}
 (n, _) => write!(f, " + {} more", n)?,
 }
 f.write_str(">")?;

@@ -100,7 +100,7 @@ impl Span {
 (Excluded(_), _) | (Unbounded, _) | (_, Unbounded) => {
 panic!("unsupported bound type")
 }
-_ => (),
+_ => {}
 }
 }
 match (bounds.start_bound(), bounds.end_bound()) {

@@ -111,7 +111,7 @@ pub fn parse(text: impl AsRef<str>) -> BTreeMap<String, BTreeSet<String>> {
 return;
 }
 match (ch, direction) {
-(' ', _) => (),
+(' ', _) => {}
 ('|', BottomTop) => {
 to_visit.push((y + 1, x - 1, "/"));
 to_visit.push((y + 1, x, "|/\\t"));

@@ -174,7 +174,7 @@ pub fn parse(text: impl AsRef<str>) -> BTreeMap<String, BTreeSet<String>> {
 match (ch, direction) {
 ('-', BottomTop) => panic!("'-' is incompatible with BottomTop direction"),
 ('|', LeftRight) => panic!("'|' is incompatible with LeftRight direction"),
-_ => (),
+_ => {}
 }
 }
 }

@@ -1000,16 +1000,16 @@ I ignored.txt
 });

 let mardui_color_stdout = concat!(
-"\u{001B}[34m\u{001B}[1mM modified.txt\u{001B}[0m\n",
-"\u{001B}[32m\u{001B}[1mA added.txt\u{001B}[0m\n",
-"\u{001B}[32m\u{001B}[1mA added_even_though_normally_ignored.txt\u{001B}[0m\n",
-"\u{001B}[32m\u{001B}[1mA added_other_parent.txt\u{001B}[0m\n",
-"\u{001B}[31m\u{001B}[1mR modified_and_marked_for_removal.txt\u{001B}[0m\n",
-"\u{001B}[31m\u{001B}[1mR removed.txt\u{001B}[0m\n",
-"\u{001B}[36m\u{001B}[1m\u{001b}[4m! removed_but_not_marked_for_removal.txt\u{001B}[0m\n",
-"\u{001B}[35m\u{001B}[1m\u{001b}[4m? unknown.txt\u{001B}[0m\n",
-"\u{001B}[30;1m\u{001B}[1mI ignored.txt\u{001B}[0m\n",
-);
+"\u{001B}[34m\u{001B}[1mM modified.txt\u{001B}[0m\n",
+"\u{001B}[32m\u{001B}[1mA added.txt\u{001B}[0m\n",
+"\u{001B}[32m\u{001B}[1mA added_even_though_normally_ignored.txt\u{001B}[0m\n",
+"\u{001B}[32m\u{001B}[1mA added_other_parent.txt\u{001B}[0m\n",
+"\u{001B}[31m\u{001B}[1mR modified_and_marked_for_removal.txt\u{001B}[0m\n",
+"\u{001B}[31m\u{001B}[1mR removed.txt\u{001B}[0m\n",
+"\u{001B}[36m\u{001B}[1m\u{001b}[4m! removed_but_not_marked_for_removal.txt\u{001B}[0m\n",
+"\u{001B}[35m\u{001B}[1m\u{001b}[4m? unknown.txt\u{001B}[0m\n",
+"\u{001B}[30;1m\u{001B}[1mI ignored.txt\u{001B}[0m\n",
+);
 test_status(StatusTestCase {
 args: vec!["-mardui".to_owned()],
 entries: entries.clone(),

@@ -152,7 +152,7 @@ fn current_dir(io: &mut clidispatch::io::IO) -> io::Result<PathBuf> {
 }
 }
 }
-_ => (),
+_ => {}
 }
 }
 result

@@ -80,7 +80,7 @@ impl GitSegmentedCommits {
 bookmarks.push(format!("{} {}\n", vertex.to_hex(), name));
 }
 }
-_ => (),
+_ => {}
 }
 }

@@ -499,7 +499,7 @@ mod tests {
 struct Body<'a> {
 foo: &'a str,
 hello: &'a str,
-};
+}

 let body = Body {
 foo: "bar",

@@ -650,7 +650,7 @@ impl LeafOffset {
 match key_offset.to_typed(&*index) {
 Ok(TypedOffset::Key(x)) => x.mark_unused(index),
 Ok(TypedOffset::ExtKey(x)) => x.mark_unused(index),
-_ => (),
+_ => {}
 };
 index.dirty_leafs[self.dirty_index()].mark_unused()
 }

@@ -812,7 +812,7 @@ impl<'a> Iterator for RangeIter<'a> {
 let result = Self::step(self.index, &mut self.front_stack, Back, exclusive);
 match result {
 Some(Err(_)) | None => self.completed = true,
-_ => (),
+_ => {}
 }
 result
 }

@@ -829,7 +829,7 @@ impl<'a> DoubleEndedIterator for RangeIter<'a> {
 let result = Self::step(self.index, &mut self.back_stack, Front, exclusive);
 match result {
 Some(Err(_)) | None => self.completed = true,
-_ => (),
+_ => {}
 }
 result
 }

@@ -2504,9 +2504,9 @@ impl Index {
 .context(&path, "cannot seek to end")?;
 if len < old_len {
 let message = format!(
-"on-disk index is unexpectedly smaller ({} bytes) than its previous version ({} bytes)",
-len, old_len
-);
+"on-disk index is unexpectedly smaller ({} bytes) than its previous version ({} bytes)",
+len, old_len
+);
 // This is not a "corruption" - something has truncated the
 // file, potentially recreating it. We haven't checked the
 // new content, so it's not considered as "data corruption".

@@ -3188,7 +3188,7 @@ impl Index {
 radix.offsets[b2v as usize] = new_leaf_offset.into();
 completed = true;
 }
-_ => (),
+_ => {}
 }

 // Create the Radix entry, and connect it to the parent entry.

@@ -411,10 +411,13 @@ impl Log {
 fn check_append_only(this: &Log, new_meta: &LogMetadata) -> crate::Result<()> {
 let old_meta = &this.meta;
 if old_meta.primary_len > new_meta.primary_len {
-Err(crate::Error::path(this.dir.as_opt_path().unwrap(), format!(
-"on-disk log is unexpectedly smaller ({} bytes) than its previous version ({} bytes)",
-new_meta.primary_len, old_meta.primary_len
-)))
+Err(crate::Error::path(
+this.dir.as_opt_path().unwrap(),
+format!(
+"on-disk log is unexpectedly smaller ({} bytes) than its previous version ({} bytes)",
+new_meta.primary_len, old_meta.primary_len
+),
+))
 } else {
 Ok(())
 }

@@ -483,7 +486,7 @@ impl Log {
 match filter(&context, content)
 .map_err(|err| crate::Error::wrap(err, "failed to run filter function"))?
 {
-FlushFilterOutput::Drop => (),
+FlushFilterOutput::Drop => {}
 FlushFilterOutput::Keep => log.append(content)?,
 FlushFilterOutput::Replace(content) => log.append(content)?,
 }

@@ -1309,7 +1312,7 @@ impl Log {
 let msg = format!("read offset {} exceeds buffer size {}", offset, buf.len());
 return Err(data_error(msg));
 }
-_ => (),
+_ => {}
 }

 let (entry_flags, vlq_len): (u32, _) = buf.read_vlq_at(offset as usize).map_err(|e| {

@@ -471,7 +471,7 @@ impl IndexOutput {
 IndexOutput::Remove(_) | IndexOutput::RemovePrefix(_) => {
 return Err(crate::Error::programming(
 "into_cow does not support Remove or RemovePrefix",
-))
+));
 }
 })
 }

@@ -1021,10 +1021,10 @@ fn test_repair_and_delete_content() {
 // Check no SIGBUS
 let log = long_lived_log.borrow();
 match log.lookup(0, "z") {
-Err(_) => (), // okay - not SIGBUS
+Err(_) => {} // okay - not SIGBUS
 Ok(iter) => match iter.into_vec() {
-Err(_) => (), // okay - not SIGBUS
-Ok(_) => (), // okay - not SIGBUS
+Err(_) => {} // okay - not SIGBUS
+Ok(_) => {} // okay - not SIGBUS
 },
 }
 // Check 'sync' on a long-lived log will load the right data and

@@ -85,7 +85,7 @@ impl OpenOptions {
 let mut multimeta = MultiMeta::default();
 if let Err(e) = multimeta.read_file(&meta_path) {
 match e.kind() {
-io::ErrorKind::NotFound => (), // not fatal.
+io::ErrorKind::NotFound => {} // not fatal.
 _ => return Err(e).context(&meta_path, "when opening MultiLog"),
 }
 };

@@ -443,7 +443,7 @@ impl RotateLog {
 match filter(&context, content).map_err(|err| {
 crate::Error::wrap(err, "failed to run filter function")
 })? {
-FlushFilterOutput::Drop => (),
+FlushFilterOutput::Drop => {}
 FlushFilterOutput::Keep => log.append(content)?,
 FlushFilterOutput::Replace(content) => log.append(content)?,
 }

@@ -327,7 +327,7 @@ pub(crate) fn mkdir_p(dir: impl AsRef<Path>) -> crate::Result<()> {
 }
 }
 }
-_ => (),
+_ => {}
 }
 Err(err).context(dir, "cannot mkdir")
 })

@@ -84,7 +84,7 @@ mod unix_tests {
 Ok(())
 };
 match run() {
-Ok(_) => (),
+Ok(_) => {}
 Err(err) => {
 if verbose {
 eprintln!(

@@ -59,13 +59,13 @@ impl<'a> Iterator for BfsIter<'a> {
 None => return None,
 Some((path, link)) => match link {
 Link::Leaf(file_metadata) => {
-return Some(Ok((path, FsNodeMetadata::File(*file_metadata))))
+return Some(Ok((path, FsNodeMetadata::File(*file_metadata))));
 }
 Link::Ephemeral(children) => (path, children, None),
 Link::Durable(entry) => loop {
 match entry.get_links() {
 None => match self.prefetch((&path, &entry)) {
-Ok(_) => (),
+Ok(_) => {}
 Err(e) => return Some(Err(e)),
 },
 Some(children_result) => match children_result {

@@ -162,12 +162,12 @@ impl<'a> DfsCursor<'a> {
 State::Push => {
 self.state = State::Pop;
 }
-State::Pop => (),
+State::Pop => {}
 State::Next => {
 // We don't have any scenario this would be reached.
 panic!("Calling skip_subtree on cursor is not implemented for State::Next");
 }
-State::Done => (),
+State::Done => {}
 }
 }
 }

@@ -387,7 +387,7 @@ impl TreeManifest {
 path: RepoPathBuf,
 converted_nodes: Vec<(RepoPathBuf, HgId, Bytes, HgId, HgId)>,
 parent_trees: Vec<DfsCursor<'a>>,
-};
+}
 impl<'a> Executor<'a> {
 fn new(
 store: &'a InnerStore,

@@ -403,7 +403,7 @@ impl TreeManifest {
 // be pointing to the underlying link.
 for cursor in executor.parent_trees.iter_mut() {
 match cursor.step() {
-Step::Success | Step::End => (),
+Step::Success | Step::End => {}
 Step::Err(err) => return Err(err),
 }
 }

@@ -425,7 +425,7 @@ impl TreeManifest {
 for id in active_parents {
 let cursor = &mut self.parent_trees[*id];
 match cursor.step() {
-Step::Success | Step::End => (),
+Step::Success | Step::End => {}
 Step::Err(err) => return Err(err),
 }
 }

@@ -441,13 +441,13 @@ impl TreeManifest {
 while !cursor.finished() && cursor.path() < self.path.as_repo_path() {
 cursor.skip_subtree();
 match cursor.step() {
-Step::Success | Step::End => (),
+Step::Success | Step::End => {}
 Step::Err(err) => return Err(err),
 }
 }
 if !cursor.finished() && cursor.path() == self.path.as_repo_path() {
 match cursor.link() {
-Leaf(_) => (), // files and directories don't share history
+Leaf(_) => {} // files and directories don't share history
 Durable(_) => result.push(*id),
 Ephemeral(_) => {
 panic!("Found ephemeral parent when finalizing manifest.")

@@ -1179,7 +1179,7 @@ mod tests {
 cursor.skip_subtree();
 match cursor.step() {
 Step::Success => panic!("should have reached the end of the tree"),
-Step::End => (), // success
+Step::End => {} // success
 Step::Err(error) => panic!(error),
 }
 }

@@ -1188,7 +1188,7 @@ mod tests {
 fn test_cursor_skip() {
 fn step<'a>(cursor: &mut DfsCursor<'a>) {
 match cursor.step() {
-Step::Success => (),
+Step::Success => {}
 Step::End => panic!("reached the end too soon"),
 Step::Err(error) => panic!(error),
 }

@@ -1216,7 +1216,7 @@ mod tests {
 cursor.skip_subtree();
 match cursor.step() {
 Step::Success => panic!("should have reached the end of the tree"),
-Step::End => (), // success
+Step::End => {} // success
 Step::Err(error) => panic!(error),
 }
 }

@@ -438,7 +438,7 @@ pub(crate) fn load_root(blobs: &Zstore, id: Id20) -> Result<Root> {
 return Err(crate::Error(format!(
 "Root ID {} is not found",
 id.to_hex()
-)))
+)));
 }
 };

@@ -143,7 +143,7 @@ impl GitignoreMatcher {
 match matched {
 Match::Ignore(glob) => explain.add_glob(glob),
 Match::Whitelist(glob) => explain.add_glob(glob),
-_ => (),
+_ => {}
 }
 }

@@ -286,14 +286,14 @@ fn next_path_separator(pat: &[u8], start: usize) -> Option<usize> {
 } else if in_box_brackets {
 match ch {
 b']' => in_box_brackets = false,
-_ => (),
+_ => {}
 }
 } else {
 match ch {
 b'\\' => escaped = true,
 b'[' => in_box_brackets = true,
 b'/' => return Some(i + start),
-_ => (),
+_ => {}
 }
 }
 }

@@ -59,7 +59,7 @@ pub fn expand_curly_brackets(pat: &str) -> Vec<String> {
 } else if in_box_brackets {
 match ch {
 ']' => in_box_brackets = false,
-_ => (),
+_ => {}
 }
 } else {
 match ch {

@@ -98,7 +98,7 @@ pub fn expand_curly_brackets(pat: &str) -> Vec<String> {
 dag.push(StrNode::default());
 need_write = false;
 }
-_ => (),
+_ => {}
 }
 }

@@ -170,7 +170,7 @@ pub fn plain_to_glob(plain: &str) -> String {
 for ch in plain.chars() {
 match ch {
 '\\' | '*' | '{' | '}' | '[' | ']' => result.push('\\'),
-_ => (),
+_ => {}
 }
 result.push(ch);
 }
@@ -351,75 +351,75 @@ impl<'a> ContentStoreBuilder<'a> {
 (None, None)
 };

-let remote_store: Option<Arc<ReportingRemoteDataStore>> =
-if let Some(remotestore) = self.remotestore {
-let (cache, shared_store) = if let Some(memcachestore) = self.memcachestore {
-// Combine the memcache store with the other stores. The intent is that all
-// remote requests will first go to the memcache store, and only reach the
-// slower remote store after that.
-//
-// If data isn't found in the memcache store, once fetched from the remote
-// store it will be written to the local cache, and will populate the memcache
-// store, so other clients and future requests won't need to go to a network
-// store.
-let memcachedatastore = memcachestore
-.clone()
-.datastore(shared_mutabledatastore.clone());
-
-let mut multiplexstore: MultiplexDeltaStore<Arc<dyn HgIdMutableDeltaStore>> =
-MultiplexDeltaStore::new();
-multiplexstore.add_store(memcachestore);
-multiplexstore.add_store(shared_mutabledatastore.clone());
-
-(
-Some(memcachedatastore),
-Arc::new(multiplexstore) as Arc<dyn HgIdMutableDeltaStore>,
-)
-} else {
-(None, shared_mutabledatastore.clone())
-};
-
-let mut remotestores = UnionHgIdDataStore::new();
-
-// First, the fast memcache store
-if let Some(cache) = cache {
-remotestores.add(cache.clone());
-};
-
-// Second, the slower remotestore. For LFS blobs, the LFS pointers will be fetched
-// at this step and be written to the LFS store.
-let filenode_remotestore = remotestore.datastore(shared_store.clone());
-remotestores.add(filenode_remotestore.clone());
-
-// Third, the LFS remote store. The previously fetched LFS pointers will be used to
-// fetch the actual blobs in this store.
-if enable_lfs {
-let lfs_remote_store = Arc::new(LfsRemote::new(
-shared_lfs_store,
-local_lfs_store,
-self.config,
-)?);
-remotestores.add(lfs_remote_store.datastore(shared_store.clone()));
-
-// Fallback store if the LFS one is dead.
-let lfs_fallback = LfsFallbackRemoteStore::new(filenode_remotestore);
-remotestores.add(lfs_fallback);
-}
-
-let remotestores: Box<dyn RemoteDataStore> = Box::new(remotestores);
-let logging_regex = self
-.config
-.get_opt::<String>("remotefilelog", "undesiredfileregex")?
-.map(|s| Regex::new(&s))
-.transpose()?;
-let remotestores =
-Arc::new(ReportingRemoteDataStore::new(remotestores, logging_regex));
-datastore.add(remotestores.clone());
-Some(remotestores)
-} else {
-None
-};
+let remote_store: Option<Arc<ReportingRemoteDataStore>> = if let Some(remotestore) =
+self.remotestore
+{
+let (cache, shared_store) = if let Some(memcachestore) = self.memcachestore {
+// Combine the memcache store with the other stores. The intent is that all
+// remote requests will first go to the memcache store, and only reach the
+// slower remote store after that.
+//
+// If data isn't found in the memcache store, once fetched from the remote
+// store it will be written to the local cache, and will populate the memcache
+// store, so other clients and future requests won't need to go to a network
+// store.
+let memcachedatastore = memcachestore
+.clone()
+.datastore(shared_mutabledatastore.clone());
+
+let mut multiplexstore: MultiplexDeltaStore<Arc<dyn HgIdMutableDeltaStore>> =
+MultiplexDeltaStore::new();
+multiplexstore.add_store(memcachestore);
+multiplexstore.add_store(shared_mutabledatastore.clone());
+
+(
+Some(memcachedatastore),
+Arc::new(multiplexstore) as Arc<dyn HgIdMutableDeltaStore>,
+)
+} else {
+(None, shared_mutabledatastore.clone())
+};
+
+let mut remotestores = UnionHgIdDataStore::new();
+
+// First, the fast memcache store
+if let Some(cache) = cache {
+remotestores.add(cache.clone());
+};
+
+// Second, the slower remotestore. For LFS blobs, the LFS pointers will be fetched
+// at this step and be written to the LFS store.
+let filenode_remotestore = remotestore.datastore(shared_store.clone());
+remotestores.add(filenode_remotestore.clone());
+
+// Third, the LFS remote store. The previously fetched LFS pointers will be used to
+// fetch the actual blobs in this store.
+if enable_lfs {
+let lfs_remote_store = Arc::new(LfsRemote::new(
+shared_lfs_store,
+local_lfs_store,
+self.config,
+)?);
+remotestores.add(lfs_remote_store.datastore(shared_store.clone()));
+
+// Fallback store if the LFS one is dead.
+let lfs_fallback = LfsFallbackRemoteStore::new(filenode_remotestore);
+remotestores.add(lfs_fallback);
+}
+
+let remotestores: Box<dyn RemoteDataStore> = Box::new(remotestores);
+let logging_regex = self
+.config
+.get_opt::<String>("remotefilelog", "undesiredfileregex")?
+.map(|s| Regex::new(&s))
+.transpose()?;
+let remotestores = Arc::new(ReportingRemoteDataStore::new(remotestores, logging_regex));
+datastore.add(remotestores.clone());
+Some(remotestores)
+} else {
+None
+};

 Ok(ContentStore {
 datastore,
 local_mutabledatastore,
@@ -493,7 +493,7 @@ impl LfsBlobsStore {
 remove_file(path).with_context(|| format!("Cannot remove LFS blob {}", hash))?;
 }

-_ => (),
+_ => {}
 }

 Ok(())

@@ -2107,7 +2107,10 @@ mod tests {
 let objs = [(blob.0, blob.1)].iter().cloned().collect::<HashSet<_>>();
 let resp = remote.batch_fetch(&objs, |_, _| unreachable!());
 let err = resp.err().unwrap();
-assert_eq!(err.to_string(), "Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }");
+assert_eq!(
+err.to_string(),
+"Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }"
+);

 Ok(())
 }

@@ -2192,7 +2195,10 @@ mod tests {
 let objs = [(blob.0, blob.1)].iter().cloned().collect::<HashSet<_>>();
 let resp = remote.batch_fetch(&objs, |_, _| unreachable!());
 let err = resp.err().unwrap();
-assert_eq!(err.to_string(), "Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }");
+assert_eq!(
+err.to_string(),
+"Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }"
+);

 Ok(())
 }

@@ -2220,7 +2226,10 @@ mod tests {
 let objs = [(blob.0, blob.1)].iter().cloned().collect::<HashSet<_>>();
 let resp = remote.batch_fetch(&objs, |_, _| unreachable!());
 let err = resp.err().unwrap();
-assert_eq!(err.to_string(), "Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }");
+assert_eq!(
+err.to_string(),
+"Couldn't fetch oid 0000000000000000000000000000000000000000000000000000000000000000: ObjectError { code: 404, message: \"Object does not exist\" }"
+);

 Ok(())
 }

@@ -67,7 +67,7 @@ fn repack_datapack(data_pack: &DataPack, mut_pack: &mut MutableDataPack) -> Resu
 // If we managed to get a delta, the metadata must be present.
 match data_pack.get_meta(StoreKey::hgid(delta.key.clone()))? {
 StoreResult::Found(meta) => mut_pack.add(&delta, &meta)?,
-_ => (),
+_ => {}
 }
 }
 }

@@ -331,7 +331,7 @@ fn repack_datapack_to_contentstore(
 StoreResult::Found(meta) => {
 store.add_pending(&key, Bytes::from(content), meta, location)?
 }
-_ => (),
+_ => {}
 }
 }
 }

@@ -81,7 +81,7 @@ impl RevlogIndex {
 for rev in (min_rev as usize..self.len()).rev() {
 let state = states[rev - min_rev as usize];
 match state {
-State::Unspecified => (),
+State::Unspecified => {}
 State::PotentialHead | State::NotHead => {
 if state == State::PotentialHead {
 result.push(rev as u32);

@@ -143,7 +143,7 @@ impl RevlogIndex {
 // Do not track "unknown" explicitly. This is future-proof,
 // since tracking "unknown" explicitly is quite expensive
 // with the new "dag" abstraction.
-Phase::Unspecified => (),
+Phase::Unspecified => {}
 }
 for &parent_rev in self.parent_revs(rev as u32)?.as_revs() {
 // Propagate phases from this rev to its parents.

@@ -894,7 +894,7 @@ impl PrefixLookup for RevlogIndex {
 result.push(node.to_vec().into());
 }
 }
-Ok(None) => (),
+Ok(None) => {}
 Err(crate::Error::AmbiguousPrefix) => {
 // Convert AmbiguousPrefix to a non-error with multiple vertex pushed to
 // result. That's what the Python code base expects.

@@ -1239,7 +1239,7 @@ impl DagAlgorithm for RevlogIndex {
 for i in (0..states.len()).rev() {
 let state = states[i];
 match state {
-State::Unspecified => (),
+State::Unspecified => {}
 State::PotentialHead | State::NotHead => {
 let rev = i + min_rev;
 if state == State::PotentialHead {

@@ -1953,7 +1953,7 @@ commit 3"#
 match slice {
 [b1, b2] => result += &format!("{:02x}{:02x} ", b1, b2),
 [b1] => result += &format!("{:02x} ", b1),
-_ => (),
+_ => {}
 }
 }
 result += &" ".repeat(8 - (chunk.len() + 1) / 2);

@@ -816,7 +816,7 @@ impl TracingData {
 }
 }
 None
-};
+}

 // Calculate JSON objects in a streaming way to reduce memory usage.
 let trace_event_iter = self.eventus.iter().map(move |eventus| {

@@ -1144,7 +1144,7 @@ impl TracingData {
 self.matched.insert(enter_eid, eid);
 }
 }
-Action::Event => (),
+Action::Event => {}
 }
 }
 }

@@ -1357,7 +1357,7 @@ impl TracingData {
 }
 }
 ""
-};
+}

 /// Render RawTreeSpan to rows.
 fn render_span(ctx: &mut Context, id: usize, mut indent: usize, first_row_ch: char) {

@@ -222,7 +222,6 @@ impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> {

 #[cfg(test)]
 mod tests {
-
 use crate::vecmap::{Iter, VecMap};
 use quickcheck::quickcheck;
 use std::collections::BTreeMap;

@@ -75,7 +75,7 @@ impl MutationEntry {
 enum EntryFormat {
 FloatDate,
 Latest,
-};
+}
 let format = match r.read_u8()? {
 0 => return Err(anyhow!("invalid mutation entry version: 0")),
 1..=4 => {

@@ -160,7 +160,7 @@ pub fn absolute(path: impl AsRef<Path>) -> io::Result<PathBuf> {
 Component::ParentDir => {
 result.pop();
 }
-Component::CurDir => (),
+Component::CurDir => {}
 }
 }
 Ok(result)

@@ -188,7 +188,7 @@ pub fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> {
 return Ok(());
 }
 }
-_ => (),
+_ => {}
 }
 result.map_err(Into::into)
 }

@@ -608,7 +608,7 @@ where
 }
 }
 // Impossible here.
-(None, None) => (),
+(None, None) => {}
 }

 // When the files have no differences we shouldn't print any further

@@ -635,7 +635,7 @@ where
 state.emit_binary_files_differ(old_file.path.as_ref(), new_file.path.as_ref())
 }
 }
-_ => (),
+_ => {}
 }
 return state.collect();
 }