mirror of
https://github.com/facebook/sapling.git
synced 2024-10-05 14:28:17 +03:00
Fix clippy warnings (2/2)
Summary: This fixes the remaining clippy errors. The only thing I haven't fixed is that clippy crashes when trying to parse `eden/mononoke/blobstore:multiplexedblob_wal`. This doesn't generate warnings and the command still succeeds, but it's annoying. I haven't been able to track down why, so I'm leaving it be. Reviewed By: YousefSalama Differential Revision: D43980977 fbshipit-source-id: 28ddcce5894903d6e0f980cd04b586dc1afc00c6
This commit is contained in:
parent
72eb05a1bd
commit
49b4c42949
@ -148,6 +148,7 @@ impl<'a> RepoBlobstoreCopier<'a> {
|
||||
fn new(source: &'a RepoBlobstore, target: &'a RepoBlobstore) -> Self {
|
||||
let inner_source = source.0.0.as_inner_unredacted();
|
||||
let inner_target = target.0.0.as_inner_unredacted();
|
||||
#[allow(clippy::vtable_address_comparisons)]
|
||||
if Arc::ptr_eq(inner_source.as_inner(), inner_target.as_inner()) {
|
||||
Self::Optimized {
|
||||
source: inner_source,
|
||||
|
@ -836,7 +836,7 @@ mod tests {
|
||||
let transaction = bookmarks.create_transaction(ctx.clone());
|
||||
rt.block_on(transaction.commit()).unwrap();
|
||||
|
||||
let _ = spawn_query("c", ttl, &rt);
|
||||
std::mem::drop(spawn_query("c", ttl, &rt));
|
||||
let requests = assert_no_pending_requests(requests, &rt, 100);
|
||||
|
||||
// successful transaction should redirect further requests to master
|
||||
@ -904,15 +904,15 @@ mod tests {
|
||||
let requests = assert_no_pending_requests(requests, &rt, 100);
|
||||
|
||||
// Spawn two queries, but without the cache being turned on. We expect 2 requests.
|
||||
let _ = spawn_query("a", Some(-1), &rt);
|
||||
std::mem::drop(spawn_query("a", Some(-1), &rt));
|
||||
let (request, requests) = next_request(requests, &rt, 100);
|
||||
assert_eq!(request.prefix, BookmarkPrefix::new("a").unwrap());
|
||||
|
||||
let _ = spawn_query("b", Some(-1), &rt);
|
||||
std::mem::drop(spawn_query("b", Some(-1), &rt));
|
||||
let (request, requests) = next_request(requests, &rt, 100);
|
||||
assert_eq!(request.prefix, BookmarkPrefix::new("b").unwrap());
|
||||
|
||||
let _ = requests;
|
||||
std::mem::drop(requests);
|
||||
}
|
||||
|
||||
fn mock_bookmarks_response(
|
||||
|
@ -893,8 +893,8 @@ impl BookmarksCoordinator {
|
||||
info!(ctx.logger(), "Stopped warm bookmark cache updater");
|
||||
};
|
||||
|
||||
// Detach the handle. This will terminate using the `terminate` receiver.
|
||||
let _ = tokio::task::spawn(fut);
|
||||
// Fire and forget. This will terminate using the `terminate` receiver.
|
||||
std::mem::drop(tokio::task::spawn(fut));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -103,7 +103,7 @@ mod test {
|
||||
let limiter = AsyncLimiter::new(limiter).await;
|
||||
|
||||
for _ in 0..10 {
|
||||
let _ = limiter.access();
|
||||
std::mem::drop(limiter.access());
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
|
@ -1047,9 +1047,7 @@ impl RepoContext {
|
||||
) -> Result<Option<ChangesetContext>, MononokeError> {
|
||||
let mut cs_id = match freshness {
|
||||
BookmarkFreshness::MaybeStale => {
|
||||
self.warm_bookmarks_cache()
|
||||
.get(&self.ctx, &bookmark)
|
||||
.await?
|
||||
self.warm_bookmarks_cache().get(&self.ctx, bookmark).await?
|
||||
}
|
||||
BookmarkFreshness::MostRecent => None,
|
||||
};
|
||||
@ -1060,7 +1058,7 @@ impl RepoContext {
|
||||
cs_id = self
|
||||
.blob_repo()
|
||||
.bookmarks()
|
||||
.get(self.ctx.clone(), &bookmark)
|
||||
.get(self.ctx.clone(), bookmark)
|
||||
.await?
|
||||
}
|
||||
|
||||
|
@ -35,7 +35,7 @@ impl RepoContext {
|
||||
None => self
|
||||
.blob_repo()
|
||||
.bookmarks()
|
||||
.get(self.ctx().clone(), &bookmark)
|
||||
.get(self.ctx().clone(), bookmark)
|
||||
.await
|
||||
.context("Failed to fetch old bookmark target")?
|
||||
.ok_or_else(|| {
|
||||
@ -52,7 +52,7 @@ impl RepoContext {
|
||||
.with_pushvars(pushvars)
|
||||
}
|
||||
if let Some(redirector) = self.push_redirector.as_ref() {
|
||||
let large_bookmark = redirector.small_to_large_bookmark(&bookmark).await?;
|
||||
let large_bookmark = redirector.small_to_large_bookmark(bookmark).await?;
|
||||
if &large_bookmark == bookmark {
|
||||
return Err(MononokeError::InvalidRequest(format!(
|
||||
"Cannot delete shared bookmark '{}' from small repo",
|
||||
@ -73,7 +73,7 @@ impl RepoContext {
|
||||
// Wait for bookmark to catch up on small repo
|
||||
redirector.backsync_latest(ctx).await?;
|
||||
} else {
|
||||
make_delete_op(&bookmark, old_target, pushvars)
|
||||
make_delete_op(bookmark, old_target, pushvars)
|
||||
.run(self.ctx(), self.authorization_context(), self.inner_repo())
|
||||
.await?;
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ impl RepoContext {
|
||||
None => self
|
||||
.blob_repo()
|
||||
.bookmarks()
|
||||
.get(self.ctx().clone(), &bookmark)
|
||||
.get(self.ctx().clone(), bookmark)
|
||||
.await
|
||||
.context("Failed to fetch old bookmark target")?
|
||||
.ok_or_else(|| {
|
||||
@ -82,7 +82,7 @@ impl RepoContext {
|
||||
op
|
||||
}
|
||||
if let Some(redirector) = self.push_redirector.as_ref() {
|
||||
let large_bookmark = redirector.small_to_large_bookmark(&bookmark).await?;
|
||||
let large_bookmark = redirector.small_to_large_bookmark(bookmark).await?;
|
||||
if &large_bookmark == bookmark {
|
||||
return Err(MononokeError::InvalidRequest(format!(
|
||||
"Cannot move shared bookmark '{}' from small repo",
|
||||
@ -113,7 +113,7 @@ impl RepoContext {
|
||||
redirector.backsync_latest(ctx).await?;
|
||||
} else {
|
||||
make_move_op(
|
||||
&bookmark,
|
||||
bookmark,
|
||||
target,
|
||||
old_target,
|
||||
allow_non_fast_forward,
|
||||
|
Loading…
Reference in New Issue
Block a user