mononoke_api: remove legacy functions

Summary: Now that apiserver is gone, we can remove the legacy parts of mononoke_api.

Reviewed By: krallin

Differential Revision: D21976615

fbshipit-source-id: 139d7ebd4a7185693f6a4fe4e5c234c143f18498
This commit is contained in:
Mark Thomas 2020-06-11 10:38:38 -07:00 committed by Facebook GitHub Bot
parent f261469a2f
commit 4b76a1564d
3 changed files with 69 additions and 216 deletions

View File

@ -1,78 +0,0 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use anyhow::Error;
use blobrepo::BlobRepo;
use blobstore::Loadable;
use cloned::cloned;
use context::CoreContext;
use futures_ext::{FutureExt, StreamExt};
use futures_old::{future, Future, Stream};
use manifest::{Entry, ManifestOps};
use mercurial_types::HgChangesetId;
use mercurial_types::{manifest::Content, FileBytes};
use mononoke_types::MPath;
use thiserror::Error;
/// Errors produced by the legacy `mononoke_api` entry points in this file.
#[derive(Debug, Error)]
pub enum ErrorKind {
/// The requested path had no entry in the commit's manifest; the payload
/// is the display form of the missing path.
#[error("{0} not found")]
NotFound(String),
}
// This method is deprecated. It will eventually be replaced by something more like:
// let mononoke = Mononoke::new(...);
// mononoke.repo(repo_name).changeset(changeset_id).file(path).read();
/// Look up the content (tree or file) at `path` inside the commit named by
/// `changesetid`, using old-style (futures 0.1) combinators.
///
/// `path == None` addresses the root manifest. Resolves to
/// `ErrorKind::NotFound` when the path has no entry in the manifest.
pub fn get_content_by_path(
ctx: CoreContext,
repo: BlobRepo,
changesetid: HgChangesetId,
path: Option<MPath>,
) -> impl Future<Item = Content, Error = Error> {
// Load the changeset so we can walk its root manifest.
changesetid
.load(ctx.clone(), repo.blobstore())
.from_err()
.and_then({
cloned!(repo, ctx);
move |changeset| {
// Walk the manifest to the entry at `path`; a missing entry is
// turned into a NotFound error rather than an Option.
changeset
.manifestid()
.find_entry(ctx, repo.get_blobstore(), path.clone())
.and_then(move |entry| {
entry.ok_or_else(|| {
ErrorKind::NotFound(MPath::display_opt(path.as_ref()).to_string())
.into()
})
})
}
})
.and_then(move |entry| match entry {
// Directory: load the manifest and wrap it as tree content.
Entry::Tree(manifest_id) => manifest_id
.load(ctx.clone(), repo.blobstore())
.from_err()
.map(|manifest| Content::Tree(Box::new(manifest)))
.left_future(),
// File: resolve the filenode's envelope to a content id, then
// stream the file bytes out of the filestore.
Entry::Leaf((file_type, filenode_id)) => {
let stream = filenode_id
.load(ctx.clone(), repo.blobstore())
.from_err()
.map(move |envelope| {
filestore::fetch_stream(
repo.blobstore(),
ctx.clone(),
envelope.content_id(),
)
})
.flatten_stream()
.map(FileBytes)
.boxify();
let content = Content::new_file(file_type, stream);
future::ok(content).right_future()
}
})
}

View File

@ -12,22 +12,15 @@ use std::collections::HashMap;
use std::sync::Arc;
use anyhow::Error;
use blobrepo::BlobRepo;
use blobrepo_factory::{BlobstoreOptions, Caching, ReadOnlyStorage};
use cloned::cloned;
use fbinit::FacebookInit;
use futures::future;
use futures::future::try_join_all;
use skiplist::SkiplistIndex;
use slog::{debug, info, o, Logger};
use sql_ext::facebook::MysqlOptions;
use synced_commit_mapping::SyncedCommitMapping;
use warm_bookmarks_cache::WarmBookmarksCache;
use metaconfig_parser::RepoConfigs;
#[cfg(test)]
use metaconfig_types::CommitSyncConfig;
use metaconfig_types::RepoConfig;
use crate::repo::Repo;
@ -37,7 +30,6 @@ pub mod changeset_path_diff;
pub mod errors;
pub mod file;
pub mod hg;
pub mod legacy;
pub mod path;
pub mod repo;
pub mod repo_write;
@ -47,8 +39,6 @@ pub mod tree;
#[cfg(test)]
mod test;
pub use crate::legacy::get_content_by_path;
pub use crate::changeset::{ChangesetContext, Generation};
pub use crate::changeset_path::{
unified_diff, ChangesetPathContext, CopyInfo, PathEntry, UnifiedDiff, UnifiedDiffMode,
@ -119,109 +109,6 @@ impl Mononoke {
Ok(Self { repos })
}
/// Create a Mononoke instance for testing.
///
/// Each `(name, repo)` pair is wrapped in a test `Repo`; results keep the
/// input order and construction stops at the first error.
#[cfg(test)]
async fn new_test(
ctx: CoreContext,
repos: impl IntoIterator<Item = (String, BlobRepo)>,
) -> Result<Self, Error> {
// Function-scoped import: only the test constructors need these.
use futures::stream::{FuturesOrdered, TryStreamExt};
let repos = repos
.into_iter()
.map(move |(name, repo)| {
// Each async block needs its own handle on the context.
cloned!(ctx);
async move {
Repo::new_test(ctx.clone(), repo)
.await
.map(move |repo| (name, Arc::new(repo)))
}
})
.collect::<FuturesOrdered<_>>()
.try_collect()
.await?;
Ok(Self { repos })
}
/// Create a Mononoke instance for cross-repo tests: like `new_test`, but
/// each repo also carries a commit-sync config and a synced-commit mapping.
#[cfg(test)]
async fn new_test_xrepo(
ctx: CoreContext,
repos: impl IntoIterator<
Item = (
String,
BlobRepo,
CommitSyncConfig,
Arc<dyn SyncedCommitMapping>,
),
>,
) -> Result<Self, Error> {
// Function-scoped import: only the test constructors need these.
use futures::stream::{FuturesOrdered, TryStreamExt};
let repos = repos
.into_iter()
.map(
// NOTE(review): `synced_commit_maping` is misspelled ("maping");
// harmless since it is a local, but worth fixing upstream.
move |(name, repo, commit_sync_config, synced_commit_maping)| {
cloned!(ctx);
async move {
Repo::new_test_xrepo(
ctx.clone(),
repo,
commit_sync_config,
synced_commit_maping,
)
.await
.map(move |repo| (name, Arc::new(repo)))
}
},
)
.collect::<FuturesOrdered<_>>()
.try_collect()
.await?;
Ok(Self { repos })
}
/// Temporary function to create directly from parts.
///
/// Builds the name -> `Repo` map from pre-constructed components; each
/// tuple becomes one shared (`Arc`) repository entry.
pub fn new_from_parts(
    repos: impl IntoIterator<
        Item = (
            String,
            BlobRepo,
            Arc<SkiplistIndex>,
            Arc<WarmBookmarksCache>,
            Arc<dyn SyncedCommitMapping>,
            RepoConfig,
        ),
    >,
) -> Self {
    let repos = repos
        .into_iter()
        .map(
            |(name, blob_repo, skiplist_index, warm_bookmarks_cache, synced_commit_mapping, config)| {
                // The repo needs its own copy of the name; the map is keyed
                // by the same string.
                let repo = Repo::new_from_parts(
                    name.clone(),
                    blob_repo,
                    skiplist_index,
                    warm_bookmarks_cache,
                    synced_commit_mapping,
                    config,
                );
                (name, Arc::new(repo))
            },
        )
        .collect();
    Self { repos }
}
/// Start a request on a repository.
pub async fn repo(
&self,
@ -249,3 +136,72 @@ impl Mononoke {
try_join_all(reporting_futs).await.map(|_| ())
}
}
#[cfg(test)]
mod test_impl {
// Test-only constructors for `Mononoke`, grouped in their own module so
// the imports below are only compiled for tests.
use super::*;
use blobrepo::BlobRepo;
use metaconfig_types::CommitSyncConfig;
use synced_commit_mapping::SyncedCommitMapping;
impl Mononoke {
/// Create a Mononoke instance for testing.
///
/// Each `(name, repo)` pair is wrapped in a test `Repo`; results keep
/// the input order and construction stops at the first error.
pub(crate) async fn new_test(
ctx: CoreContext,
repos: impl IntoIterator<Item = (String, BlobRepo)>,
) -> Result<Self, Error> {
// Function-scoped import: only these constructors need it.
use futures::stream::{FuturesOrdered, TryStreamExt};
let repos = repos
.into_iter()
.map(move |(name, repo)| {
// Each async block needs its own handle on the context.
cloned!(ctx);
async move {
Repo::new_test(ctx.clone(), repo)
.await
.map(move |repo| (name, Arc::new(repo)))
}
})
.collect::<FuturesOrdered<_>>()
.try_collect()
.await?;
Ok(Self { repos })
}
/// Like `new_test`, but for cross-repo tests: each repo also carries a
/// commit-sync config and a synced-commit mapping.
pub(crate) async fn new_test_xrepo(
ctx: CoreContext,
repos: impl IntoIterator<
Item = (
String,
BlobRepo,
CommitSyncConfig,
Arc<dyn SyncedCommitMapping>,
),
>,
) -> Result<Self, Error> {
use futures::stream::{FuturesOrdered, TryStreamExt};
let repos = repos
.into_iter()
.map(
// NOTE(review): `synced_commit_maping` is misspelled
// ("maping"); harmless since it is a local.
move |(name, repo, commit_sync_config, synced_commit_maping)| {
cloned!(ctx);
async move {
Repo::new_test_xrepo(
ctx.clone(),
repo,
commit_sync_config,
synced_commit_maping,
)
.await
.map(move |repo| (name, Arc::new(repo)))
}
},
)
.collect::<FuturesOrdered<_>>()
.try_collect()
.await?;
Ok(Self { repos })
}
}
}

View File

@ -221,31 +221,6 @@ impl Repo {
})
}
/// Temporary function to create directly from parts.
///
/// Repos constructed this way skip ACL wiring: both permission checkers
/// are built from `PermissionCheckerBuilder::always_allow()`.
pub(crate) fn new_from_parts(
    name: String,
    blob_repo: BlobRepo,
    skiplist_index: Arc<SkiplistIndex>,
    warm_bookmarks_cache: Arc<WarmBookmarksCache>,
    synced_commit_mapping: Arc<dyn SyncedCommitMapping>,
    config: RepoConfig,
) -> Self {
    let repo_permission_checker =
        ArcPermissionChecker::from(PermissionCheckerBuilder::always_allow());
    let service_permission_checker =
        ArcPermissionChecker::from(PermissionCheckerBuilder::always_allow());
    Self {
        name,
        blob_repo,
        skiplist_index,
        warm_bookmarks_cache,
        synced_commit_mapping,
        config,
        repo_permission_checker,
        service_permission_checker,
    }
}
#[cfg(test)]
/// Construct a Repo from a test BlobRepo
pub(crate) async fn new_test(ctx: CoreContext, blob_repo: BlobRepo) -> Result<Self, Error> {