CODEMOD: rename mercurial_types::NodeHash to DNodeHash

Summary: mercurial_types::NodeHash should be replaced by types from mononoke_types in most cases, and by mercurial::NodeHash in the rest. Renaming it to DNodeHash makes the call sites that still need that migration easy to track.

Reviewed By: sid0

Differential Revision: D7618389

fbshipit-source-id: a876e723d911df626c7851fba56a056843b4e049
Lukas Piatkowski 2018-04-16 03:33:58 -07:00 committed by Facebook Github Bot
parent 528c76e082
commit 16cbfd5133
41 changed files with 317 additions and 314 deletions
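
The rename itself is purely mechanical; every use site changes shape roughly as in the sketch below (an illustration, not code from this diff; is_null is an invented helper):

    // Before the codemod: the ambiguous Sha1-based hash and its null constant.
    use mercurial_types::{NodeHash, NULL_HASH};

    fn is_null(hash: NodeHash) -> bool {
        hash == NULL_HASH
    }

    // After the codemod: the same type and constant under D-prefixed names, so
    // call sites that still need migrating to mononoke_types (or, in some
    // cases, to mercurial::NodeHash) are easy to grep for.
    use mercurial_types::{DNodeHash, D_NULL_HASH};

    fn is_null(hash: DNodeHash) -> bool {
        hash == D_NULL_HASH
    }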


@ -20,7 +20,7 @@ use mercurial::{self, NodeHashConversion};
use mercurial::changeset::Extra;
use mercurial::revlogrepo::RevlogChangeset;
use mercurial_types::{BlobNode, Changeset, HgBlob, MPath, Parents, Time};
use mercurial_types::nodehash::{HgChangesetId, HgManifestId, NULL_HASH};
use mercurial_types::nodehash::{HgChangesetId, HgManifestId, D_NULL_HASH};
use errors::*;
@ -158,7 +158,7 @@ impl BlobChangeset {
changesetid: &HgChangesetId,
) -> impl Future<Item = Option<Self>, Error = Error> + Send + 'static {
let changesetid = *changesetid;
if changesetid == HgChangesetId::new(NULL_HASH) {
if changesetid == HgChangesetId::new(D_NULL_HASH) {
let revlogcs = RevlogChangeset::new_null();
let cs = BlobChangeset::new_with_id(&changesetid, revlogcs.into());
Either::A(Ok(Some(cs)).into_future())


@ -10,7 +10,7 @@ use bincode;
pub use failure::{Error, ResultExt};
use mercurial_types::{HgBlob, HgBlobHash, HgChangesetId, HgFileNodeId, NodeHash, Parents,
use mercurial_types::{DNodeHash, HgBlob, HgBlobHash, HgChangesetId, HgFileNodeId, Parents,
RepoPath, Type};
#[derive(Debug)]
@ -42,24 +42,24 @@ pub type Result<T> = ::std::result::Result<T, Error>;
pub enum ErrorKind {
#[fail(display = "Error while opening state for {}", _0)] StateOpen(StateOpenError),
#[fail(display = "Changeset id {} is missing", _0)] ChangesetMissing(HgChangesetId),
#[fail(display = "Manifest id {} is missing", _0)] ManifestMissing(NodeHash),
#[fail(display = "Node id {} is missing", _0)] NodeMissing(NodeHash),
#[fail(display = "Manifest id {} is missing", _0)] ManifestMissing(DNodeHash),
#[fail(display = "Node id {} is missing", _0)] NodeMissing(DNodeHash),
#[fail(display = "Content missing nodeid {} (blob hash {:?})", _0, _1)]
ContentMissing(NodeHash, HgBlobHash),
ContentMissing(DNodeHash, HgBlobHash),
#[fail(display = "Uploaded blob is incomplete {:?}", _0)] BadUploadBlob(HgBlob),
#[fail(display = "Parents are not in blob store {:?}", _0)] ParentsUnknown(Parents),
#[fail(display = "Serialization of node failed {} ({})", _0, _1)]
SerializationFailed(NodeHash, bincode::Error),
SerializationFailed(DNodeHash, bincode::Error),
#[fail(display = "Root manifest is not a manifest (type {})", _0)] BadRootManifest(Type),
#[fail(display = "Manifest type {} does not match uploaded type {}", _0, _1)]
ManifestTypeMismatch(Type, Type),
#[fail(display = "Node generation failed for unknown reason")] NodeGenerationFailed,
#[fail(display = "Path {} appears multiple times in manifests", _0)] DuplicateEntry(RepoPath),
#[fail(display = "Duplicate manifest hash {}", _0)] DuplicateManifest(NodeHash),
#[fail(display = "Missing entries in new changeset {}", _0)] MissingEntries(NodeHash),
#[fail(display = "Duplicate manifest hash {}", _0)] DuplicateManifest(DNodeHash),
#[fail(display = "Missing entries in new changeset {}", _0)] MissingEntries(DNodeHash),
#[fail(display = "Filenode is missing: {} {}", _0, _1)] MissingFilenode(RepoPath, HgFileNodeId),
#[fail(display = "Some manifests do not exist")] MissingManifests,
#[fail(display = "Parents failed to complete")] ParentsFailed,
#[fail(display = "Expected {} to be a manifest, found a {} instead", _0, _1)]
NotAManifest(NodeHash, Type),
NotAManifest(DNodeHash, Type),
}


@ -13,7 +13,7 @@ use futures::future::Future;
use futures_ext::{BoxFuture, FutureExt};
use mercurial::file;
use mercurial_types::{BlobNode, FileType, HgBlob, HgManifestId, MPath, MPathElement, NodeHash,
use mercurial_types::{BlobNode, DNodeHash, FileType, HgBlob, HgManifestId, MPath, MPathElement,
Parents};
use mercurial_types::manifest::{Content, Entry, Manifest, Type};
use mercurial_types::nodehash::EntryId;
@ -36,8 +36,8 @@ pub struct BlobEntry {
pub fn fetch_file_content_and_renames_from_blobstore(
blobstore: &Arc<Blobstore>,
nodeid: NodeHash,
) -> BoxFuture<(Bytes, Option<(MPath, NodeHash)>), Error> {
nodeid: DNodeHash,
) -> BoxFuture<(Bytes, Option<(MPath, DNodeHash)>), Error> {
get_node(blobstore, nodeid)
.and_then({
let blobstore = blobstore.clone();
@ -71,7 +71,7 @@ impl BlobEntry {
pub fn new(
blobstore: Arc<Blobstore>,
name: Option<MPathElement>,
nodeid: NodeHash,
nodeid: DNodeHash,
ty: Type,
) -> Result<Self> {
Ok(Self {


@ -15,7 +15,7 @@ use futures::stream::{self, Stream};
use futures_ext::{BoxFuture, BoxStream, FutureExt, StreamExt};
use mercurial_types::{Entry, FileType, MPathElement, Manifest, Type};
use mercurial_types::nodehash::{EntryId, HgManifestId, NodeHash, NULL_HASH};
use mercurial_types::nodehash::{DNodeHash, EntryId, HgManifestId, D_NULL_HASH};
use blobstore::Blobstore;
@ -95,7 +95,7 @@ impl BlobManifest {
manifestid: &HgManifestId,
) -> BoxFuture<Option<Self>, Error> {
let nodehash = manifestid.clone().into_nodehash();
if nodehash == NULL_HASH {
if nodehash == D_NULL_HASH {
Ok(Some(BlobManifest {
blobstore: blobstore.clone(),
content: ManifestContent::new_empty(),
@ -181,7 +181,7 @@ impl Details {
let (hash, flags) = data.split_at(40);
let hash = str::from_utf8(hash)
.map_err(|err| Error::from(err))
.and_then(|hash| hash.parse::<NodeHash>())
.and_then(|hash| hash.parse::<DNodeHash>())
.with_context(|_| format!("malformed hash: {:?}", hash))?;
let entryid = EntryId::new(hash);


@ -37,8 +37,8 @@ use heads::Heads;
use manifoldblob::ManifoldBlob;
use memblob::EagerMemblob;
use memheads::MemHeads;
use mercurial_types::{BlobNode, Changeset, Entry, HgBlob, HgChangesetId, HgFileNodeId, Manifest,
NodeHash, Parents, RepoPath, RepositoryId, Time};
use mercurial_types::{BlobNode, Changeset, DNodeHash, Entry, HgBlob, HgChangesetId, HgFileNodeId,
Manifest, Parents, RepoPath, RepositoryId, Time};
use mercurial_types::manifest;
use mercurial_types::nodehash::HgManifestId;
use rocksblob::Rocksblob;
@ -209,13 +209,13 @@ impl BlobRepo {
))
}
pub fn get_file_content(&self, key: &NodeHash) -> BoxFuture<Bytes, Error> {
pub fn get_file_content(&self, key: &DNodeHash) -> BoxFuture<Bytes, Error> {
fetch_file_content_and_renames_from_blobstore(&self.blobstore, *key)
.map(|contentrename| contentrename.0)
.boxify()
}
pub fn get_parents(&self, path: &RepoPath, node: &NodeHash) -> BoxFuture<Parents, Error> {
pub fn get_parents(&self, path: &RepoPath, node: &DNodeHash) -> BoxFuture<Parents, Error> {
let path = path.clone();
let node = HgFileNodeId::new(*node);
self.filenodes
@ -237,8 +237,8 @@ impl BlobRepo {
pub fn get_file_copy(
&self,
path: &RepoPath,
node: &NodeHash,
) -> BoxFuture<Option<(RepoPath, NodeHash)>, Error> {
node: &DNodeHash,
) -> BoxFuture<Option<(RepoPath, DNodeHash)>, Error> {
let path = path.clone();
let node = HgFileNodeId::new(*node);
self.filenodes
@ -257,7 +257,7 @@ impl BlobRepo {
.boxify()
}
pub fn get_changesets(&self) -> BoxStream<NodeHash, Error> {
pub fn get_changesets(&self) -> BoxStream<DNodeHash, Error> {
BlobChangesetStream {
repo: self.clone(),
heads: self.heads.heads().boxify(),
@ -266,7 +266,7 @@ impl BlobRepo {
}.boxify()
}
pub fn get_heads(&self) -> BoxStream<NodeHash, Error> {
pub fn get_heads(&self) -> BoxStream<DNodeHash, Error> {
self.heads.heads().boxify()
}
@ -289,7 +289,7 @@ impl BlobRepo {
pub fn get_manifest_by_nodeid(
&self,
nodeid: &NodeHash,
nodeid: &DNodeHash,
) -> BoxFuture<Box<Manifest + Sync>, Error> {
let nodeid = *nodeid;
let manifestid = HgManifestId::new(nodeid);
@ -312,7 +312,7 @@ impl BlobRepo {
self.bookmarks.create_transaction(&self.repoid)
}
pub fn get_linknode(&self, path: RepoPath, node: &NodeHash) -> BoxFuture<NodeHash, Error> {
pub fn get_linknode(&self, path: RepoPath, node: &DNodeHash) -> BoxFuture<DNodeHash, Error> {
let node = HgFileNodeId::new(*node);
self.filenodes
.get_filenode(&path, &node, &self.repoid)
@ -337,17 +337,17 @@ impl BlobRepo {
// the entry or the data blob if the repo is aware of that data already existing in the
// underlying store.
// Note that the BlobEntry may not be consistent - parents do not have to be uploaded at this
// point, as long as you know their NodeHashes; this is also given to you as part of the
// point, as long as you know their DNodeHashes; this is also given to you as part of the
// result type, so that you can parallelise uploads. Consistency will be verified when
// adding the entries to a changeset.
pub fn upload_entry(
&self,
raw_content: HgBlob,
content_type: manifest::Type,
p1: Option<NodeHash>,
p2: Option<NodeHash>,
p1: Option<DNodeHash>,
p2: Option<DNodeHash>,
path: RepoPath,
) -> Result<(NodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)> {
) -> Result<(DNodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)> {
let p1 = p1.as_ref();
let p2 = p2.as_ref();
let raw_content = raw_content.clean();
@ -378,7 +378,7 @@ impl BlobRepo {
fn log_upload_stats(
logger: Logger,
path: RepoPath,
nodeid: NodeHash,
nodeid: DNodeHash,
phase: &str,
stats: Stats,
) {
@ -595,18 +595,18 @@ impl Clone for BlobRepo {
pub struct BlobChangesetStream {
repo: BlobRepo,
seen: HashSet<NodeHash>,
heads: BoxStream<NodeHash, Error>,
seen: HashSet<DNodeHash>,
heads: BoxStream<DNodeHash, Error>,
state: BCState,
}
enum BCState {
Idle,
WaitCS(NodeHash, BoxFuture<BlobChangeset, Error>),
WaitCS(DNodeHash, BoxFuture<BlobChangeset, Error>),
}
impl Stream for BlobChangesetStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Error> {


@ -22,8 +22,8 @@ use uuid::Uuid;
use blobstore::Blobstore;
use filenodes::{FilenodeInfo, Filenodes};
use mercurial::file;
use mercurial_types::{BlobNode, Changeset, Entry, EntryId, HgChangesetId, MPath, Manifest,
NodeHash, Parents, RepoPath, RepositoryId, Time};
use mercurial_types::{BlobNode, Changeset, DNodeHash, Entry, EntryId, HgChangesetId, MPath,
Manifest, Parents, RepoPath, RepositoryId, Time};
use mercurial_types::manifest::{self, Content};
use mercurial_types::manifest_utils::{changed_entry_stream, EntryStatus};
use mercurial_types::nodehash::{HgFileNodeId, HgManifestId};
@ -42,13 +42,13 @@ use utils::get_node_key;
/// See `get_completed_changeset()` for the public API you can use to extract the final changeset
#[derive(Clone)]
pub struct ChangesetHandle {
can_be_parent: Shared<oneshot::Receiver<(NodeHash, HgManifestId)>>,
can_be_parent: Shared<oneshot::Receiver<(DNodeHash, HgManifestId)>>,
completion_future: Shared<BoxFuture<BlobChangeset, Compat<Error>>>,
}
impl ChangesetHandle {
pub fn new_pending(
can_be_parent: Shared<oneshot::Receiver<(NodeHash, HgManifestId)>>,
can_be_parent: Shared<oneshot::Receiver<(DNodeHash, HgManifestId)>>,
completion_future: Shared<BoxFuture<BlobChangeset, Compat<Error>>>,
) -> Self {
Self {
@ -87,7 +87,7 @@ struct UploadEntriesState {
/// Parent hashes (if any) of the blobs that have been uploaded in this changeset. Used for
/// validation of this upload - all parents must either have been uploaded in this changeset,
/// or be present in the blobstore before the changeset can complete.
parents: HashSet<NodeHash>,
parents: HashSet<DNodeHash>,
blobstore: Arc<Blobstore>,
repoid: RepositoryId,
}
@ -198,7 +198,7 @@ impl UploadEntries {
}
}
pub fn finalize(self, filenodes: Arc<Filenodes>, cs_id: NodeHash) -> BoxFuture<(), Error> {
pub fn finalize(self, filenodes: Arc<Filenodes>, cs_id: DNodeHash) -> BoxFuture<(), Error> {
let required_checks = {
let inner = self.inner.lock().expect("Lock poisoned");
let checks: Vec<_> = inner
@ -310,7 +310,9 @@ fn compute_changed_files_pair(
.filter_map(|change| match change.status {
EntryStatus::Deleted(entry)
| EntryStatus::Added(entry)
| EntryStatus::Modified { to_entry: entry, .. } => {
| EntryStatus::Modified {
to_entry: entry, ..
} => {
if entry.get_type() == manifest::Type::Tree {
None
} else {


@ -10,7 +10,7 @@ use futures_ext::{BoxFuture, FutureExt};
use bincode;
use blobstore::Blobstore;
use mercurial_types::{HgBlobHash, NodeHash, Parents};
use mercurial_types::{DNodeHash, HgBlobHash, Parents};
use errors::*;
@ -21,11 +21,11 @@ pub struct RawNodeBlob {
pub blob: HgBlobHash,
}
pub fn get_node_key(nodeid: NodeHash) -> String {
pub fn get_node_key(nodeid: DNodeHash) -> String {
format!("node-{}.bincode", nodeid)
}
pub fn get_node(blobstore: &Blobstore, nodeid: NodeHash) -> BoxFuture<RawNodeBlob, Error> {
pub fn get_node(blobstore: &Blobstore, nodeid: DNodeHash) -> BoxFuture<RawNodeBlob, Error> {
let key = get_node_key(nodeid);
blobstore


@ -16,7 +16,7 @@ use futures_ext::{BoxFuture, StreamExt};
use blobrepo::{BlobEntry, BlobRepo, ChangesetHandle};
use memblob::{EagerMemblob, LazyMemblob};
use mercurial_types::{manifest, FileType, HgBlob, NodeHash, RepoPath, Time};
use mercurial_types::{manifest, DNodeHash, FileType, HgBlob, RepoPath, Time};
use std::sync::Arc;
pub fn get_empty_eager_repo() -> BlobRepo {
@ -68,7 +68,7 @@ pub fn upload_file_no_parents<S>(
repo: &BlobRepo,
data: S,
path: &RepoPath,
) -> (NodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
) -> (DNodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
where
S: Into<String>,
{
@ -86,8 +86,8 @@ pub fn upload_file_one_parent<S>(
repo: &BlobRepo,
data: S,
path: &RepoPath,
p1: NodeHash,
) -> (NodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
p1: DNodeHash,
) -> (DNodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
where
S: Into<String>,
{
@ -105,7 +105,7 @@ pub fn upload_manifest_no_parents<S>(
repo: &BlobRepo,
data: S,
path: &RepoPath,
) -> (NodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
) -> (DNodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
where
S: Into<String>,
{
@ -118,8 +118,8 @@ pub fn upload_manifest_one_parent<S>(
repo: &BlobRepo,
data: S,
path: &RepoPath,
p1: NodeHash,
) -> (NodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
p1: DNodeHash,
) -> (DNodeHash, BoxFuture<(BlobEntry, RepoPath), Error>)
where
S: Into<String>,
{
@ -163,8 +163,8 @@ pub fn create_changeset_one_parent(
)
}
pub fn string_to_nodehash(hash: &str) -> NodeHash {
NodeHash::from_ascii_str(hash.as_ascii_str().unwrap()).unwrap()
pub fn string_to_nodehash(hash: &str) -> DNodeHash {
DNodeHash::from_ascii_str(hash.as_ascii_str().unwrap()).unwrap()
}
pub fn run_future<F>(future: F) -> Result<F::Item, F::Error>


@ -67,7 +67,7 @@ use futures_ext::{BoxFuture, FutureExt};
use futures_stats::{Stats, Timed};
use hyper::StatusCode;
use hyper::server::{Http, Request, Response, Service};
use mercurial_types::{Changeset, FileType, NodeHash, RepositoryId};
use mercurial_types::{Changeset, DNodeHash, FileType, RepositoryId};
use mercurial_types::nodehash::HgChangesetId;
use native_tls::TlsAcceptor;
use native_tls::backend::openssl::TlsAcceptorBuilderExt;
@ -113,25 +113,25 @@ where
fn parse_root_treemanifest_id_url(caps: Captures) -> Result<ParsedUrl> {
let repo = parse_capture::<String>(&caps, 1)?;
let hash = parse_capture::<NodeHash>(&caps, 2)?;
let hash = parse_capture::<DNodeHash>(&caps, 2)?;
Ok(ParsedUrl::RootTreeHgManifestId(repo, hash))
}
fn parse_tree_content_url(caps: Captures) -> Result<ParsedUrl> {
let repo = parse_capture::<String>(&caps, 1)?;
let hash = parse_capture::<NodeHash>(&caps, 2)?;
let hash = parse_capture::<DNodeHash>(&caps, 2)?;
Ok(ParsedUrl::TreeContent(repo, hash))
}
fn parse_tree_content_light_url(caps: Captures) -> Result<ParsedUrl> {
let repo = parse_capture::<String>(&caps, 1)?;
let hash = parse_capture::<NodeHash>(&caps, 2)?;
let hash = parse_capture::<DNodeHash>(&caps, 2)?;
Ok(ParsedUrl::TreeContentLight(repo, hash))
}
fn parse_blob_content_url(caps: Captures) -> Result<ParsedUrl> {
let repo = parse_capture::<String>(&caps, 1)?;
let hash = parse_capture::<NodeHash>(&caps, 2)?;
let hash = parse_capture::<DNodeHash>(&caps, 2)?;
Ok(ParsedUrl::BlobContent(repo, hash))
}
@ -148,10 +148,10 @@ fn parse_url(url: &str, routes: &[Route]) -> Result<ParsedUrl> {
}
enum ParsedUrl {
RootTreeHgManifestId(String, NodeHash),
TreeContent(String, NodeHash),
TreeContentLight(String, NodeHash),
BlobContent(String, NodeHash),
RootTreeHgManifestId(String, DNodeHash),
TreeContent(String, DNodeHash),
TreeContentLight(String, DNodeHash),
BlobContent(String, DNodeHash),
}
lazy_static! {
@ -190,7 +190,7 @@ impl From<mercurial_types::Type> for MetadataType {
}
#[derive(Serialize)]
struct TreeMetadata {
hash: NodeHash,
hash: DNodeHash,
path: PathBuf,
#[serde(rename = "type")]
ty: MetadataType,
@ -280,7 +280,7 @@ where
fn get_tree_content(
&self,
reponame: String,
hash: &NodeHash,
hash: &DNodeHash,
options: TreeMetadataOptions,
) -> Box<futures::Future<Item = Bytes, Error = Error> + Send> {
let repo = match self.name_to_repo.get(&reponame) {
@ -315,7 +315,7 @@ where
fn get_blob_content(
&self,
reponame: String,
hash: &NodeHash,
hash: &DNodeHash,
) -> Box<futures::Future<Item = Bytes, Error = Error> + Send> {
let repo = match self.name_to_repo.get(&reponame) {
Some(repo) => repo,


@ -19,7 +19,7 @@ extern crate time_ext;
use dieselfilenodes::{MysqlFilenodes, DEFAULT_INSERT_CHUNK_SIZE};
use filenodes::Filenodes;
use futures::future::Future;
use mercurial_types::{HgFileNodeId, NodeHash, RepoPath, RepositoryId};
use mercurial_types::{DNodeHash, HgFileNodeId, RepoPath, RepositoryId};
use slog::{Drain, Level};
use slog_glog_fmt::default_drain as glog_drain;
use std::str::FromStr;
@ -74,7 +74,7 @@ fn main() {
info!(root_log, "file");
RepoPath::file(filename).expect("incorrect repopath")
};
let filenode_hash = NodeHash::from_str(filenode).expect("incorrect filenode: should be sha1");
let filenode_hash = DNodeHash::from_str(filenode).expect("incorrect filenode: should be sha1");
let mut filenode_hash = HgFileNodeId::new(filenode_hash);


@ -32,7 +32,7 @@ use futures_cpupool::CpuPool;
use futures_ext::{BoxFuture, BoxStream, FutureExt, StreamExt};
use heads::Heads;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
static PREFIX: &'static str = "head-";
@ -74,13 +74,13 @@ impl FileHeads {
Self::open_with_pool(path, pool)
}
fn get_path(&self, key: &NodeHash) -> Result<PathBuf> {
fn get_path(&self, key: &DNodeHash) -> Result<PathBuf> {
Ok(self.base.join(format!("{}{}", PREFIX, key.to_string())))
}
}
impl Heads for FileHeads {
fn add(&self, key: &NodeHash) -> BoxFuture<(), Error> {
fn add(&self, key: &DNodeHash) -> BoxFuture<(), Error> {
let pool = self.pool.clone();
self.get_path(&key)
.into_future()
@ -94,7 +94,7 @@ impl Heads for FileHeads {
.boxify()
}
fn remove(&self, key: &NodeHash) -> BoxFuture<(), Error> {
fn remove(&self, key: &DNodeHash) -> BoxFuture<(), Error> {
let pool = self.pool.clone();
self.get_path(&key)
.into_future()
@ -114,7 +114,7 @@ impl Heads for FileHeads {
.boxify()
}
fn is_head(&self, key: &NodeHash) -> BoxFuture<bool, Error> {
fn is_head(&self, key: &DNodeHash) -> BoxFuture<bool, Error> {
let pool = self.pool.clone();
self.get_path(&key)
.into_future()
@ -125,7 +125,7 @@ impl Heads for FileHeads {
.boxify()
}
fn heads(&self) -> BoxStream<NodeHash, Error> {
fn heads(&self) -> BoxStream<DNodeHash, Error> {
let names = fs::read_dir(&self.base).map(|entries| {
entries
.map(|result| {
@ -136,7 +136,7 @@ impl Heads for FileHeads {
.filter_map(|result| match result {
Ok(ref name) if name.starts_with(PREFIX) => {
let name = &name[PREFIX.len()..];
let name = NodeHash::from_str(name)
let name = DNodeHash::from_str(name)
.context("can't parse name")
.map_err(Error::from);
Some(name)
@ -152,7 +152,6 @@ impl Heads for FileHeads {
}
}
#[cfg(test)]
mod test {
use super::*;


@ -21,13 +21,12 @@ use futures::future::ok;
use futures::stream::iter_ok;
use futures_ext::{BoxFuture, BoxStream, FutureExt, StreamExt};
use heads::Heads;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
/// Generic, in-memory heads store backed by a HashSet, intended to be used in tests.
pub struct MemHeads {
heads: Mutex<HashSet<NodeHash>>,
heads: Mutex<HashSet<DNodeHash>>,
}
impl MemHeads {
@ -40,21 +39,21 @@ impl MemHeads {
}
impl Heads for MemHeads {
fn add(&self, head: &NodeHash) -> BoxFuture<(), Error> {
fn add(&self, head: &DNodeHash) -> BoxFuture<(), Error> {
self.heads.lock().unwrap().insert(head.clone());
ok(()).boxify()
}
fn remove(&self, head: &NodeHash) -> BoxFuture<(), Error> {
fn remove(&self, head: &DNodeHash) -> BoxFuture<(), Error> {
self.heads.lock().unwrap().remove(head);
ok(()).boxify()
}
fn is_head(&self, head: &NodeHash) -> BoxFuture<bool, Error> {
fn is_head(&self, head: &DNodeHash) -> BoxFuture<bool, Error> {
ok(self.heads.lock().unwrap().contains(head)).boxify()
}
fn heads(&self) -> BoxStream<NodeHash, Error> {
fn heads(&self) -> BoxStream<DNodeHash, Error> {
let guard = self.heads.lock().unwrap();
let heads = (*guard).clone();
iter_ok(heads).boxify()


@ -13,7 +13,7 @@ extern crate mercurial_types;
use failure::Error;
use futures_ext::{BoxFuture, BoxStream};
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
/// Trait representing the interface to a heads store, which more generally is just
/// a set of commit identifiers.
@ -21,26 +21,26 @@ pub trait Heads: Send + Sync + 'static {
// Heads are not guaranteed to be returned in any particular order. Heads that exist for
// the entire duration of the traversal are guaranteed to appear at least once.
fn add(&self, &NodeHash) -> BoxFuture<(), Error>;
fn remove(&self, &NodeHash) -> BoxFuture<(), Error>;
fn is_head(&self, &NodeHash) -> BoxFuture<bool, Error>;
fn heads(&self) -> BoxStream<NodeHash, Error>;
fn add(&self, &DNodeHash) -> BoxFuture<(), Error>;
fn remove(&self, &DNodeHash) -> BoxFuture<(), Error>;
fn is_head(&self, &DNodeHash) -> BoxFuture<bool, Error>;
fn heads(&self) -> BoxStream<DNodeHash, Error>;
}
impl Heads for Box<Heads> {
fn add(&self, head: &NodeHash) -> BoxFuture<(), Error> {
fn add(&self, head: &DNodeHash) -> BoxFuture<(), Error> {
self.as_ref().add(head)
}
fn remove(&self, head: &NodeHash) -> BoxFuture<(), Error> {
fn remove(&self, head: &DNodeHash) -> BoxFuture<(), Error> {
self.as_ref().remove(head)
}
fn is_head(&self, hash: &NodeHash) -> BoxFuture<bool, Error> {
fn is_head(&self, hash: &DNodeHash) -> BoxFuture<bool, Error> {
self.as_ref().is_head(hash)
}
fn heads(&self) -> BoxStream<NodeHash, Error> {
fn heads(&self) -> BoxStream<DNodeHash, Error> {
self.as_ref().heads()
}
}
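
Combined with the in-memory store from the previous file, driving this trait looks roughly like the following (a futures-0.1-style sketch; MemHeads::new() is assumed, since the constructor is not shown in this diff):

    let heads = MemHeads::new(); // constructor assumed, not part of this hunk
    heads.add(&mercurial_types_mocks::nodehash::ONES_HASH).wait()?;
    assert!(heads.is_head(&mercurial_types_mocks::nodehash::ONES_HASH).wait()?);
    // Streams of heads are collected the same way the tests below do it.
    let all: Vec<DNodeHash> = heads.heads().collect().wait()?;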


@ -23,10 +23,10 @@ use tempdir::TempDir;
use fileheads::FileHeads;
use heads::Heads;
use memheads::MemHeads;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
fn basic<H: Heads>(heads: H) {
let empty: Vec<NodeHash> = Vec::new();
let empty: Vec<DNodeHash> = Vec::new();
assert_eq!(heads.heads().collect().wait().unwrap(), empty);
let foo = mercurial_types_mocks::nodehash::ONES_HASH;


@ -39,7 +39,7 @@ use hlua::{AnyLuaValue, Lua, LuaError, PushGuard};
use blobrepo::BlobRepo;
use hlua_futures::{AnyFuture, LuaCoroutine, LuaCoroutineBuilder};
use mercurial_types::{Changeset, NodeHash};
use mercurial_types::{Changeset, DNodeHash};
use mercurial_types::nodehash::HgChangesetId;
pub use errors::*;
@ -48,8 +48,8 @@ pub use errors::*;
pub struct HookInfo {
pub repo: String,
pub bookmark: String,
pub old_hash: NodeHash,
pub new_hash: NodeHash,
pub old_hash: DNodeHash,
pub new_hash: DNodeHash,
}
pub struct HookManager<'lua> {


@ -7,28 +7,28 @@
// Ignore deprecation of NodeHash::new
#![allow(deprecated)]
use mercurial_types::{HgChangesetId, HgFileNodeId, HgManifestId, NodeHash};
// NULL_HASH is exported for convenience.
pub use mercurial_types::NULL_HASH;
use mercurial_types::{DNodeHash, HgChangesetId, HgFileNodeId, HgManifestId};
// D_NULL_HASH is exported for convenience.
pub use mercurial_types::D_NULL_HASH;
use hash;
// Definitions for hashes 1111...ffff.
pub const ONES_HASH: NodeHash = NodeHash::new(hash::ONES);
pub const TWOS_HASH: NodeHash = NodeHash::new(hash::TWOS);
pub const THREES_HASH: NodeHash = NodeHash::new(hash::THREES);
pub const FOURS_HASH: NodeHash = NodeHash::new(hash::FOURS);
pub const FIVES_HASH: NodeHash = NodeHash::new(hash::FIVES);
pub const SIXES_HASH: NodeHash = NodeHash::new(hash::SIXES);
pub const SEVENS_HASH: NodeHash = NodeHash::new(hash::SEVENS);
pub const EIGHTS_HASH: NodeHash = NodeHash::new(hash::EIGHTS);
pub const NINES_HASH: NodeHash = NodeHash::new(hash::NINES);
pub const AS_HASH: NodeHash = NodeHash::new(hash::AS);
pub const BS_HASH: NodeHash = NodeHash::new(hash::BS);
pub const CS_HASH: NodeHash = NodeHash::new(hash::CS);
pub const DS_HASH: NodeHash = NodeHash::new(hash::DS);
pub const ES_HASH: NodeHash = NodeHash::new(hash::ES);
pub const FS_HASH: NodeHash = NodeHash::new(hash::FS);
pub const ONES_HASH: DNodeHash = DNodeHash::new(hash::ONES);
pub const TWOS_HASH: DNodeHash = DNodeHash::new(hash::TWOS);
pub const THREES_HASH: DNodeHash = DNodeHash::new(hash::THREES);
pub const FOURS_HASH: DNodeHash = DNodeHash::new(hash::FOURS);
pub const FIVES_HASH: DNodeHash = DNodeHash::new(hash::FIVES);
pub const SIXES_HASH: DNodeHash = DNodeHash::new(hash::SIXES);
pub const SEVENS_HASH: DNodeHash = DNodeHash::new(hash::SEVENS);
pub const EIGHTS_HASH: DNodeHash = DNodeHash::new(hash::EIGHTS);
pub const NINES_HASH: DNodeHash = DNodeHash::new(hash::NINES);
pub const AS_HASH: DNodeHash = DNodeHash::new(hash::AS);
pub const BS_HASH: DNodeHash = DNodeHash::new(hash::BS);
pub const CS_HASH: DNodeHash = DNodeHash::new(hash::CS);
pub const DS_HASH: DNodeHash = DNodeHash::new(hash::DS);
pub const ES_HASH: DNodeHash = DNodeHash::new(hash::ES);
pub const FS_HASH: DNodeHash = DNodeHash::new(hash::FS);
// Definitions for changeset IDs 1111...ffff
pub const ONES_CSID: HgChangesetId = HgChangesetId::new(ONES_HASH);


@ -5,7 +5,7 @@
// GNU General Public License version 2 or any later version.
use hash::{self, Context};
use nodehash::NodeHash;
use nodehash::DNodeHash;
use blob::HgBlob;
@ -13,12 +13,12 @@ use blob::HgBlob;
#[derive(Serialize, Deserialize, HeapSizeOf)]
pub enum Parents {
None,
One(NodeHash),
Two(NodeHash, NodeHash),
One(DNodeHash),
Two(DNodeHash, DNodeHash),
}
impl Parents {
pub fn new(p1: Option<&NodeHash>, p2: Option<&NodeHash>) -> Self {
pub fn new(p1: Option<&DNodeHash>, p2: Option<&DNodeHash>) -> Self {
match (p1, p2) {
(None, None) => Parents::None,
(Some(p1), None) => Parents::One(*p1),
@ -28,7 +28,7 @@ impl Parents {
}
}
pub fn get_nodes(&self) -> (Option<&NodeHash>, Option<&NodeHash>) {
pub fn get_nodes(&self) -> (Option<&DNodeHash>, Option<&DNodeHash>) {
match self {
&Parents::None => (None, None),
&Parents::One(ref p1) => (Some(p1), None),
@ -39,7 +39,7 @@ impl Parents {
impl<'a> IntoIterator for &'a Parents {
type IntoIter = ParentIter;
type Item = NodeHash;
type Item = DNodeHash;
fn into_iter(self) -> ParentIter {
ParentIter(*self)
}
@ -49,7 +49,7 @@ impl<'a> IntoIterator for &'a Parents {
pub struct ParentIter(Parents);
impl Iterator for ParentIter {
type Item = NodeHash;
type Item = DNodeHash;
fn next(&mut self) -> Option<Self::Item> {
let (ret, new) = match self.0 {
Parents::None => (None, Parents::None),
@ -81,7 +81,7 @@ impl BlobNode {
/// parent that's copied.
/// * If both p1 and p2 are None, it shouldn't really be possible to have copy info. But
/// the Mercurial Python client tries to parse metadata anyway, so match that behavior.
pub fn new<B>(blob: B, p1: Option<&NodeHash>, p2: Option<&NodeHash>) -> BlobNode
pub fn new<B>(blob: B, p1: Option<&DNodeHash>, p2: Option<&DNodeHash>) -> BlobNode
where
B: Into<HgBlob>,
{
@ -112,7 +112,7 @@ impl BlobNode {
// Annoyingly, filenode is defined as sha1(p1 || p2 || content), not
// sha1(p1 || p2 || sha1(content)), so we can't compute a filenode for
// a blob we don't have
pub fn nodeid(&self) -> Option<NodeHash> {
pub fn nodeid(&self) -> Option<DNodeHash> {
let null = hash::NULL;
let (h1, h2) = match &self.parents {
@ -129,7 +129,7 @@ impl BlobNode {
ctxt.update(h2);
ctxt.update(data);
NodeHash(ctxt.finish())
DNodeHash(ctxt.finish())
})
}
}
@ -153,19 +153,19 @@ mod test {
let p = &BlobNode::new(blob.clone(), None, None);
assert!(p.maybe_copied);
{
let pid: Option<NodeHash> = p.nodeid();
let pid: Option<DNodeHash> = p.nodeid();
let n = BlobNode::new(blob.clone(), pid.as_ref(), None);
assert_eq!(n.parents, Parents::One(pid.unwrap()));
assert!(!n.maybe_copied);
}
{
let pid: Option<NodeHash> = p.nodeid();
let pid: Option<DNodeHash> = p.nodeid();
let n = BlobNode::new(blob.clone(), None, pid.as_ref());
assert_eq!(n.parents, Parents::One(pid.unwrap()));
assert!(n.maybe_copied);
}
{
let pid: Option<NodeHash> = p.nodeid();
let pid: Option<DNodeHash> = p.nodeid();
let n = BlobNode::new(blob.clone(), pid.as_ref(), pid.as_ref());
assert_eq!(n.parents, Parents::One(pid.unwrap()));
assert!(!n.maybe_copied);
@ -184,8 +184,8 @@ mod test {
mem::swap(&mut p1, &mut p2);
}
let pid1: Option<NodeHash> = (&p1).nodeid();
let pid2: Option<NodeHash> = (&p2).nodeid();
let pid1: Option<DNodeHash> = (&p1).nodeid();
let pid2: Option<DNodeHash> = (&p2).nodeid();
let node1 = {
let n = BlobNode::new(


@ -101,7 +101,7 @@ pub use delta::Delta;
pub use fsencode::{fncache_fsencode, simple_fsencode};
pub use manifest::{Entry, Manifest, Type};
pub use node::Node;
pub use nodehash::{EntryId, HgChangesetId, HgFileNodeId, HgManifestId, NodeHash, NULL_HASH};
pub use nodehash::{DNodeHash, EntryId, HgChangesetId, HgFileNodeId, HgManifestId, D_NULL_HASH};
pub use repo::RepositoryId;
pub use utils::percent_encode;


@ -19,8 +19,8 @@ use hash::{self, Sha1};
use serde;
use sql_types::{HgChangesetIdSql, HgFileNodeIdSql, HgManifestIdSql};
pub const NULL_HASH: NodeHash = NodeHash(hash::NULL);
pub const NULL_CSID: HgChangesetId = HgChangesetId(NULL_HASH);
pub const D_NULL_HASH: DNodeHash = DNodeHash(hash::NULL);
pub const NULL_CSID: HgChangesetId = HgChangesetId(D_NULL_HASH);
/// This structure represents Sha1 based hashes that are used in Mononoke. It is a temporary
/// structure that will be entirely replaced by structures from mononoke-types::typed_hash.
@ -31,17 +31,17 @@ pub const NULL_CSID: HgChangesetId = HgChangesetId(NULL_HASH);
/// provides Flat Manifest hashes as aliases for Root Manifest hashes
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf)]
pub struct NodeHash(pub(crate) Sha1);
pub struct DNodeHash(pub(crate) Sha1);
impl NodeHash {
impl DNodeHash {
#[deprecated(note = "This constructor is only used in two places: \
conversion from mercurial NodeHash and creation of NodeHash mocks")]
pub const fn new(sha1: Sha1) -> NodeHash {
NodeHash(sha1)
pub const fn new(sha1: Sha1) -> Self {
DNodeHash(sha1)
}
pub fn from_bytes(bytes: &[u8]) -> Result<NodeHash> {
Sha1::from_bytes(bytes).map(NodeHash)
pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
Sha1::from_bytes(bytes).map(DNodeHash)
}
pub fn as_bytes(&self) -> &[u8] {
@ -49,13 +49,13 @@ impl NodeHash {
}
#[inline]
pub fn from_ascii_str(s: &AsciiStr) -> Result<NodeHash> {
Sha1::from_ascii_str(s).map(NodeHash)
pub fn from_ascii_str(s: &AsciiStr) -> Result<Self> {
Sha1::from_ascii_str(s).map(DNodeHash)
}
#[inline]
pub fn into_option(self) -> Option<Self> {
if self == NULL_HASH {
if self == D_NULL_HASH {
None
} else {
Some(self)
@ -69,10 +69,10 @@ impl NodeHash {
}
}
impl From<Option<NodeHash>> for NodeHash {
fn from(h: Option<NodeHash>) -> Self {
impl From<Option<DNodeHash>> for DNodeHash {
fn from(h: Option<DNodeHash>) -> Self {
match h {
None => NULL_HASH,
None => D_NULL_HASH,
Some(h) => h,
}
}
@ -102,7 +102,7 @@ impl<'de> serde::de::Visitor<'de> for StringVisitor {
}
}
impl serde::ser::Serialize for NodeHash {
impl serde::ser::Serialize for DNodeHash {
fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error>
where
S: serde::Serializer,
@ -111,73 +111,73 @@ impl serde::ser::Serialize for NodeHash {
}
}
impl<'de> serde::de::Deserialize<'de> for NodeHash {
fn deserialize<D>(deserializer: D) -> ::std::result::Result<NodeHash, D::Error>
impl<'de> serde::de::Deserialize<'de> for DNodeHash {
fn deserialize<D>(deserializer: D) -> ::std::result::Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let hex = deserializer.deserialize_string(StringVisitor)?;
match Sha1::from_str(hex.as_str()) {
Ok(sha1) => Ok(NodeHash(sha1)),
Ok(sha1) => Ok(DNodeHash(sha1)),
Err(error) => Err(serde::de::Error::custom(error)),
}
}
}
impl FromStr for NodeHash {
impl FromStr for DNodeHash {
type Err = <Sha1 as FromStr>::Err;
fn from_str(s: &str) -> result::Result<NodeHash, Self::Err> {
Sha1::from_str(s).map(NodeHash)
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
Sha1::from_str(s).map(DNodeHash)
}
}
impl Display for NodeHash {
impl Display for DNodeHash {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl Arbitrary for NodeHash {
impl Arbitrary for DNodeHash {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
NodeHash(Sha1::arbitrary(g))
DNodeHash(Sha1::arbitrary(g))
}
fn shrink(&self) -> Box<Iterator<Item = Self>> {
single_shrinker(NULL_HASH)
single_shrinker(D_NULL_HASH)
}
}
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "HgChangesetIdSql"]
pub struct HgChangesetId(NodeHash);
pub struct HgChangesetId(DNodeHash);
impl HgChangesetId {
#[inline]
pub fn from_ascii_str(s: &AsciiStr) -> Result<HgChangesetId> {
NodeHash::from_ascii_str(s).map(HgChangesetId)
DNodeHash::from_ascii_str(s).map(HgChangesetId)
}
#[inline]
pub(crate) fn as_nodehash(&self) -> &NodeHash {
pub(crate) fn as_nodehash(&self) -> &DNodeHash {
&self.0
}
pub fn into_nodehash(self) -> NodeHash {
pub fn into_nodehash(self) -> DNodeHash {
self.0
}
pub const fn new(hash: NodeHash) -> Self {
pub const fn new(hash: DNodeHash) -> Self {
HgChangesetId(hash)
}
}
impl FromStr for HgChangesetId {
type Err = <NodeHash as FromStr>::Err;
type Err = <DNodeHash as FromStr>::Err;
fn from_str(s: &str) -> result::Result<HgChangesetId, Self::Err> {
NodeHash::from_str(s).map(HgChangesetId)
DNodeHash::from_str(s).map(HgChangesetId)
}
}
@ -202,7 +202,7 @@ impl<'de> serde::de::Deserialize<'de> for HgChangesetId {
D: serde::de::Deserializer<'de>,
{
let hex = deserializer.deserialize_string(StringVisitor)?;
match NodeHash::from_str(hex.as_str()) {
match DNodeHash::from_str(hex.as_str()) {
Ok(hash) => Ok(HgChangesetId::new(hash)),
Err(error) => Err(serde::de::Error::custom(error)),
}
@ -212,19 +212,19 @@ impl<'de> serde::de::Deserialize<'de> for HgChangesetId {
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "HgManifestIdSql"]
pub struct HgManifestId(NodeHash);
pub struct HgManifestId(DNodeHash);
impl HgManifestId {
#[inline]
pub(crate) fn as_nodehash(&self) -> &NodeHash {
pub(crate) fn as_nodehash(&self) -> &DNodeHash {
&self.0
}
pub fn into_nodehash(self) -> NodeHash {
pub fn into_nodehash(self) -> DNodeHash {
self.0
}
pub const fn new(hash: NodeHash) -> Self {
pub const fn new(hash: DNodeHash) -> Self {
HgManifestId(hash)
}
}
@ -238,19 +238,19 @@ impl Display for HgManifestId {
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "HgFileNodeIdSql"]
pub struct HgFileNodeId(NodeHash);
pub struct HgFileNodeId(DNodeHash);
impl HgFileNodeId {
#[inline]
pub(crate) fn as_nodehash(&self) -> &NodeHash {
pub(crate) fn as_nodehash(&self) -> &DNodeHash {
&self.0
}
pub fn into_nodehash(self) -> NodeHash {
pub fn into_nodehash(self) -> DNodeHash {
self.0
}
pub const fn new(hash: NodeHash) -> Self {
pub const fn new(hash: DNodeHash) -> Self {
HgFileNodeId(hash)
}
}
@ -264,14 +264,14 @@ impl Display for HgFileNodeId {
/// TODO: (jsgf) T25576292 EntryId should be a (Type, NodeId) tuple
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf)]
pub struct EntryId(NodeHash);
pub struct EntryId(DNodeHash);
impl EntryId {
pub fn into_nodehash(self) -> NodeHash {
pub fn into_nodehash(self) -> DNodeHash {
self.0
}
pub fn new(hash: NodeHash) -> Self {
pub fn new(hash: DNodeHash) -> Self {
EntryId(hash)
}
}
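
All of the typed wrappers in this file share one shape: a newtype over DNodeHash with new() to wrap and into_nodehash() to unwrap. A small sketch (the hex literal is borrowed from tests elsewhere in this diff):

    use std::str::FromStr;

    // Parse a raw hex hash, wrap it as a changeset id, and unwrap it again.
    let hash = DNodeHash::from_str("ecafdc4a4b6748b7a7215c6995f14c837dc1ebec")?;
    let cs_id = HgChangesetId::new(hash);
    assert_eq!(cs_id.into_nodehash(), hash); // DNodeHash is Copy, so reuse is fine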


@ -13,7 +13,7 @@ use diesel::deserialize::{self, FromSql};
use diesel::serialize::{self, IsNull, Output, ToSql};
use diesel::sql_types::{Binary, Integer};
use {HgChangesetId, HgFileNodeId, HgManifestId, NodeHash, RepositoryId};
use {DNodeHash, HgChangesetId, HgFileNodeId, HgManifestId, RepositoryId};
use errors::*;
#[derive(QueryId, SqlType)]
@ -46,7 +46,7 @@ where
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = NodeHash::from_bytes(raw_bytes).compat()?;
let hash = DNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}
@ -66,7 +66,7 @@ where
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = NodeHash::from_bytes(raw_bytes).compat()?;
let hash = DNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}
@ -86,7 +86,7 @@ where
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = NodeHash::from_bytes(raw_bytes).compat()?;
let hash = DNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}


@ -24,7 +24,6 @@ fn test_fsencode_from_core_hg() {
let expected = "data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&'()+,-.;=[]^`{}";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&'()+,-.;=[]^`{}xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345";
let expected = "data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&'()+,-.;=[]^`{}xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345";
check_fsencode_with_dotencode(&toencode[..], expected);
@ -154,7 +153,8 @@ fn test_fsencode_from_core_hg() {
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c.i";
let expected = "data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i";
let expected =
"data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT.i";
@ -178,11 +178,13 @@ fn test_fsencode_from_core_hg() {
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/foo.../foo / /a./_. /__/.x../ bla/.FOO/something.i";
let expected = "data/foo..~2e/foo ~20/~20/a~2e/__.~20/____/~2ex.~2e/~20 bla/~2e_f_o_o/something.i";
let expected =
"data/foo..~2e/foo ~20/~20/a~2e/__.~20/____/~2ex.~2e/~20 bla/~2e_f_o_o/something.i";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/c/co/com/com0/com1/com2/com3/com4/com5/com6/com7/com8/com9";
let expected = "data/c/co/com/com0/co~6d1/co~6d2/co~6d3/co~6d4/co~6d5/co~6d6/co~6d7/co~6d8/co~6d9";
let expected =
"data/c/co/com/com0/co~6d1/co~6d2/co~6d3/co~6d4/co~6d5/co~6d6/co~6d7/co~6d8/co~6d9";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/C/CO/COM/COM0/COM1/COM2/COM3/COM4/COM5/COM6/COM7/COM8/COM9";
@ -195,7 +197,8 @@ fn test_fsencode_from_core_hg() {
let toencode =
b"data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6/x.com7/x.com8/x.com9";
let expected = "data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6/x.com7/x.com8/x.com9";
let expected =
"data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6/x.com7/x.com8/x.com9";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/cx/cox/comx/com0x/com1x/com2x/com3x/com4x/com5x/com6x/com7x/com8x/com9x";
@ -207,7 +210,8 @@ fn test_fsencode_from_core_hg() {
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/l/lp/lpt/lpt0/lpt1/lpt2/lpt3/lpt4/lpt5/lpt6/lpt7/lpt8/lpt9";
let expected = "data/l/lp/lpt/lpt0/lp~741/lp~742/lp~743/lp~744/lp~745/lp~746/lp~747/lp~748/lp~749";
let expected =
"data/l/lp/lpt/lpt0/lp~741/lp~742/lp~743/lp~744/lp~745/lp~746/lp~747/lp~748/lp~749";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/L/LP/LPT/LPT0/LPT1/LPT2/LPT3/LPT4/LPT5/LPT6/LPT7/LPT8/LPT9";
@ -219,7 +223,8 @@ fn test_fsencode_from_core_hg() {
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5/x.lpt6/x.lpt7/x.lpt8/x.lpt9";
let expected = "data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5/x.lpt6/x.lpt7/x.lpt8/x.lpt9";
let expected =
"data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5/x.lpt6/x.lpt7/x.lpt8/x.lpt9";
check_fsencode_with_dotencode(&toencode[..], expected);
let toencode = b"data/lx/lpx/lptx/lpt0x/lpt1x/lpt2x/lpt3x/lpt4x/lpt5x/lpt6x/lpt7x/lpt8x/lpt9x";


@ -23,11 +23,11 @@ use std::sync::Arc;
use blobrepo::BlobRepo;
use futures::Future;
use futures::executor::spawn;
use mercurial_types::{Changeset, Entry, FileType, MPath, Manifest, RepoPath, Type, NULL_HASH};
use mercurial_types::{Changeset, Entry, FileType, MPath, Manifest, RepoPath, Type, D_NULL_HASH};
use mercurial_types::manifest::Content;
use mercurial_types::manifest_utils::{changed_entry_stream, diff_sorted_vecs, ChangedEntry,
EntryStatus};
use mercurial_types::nodehash::{EntryId, HgChangesetId, NodeHash};
use mercurial_types::nodehash::{DNodeHash, EntryId, HgChangesetId};
use mercurial_types_mocks::manifest::{ContentFactory, MockEntry};
use mercurial_types_mocks::nodehash;
@ -43,7 +43,7 @@ fn get_root_manifest(repo: Arc<BlobRepo>, changesetid: &HgChangesetId) -> Box<Ma
fn get_hash(c: char) -> EntryId {
let hash: String = repeat(c).take(40).collect();
EntryId::new(NodeHash::from_str(&hash).unwrap())
EntryId::new(DNodeHash::from_str(&hash).unwrap())
}
fn get_entry(ty: Type, hash: EntryId, path: RepoPath) -> Box<Entry + Sync> {
@ -270,8 +270,8 @@ fn check_changed_paths(
fn do_check(
repo: Arc<BlobRepo>,
main_hash: NodeHash,
base_hash: NodeHash,
main_hash: DNodeHash,
base_hash: DNodeHash,
expected_added: Vec<&str>,
expected_deleted: Vec<&str>,
expected_modified: Vec<&str>,
@ -311,8 +311,8 @@ fn do_check(
fn test_recursive_changed_entry_stream_simple() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = NodeHash::from_str("ecafdc4a4b6748b7a7215c6995f14c837dc1ebec").unwrap();
let base_hash = NodeHash::from_str("5a28e25f924a5d209b82ce0713d8d83e68982bc8").unwrap();
let main_hash = DNodeHash::from_str("ecafdc4a4b6748b7a7215c6995f14c837dc1ebec").unwrap();
let base_hash = DNodeHash::from_str("5a28e25f924a5d209b82ce0713d8d83e68982bc8").unwrap();
// main_hash is a child of base_hash
// hg st --change .
// A 2
@ -344,8 +344,8 @@ fn test_recursive_changed_entry_stream_simple() {
fn test_recursive_changed_entry_stream_changed_dirs() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = NodeHash::from_str("473b2e715e0df6b2316010908879a3c78e275dd9").unwrap();
let base_hash = NodeHash::from_str("ecafdc4a4b6748b7a7215c6995f14c837dc1ebec").unwrap();
let main_hash = DNodeHash::from_str("473b2e715e0df6b2316010908879a3c78e275dd9").unwrap();
let base_hash = DNodeHash::from_str("ecafdc4a4b6748b7a7215c6995f14c837dc1ebec").unwrap();
// main_hash is a child of base_hash
// hg st --change .
// A dir1/subdir1/subsubdir1/file_1
@ -375,8 +375,8 @@ fn test_recursive_changed_entry_stream_changed_dirs() {
fn test_recursive_changed_entry_stream_dirs_replaced_with_file() {
async_unit::tokio_unit_test(|| -> Result<_, !> {
let repo = Arc::new(many_files_dirs::getrepo(None));
let main_hash = NodeHash::from_str("a6cb7dddec32acaf9a28db46cdb3061682155531").unwrap();
let base_hash = NodeHash::from_str("473b2e715e0df6b2316010908879a3c78e275dd9").unwrap();
let main_hash = DNodeHash::from_str("a6cb7dddec32acaf9a28db46cdb3061682155531").unwrap();
let base_hash = DNodeHash::from_str("473b2e715e0df6b2316010908879a3c78e275dd9").unwrap();
// main_hash is a child of base_hash
// hg st --change .
// A dir1
@ -414,12 +414,12 @@ fn test_recursive_changed_entry_stream_dirs_replaced_with_file() {
#[test]
fn nodehash_option() {
assert_eq!(NULL_HASH.into_option(), None);
assert_eq!(NodeHash::from(None), NULL_HASH);
assert_eq!(D_NULL_HASH.into_option(), None);
assert_eq!(DNodeHash::from(None), D_NULL_HASH);
assert_eq!(nodehash::ONES_HASH.into_option(), Some(nodehash::ONES_HASH));
assert_eq!(
NodeHash::from(Some(nodehash::ONES_HASH)),
DNodeHash::from(Some(nodehash::ONES_HASH)),
nodehash::ONES_HASH
);
}


@ -11,7 +11,7 @@ use errors::*;
use itertools::Itertools;
use mercurial_types::{BlobNode, MPath, NodeHash};
use mercurial_types::{BlobNode, DNodeHash, MPath};
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct File {
@ -37,9 +37,7 @@ impl File {
.iter()
.enumerate()
.tuple_windows()
.find(|&((_, a), (_, b))| {
*a == META_MARKER[0] && *b == META_MARKER[1]
})
.find(|&((_, a), (_, b))| *a == META_MARKER[0] && *b == META_MARKER[1])
.map(|((idx, _), _)| idx + META_SZ * 2)
.unwrap_or(META_SZ); // XXX malformed if None - unterminated metadata
@ -75,7 +73,7 @@ impl File {
kv
}
pub fn copied_from(&self) -> Result<Option<(MPath, NodeHash)>> {
pub fn copied_from(&self) -> Result<Option<(MPath, DNodeHash)>> {
if !self.node.maybe_copied() {
return Ok(None);
}


@ -14,7 +14,7 @@ use ascii::{AsciiStr, AsciiString};
use quickcheck::{single_shrinker, Arbitrary, Gen};
use errors::*;
use mercurial_types::{self, RepoPath};
use mercurial_types::{DNodeHash, RepoPath};
use mercurial_types::hash::{self, Sha1};
use serde;
use sql_types::{HgChangesetIdSql, HgManifestIdSql};
@ -67,13 +67,13 @@ impl NodeHash {
/// This method is temporary (as the mercurial_types hashes are) and will go away once
/// transition to BonsaiChangesets is complete
#[inline]
pub fn into_mononoke(self) -> mercurial_types::NodeHash {
pub fn into_mononoke(self) -> DNodeHash {
#![allow(deprecated)]
mercurial_types::NodeHash::new(self.0)
DNodeHash::new(self.0)
}
/// Returns true if this Mercurial hash is equal to the given Mononoke Sha1-based hash
pub fn is_equal_to(&self, hash: mercurial_types::NodeHash) -> bool {
pub fn is_equal_to(&self, hash: DNodeHash) -> bool {
self.as_bytes() == hash.as_bytes()
}
}
@ -84,7 +84,7 @@ pub trait NodeHashConversion {
fn into_mercurial(self) -> NodeHash;
}
impl NodeHashConversion for mercurial_types::NodeHash {
impl NodeHashConversion for DNodeHash {
/// Method used to convert a Mononoke Sha1 based NodeHash into Mercurial Sha1 based NodeHash
/// without performing lookups in a remapping tables. It should be used only on Filenodes and
/// Manifests that are not Root Manifests.


@ -20,7 +20,7 @@ use futures_ext::FutureExt;
use asyncmemo::{Asyncmemo, Filler};
use blobrepo::BlobRepo;
use mercurial_types::{HgChangesetId, NodeHash, NULL_HASH};
use mercurial_types::{DNodeHash, HgChangesetId, D_NULL_HASH};
use nodehashkey::Key;
@ -58,9 +58,9 @@ impl RepoGenCache {
pub fn get(
&self,
repo: &Arc<BlobRepo>,
nodeid: NodeHash,
nodeid: DNodeHash,
) -> impl Future<Item = Generation, Error = Error> + Send {
if nodeid == NULL_HASH {
if nodeid == D_NULL_HASH {
Either::A(Ok(Generation(0)).into_future())
} else {
Either::B(self.cache.get((repo, nodeid.clone())))


@ -8,12 +8,12 @@ use std::hash::{Hash, Hasher};
use std::sync::Arc;
use heapsize::HeapSizeOf;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use ptrwrap::PtrWrap;
#[derive(Debug)]
pub struct Key<R>(pub PtrWrap<R>, pub NodeHash);
pub struct Key<R>(pub PtrWrap<R>, pub DNodeHash);
impl<R> Clone for Key<R> {
fn clone(&self) -> Self {
@ -41,14 +41,14 @@ impl<R> HeapSizeOf for Key<R> {
}
}
impl<'a, R> From<(&'a Arc<R>, NodeHash)> for Key<R> {
fn from((repo, hash): (&'a Arc<R>, NodeHash)) -> Self {
impl<'a, R> From<(&'a Arc<R>, DNodeHash)> for Key<R> {
fn from((repo, hash): (&'a Arc<R>, DNodeHash)) -> Self {
Key(From::from(repo), hash)
}
}
impl<'a, R> From<(&'a PtrWrap<R>, NodeHash)> for Key<R> {
fn from((repo, hash): (&'a PtrWrap<R>, NodeHash)) -> Self {
impl<'a, R> From<(&'a PtrWrap<R>, DNodeHash)> for Key<R> {
fn from((repo, hash): (&'a PtrWrap<R>, DNodeHash)) -> Self {
Key(repo.clone(), hash)
}
}


@ -17,7 +17,7 @@ use futures::future::Future;
use futures::stream::{iter_ok, Stream};
use blobrepo::BlobRepo;
use mercurial_types::{Changeset, NodeHash};
use mercurial_types::{Changeset, DNodeHash};
use mercurial_types::nodehash::HgChangesetId;
use repoinfo::{Generation, RepoGenCache};
@ -28,16 +28,16 @@ use errors::*;
pub struct AncestorsNodeStream {
repo: Arc<BlobRepo>,
repo_generation: RepoGenCache,
next_generation: BTreeMap<Generation, HashSet<NodeHash>>,
pending_changesets: Box<Stream<Item = (NodeHash, Generation), Error = Error> + Send>,
drain: IntoIter<NodeHash>,
next_generation: BTreeMap<Generation, HashSet<DNodeHash>>,
pending_changesets: Box<Stream<Item = (DNodeHash, Generation), Error = Error> + Send>,
drain: IntoIter<DNodeHash>,
}
fn make_pending(
repo: Arc<BlobRepo>,
repo_generation: RepoGenCache,
hashes: IntoIter<NodeHash>,
) -> Box<Stream<Item = (NodeHash, Generation), Error = Error> + Send> {
hashes: IntoIter<DNodeHash>,
) -> Box<Stream<Item = (DNodeHash, Generation), Error = Error> + Send> {
let size = hashes.size_hint().0;
let new_repo = repo.clone();
@ -62,8 +62,8 @@ fn make_pending(
}
impl AncestorsNodeStream {
pub fn new(repo: &Arc<BlobRepo>, repo_generation: RepoGenCache, hash: NodeHash) -> Self {
let node_set: HashSet<NodeHash> = hashset!{hash};
pub fn new(repo: &Arc<BlobRepo>, repo_generation: RepoGenCache, hash: DNodeHash) -> Self {
let node_set: HashSet<DNodeHash> = hashset!{hash};
AncestorsNodeStream {
repo: repo.clone(),
repo_generation: repo_generation.clone(),
@ -83,7 +83,7 @@ impl AncestorsNodeStream {
}
impl Stream for AncestorsNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
// Empty the drain if any - return all items for this generation
@ -137,7 +137,7 @@ pub fn common_ancestors<I>(
nodes: I,
) -> Box<NodeStream>
where
I: IntoIterator<Item = NodeHash>,
I: IntoIterator<Item = DNodeHash>,
{
let nodes_iter = nodes.into_iter().map({
let repo_generation = repo_generation.clone();
@ -152,7 +152,7 @@ pub fn greatest_common_ancestor<I>(
nodes: I,
) -> Box<NodeStream>
where
I: IntoIterator<Item = NodeHash>,
I: IntoIterator<Item = DNodeHash>,
{
Box::new(common_ancestors(repo, repo_generation, nodes).take(1))
}


@ -6,11 +6,11 @@
pub use failure::{Error, Result};
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
#[derive(Debug, Fail)]
pub enum ErrorKind {
#[fail(display = "repo error checking for node: {}", _0)] RepoError(NodeHash),
#[fail(display = "repo error checking for node: {}", _0)] RepoError(DNodeHash),
#[fail(display = "could not fetch node generation")] GenerationFetchFailed,
#[fail(display = "failed to fetch parent nodes")] ParentsFetchFailed,
}


@ -8,7 +8,7 @@ use blobrepo::BlobRepo;
use futures::Async;
use futures::Poll;
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::{Generation, RepoGenCache};
use std::boxed::Box;
use std::collections::HashMap;
@ -22,10 +22,10 @@ use errors::*;
use setcommon::*;
pub struct IntersectNodeStream {
inputs: Vec<(InputStream, Poll<Option<(NodeHash, Generation)>, Error>)>,
inputs: Vec<(InputStream, Poll<Option<(DNodeHash, Generation)>, Error>)>,
current_generation: Option<Generation>,
accumulator: HashMap<NodeHash, usize>,
drain: Option<IntoIter<NodeHash, usize>>,
accumulator: HashMap<DNodeHash, usize>,
drain: Option<IntoIter<DNodeHash, usize>>,
}
impl IntersectNodeStream {
@ -101,7 +101,7 @@ impl IntersectNodeStream {
}
impl Stream for IntersectNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
// This feels wrong, but in practice it's fine - it should be quick to hit a return, and


@ -15,7 +15,7 @@ extern crate mercurial_types;
extern crate repoinfo;
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
mod setcommon;
@ -34,7 +34,7 @@ pub use setdifferencenodestream::SetDifferenceNodeStream;
pub mod errors;
pub use errors::{Error, ErrorKind};
pub type NodeStream = Stream<Item = NodeHash, Error = errors::Error> + Send + 'static;
pub type NodeStream = Stream<Item = DNodeHash, Error = errors::Error> + Send + 'static;
mod validation;
pub use validation::ValidateNodeStream;


@ -15,7 +15,7 @@ use std::collections::HashSet;
use std::sync::Arc;
use blobrepo::BlobRepo;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::RepoGenCache;
use branch_even;
@ -36,7 +36,7 @@ use validation::ValidateNodeStream;
#[derive(Clone, Copy, Debug)]
enum RevsetEntry {
SingleNode(Option<NodeHash>),
SingleNode(Option<DNodeHash>),
SetDifference,
Intersect(usize),
Union(usize),
@ -53,7 +53,7 @@ impl RevsetSpec {
G: Rng,
{
let mut all_changesets_executor = spawn(repo.get_changesets());
let mut all_changesets: Vec<NodeHash> = Vec::new();
let mut all_changesets: Vec<DNodeHash> = Vec::new();
loop {
all_changesets.push(match all_changesets_executor.wait_stream() {
None => break,
@ -70,8 +70,8 @@ impl RevsetSpec {
}
}
pub fn as_hashes(&self) -> HashSet<NodeHash> {
let mut output: Vec<HashSet<NodeHash>> = Vec::new();
pub fn as_hashes(&self) -> HashSet<DNodeHash> {
let mut output: Vec<HashSet<DNodeHash>> = Vec::new();
for entry in self.rp_entries.iter() {
match entry {
&RevsetEntry::SingleNode(None) => panic!("You need to add_hashes first!"),


@ -15,7 +15,7 @@ use futures::future::Future;
use futures::stream::{self, iter_ok, Stream};
use blobrepo::BlobRepo;
use mercurial_types::{Changeset, NodeHash};
use mercurial_types::{Changeset, DNodeHash};
use mercurial_types::nodehash::HgChangesetId;
use repoinfo::{Generation, RepoGenCache};
@ -24,7 +24,7 @@ use errors::*;
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct HashGen {
hash: NodeHash,
hash: DNodeHash,
generation: Generation,
}
@ -37,13 +37,13 @@ struct ParentChild {
pub struct RangeNodeStream {
repo: Arc<BlobRepo>,
repo_generation: RepoGenCache,
start_node: NodeHash,
start_node: DNodeHash,
start_generation: Box<Stream<Item = Generation, Error = Error> + Send>,
children: HashMap<HashGen, HashSet<HashGen>>,
// Child, parent
pending_nodes: Box<Stream<Item = ParentChild, Error = Error> + Send>,
output_nodes: Option<BTreeMap<Generation, HashSet<NodeHash>>>,
drain: Option<IntoIter<NodeHash>>,
output_nodes: Option<BTreeMap<Generation, HashSet<DNodeHash>>>,
drain: Option<IntoIter<DNodeHash>>,
}
fn make_pending(
@ -78,8 +78,8 @@ impl RangeNodeStream {
pub fn new(
repo: &Arc<BlobRepo>,
repo_generation: RepoGenCache,
start_node: NodeHash,
end_node: NodeHash,
start_node: DNodeHash,
end_node: DNodeHash,
) -> Self {
let start_generation = Box::new(
repo_generation
@ -157,7 +157,7 @@ impl RangeNodeStream {
}
impl Stream for RangeNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
// Empty the drain; this can only happen once we're in Stage 2


@ -7,7 +7,7 @@
use blobrepo::BlobRepo;
use futures::future::Future;
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::{Generation, RepoGenCache};
use std::boxed::Box;
use std::sync::Arc;
@ -17,7 +17,7 @@ use errors::*;
use futures::{Async, Poll};
pub type InputStream = Box<Stream<Item = (NodeHash, Generation), Error = Error> + 'static + Send>;
pub type InputStream = Box<Stream<Item = (DNodeHash, Generation), Error = Error> + 'static + Send>;
pub fn add_generations(
stream: Box<NodeStream>,
@ -35,7 +35,7 @@ pub fn add_generations(
}
pub fn all_inputs_ready(
inputs: &Vec<(InputStream, Poll<Option<(NodeHash, Generation)>, Error>)>,
inputs: &Vec<(InputStream, Poll<Option<(DNodeHash, Generation)>, Error>)>,
) -> bool {
inputs
.iter()
@ -47,7 +47,7 @@ pub fn all_inputs_ready(
}
pub fn poll_all_inputs(
inputs: &mut Vec<(InputStream, Poll<Option<(NodeHash, Generation)>, Error>)>,
inputs: &mut Vec<(InputStream, Poll<Option<(DNodeHash, Generation)>, Error>)>,
) {
for &mut (ref mut input, ref mut state) in inputs.iter_mut() {
if let Ok(Async::NotReady) = *state {
@ -63,7 +63,7 @@ pub struct NotReadyEmptyStream {
#[cfg(test)]
impl Stream for NotReadyEmptyStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
@@ -78,12 +78,12 @@ impl Stream for NotReadyEmptyStream {
#[cfg(test)]
pub struct RepoErrorStream {
pub hash: NodeHash,
pub hash: DNodeHash,
}
#[cfg(test)]
impl Stream for RepoErrorStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
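
A sketch of driving the renamed RepoErrorStream in a test, assuming it fails on first poll as its name suggests (assert_repo_error is hypothetical):

use futures::stream::Stream;
use mercurial_types::DNodeHash;

// Polls the error stream once and checks that it fails rather than
// yielding the DNodeHash it was built with.
fn assert_repo_error(hash: DNodeHash) {
    let mut stream = RepoErrorStream { hash };
    match stream.poll() {
        Err(err) => println!("failed as expected: {}", err),
        Ok(_) => panic!("expected an error for {}", hash),
    }
}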

View File

@@ -7,7 +7,7 @@
use blobrepo::BlobRepo;
use futures::{Async, Poll};
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::{Generation, RepoGenCache};
use std::boxed::Box;
use std::collections::HashSet;
@@ -19,12 +19,12 @@ use setcommon::*;
pub struct SetDifferenceNodeStream {
keep_input: InputStream,
next_keep: Async<Option<(NodeHash, Generation)>>,
next_keep: Async<Option<(DNodeHash, Generation)>>,
remove_input: InputStream,
next_remove: Async<Option<(NodeHash, Generation)>>,
next_remove: Async<Option<(DNodeHash, Generation)>>,
remove_nodes: HashSet<NodeHash>,
remove_nodes: HashSet<DNodeHash>,
remove_generation: Option<Generation>,
}
@@ -50,14 +50,14 @@ impl SetDifferenceNodeStream {
return Box::new(self);
}
fn next_keep(&mut self) -> Result<&Async<Option<(NodeHash, Generation)>>> {
fn next_keep(&mut self) -> Result<&Async<Option<(DNodeHash, Generation)>>> {
if self.next_keep.is_not_ready() {
self.next_keep = self.keep_input.poll()?;
}
Ok(&self.next_keep)
}
fn next_remove(&mut self) -> Result<&Async<Option<(NodeHash, Generation)>>> {
fn next_remove(&mut self) -> Result<&Async<Option<(DNodeHash, Generation)>>> {
if self.next_remove.is_not_ready() {
self.next_remove = self.remove_input.poll()?;
}
@@ -66,7 +66,7 @@ impl SetDifferenceNodeStream {
}
impl Stream for SetDifferenceNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
// This feels wrong, but in practice it's fine - it should be quick to hit a return, and

View File

@@ -11,18 +11,18 @@ use failure::Error;
use futures::{Async, Poll};
use futures::future::Future;
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use mercurial_types::nodehash::HgChangesetId;
use NodeStream;
pub struct SingleNodeHash {
nodehash: Option<NodeHash>,
nodehash: Option<DNodeHash>,
exists: Box<Future<Item = bool, Error = Error> + Send>,
}
impl SingleNodeHash {
pub fn new(nodehash: NodeHash, repo: &BlobRepo) -> Self {
pub fn new(nodehash: DNodeHash, repo: &BlobRepo) -> Self {
let changesetid = HgChangesetId::new(nodehash);
let exists = Box::new(repo.changeset_exists(&changesetid));
let nodehash = Some(nodehash);
@@ -35,7 +35,7 @@ impl SingleNodeHash {
}
impl Stream for SingleNodeHash {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
if self.nodehash.is_none() {
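
The stream's post-rename surface in one sketch: a DNodeHash in, at most one DNodeHash out, with existence checked through HgChangesetId as above. string_to_nodehash is the test helper from this diff's testutils, and the hash is the linear fixture used in the tests below:

use blobrepo::BlobRepo;
use futures::executor::spawn;

fn single_node_roundtrip(repo: &BlobRepo) {
    let node = string_to_nodehash("a5ffa77602a066db7d5cfb9fb5823a0895717c5a");
    let mut stream = spawn(SingleNodeHash::new(node, repo));
    // The stream yields the DNodeHash once (if the changeset exists), then ends.
    match stream.wait_stream() {
        Some(Ok(hash)) => assert_eq!(hash, node),
        other => panic!("unexpected stream result: {:?}", other),
    }
}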

View File

@@ -9,14 +9,14 @@ use ascii::AsciiString;
use blobrepo::BlobRepo;
use futures::Future;
use futures::executor::spawn;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::RepoGenCache;
use std::collections::HashSet;
use std::sync::Arc;
pub fn string_to_nodehash(hash: &'static str) -> NodeHash {
NodeHash::from_ascii_str(&AsciiString::from_ascii(hash).expect("Can't turn string to AsciiString"))
.expect("Can't turn AsciiString to NodeHash")
pub fn string_to_nodehash(hash: &'static str) -> DNodeHash {
DNodeHash::from_ascii_str(&AsciiString::from_ascii(hash).expect("Can't turn string to AsciiString"))
.expect("Can't turn AsciiString to DNodeHash")
}
/// Accounting for reordering within generations, ensure that a NodeStream gives the expected
@@ -27,7 +27,7 @@ pub fn assert_node_sequence<I>(
hashes: I,
stream: Box<NodeStream>,
) where
I: IntoIterator<Item = NodeHash>,
I: IntoIterator<Item = DNodeHash>,
{
let mut nodestream = spawn(stream);
let mut received_hashes = HashSet::new();
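
Both DNodeHash constructions that appear in this diff, side by side; a sketch with the hex argument as a placeholder:

use std::str::FromStr;

use ascii::AsciiString;
use mercurial_types::DNodeHash;

// from_str is used by the generated test repos below; from_ascii_str is
// used by string_to_nodehash above. Both parse the 40-character hex form.
fn parse_both_ways(hex: &str) -> (DNodeHash, DNodeHash) {
    let via_from_str = DNodeHash::from_str(hex).expect("Can't parse str to DNodeHash");
    let via_ascii = DNodeHash::from_ascii_str(
        &AsciiString::from_ascii(hex).expect("Can't turn string to AsciiString"),
    ).expect("Can't turn AsciiString to DNodeHash");
    (via_from_str, via_ascii)
}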

View File

@@ -8,7 +8,7 @@ use blobrepo::BlobRepo;
use futures::Async;
use futures::Poll;
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::{Generation, RepoGenCache};
use std::boxed::Box;
use std::collections::HashSet;
@@ -23,10 +23,10 @@ use NodeStream;
use setcommon::*;
pub struct UnionNodeStream {
inputs: Vec<(InputStream, Poll<Option<(NodeHash, Generation)>, Error>)>,
inputs: Vec<(InputStream, Poll<Option<(DNodeHash, Generation)>, Error>)>,
current_generation: Option<Generation>,
accumulator: HashSet<NodeHash>,
drain: Option<IntoIter<NodeHash>>,
accumulator: HashSet<DNodeHash>,
drain: Option<IntoIter<DNodeHash>>,
}
impl UnionNodeStream {
@@ -95,7 +95,7 @@ impl UnionNodeStream {
}
impl Stream for UnionNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {

View File

@@ -11,7 +11,7 @@ use blobrepo::BlobRepo;
use failure::Error;
use futures::{Async, Poll};
use futures::stream::Stream;
use mercurial_types::NodeHash;
use mercurial_types::DNodeHash;
use repoinfo::{Generation, RepoGenCache};
use NodeStream;
@@ -24,7 +24,7 @@ use setcommon::{add_generations, InputStream};
pub struct ValidateNodeStream {
wrapped: InputStream,
last_generation: Option<Generation>,
seen_hashes: HashSet<NodeHash>,
seen_hashes: HashSet<DNodeHash>,
}
impl ValidateNodeStream {
@@ -46,7 +46,7 @@ impl ValidateNodeStream {
}
impl Stream for ValidateNodeStream {
type Item = NodeHash;
type Item = DNodeHash;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {

View File

@@ -37,8 +37,8 @@ use blobrepo::BlobChangeset;
use bundle2_resolver;
use mercurial::{self, NodeHashConversion, RevlogChangeset};
use mercurial_bundles::{parts, Bundle2EncodeBuilder, Bundle2Item};
use mercurial_types::{percent_encode, Changeset, Entry, HgChangesetId, HgManifestId, MPath,
NodeHash, Parents, RepoPath, RepositoryId, Type, NULL_HASH};
use mercurial_types::{percent_encode, Changeset, DNodeHash, Entry, HgChangesetId, HgManifestId,
MPath, Parents, RepoPath, RepositoryId, Type, D_NULL_HASH};
use mercurial_types::manifest_utils::{changed_entry_stream, EntryStatus};
use metaconfig::repoconfig::RepoType;
@@ -328,7 +328,7 @@ impl RepoClient {
let repo_generation = &self.repo.repo_generation;
let hgrepo = &self.repo.hgrepo;
let ancestors_stream = |nodes: &Vec<NodeHash>| -> Box<NodeStream> {
let ancestors_stream = |nodes: &Vec<DNodeHash>| -> Box<NodeStream> {
let heads_ancestors = nodes.iter().map(|head| {
AncestorsNodeStream::new(&hgrepo, repo_generation.clone(), *head).boxed()
});
@@ -435,7 +435,7 @@ impl RepoClient {
.basemfnodes
.get(0)
.map(|h| h.into_mononoke())
.unwrap_or(NULL_HASH);
.unwrap_or(D_NULL_HASH);
if params.rootdir.len() != 0 {
// For now, only root repo
@@ -494,13 +494,13 @@ impl HgCommands for RepoClient {
struct ParentStream<CS> {
repo: Arc<HgRepo>,
n: NodeHash,
bottom: NodeHash,
n: DNodeHash,
bottom: DNodeHash,
wait_cs: Option<CS>,
};
impl<CS> ParentStream<CS> {
fn new(repo: &Arc<HgRepo>, top: NodeHash, bottom: NodeHash) -> Self {
fn new(repo: &Arc<HgRepo>, top: DNodeHash, bottom: DNodeHash) -> Self {
ParentStream {
repo: repo.clone(),
n: top,
@@ -511,11 +511,11 @@ impl HgCommands for RepoClient {
}
impl Stream for ParentStream<BoxFuture<BlobChangeset, hgproto::Error>> {
type Item = NodeHash;
type Item = DNodeHash;
type Error = hgproto::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
if self.n == self.bottom || self.n == NULL_HASH {
if self.n == self.bottom || self.n == D_NULL_HASH {
return Ok(Async::Ready(None));
}
@@ -530,7 +530,7 @@ impl HgCommands for RepoClient {
self.wait_cs = None; // got it
let p = match cs.parents() {
&Parents::None => NULL_HASH,
&Parents::None => D_NULL_HASH,
&Parents::One(ref p) => *p,
&Parents::Two(ref p, _) => *p,
};
@@ -786,8 +786,8 @@ impl HgCommands for RepoClient {
fn get_changed_entry_stream(
repo: Arc<BlobRepo>,
mfid: &NodeHash,
basemfid: &NodeHash,
mfid: &DNodeHash,
basemfid: &DNodeHash,
) -> BoxStream<(Box<Entry + Sync>, Option<MPath>), Error> {
let manifest = repo.get_manifest_by_nodeid(mfid)
.traced_global("fetch rootmf", trace_args!());
@@ -918,10 +918,10 @@ fn fetch_treepack_part_input(
fn get_file_history(
repo: Arc<BlobRepo>,
startnode: NodeHash,
startnode: DNodeHash,
path: MPath,
) -> BoxStream<(NodeHash, Parents, NodeHash, Option<(MPath, NodeHash)>), Error> {
if startnode == NULL_HASH {
) -> BoxStream<(DNodeHash, Parents, DNodeHash, Option<(MPath, DNodeHash)>), Error> {
if startnode == D_NULL_HASH {
return stream::empty().boxify();
}
let mut startstate = VecDeque::new();
@@ -931,7 +931,7 @@ fn get_file_history(
stream::unfold(
(startstate, seen_nodes),
move |cur_data: (VecDeque<NodeHash>, HashSet<NodeHash>)| {
move |cur_data: (VecDeque<DNodeHash>, HashSet<DNodeHash>)| {
let (mut nodes, mut seen_nodes) = cur_data;
let node = nodes.pop_front()?;
@@ -969,7 +969,7 @@ fn get_file_history(
fn create_remotefilelog_blob(
repo: Arc<BlobRepo>,
node: NodeHash,
node: DNodeHash,
path: MPath,
) -> BoxFuture<Bytes, Error> {
// raw_content includes copy information
@@ -1008,8 +1008,8 @@ fn create_remotefilelog_blob(
for (node, parents, linknode, copy) in history {
let (p1, p2) = match parents {
Parents::None => (NULL_HASH, NULL_HASH),
Parents::One(p) => (p, NULL_HASH),
Parents::None => (D_NULL_HASH, D_NULL_HASH),
Parents::One(p) => (p, D_NULL_HASH),
Parents::Two(p1, p2) => (p1, p2),
};
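
The Parents unpacking repeated above is where D_NULL_HASH earns its keep as a padding value; a hypothetical helper, built only from calls visible in this diff, makes the pattern explicit:

use mercurial_types::{DNodeHash, Parents, D_NULL_HASH};

// Collapses the three Parents variants into a (p1, p2) pair, padding
// missing slots with the renamed D_NULL_HASH sentinel.
fn parents_to_pair(parents: &Parents) -> (DNodeHash, DNodeHash) {
    match parents {
        &Parents::None => (D_NULL_HASH, D_NULL_HASH),
        &Parents::One(p) => (p, D_NULL_HASH),
        &Parents::Two(p1, p2) => (p1, p2),
    }
}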

View File

@@ -61,7 +61,7 @@ use changesets::{Changesets, ChangesetInsert, SqliteChangesets};
use memblob::EagerMemblob;
use dbbookmarks::SqliteDbBookmarks;
use dieselfilenodes::SqliteFilenodes;
use mercurial_types::{HgChangesetId, NodeHash, RepositoryId};
use mercurial_types::{HgChangesetId, DNodeHash, RepositoryId};
use memheads::MemHeads;
use blobrepo::BlobRepo;
use ascii::AsciiString;
@@ -104,7 +104,7 @@ pub fn getrepo(logger: Option<Logger>) -> BlobRepo {
commit_hash = split[0]
writeline(
'let cs_id = HgChangesetId::new(NodeHash::from_str("{}").unwrap());'.
'let cs_id = HgChangesetId::new(DNodeHash::from_str("{}").unwrap());'.
format(commit_hash)
)
writeline('let parents = vec![')
@@ -112,12 +112,12 @@ pub fn getrepo(logger: Option<Logger>) -> BlobRepo {
indent += 1
for p in split[1:-1]:
writeline(
'HgChangesetId::new(NodeHash::from_str("{}").unwrap()), '.
'HgChangesetId::new(DNodeHash::from_str("{}").unwrap()), '.
format(p)
)
writeline(
'HgChangesetId::new(NodeHash::from_str("{}").unwrap())'.
'HgChangesetId::new(DNodeHash::from_str("{}").unwrap())'.
format(split[-1])
)
indent -= 1
@@ -138,7 +138,7 @@ pub fn getrepo(logger: Option<Logger>) -> BlobRepo {
for head in glob.glob(os.path.join(args.source, "heads", "head-*")):
head = head[-40:]
writeline(
'heads.add(&NodeHash::from_ascii_str(&AsciiString::from_ascii("{}").unwrap()).unwrap()).wait().expect("Head put failed");'.
'heads.add(&DNodeHash::from_ascii_str(&AsciiString::from_ascii("{}").unwrap()).unwrap()).wait().expect("Head put failed");'.
format(head)
)
writeline("")

View File

@@ -19,7 +19,7 @@ use bytes::Bytes;
use futures::executor::spawn;
use mercurial_types::{Changeset, FileType, MPathElement};
use mercurial_types::manifest::{Content, Type};
use mercurial_types::nodehash::{HgChangesetId, NodeHash};
use mercurial_types::nodehash::{DNodeHash, HgChangesetId};
#[test]
fn check_heads() {
@@ -32,11 +32,11 @@ fn check_heads() {
assert!(
if let Some(Ok(hash)) = heads.wait_stream() {
hash
== NodeHash::from_ascii_str(&AsciiString::from_ascii(
== DNodeHash::from_ascii_str(&AsciiString::from_ascii(
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
).expect("Can't turn string to AsciiString"))
.expect(
"Can't turn AsciiString to NodeHash",
"Can't turn AsciiString to DNodeHash",
)
} else {
false
@@ -60,11 +60,11 @@ fn check_head_exists() {
async_unit::tokio_unit_test(|| {
let repo = linear::getrepo(None);
let nodehash = NodeHash::from_ascii_str(&AsciiString::from_ascii(
let nodehash = DNodeHash::from_ascii_str(&AsciiString::from_ascii(
"a5ffa77602a066db7d5cfb9fb5823a0895717c5a",
).expect("Can't turn string to AsciiString"))
.expect(
"Can't turn AsciiString to NodeHash",
"Can't turn AsciiString to DNodeHash",
);
let exists_future = repo.changeset_exists(&HgChangesetId::new(nodehash));
@@ -85,7 +85,7 @@ fn check_head_has_file() {
repo.get_changeset_by_changesetid(&HgChangesetId::from_ascii_str(
&AsciiString::from_ascii("a5ffa77602a066db7d5cfb9fb5823a0895717c5a")
.expect("Can't turn string to AsciiString"),
).expect("Can't turn AsciiString to NodeHash"));
).expect("Can't turn AsciiString to DNodeHash"));
let changeset = spawn(changeset_future)
.wait_future()
.expect("Can't get changeset");