Reformat Mononoke thrift files to match arc f

Summary: I have my editor set up to format on save - let's unify this on the standard FB format for thrift files, so that I don't create junk.

Reviewed By: ahornby

Differential Revision: D30285082

fbshipit-source-id: 17b09635a2473174a92e29bb042432dbac44865a
Authored by Simon Farnsworth on 2021-08-16 04:41:39 -07:00, committed by Facebook GitHub Bot
parent e2d57e9f02
commit bfb9db07b7
13 changed files with 1208 additions and 1317 deletions
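The diffs below apply two mechanical conventions, visible in every hunk: statements end with a semicolon instead of a comma, and annotation lists that make a line too long are wrapped with one entry per line. A minimal before/after sketch of the rule (ExampleEntry is a hypothetical struct name used only for illustration; the field shapes are taken from the hunks below):

// Before: comma terminators, long annotation kept on one line (hypothetical struct)
struct ExampleEntry {
  1: required i32 repo_id,
  2: map<string, binary> (rust.type = "sorted_vector_map::SortedVectorMap") extra,
}

// After: the standard FB thrift format this commit adopts (as arc f would produce):
// semicolon terminators, long annotation wrapped one entry per line
struct ExampleEntry {
  1: required i32 repo_id;
  2: map<string, binary> (
    rust.type = "sorted_vector_map::SortedVectorMap",
  ) extra;
}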

Changed file 1 of 13

@@ -6,6 +6,6 @@
*/
union LockState {
1: string locked_by,
2: string uploaded_key,
1: string locked_by;
2: string uploaded_key;
}

Changed file 2 of 13

@@ -9,11 +9,11 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
# Memcache constants. Should be change when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 0
const i32 MC_SITEVER = 0
const i32 MC_CODEVER = 0;
const i32 MC_SITEVER = 0;
struct BonsaiGlobalrevMappingEntry {
1: required i32 repo_id,
2: required mononoke_types_thrift.ChangesetId bcs_id,
3: required i64 globalrev,
1: required i32 repo_id;
2: required mononoke_types_thrift.ChangesetId bcs_id;
3: required i64 globalrev;
}

Changed file 3 of 13

@@ -10,13 +10,13 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
# Memcache constants. Should be change when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 0
const i32 MC_SITEVER = 3
const i32 MC_CODEVER = 0;
const i32 MC_SITEVER = 3;
typedef i32 RepoId (rust.newtype)
struct BonsaiHgMappingEntry {
1: required RepoId repo_id,
2: required mononoke_types_thrift.ChangesetId bcs_id,
3: required mercurial_thrift.HgNodeHash hg_cs_id,
1: required RepoId repo_id;
2: required mononoke_types_thrift.ChangesetId bcs_id;
3: required mercurial_thrift.HgNodeHash hg_cs_id;
}

Changed file 4 of 13

@@ -9,11 +9,11 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
# Memcache constants. Should be change when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 0
const i32 MC_SITEVER = 0
const i32 MC_CODEVER = 0;
const i32 MC_SITEVER = 0;
struct BonsaiSvnrevMappingEntry {
1: required i32 repo_id,
2: required mononoke_types_thrift.ChangesetId bcs_id,
3: required i64 svnrev,
1: required i32 repo_id;
2: required mononoke_types_thrift.ChangesetId bcs_id;
3: required i64 svnrev;
}

Changed file 5 of 13

@@ -9,17 +9,17 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
# Memcache constants. Should be change when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 0
const i32 MC_SITEVER = 0
const i32 MC_CODEVER = 0;
const i32 MC_SITEVER = 0;
typedef i32 RepoId (rust.newtype)
// Thrift does not support unsigned, so using i64 here
// Thrift does not support unsigned, so using i64 here
typedef i64 GenerationNum (rust.newtype)
struct ChangesetEntry {
1: required RepoId repo_id,
2: required mononoke_types_thrift.ChangesetId cs_id,
3: required list<mononoke_types_thrift.ChangesetId> parents,
4: required GenerationNum gen,
1: required RepoId repo_id;
2: required mononoke_types_thrift.ChangesetId cs_id;
3: required list<mononoke_types_thrift.ChangesetId> parents;
4: required GenerationNum gen;
}

Changed file 6 of 13

@@ -16,14 +16,16 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
// reason the commit is being fetched.
struct ChangesetInfo {
// Changeset id of the source Bonsai changeset
1: mononoke_types_thrift.ChangesetId changeset_id,
2: list<mononoke_types_thrift.ChangesetId> parents,
3: string author,
4: mononoke_types_thrift.DateTime author_date,
5: optional string committer,
6: optional mononoke_types_thrift.DateTime committer_date,
7: ChangesetMessage message,
8: map<string, binary> (rust.type = "sorted_vector_map::SortedVectorMap") extra,
1: mononoke_types_thrift.ChangesetId changeset_id;
2: list<mononoke_types_thrift.ChangesetId> parents;
3: string author;
4: mononoke_types_thrift.DateTime author_date;
5: optional string committer;
6: optional mononoke_types_thrift.DateTime committer_date;
7: ChangesetMessage message;
8: map<string, binary> (
rust.type = "sorted_vector_map::SortedVectorMap",
) extra;
}
// Commit message is represented by a separate union of formats for the future
@@ -31,5 +33,5 @@ struct ChangesetInfo {
// At some point we may like to store large commit messages as separate blobs to
// make fetching changesets faster if there is no need in the whole description.
union ChangesetMessage {
1: string message,
1: string message;
}

Changed file 7 of 13

@@ -10,26 +10,26 @@ include "eden/mononoke/mercurial/types/if/mercurial_thrift.thrift"
# Memcache constants. Should be change when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 3
const i32 MC_SITEVER = 1
const i32 MC_CODEVER = 3;
const i32 MC_SITEVER = 1;
union FilenodeInfoList {
1: list<FilenodeInfo> Data,
2: list<i64> Pointers,
1: list<FilenodeInfo> Data;
2: list<i64> Pointers;
// This actual value is ignored
3: byte TooBig,
3: byte TooBig;
}
struct FilenodeInfo {
// 1 was used previously.
2: required mercurial_thrift.HgNodeHash filenode,
3: optional mercurial_thrift.HgNodeHash p1,
4: optional mercurial_thrift.HgNodeHash p2,
5: optional FilenodeCopyFrom copyfrom,
6: required mercurial_thrift.HgNodeHash linknode,
2: required mercurial_thrift.HgNodeHash filenode;
3: optional mercurial_thrift.HgNodeHash p1;
4: optional mercurial_thrift.HgNodeHash p2;
5: optional FilenodeCopyFrom copyfrom;
6: required mercurial_thrift.HgNodeHash linknode;
}
struct FilenodeCopyFrom {
1: required mononoke_types_thrift.RepoPath path,
2: required mercurial_thrift.HgNodeHash filenode,
1: required mononoke_types_thrift.RepoPath path;
2: required mercurial_thrift.HgNodeHash filenode;
}

Changed file 8 of 13

@@ -8,22 +8,22 @@
include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
struct BlobHandle {
1: mononoke_types_thrift.GitSha1 oid,
2: i64 size,
3: mononoke_types_thrift.FileType file_type,
1: mononoke_types_thrift.GitSha1 oid;
2: i64 size;
3: mononoke_types_thrift.FileType file_type;
}
struct TreeHandle {
1: mononoke_types_thrift.GitSha1 oid,
2: i64 size,
1: mononoke_types_thrift.GitSha1 oid;
2: i64 size;
}
union TreeMember {
1: BlobHandle Blob,
2: TreeHandle Tree,
1: BlobHandle Blob;
2: TreeHandle Tree;
}
struct Tree {
1: TreeHandle handle,
2: map<mononoke_types_thrift.MPathElement, TreeMember> members,
1: TreeHandle handle;
2: map<mononoke_types_thrift.MPathElement, TreeMember> members;
}

Changed file 9 of 13

@@ -12,36 +12,36 @@ typedef mononoke_types_thrift.Sha1 HgNodeHash (rust.newtype)
// Changeset contents are stored inline.
struct HgChangesetEnvelope {
// The node ID is expected to match the contents exactly.
1: required HgNodeHash node_id,
2: optional HgNodeHash p1,
3: optional HgNodeHash p2,
1: required HgNodeHash node_id;
2: optional HgNodeHash p1;
3: optional HgNodeHash p2;
// These contents are exactly as they would be serialized by Mercurial.
4: optional binary contents,
4: optional binary contents;
}
// Manifest contents are expected to generally be small, so they're stored
// inline in the envelope. There's also no real dedup possible between native
// Mononoke data structures and these ones.
struct HgManifestEnvelope {
1: required HgNodeHash node_id,
2: optional HgNodeHash p1,
3: optional HgNodeHash p2,
1: required HgNodeHash node_id;
2: optional HgNodeHash p1;
3: optional HgNodeHash p2;
// Root tree manifest nodes can have node IDs that don't match the contents.
// That is required for lookups, but it means that in the event of recovery
// from a disaster, hash consistency can't be checked. The computed node ID
// is stored to allow that to happen.
4: required HgNodeHash computed_node_id,
4: required HgNodeHash computed_node_id;
// These contents are exactly as they would be serialized by Mercurial.
5: optional binary contents,
5: optional binary contents;
}
struct HgFileEnvelope {
1: required HgNodeHash node_id,
2: optional HgNodeHash p1,
3: optional HgNodeHash p2,
4: optional mononoke_types_thrift.ContentId content_id,
1: required HgNodeHash node_id;
2: optional HgNodeHash p1;
3: optional HgNodeHash p2;
4: optional mononoke_types_thrift.ContentId content_id;
// content_size is a u64 stored as an i64, and doesn't include the size of
// the metadata
5: required i64 content_size,
6: optional binary metadata,
5: required i64 content_size;
6: optional binary metadata;
}

Changed file 10 of 13

@@ -9,31 +9,31 @@ include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
include "eden/mononoke/mercurial/types/if/mercurial_thrift.thrift"
// Code version constant -- update to invalidate saved state.
const i32 CODEVER = 1
const i32 CODEVER = 1;
struct FilenodeSnapshot {
// Note: required fields are enforced at runtime here (to prevent Thift from
// giving us garbage values and calling those acceptable).
1: optional mononoke_types_thrift.RepoPath path,
2: optional mercurial_thrift.HgNodeHash filenode,
3: optional mercurial_thrift.HgNodeHash p1,
4: optional mercurial_thrift.HgNodeHash p2,
5: optional CopyInfoSnapshot copyfrom,
6: optional mercurial_thrift.HgNodeHash linknode,
1: optional mononoke_types_thrift.RepoPath path;
2: optional mercurial_thrift.HgNodeHash filenode;
3: optional mercurial_thrift.HgNodeHash p1;
4: optional mercurial_thrift.HgNodeHash p2;
5: optional CopyInfoSnapshot copyfrom;
6: optional mercurial_thrift.HgNodeHash linknode;
}
struct CopyInfoSnapshot {
1: optional mononoke_types_thrift.RepoPath path,
2: optional mercurial_thrift.HgNodeHash filenode,
1: optional mononoke_types_thrift.RepoPath path;
2: optional mercurial_thrift.HgNodeHash filenode;
}
struct ChangesetSnapshot {
1: optional mononoke_types_thrift.ChangesetId cs_id,
2: optional list<mononoke_types_thrift.ChangesetId> parents,
3: optional i64 gen,
1: optional mononoke_types_thrift.ChangesetId cs_id;
2: optional list<mononoke_types_thrift.ChangesetId> parents;
3: optional i64 gen;
}
struct RepoSnapshot {
1: optional list<FilenodeSnapshot> filenodes,
2: optional list<ChangesetSnapshot> changesets,
1: optional list<FilenodeSnapshot> filenodes;
2: optional list<ChangesetSnapshot> changesets;
}

Changed file 11 of 13

@@ -24,7 +24,7 @@ typedef binary (rust.type = "bytes::Bytes") binary_bytes
// Allow the hash type to change in the future.
union IdType {
1: Blake2 Blake2,
1: Blake2 Blake2;
} (rust.ord)
typedef IdType ChangesetId (rust.newtype)
@@ -33,9 +33,9 @@ typedef IdType ContentChunkId (rust.newtype)
typedef IdType RawBundle2Id (rust.newtype)
typedef IdType FileUnodeId (rust.newtype)
typedef IdType ManifestUnodeId (rust.newtype)
typedef IdType DeletedManifestId(rust.newtype)
typedef IdType DeletedManifestId (rust.newtype)
typedef IdType FsnodeId (rust.newtype)
typedef IdType SkeletonManifestId(rust.newtype)
typedef IdType SkeletonManifestId (rust.newtype)
typedef IdType MPathHash (rust.newtype)
typedef IdType ContentMetadataId (rust.newtype)
@@ -51,19 +51,25 @@ typedef binary Sha1 (rust.newtype, rust.type = "smallvec::SmallVec<[u8; 20]>")
// Other content alias types
typedef binary Sha256 (rust.newtype, rust.type = "smallvec::SmallVec<[u8; 32]>")
typedef binary GitSha1 (rust.newtype, rust.type = "smallvec::SmallVec<[u8; 20]>")
typedef binary GitSha1 (
rust.newtype,
rust.type = "smallvec::SmallVec<[u8; 20]>",
)
// A path in a repo is stored as a list of elements. This is so that the sort
// order of paths is the same as that of a tree traversal, so that deltas on
// manifests can be applied in a streaming way.
typedef binary MPathElement (rust.newtype, rust.type = "smallvec::SmallVec<[u8; 24]>")
typedef binary MPathElement (
rust.newtype,
rust.type = "smallvec::SmallVec<[u8; 24]>",
)
typedef list<MPathElement> MPath (rust.newtype)
union RepoPath {
# Thrift language doesn't support void here, so put a dummy bool
1: bool RootPath,
2: MPath DirectoryPath,
3: MPath FilePath,
1: bool RootPath;
2: MPath DirectoryPath;
3: MPath FilePath;
}
// Parent ordering
@@ -101,29 +107,33 @@ union RepoPath {
// * Corollary: The file list in Mercurial is not pcf, so the Bonsai diff is
// computed separately.
struct BonsaiChangeset {
1: required list<ChangesetId> parents,
2: string author,
3: optional DateTime author_date,
1: required list<ChangesetId> parents;
2: string author;
3: optional DateTime author_date;
// Mercurial won't necessarily have a committer, so this is optional.
4: optional string committer,
5: optional DateTime committer_date,
6: string message,
7: map<string, binary> (rust.type = "sorted_vector_map::SortedVectorMap") extra,
8: map<MPath, FileChangeOpt> (rust.type = "sorted_vector_map::SortedVectorMap") file_changes,
4: optional string committer;
5: optional DateTime committer_date;
6: string message;
7: map<string, binary> (
rust.type = "sorted_vector_map::SortedVectorMap",
) extra;
8: map<MPath, FileChangeOpt> (
rust.type = "sorted_vector_map::SortedVectorMap",
) file_changes;
}
// DateTime fields do not have a reasonable default value! They must
// always be required or optional.
struct DateTime {
1: required i64 timestamp_secs,
1: required i64 timestamp_secs;
// Timezones can go up to UTC+13 (which would be represented as -46800), so
// an i16 can't fit them.
2: required i32 tz_offset_secs,
2: required i32 tz_offset_secs;
}
struct ContentChunkPointer {
1: ContentChunkId chunk_id,
2: i64 size,
1: ContentChunkId chunk_id;
2: i64 size;
}
// When a file is chunked, we reprsent it as a list of its chunks, as well as
@@ -133,24 +143,24 @@ struct ChunkedFileContents {
// FileContents reprseentation in Mononoke, which would normally require
// hashing the contents (but we obviously can't do that here, since we don't
// have the contents).
1: ContentId content_id,
2: list<ContentChunkPointer> chunks,
1: ContentId content_id;
2: list<ContentChunkPointer> chunks;
}
union FileContents {
// Plain uncompressed bytes - WYSIWYG.
1: binary_bytes Bytes,
1: binary_bytes Bytes;
// References to Chunks (stored as FileContents, too).
2: ChunkedFileContents Chunked,
2: ChunkedFileContents Chunked;
}
union ContentChunk {
1: binary_bytes Bytes,
1: binary_bytes Bytes;
}
// Payload of object which is an alias
union ContentAlias {
1: ContentId ContentId, // File content alias
1: ContentId ContentId; // File content alias
}
// Metadata about a file. This includes hahs aliases, or the file's size.
@@ -161,17 +171,17 @@ union ContentAlias {
// value).
struct ContentMetadata {
// total_size is needed to make GitSha1 meaningful, but generally useful
1: optional i64 total_size,
1: optional i64 total_size;
// ContentId we're providing metadata for
2: optional ContentId content_id,
3: optional Sha1 sha1,
4: optional Sha256 sha256,
2: optional ContentId content_id;
3: optional Sha1 sha1;
4: optional Sha256 sha256;
// always object type "blob"
5: optional GitSha1 git_sha1,
5: optional GitSha1 git_sha1;
}
union RawBundle2 {
1: binary Bytes,
1: binary Bytes;
}
enum FileType {
@@ -185,90 +195,94 @@ struct FileChangeOpt {
// At most one value can be present.
// Changes to a tracked file
1: optional FileChange change,
1: optional FileChange change;
// This is a change to an untracked file in a snapshot commit.
2: optional UntrackedFileChange untracked_change,
2: optional UntrackedFileChange untracked_change;
// Present if this is a missing file in a snapshot commit.
3: optional UntrackedDeletion untracked_deletion,
3: optional UntrackedDeletion untracked_deletion;
}
struct UntrackedDeletion {
// Additional state (if necessary)
// Additional state (if necessary)
}
struct UntrackedFileChange {
1: ContentId content_id,
2: FileType file_type,
3: i64 size,
1: ContentId content_id;
2: FileType file_type;
3: i64 size;
}
struct FileChange {
1: required ContentId content_id,
2: FileType file_type,
1: required ContentId content_id;
2: FileType file_type;
// size is a u64 stored as an i64
3: required i64 size,
4: optional CopyInfo copy_from,
3: required i64 size;
4: optional CopyInfo copy_from;
}
// This is only used optionally so it is OK to use `required` here.
struct CopyInfo {
1: required MPath file,
1: required MPath file;
// cs_id must match one of the parents specified in BonsaiChangeset
2: required ChangesetId cs_id,
2: required ChangesetId cs_id;
}
struct FileUnode {
1: list<FileUnodeId> parents,
2: ContentId content_id,
3: FileType file_type,
4: MPathHash path_hash,
5: ChangesetId linknode,
1: list<FileUnodeId> parents;
2: ContentId content_id;
3: FileType file_type;
4: MPathHash path_hash;
5: ChangesetId linknode;
}
union UnodeEntry {
1: FileUnodeId File,
2: ManifestUnodeId Directory,
1: FileUnodeId File;
2: ManifestUnodeId Directory;
}
struct ManifestUnode {
1: list<ManifestUnodeId> parents,
2: map<MPathElement, UnodeEntry> (rust.type = "sorted_vector_map::SortedVectorMap") subentries,
3: ChangesetId linknode,
1: list<ManifestUnodeId> parents;
2: map<MPathElement, UnodeEntry> (
rust.type = "sorted_vector_map::SortedVectorMap",
) subentries;
3: ChangesetId linknode;
}
struct DeletedManifest {
1: optional ChangesetId linknode,
2: map<MPathElement, DeletedManifestId> (rust.type = "sorted_vector_map::SortedVectorMap") subentries,
1: optional ChangesetId linknode;
2: map<MPathElement, DeletedManifestId> (
rust.type = "sorted_vector_map::SortedVectorMap",
) subentries;
}
struct FsnodeFile {
1: ContentId content_id,
2: FileType file_type,
1: ContentId content_id;
2: FileType file_type;
// size is a u64 stored as an i64
3: i64 size,
4: Sha1 content_sha1,
5: Sha256 content_sha256,
3: i64 size;
4: Sha1 content_sha1;
5: Sha256 content_sha256;
}
struct FsnodeDirectory {
1: FsnodeId id,
2: FsnodeSummary summary,
1: FsnodeId id;
2: FsnodeSummary summary;
}
struct FsnodeSummary {
1: Sha1 simple_format_sha1,
2: Sha256 simple_format_sha256,
1: Sha1 simple_format_sha1;
2: Sha256 simple_format_sha256;
// Counts and sizes are u64s stored as i64s
3: i64 child_files_count,
4: i64 child_files_total_size,
5: i64 child_dirs_count,
6: i64 descendant_files_count,
7: i64 descendant_files_total_size,
3: i64 child_files_count;
4: i64 child_files_total_size;
5: i64 child_dirs_count;
6: i64 descendant_files_count;
7: i64 descendant_files_total_size;
}
union FsnodeEntry {
1: FsnodeFile File,
2: FsnodeDirectory Directory,
1: FsnodeFile File;
2: FsnodeDirectory Directory;
}
// Content-addressed manifest, with metadata useful for filesystem
@@ -282,33 +296,35 @@ union FsnodeEntry {
// files and manifests, and the number of files and sub-directories within
// directories.
struct Fsnode {
1: map<MPathElement, FsnodeEntry> (rust.type = "sorted_vector_map::SortedVectorMap") subentries,
2: FsnodeSummary summary,
1: map<MPathElement, FsnodeEntry> (
rust.type = "sorted_vector_map::SortedVectorMap",
) subentries;
2: FsnodeSummary summary;
}
struct SkeletonManifestDirectory {
1: SkeletonManifestId id,
2: SkeletonManifestSummary summary,
1: SkeletonManifestId id;
2: SkeletonManifestSummary summary;
}
struct SkeletonManifestSummary {
1: i64 child_files_count,
2: i64 child_dirs_count,
3: i64 descendant_files_count,
4: i64 descendant_dirs_count,
5: i32 max_path_len,
6: i32 max_path_wchar_len,
7: bool child_case_conflicts,
8: bool descendant_case_conflicts,
9: bool child_non_utf8_filenames,
10: bool descendant_non_utf8_filenames,
11: bool child_invalid_windows_filenames,
12: bool descendant_invalid_windows_filenames,
1: i64 child_files_count;
2: i64 child_dirs_count;
3: i64 descendant_files_count;
4: i64 descendant_dirs_count;
5: i32 max_path_len;
6: i32 max_path_wchar_len;
7: bool child_case_conflicts;
8: bool descendant_case_conflicts;
9: bool child_non_utf8_filenames;
10: bool descendant_non_utf8_filenames;
11: bool child_invalid_windows_filenames;
12: bool descendant_invalid_windows_filenames;
}
struct SkeletonManifestEntry {
// Present if this is a directory, absent for a file.
1: optional SkeletonManifestDirectory directory,
1: optional SkeletonManifestDirectory directory;
}
// Structure-addressed manifest, with metadata useful for traversing manifest
@@ -319,8 +335,10 @@ struct SkeletonManifestEntry {
// represented by a single skeleton manifest. Skeleton manifest identities
// change when files are added or removed.
struct SkeletonManifest {
1: map<MPathElement, SkeletonManifestEntry> (rust.type = "sorted_vector_map::SortedVectorMap") subentries,
2: SkeletonManifestSummary summary,
1: map<MPathElement, SkeletonManifestEntry> (
rust.type = "sorted_vector_map::SortedVectorMap",
) subentries;
2: SkeletonManifestSummary summary;
}
// Structure that holds a commit graph, usually a history of a file
@@ -361,16 +379,16 @@
// Note that offset might point to a commit in a next FastlogBatch or even
// point to batch outside of all previous_batches.
struct FastlogBatch {
1: list<CompressedHashAndParents> latest,
2: list<FastlogBatchId> previous_batches,
1: list<CompressedHashAndParents> latest;
2: list<FastlogBatchId> previous_batches;
}
typedef i32 ParentOffset (rust.newtype)
struct CompressedHashAndParents {
1: ChangesetId cs_id,
1: ChangesetId cs_id;
# Offsets can be negative!
2: list<ParentOffset> parent_offsets,
2: list<ParentOffset> parent_offsets;
}
typedef i32 BlameChangeset (rust.newtype)
@@ -384,21 +402,21 @@ enum BlameRejected {
// Blame V1
struct BlameRange {
1: i32 length,
2: ChangesetId csid,
3: BlamePath path,
1: i32 length;
2: ChangesetId csid;
3: BlamePath path;
// offset of this range in the origin file (file that introduced this change)
4: i32 origin_offset,
4: i32 origin_offset;
}
struct Blame {
1: list<BlameRange> ranges,
2: list<MPath> paths,
1: list<BlameRange> ranges;
2: list<MPath> paths;
}
union BlameMaybeRejected {
1: Blame Blame (py3.name = "blame"),
2: BlameRejected Rejected,
1: Blame Blame (py3.name = "blame");
2: BlameRejected Rejected;
}
// Blame V2
@@ -406,16 +424,16 @@ union BlameMaybeRejected {
struct BlameRangeV2 {
// Length (in lines) of this range. The offset of a range is implicit from
// the sum of the lengths of the prior ranges.
1: i32 length,
1: i32 length;
// Index into csids of the changeset that introduced these lines.
2: BlameChangeset csid_index,
2: BlameChangeset csid_index;
// Index into paths of the path of this file when this line was introduced.
3: BlamePath path_index,
3: BlamePath path_index;
// The offset of this range at the time that this line was introduced.
4: i32 origin_offset,
4: i32 origin_offset;
// "Skip past this change" support.
//
@@ -464,16 +482,16 @@ struct BlameRangeV2 {
// The offset of this range in the file before this range was introduced that
// was replaced by this range. Not present for root commits.
5: optional i32 parent_offset,
5: optional i32 parent_offset;
// The length of the range in the file before this range was introduced that
// was replaced by this range. Not present for root commits.
6: optional i32 parent_length,
6: optional i32 parent_length;
// If this file was being renamed when this line was introduced, this is
// the index into paths of the original path. Not present for root commits
// or if the file has the same name as path_index.
7: optional BlamePath renamed_from_path_index,
7: optional BlamePath renamed_from_path_index;
// If this is a merge commit, and the file is not in the first parent, then
// this is the index of the first parent that contains the file that contains
@@ -484,13 +502,13 @@ struct BlameRangeV2 {
//
// Note that this is an index into the list of parents in the bonsai
// changeset, and *not* an index into csids.
8: optional i32 parent_index,
8: optional i32 parent_index;
}
struct BlameDataV2 {
// A list of ranges that describe when the lines of this file were
// introduced.
1: list<BlameRangeV2> ranges,
1: list<BlameRangeV2> ranges;
// A mapping of integer indexes to changeset IDs that is used to reduce the
// repetition of data in ranges.
@@ -503,30 +521,32 @@ struct BlameDataV2 {
// Changesets are removed from this map when all lines that were added in the
// changeset are moved and none of the ranges reference it. This means there
// are gaps in this mapping, and so a map is used.
2: map<i32, ChangesetId> (rust.type = "sorted_vector_map::SortedVectorMap") csids,
2: map<i32, ChangesetId> (
rust.type = "sorted_vector_map::SortedVectorMap",
) csids;
// The maximum index that is assigned to a changeset id. This is also the
// index that would be assigned to the current changeset, as long as the
// changeset adds new lines. If the changeset only deletes or merges lines,
// then this index will not appear in the csids map.
3: BlameChangeset max_csid_index,
3: BlameChangeset max_csid_index;
// The list of paths that this file has been located at. This is used to
// reduce repetition of data in ranges. Since files are not often moved, and
// for simplicity, this includes all paths the file has ever been located at,
// even if they are no longer referenced by any of the ranges.
4: list<MPath> paths,
4: list<MPath> paths;
}
union BlameV2 {
// This version of the file contains full blame information.
1: BlameDataV2 full_blame,
1: BlameDataV2 full_blame;
// This version of the file was rejected for blaming.
2: BlameRejected rejected,
2: BlameRejected rejected;
}
struct RedactionKeyList {
// List of keys to be redacted
1: list<string> keys,
1: list<string> keys;
}

Changed file 12 of 13

@@ -8,31 +8,31 @@
include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
struct SkiplistEntry {
1: RepoId repo_id,
2: mononoke_types_thrift.ChangesetId cs_id,
3: GenerationNum gen,
4: SkiplistNodeType node_type,
1: RepoId repo_id;
2: mononoke_types_thrift.ChangesetId cs_id;
3: GenerationNum gen;
4: SkiplistNodeType node_type;
}
typedef i32 RepoId (rust.newtype)
// Thrift does not support unsigned, so using i64 here
// Thrift does not support unsigned, so using i64 here
typedef i64 GenerationNum (rust.newtype)
struct CommitAndGenerationNumber {
1: mononoke_types_thrift.ChangesetId cs_id,
2: GenerationNum gen,
1: mononoke_types_thrift.ChangesetId cs_id;
2: GenerationNum gen;
}
struct SkipEdges {
1: list<CommitAndGenerationNumber> edges,
1: list<CommitAndGenerationNumber> edges;
}
struct ParentEdges {
1: list<CommitAndGenerationNumber> edges,
1: list<CommitAndGenerationNumber> edges;
}
union SkiplistNodeType {
1: SkipEdges SkipEdges,
2: ParentEdges ParentEdges,
1: SkipEdges SkipEdges;
2: ParentEdges ParentEdges;
}

Changed file 13 of 13: diff suppressed because it is too large.