Mirror of https://github.com/facebook/sapling.git
Commit e308419b58

Summary: In a repository containing files with large histories, we run into many SqlTimeout errors while fetching file history to serve getpack calls. However, fetching the whole file history is not really necessary: the client knows how to work with partial history, i.e. if the client is missing some portion of the history, it will just fetch it on demand. This diff adds a way to set a limit on how many entries will be fetched; if more entries would be fetched, we return FilenodeRangeResult::TooBig instead. The downside of this diff is that we have to do more sequential database queries.

Reviewed By: krallin

Differential Revision: D23025249

fbshipit-source-id: ebed9d6df6f8f40e658bc4b83123c75f78e70d93

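The limit-and-signal pattern the summary describes can be sketched as follows. This is an illustrative Rust sketch, not Mononoke's actual code: the names FilenodeRangeResult and fetch_history_with_limit are assumptions based on the summary, and the hash type is a stand-in.

// Illustrative sketch of the limit-based fetch described above; names and
// types are stand-ins, not the real Mononoke API.

#[derive(Debug, Clone)]
struct FilenodeInfo {
    filenode: [u8; 20], // stand-in for mercurial_thrift.HgNodeHash
}

#[derive(Debug)]
enum FilenodeRangeResult {
    Present(Vec<FilenodeInfo>),
    TooBig,
}

/// Return at most `limit` history entries; if the stored history is
/// longer, report `TooBig` so the server sends a partial history and the
/// client fetches the rest on demand.
fn fetch_history_with_limit(entries: &[FilenodeInfo], limit: usize) -> FilenodeRangeResult {
    if entries.len() > limit {
        FilenodeRangeResult::TooBig
    } else {
        FilenodeRangeResult::Present(entries.to_vec())
    }
}

fn main() {
    let history = vec![FilenodeInfo { filenode: [0; 20] }; 5];
    assert!(matches!(
        fetch_history_with_limit(&history, 3),
        FilenodeRangeResult::TooBig
    ));
}
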
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2.
 */

include "eden/mononoke/mononoke_types/if/mononoke_types_thrift.thrift"
|
|
include "eden/mononoke/mercurial/types/if/mercurial_thrift.thrift"
|
|
|
|
# Memcache constants. Should be changed when we want to invalidate memcache
# entries
const i32 MC_CODEVER = 3
const i32 MC_SITEVER = 0

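MC_CODEVER and MC_SITEVER version the memcache entries: bumping either constant changes every cache key, which effectively invalidates all previously written entries. A minimal sketch of that pattern in Rust, assuming a key layout that is purely illustrative and not Mononoke's actual scheme:

const MC_CODEVER: i32 = 3;
const MC_SITEVER: i32 = 0;

/// Embed both version constants in the cache key, so bumping either one
/// makes all previously written entries unreachable (a cheap "flush").
fn memcache_key(repo_id: i32, path_hash: &str, filenode_hex: &str) -> String {
    format!(
        "filenodes.{}.{}.{}.{}.{}",
        MC_CODEVER, MC_SITEVER, repo_id, path_hash, filenode_hex
    )
}

fn main() {
    assert_eq!(memcache_key(1, "abc", "def"), "filenodes.3.0.1.abc.def");
}
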
union FilenodeInfoList {
  1: list<FilenodeInfo> Data,
  2: list<i64> Pointers,
  // The actual value is ignored
  3: byte TooBig,
}

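The union gives a cached history three shapes: Data holds the entries inline when they fit in one memcache value, Pointers references follow-up entries that each hold a chunk of a larger list, and TooBig (added by this change) marks histories that exceeded the fetch limit. Below is a hedged Rust sketch of how a reader might dispatch on the three cases; the enum mirrors the union above, while fetch_chunk is a hypothetical stand-in for the memcache chunk lookup:

#[derive(Clone)]
struct FilenodeInfo; // stand-in for the FilenodeInfo struct defined below

enum FilenodeInfoList {
    Data(Vec<FilenodeInfo>),
    Pointers(Vec<i64>),
    TooBig, // the byte payload is ignored; only the variant matters
}

/// Hypothetical chunk lookup: each pointer names another memcache entry
/// holding one slice of the full list. Returns None on a missing chunk.
fn fetch_chunk(_id: i64) -> Option<Vec<FilenodeInfo>> {
    None
}

/// Resolve a cached value into a full entry list, or None when the cache
/// cannot serve it and the caller must fall back to the database.
fn resolve(list: FilenodeInfoList) -> Option<Vec<FilenodeInfo>> {
    match list {
        FilenodeInfoList::Data(entries) => Some(entries),
        FilenodeInfoList::Pointers(ids) => {
            let mut entries = Vec::new();
            for id in ids {
                entries.extend(fetch_chunk(id)?);
            }
            Some(entries)
        }
        // Known-too-big history: treat as a deliberate cache miss.
        FilenodeInfoList::TooBig => None,
    }
}
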
struct FilenodeInfo {
  // 1 was used previously.
  2: required mercurial_thrift.HgNodeHash filenode,
  3: optional mercurial_thrift.HgNodeHash p1,
  4: optional mercurial_thrift.HgNodeHash p2,
  5: optional FilenodeCopyFrom copyfrom,
  6: required mercurial_thrift.HgNodeHash linknode,
}

struct FilenodeCopyFrom {
  1: required mononoke_types_thrift.RepoPath path,
  2: required mercurial_thrift.HgNodeHash filenode,
}

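The required/optional markers on the two structs translate directly into whether a field can be absent. A hedged sketch of a hand-written Rust counterpart, with the hash and path types as illustrative stand-ins for the Thrift-generated ones:

type HgNodeHash = [u8; 20]; // stand-in for mercurial_thrift.HgNodeHash
type RepoPath = String; // stand-in for mononoke_types_thrift.RepoPath

struct FilenodeCopyFrom {
    path: RepoPath,
    filenode: HgNodeHash,
}

struct FilenodeInfo {
    filenode: HgNodeHash,               // required: always present
    p1: Option<HgNodeHash>,             // optional parents become Option
    p2: Option<HgNodeHash>,
    copyfrom: Option<FilenodeCopyFrom>, // set only for copies/renames
    linknode: HgNodeHash,               // required: commit that introduced this filenode
}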