"""
revlog.py - storage back-end for mercurial

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches

Copyright 2005 Matt Mackall <mpm@selenic.com>

This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""

import zlib, struct, sha, binascii, heapq
from mercurial import mdiff

def hex(node): return binascii.hexlify(node)
def bin(node): return binascii.unhexlify(node)
def short(node): return hex(node[:6])

def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return text
    if len(text) < 44:
        if text[0] == '\0': return text
        return 'u' + text
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return text
        return 'u' + text
    return bin

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError("unknown compression type %s" % t)
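
# A minimal sketch of the intended round trip (an illustration, not part
# of the original file): zlib output starts with 'x', incompressible or
# short text is stored with a 'u' prefix, and chunks beginning with '\0'
# pass through untouched.
#
#   >>> decompress(compress("some text " * 10)) == "some text " * 10
#   True
#   >>> compress("tiny")
#   'utiny'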

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
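
# Because the parents are sorted before hashing, the resulting nodeid is
# independent of parent order (a sketch, not a line from the original):
#
#   hash(text, p1, p2) == hash(text, p2, p1)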

nullid = "\0" * 20
indexformat = ">4l20s20s20s"
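# each index entry packs four longs -- offset, size, base, linkrev --
# followed by the 20-byte binary hashes p1, p2 and nodeid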

class lazyparser:
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

class lazyindex:
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def append(self, e):
        self.p.index.append(e)

class lazymap:
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0: raise KeyError("node " + hex(key))
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except TypeError:
                # entry not parsed yet (still None), load it on demand
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val

class RevlogError(Exception): pass

class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and size of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError:
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node): return self.nodemap[node]
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

    def reachable(self, rev, stop=None):
        """find all nodes reachable from rev, optionally stopping at stop"""
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def heads(self, stop=None):
        """return the list of all nodes that have no children"""
        p = {}
        h = []
        stoprev = 0
        if stop and stop in self.nodemap:
            stoprev = self.rev(stop)

        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            if n == stop:
                break
            if r < stoprev:
                break
            for pn in self.parents(n):
                p[pn] = 1
        return h

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

        return None

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)
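
    # diff and patches are intended as inverses (a sketch of the assumed
    # contract, not a line from the original file):
    #
    #   self.patches(a, [self.diff(a, b)]) == b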

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise IOError("integrity check failed on %s:%d"
                          % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
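
    # A hypothetical call sequence (names assumed for illustration only;
    # 'tr' stands in for a transaction object and 'lr' for a linkrev):
    #
    #   n1 = log.addrevision("first version", tr, lr)
    #   n2 = log.addrevision("second version", tr, lr + 1, p1=n1)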

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, r, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        x = ancestors(a)
        y = ancestors(b)
        lx = x.next()
        ly = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            if lx == ly:
                return lx[2]
            elif lx < ly:
                ly = y.next()
            elif lx > ly:
                lx = x.next()

    def group(self, linkmap):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent used is parents[0].
        """
        revs = []
        needed = {}

        # find file nodes/revs that match changeset revs
        for i in xrange(0, self.count()):
            if self.index[i][3] in linkmap:
                revs.append(i)
                needed[i] = 1

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)
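
    # The stream yielded above (and consumed by addgroup below) frames
    # each revision as: a 4-byte big-endian length that counts itself,
    # 80 bytes of metadata (node, p1, p2 and changeset link, 20 bytes
    # each), then the delta; a zero length ends the group.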

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                if unique:
                    raise RevlogError("already have %s" % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError("unknown base %s" % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError("consistency error adding group")
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node