2009-04-26 03:13:08 +04:00
|
|
|
# changegroup.py - Mercurial changegroup manipulation functions
|
|
|
|
#
|
|
|
|
# Copyright 2006 Matt Mackall <mpm@selenic.com>
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-12-13 22:27:09 +03:00
|
|
|
|
2015-08-08 10:35:37 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import os
|
|
|
|
import struct
|
|
|
|
import tempfile
|
2014-04-02 02:27:53 +04:00
|
|
|
import weakref
|
2015-08-08 10:35:37 +03:00
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from .node import (
|
|
|
|
hex,
|
|
|
|
nullid,
|
|
|
|
nullrev,
|
|
|
|
short,
|
|
|
|
)
|
|
|
|
|
|
|
|
from . import (
|
|
|
|
branchmap,
|
|
|
|
dagutil,
|
|
|
|
discovery,
|
|
|
|
error,
|
|
|
|
mdiff,
|
|
|
|
phases,
|
|
|
|
util,
|
|
|
|
)
|
2006-03-21 13:47:21 +03:00
|
|
|
|
2014-09-02 14:11:36 +04:00
|
|
|
# struct formats for the per-revision delta header:
#   v1: node, p1, p2, cs(linknode); the delta base is implicit (previous
#       revision in the stream, or p1 for the first one -- see
#       cg1unpacker._deltaheader)
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
#   v2: node, p1, p2, deltabase, cs(linknode); the delta base is explicit
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
|
2011-04-30 12:00:41 +04:00
|
|
|
|
2011-02-22 05:03:39 +03:00
|
|
|
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # a short read means the peer hung up or the stream is corrupt
    raise util.Abort(_("stream ended unexpectedly"
                       " (got %d bytes, expected %d)")
                     % (len(data), n))
|
|
|
|
|
|
|
|
def getchunk(stream):
    """return the next chunk from stream as a string"""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    # the stored length counts the 4-byte length prefix itself, so any
    # value of 4 or less cannot describe a real payload
    if length <= 4:
        if length:
            raise util.Abort(_("invalid chunk length %d") % length)
        # a zero length marks the end of a chunk group
        return ""
    return readexactly(stream, length - 4)
|
2006-03-21 13:47:21 +03:00
|
|
|
|
2007-10-04 02:17:28 +04:00
|
|
|
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the wire format stores the total size, including these 4 bytes
    return struct.pack(">l", 4 + length)
|
2006-03-21 13:47:21 +03:00
|
|
|
|
|
|
|
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a stored length of zero terminates the current chunk group
    return struct.pack('>l', 0)
|
|
|
|
|
2015-01-16 23:53:45 +03:00
|
|
|
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one

    Each result encodes a head-count change: 1 means unchanged, 1+n means
    n heads added, -1-n means n heads removed, and 0 means nothing changed
    or no source. The combination sums the head deltas; any 0 makes the
    whole combination 0.
    """
    changedheads = 0
    for ret in results:
        # If any changegroup result is 0, the combined result is 0.
        # Returning immediately (rather than breaking out of the loop)
        # guarantees previously accumulated head changes cannot
        # overwrite the 0 below.
        if ret == 0:
            return 0
        if ret < -1:
            # ret encodes -1 - removed heads
            changedheads += ret + 1
        elif ret > 1:
            # ret encodes 1 + added heads
            changedheads += ret - 1
    if changedheads > 0:
        return 1 + changedheads
    if changedheads < 0:
        return -1 + changedheads
    return 1
|
|
|
|
|
2006-11-16 08:37:45 +03:00
|
|
|
# Map bundle type name -> (on-disk header string, compression type).
# The header is written verbatim at the start of the bundle file; the
# compression type is a key into util.compressors (None = uncompressed).
bundletypes = {
    "": ("", None), # only when using unbundle on ssh and old http servers
                    # since the unification ssh accepts a header but there
                    # is no capability signaling it.
    "HG20": (), # special-cased below
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
|
|
|
|
|
2015-01-16 01:39:41 +03:00
|
|
|
def writebundle(ui, cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.

    cg is the changegroup to write out; bundletype is either "HG20"
    (bundle2 container) or a key of the 'bundletypes' table. When 'vfs'
    is given, 'filename' is opened and removed through it instead of the
    plain filesystem.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # from here on, 'cleanup' names the file to delete on failure
        cleanup = filename

        if bundletype == "HG20":
            # imported here to avoid a module-level import cycle with bundle2
            from . import bundle2
            bundle = bundle2.bundle20(ui)
            part = bundle.newpart('changegroup', data=cg.getchunks())
            part.addparam('version', cg.version)
            # bundle2 streams are written uncompressed here
            z = util.compressors[None]()
            chunkiter = bundle.getchunks()
        else:
            # plain HG10-style container: only v1 changegroups fit
            if cg.version != '01':
                raise util.Abort(_('old bundle types only supports v1 '
                                   'changegroups'))
            header, comp = bundletypes[bundletype]
            fh.write(header)
            if comp not in util.compressors:
                raise util.Abort(_('unknown stream compression type: %s')
                                 % comp)
            z = util.compressors[comp]()
            chunkiter = cg.getchunks()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in chunkiter:
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        # success: disarm the cleanup so the finally clause keeps the file
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
|
2006-11-16 00:51:58 +03:00
|
|
|
|
2014-09-02 14:11:36 +04:00
|
|
|
class cg1unpacker(object):
    """Unpacker for version '01' changegroup streams.

    Wraps a (possibly compressed) file-like object and exposes the
    chunked changegroup records: part headers and delta chunks.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'

    def __init__(self, fh, alg):
        # 'alg' names the stream compression; None means uncompressed
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if alg not in util.decompressors:
            raise util.Abort(_('unknown stream compression type: %s')
                             % alg)
        if alg == 'BZ':
            # presumably the stream lacks the usual BZ2 magic prefix and
            # util's '_truncatedBZ' decompressor compensates -- TODO confirm
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        # optional progress callback, fired once per non-empty chunk read
        self.callback = None

    def compressed(self):
        """True when the underlying stream is compressed."""
        return self._type is not None

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read a 4-byte length prefix and return the payload length.

        Returns 0 at a chunk-group boundary (stored length of zero) and
        aborts on a corrupt length. Fires self.callback, if set, for
        every non-empty chunk.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # the stored length includes the 4-byte prefix itself
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # v1 does not transmit the delta base: it is the previous node in
        # the stream, or p1 for the first delta of a group
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta record; returns {} at a chunk-group boundary."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """Return all the chunks contained in the bundle.

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data so it does not
        block on an ssh stream whose end is unknown.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                # re-emit the length prefix, then the payload in <=1MB slices
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
|
|
|
|
|
2014-10-17 16:41:11 +04:00
|
|
|
class cg2unpacker(cg1unpacker):
    """Unpacker for version '02' changegroup streams.

    Differs from cg1 only in the delta header: the delta base node is
    stored explicitly instead of being implied by the previous revision.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # the base is explicit in the v2 header, so prevnode is unused
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs
|
|
|
|
|
2010-09-18 04:02:26 +04:00
|
|
|
class headerlessfixup(object):
    """Re-attach already-consumed leading bytes to a stream.

    Serves the saved bytes 'h' first, then reads from the underlying
    file object 'fh'.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        buffered = self._h
        if not buffered:
            # saved bytes fully drained: read straight from the stream
            return readexactly(self._fh, n)
        d = buffered[:n]
        self._h = buffered[n:]
        if len(d) < n:
            # saved bytes exhausted mid-request; top up from the stream
            d += readexactly(self._fh, n - len(d))
        return d
|
2010-09-18 04:02:26 +04:00
|
|
|
|
2014-09-02 14:11:36 +04:00
|
|
|
class cg1packer(object):
    """Packer producing version '01' changegroup chunk streams.

    generate() is the entry point: it yields the changelog group, then
    the manifest group, then one group per changed file.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # None = auto-decide per revlog; otherwise an explicit boolean
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        # per-part size notes are only interesting in verbose (non-debug) mode
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # a zero-length chunk terminates the current group
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        # node -> position of its changeset in this group's changelog walk
        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = ml.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
            return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        size = 0
        for chunk in self.group(mfnodes, ml, lookupmf, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)

        # manifest map no longer needed; free it before walking filelogs
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in clrevs:
                            yield filerevlog.node(r), cl.node(linkrev)
                return dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield one chunk group per changed file (header + deltas)."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # v1 always deltas against the previous revision in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunk (header, meta, delta) for one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            # censored revisions are transmitted as full-replacement diffs
            # built from the tombstone text
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # no usable base: send the full text with a trivial diff header
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
|
2014-04-02 00:59:55 +04:00
|
|
|
|
2014-10-17 16:41:11 +04:00
|
|
|
class cg2packer(cg1packer):
    """Packer producing version '02' changegroup chunk streams.

    Unlike cg1, the delta base node is transmitted explicitly in each
    delta header, which lets generaldelta revlogs reuse their stored
    deltas directly.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # cg2 sends the delta base explicitly (compare cg1packer)
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
|
|
|
|
|
|
|
|
# Map changegroup version string -> (packer class, unpacker class).
packermap = {'01': (cg1packer, cg1unpacker),
             '02': (cg2packer, cg2unpacker)}
|
2014-09-25 08:24:06 +04:00
|
|
|
|
2014-04-02 01:13:34 +04:00
|
|
|
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets are in the group.

    Printed for 'bundle' sources always, otherwise only in verbose mode;
    the individual changeset hashes are listed at debug level.
    """
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
        if ui.debugflag:
            ui.debug("list of changesets:\n")
            for node in nodes:
                ui.debug("%s\n" % hex(node))
|
|
|
|
|
2014-10-17 16:41:02 +04:00
|
|
|
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Generate raw changegroup chunks for an outgoing set.

    Runs the 'preoutgoing' hook and reports the changeset count, then
    returns the chunk generator produced by bundler.generate().
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
|
|
|
|
|
2015-01-16 02:55:13 +03:00
|
|
|
def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    """Wrap the raw chunk stream from getsubsetraw() in an unpacker.

    'version' selects which unpacker class from packermap consumes the
    stream (the stream itself is uncompressed, hence alg=None).
    """
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return packermap[version][1](util.chunkbuffer(gengroup), None)
|
2014-04-02 01:25:03 +04:00
|
|
|
|
2015-01-16 02:55:13 +03:00
|
|
|
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.

    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        # no roots means start from the very beginning of history
        roots = [nullid]
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    # only parents outside the included set count as common with the remote
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source, version=version)
|
2014-04-02 01:25:03 +04:00
|
|
|
|
2014-10-17 16:41:21 +04:00
|
|
|
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        # nothing to send
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)
|
|
|
|
|
2014-09-02 14:11:36 +04:00
|
|
|
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None, version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing.

    'version' selects the changegroup format via packermap and defaults
    to '01', which preserves the historical behavior (cg1packer); it
    mirrors getlocalchangegroupraw so the two stay consistent.
    """
    if not outgoing.missing:
        # nothing to send
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source, version=version)
|
|
|
|
|
2015-06-03 05:58:06 +03:00
|
|
|
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        # drop nodes the local changelog does not actually have
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
|
|
|
|
|
2014-09-02 14:11:36 +04:00
|
|
|
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    # delegate the set arithmetic to computeoutgoing, then bundle
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
|
2014-04-02 01:40:35 +04:00
|
|
|
|
2014-04-02 02:08:27 +04:00
|
|
|
def changegroup(repo, basenodes, source):
    """Return a changegroup of everything from basenodes to the repo heads."""
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
|
|
|
|
|
2014-04-02 02:21:56 +04:00
|
|
|
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog portion of a changegroup read from 'source'.

    revmap: callable mapping a node to its local changelog revision,
        handed to fl.addgroup
    trp: transaction (proxy) the revlog writes go through
    pr: progress callback invoked once per file
    needfiles: dict of filename -> set of nodes that must appear in the
        stream; entries are checked off as they arrive, and any node
        still missing afterwards aborts the import.

    Returns a (revisions added, files touched) tuple.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # empty header: end of the filelog section
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise util.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            # every newly added node must have been expected
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # anything still listed in needfiles must already exist locally
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
|
2014-04-02 02:27:53 +04:00
|
|
|
|
2014-08-06 00:49:38 +04:00
|
|
|
def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft, expectedtotal=None):
    """Add the changegroup returned by source.read() to this repo.

    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries source
    # information. In this case we use the top level data. We overwrite the
    # argument because we need to use the top level value (if they exist) in
    # this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate(tr)
    # snapshot heads before applying the group so the head delta (dh)
    # and the new-head report below compare against the pre-pull state
    oldheads = cl.heads()
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # small stateful progress reporter: each call advances the chunk
        # counter by one against a fixed total (may be None/unknown)
        class prog(object):
            def __init__(self, step, total):
                self._step = step
                self._total = total
                self._count = 1
            def __call__(self):
                repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                 total=self._total)
                self._count += 1
        source.callback = prog(_('changesets'), expectedtotal)

        # accumulate the names of files touched by incoming changesets as
        # they are added; the count later sizes the 'files' progress total
        efiles = set()
        def onchangelog(cl, node):
            # NOTE(review): index 3 of cl.read() appears to be the list of
            # files changed by the changeset — confirm against changelog API
            efiles.update(cl.read(node)[3])

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp,
                                 addrevisioncb=onchangelog)
        efiles = len(efiles)

        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        # manifests <= changesets
        source.callback = prog(_('manifests'), changesets)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfnode)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        source.callback = None
        pr = prog(_('files'), efiles)
        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            # heads that close a branch do not count toward the head delta
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            # 'pending' callback: flush pending transaction data so hooks
            # can see it; returns repo.root when something was written
            p = lambda: tr.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                # 'node' already set on the transaction: keep the
                # transaction-level value intact, override only locally
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.publishing()
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefor
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                # one 'incoming' hook invocation per added changeset
                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))

            # deferred until the lock is released, via the transaction's
            # post-close callback mechanism
            tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                            lambda tr: repo._afterlock(runhooks))

        tr.close()
    finally:
        tr.release()
        repo.ui.flush()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
|