2009-04-26 03:13:08 +04:00
|
|
|
# bundlerepo.py - repository class for viewing uncompressed bundles
|
|
|
|
#
|
|
|
|
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-03-13 05:54:23 +03:00
|
|
|
|
2009-04-26 03:24:49 +04:00
|
|
|
"""Repository class for viewing uncompressed bundles.
|
|
|
|
|
|
|
|
This provides a read-only repository interface to bundles as if they
|
|
|
|
were part of the actual repository.
|
|
|
|
"""
|
|
|
|
|
2015-08-08 10:36:35 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import os
|
|
|
|
import shutil
|
|
|
|
import tempfile
|
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from .node import nullid
|
|
|
|
|
|
|
|
from . import (
|
|
|
|
bundle2,
|
|
|
|
changegroup,
|
|
|
|
changelog,
|
|
|
|
cmdutil,
|
|
|
|
discovery,
|
|
|
|
error,
|
|
|
|
exchange,
|
|
|
|
filelog,
|
|
|
|
localrepo,
|
|
|
|
manifest,
|
|
|
|
mdiff,
|
2016-03-23 10:55:22 +03:00
|
|
|
node as nodemod,
|
2015-08-08 10:36:35 +03:00
|
|
|
pathutil,
|
|
|
|
phases,
|
2016-11-22 21:33:11 +03:00
|
|
|
pycompat,
|
2015-08-08 10:36:35 +03:00
|
|
|
revlog,
|
|
|
|
util,
|
2017-03-02 16:47:03 +03:00
|
|
|
vfs as vfsmod,
|
2015-08-08 10:36:35 +03:00
|
|
|
)
|
2006-03-13 05:54:23 +03:00
|
|
|
|
2006-03-13 08:58:31 +03:00
|
|
|
class bundlerevlog(revlog.revlog):
    """A revlog overlaying revisions from a bundle on an on-disk revlog.

    Revisions already present in the local repository are served from the
    underlying revlog; revisions carried by the bundle are reconstructed
    on demand from the deltas stored in the bundle stream. The overlay is
    strictly read-only: all mutating revlog entry points are disabled.
    """

    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        n = len(self)
        # highest rev served by the on-disk revlog; anything above this
        # comes from the bundle
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for deltadata in bundle.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            # deltaiter() has already consumed the delta bytes, so the
            # current stream position is just past them
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                # Fixed: raise error.LookupError rather than the builtin
                # LookupError, consistent with the "unknown parent" case
                # above so callers catching error.LookupError see both.
                raise error.LookupError(deltabase, self.indexfile,
                                        _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev):
        """Return the raw (possibly compressed) delta chunk for ``rev``."""
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        # slow path: materialize both revisions and diff them
        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                rawtext = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if rawtext is None:
            # delta chain bottomed out in the on-disk revlog
            rawtext = self.baserevision(iterrev)

        # apply the collected deltas, innermost first
        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])

        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, rawtext)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        # bundle repositories are read-only
        raise NotImplementedError

    def addgroup(self, deltas, transaction, addrevisioncb=None):
        raise NotImplementedError

    def strip(self, rev, minlink):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError
|
|
|
|
|
2006-03-13 08:58:31 +03:00
|
|
|
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog whose bundle-carried revisions come from the bundle stream."""

    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        # changelog entries are their own link revisions, hence identity
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        """Return the raw text of an on-disk revision (delta-chain base)."""
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            # always restore filtering, even if revision() raised
            self.filteredrevs = oldfilter
|
2013-08-27 03:50:31 +04:00
|
|
|
|
2016-11-11 12:15:59 +03:00
|
|
|
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    """Manifest revlog backed by a bundle.

    For changegroup version "03" bundles, per-directory manifest logs
    (treemanifests) follow the root manifest in the stream; their start
    offsets are recorded so dirlog() can seek back to them later.
    """

    def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
        manifest.manifestrevlog.__init__(self, opener, dir=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                # cg3 bundles carry dirlog sections; remember where each starts
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        """Return the raw text of an on-disk revision (delta-chain base)."""
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            # copy the cached bytes so callers cannot mutate the cache entry
            result = '%s' % self.fulltextcache[node]
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return result

    def dirlog(self, d):
        """Return the manifest log for directory ``d``.

        Seeks to the directory's recorded offset when its log is part of
        the bundle; otherwise defers to the on-disk implementation.
        """
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)
|
|
|
|
|
2006-03-13 08:58:31 +03:00
|
|
|
class bundlefilelog(bundlerevlog, filelog.filelog):
    """Filelog whose bundle-carried revisions come from the bundle stream."""

    def __init__(self, opener, path, bundle, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        """Return the raw text of an on-disk revision (delta-chain base)."""
        return filelog.filelog.revision(self, nodeorrev, raw=True)
|
2013-08-27 03:50:31 +04:00
|
|
|
|
2012-07-13 23:52:28 +04:00
|
|
|
class bundlepeer(localrepo.localpeer):
    """Peer for a bundle repository; bundles are strictly read-only."""

    def canpush(self):
        # pushing into a bundle repository is never possible
        return False
|
|
|
|
|
2014-12-18 22:38:48 +03:00
|
|
|
class bundlephasecache(phases.phasecache):
    """Phase cache for bundle repos: updated in memory, never persisted."""

    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            # ensure the underlying store can never be written through
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        # phases of a bundle repo must never reach disk
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        # in-memory update only; invalidate derived data instead of writing
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True
|
|
|
|
|
2016-08-05 20:07:58 +03:00
|
|
|
def _getfilestarts(bundle):
    """Map each filelog name in ``bundle`` to its stream start offset.

    Consumes the remaining filelog sections of the changegroup stream:
    for every filelog header record the current position, then skip the
    delta chunks belonging to that file. An empty header dict terminates
    the scan.
    """
    positions = {}
    while True:
        chunkdata = bundle.filelogheader()
        if chunkdata == {}:
            # empty header marks the end of the filelog sections
            break
        positions[chunkdata['filename']] = bundle.tell()
        # skip this file's delta chunks; an empty chunk ends the file
        while bundle.deltachunk(None) != {}:
            pass
    return positions
|
|
|
|
|
2006-03-13 08:58:31 +03:00
|
|
|
class bundlerepository(localrepo.localrepository):
    """A read-only repository overlaying a bundle file on a local repo.

    If ``path`` does not name a valid local repository, a throwaway empty
    repository is created in a temp directory to serve as the parent; it
    is removed again by close().
    """

    def __init__(self, ui, path, bundlename):
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            # no usable parent repo: back the bundle with a temporary one
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)

        if isinstance(self.bundle, bundle2.unbundle20):
            hadchangegroup = False
            for part in self.bundle.iterparts():
                if part.type == 'changegroup':
                    if hadchangegroup:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    hadchangegroup = True

                self._handlebundle2part(part)

            if not hadchangegroup:
                raise error.Abort(_("No changegroups found"))

        elif self.bundle.compressed():
            # decompress to a temp file so we can seek in the stream
            f = self._writetempbundle(self.bundle.read, '.hg10un',
                                      header='HG10UN')
            self.bundlefile = self.bundle = exchange.readbundle(ui, f,
                                                                bundlename,
                                                                self.vfs)

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    def _handlebundle2part(self, part):
        """Process one bundle2 part, extracting the changegroup stream."""
        if part.type == 'changegroup':
            cgstream = part
            version = part.params.get('version', '01')
            legalcgvers = changegroup.supportedincomingversions(self)
            if version not in legalcgvers:
                msg = _('Unsupported changegroup version: %s')
                raise error.Abort(msg % version)
            if self.bundle.compressed():
                cgstream = self._writetempbundle(part.read,
                                                 ".cg%sun" % version)

            self.bundle = changegroup.getunbundler(version, cgstream, 'UN')

    def _writetempbundle(self, readfn, suffix, header=''):
        """Write a temporary file to disk

        ``readfn`` is drained in 256KiB chunks after ``header`` is written;
        returns the temp file reopened for reading. The path is remembered
        in self.tempfile so close() can unlink it.
        """
        # Fixed: honor the caller-supplied suffix instead of hardcoding
        # ".hg10un", so changegroup temp files get their ".cg*un" names.
        fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                        suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode="rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self.bundle.changelogheader()
        c = bundlechangelog(self.svfs, self.bundle)
        # remember where the manifest section starts for _constructmanifest
        self.manstart = self.bundle.tell()
        return c

    def _constructmanifest(self):
        self.bundle.seek(self.manstart)
        # consume the header if it exists
        self.bundle.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self.bundle, linkmapper)
        self.filestart = self.bundle.tell()
        return m

    @localrepo.unfilteredpropertycache
    def manstart(self):
        # force changelog parsing, which records the manifest start offset
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        # force manifest parsing, which records the filelog start offset
        self.manifestlog
        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        """Return the filelog for ``f``, bundle-backed when appropriate."""
        if not self.bundlefilespos:
            # lazily index the filelog sections of the bundle
            self.bundle.seek(self.filestart)
            self.bundlefilespos = _getfilestarts(self.bundle)

        if f in self.bundlefilespos:
            self.bundle.seek(self.bundlefilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
        else:
            return filelog.filelog(self.svfs, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)
|
2011-12-01 17:17:17 +04:00
|
|
|
|
2006-07-31 18:11:12 +04:00
|
|
|
def instance(ui, path, create):
    """Repository factory entry point for bundle URLs.

    ``path`` may be 'bundle:<repopath>+<bundlefile>', 'bundle:<bundlefile>',
    or a plain bundle file name. When no repo path is embedded in the URL,
    the parent repository is taken from the 'bundle.mainreporoot' config or
    discovered from the current working directory. Creating a new bundle
    repository is not supported.
    """
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            # normasprefix guarantees a trailing os.sep even for UNC roots,
            # which plain os.path.join(cwd, '') does not on Python >= 2.7.9
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        # split 'repopath+bundlename'; a lone component is the bundle name
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path
    return bundlerepository(ui, repopath, bundlename)
|
2010-10-15 00:41:43 +04:00
|
|
|
|
2014-12-18 23:33:17 +03:00
|
|
|
class bundletransactionmanager(object):
    """Transaction-manager stand-in for read-only bundle repositories.

    A bundle repo never persists anything, so no real transaction exists:
    requesting one yields ``None``, and the commit/rollback entry points
    are deliberately unsupported.
    """

    def transaction(self):
        # no transaction is ever active for a bundle repo
        return None

    def close(self):
        # committing makes no sense for a read-only bundle
        raise NotImplementedError

    def release(self):
        # likewise, there is nothing to roll back
        raise NotImplementedError
|
|
|
|
|
2011-05-02 14:36:23 +04:00
|
|
|
def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from other

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
      changesets; it is a bundlerepo for the obtained bundle when the
      original "other" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
      the changes; it closes both the original "other" and the one returned
      here.
    '''
    tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        # nothing incoming: remove any stale bundle file and return the
        # local repo itself
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], other.close

    # drop remote heads we already have in common
    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = other.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if other repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and other.capable('getbundle')
                      and other.capable('bundle2'))
        if canbundle2:
            kwargs = {}
            kwargs['common'] = common
            kwargs['heads'] = rheads
            kwargs['bundlecaps'] = exchange.caps20to10(repo)
            kwargs['cg'] = True
            b2 = other.getbundle('incoming', **kwargs)
            # stream the raw bundle2 chunks straight to disk
            fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
                                                     bundlename)
        else:
            if other.capable('getbundle'):
                cg = other.getbundle('incoming', common=common, heads=rheads)
            elif onlyheads is None and not other.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads
                cg = other.changegroup(incoming, "incoming")
                rheads = None
            else:
                cg = other.changegroupsubset(incoming, rheads, 'incoming')
            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
                                                      fname)
            # this repo contains local and other now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        # propagate the remote's phase information onto the bundle repo
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
        remotephases = other.listkeys('phases')

        pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        # close the bundle repo, delete any temp bundle file, and close
        # the remote peer
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        other.close()

    return (localrepo, csets, cleanup)
|