2005-05-04 01:16:10 +04:00
|
|
|
# hg.py - repository classes for mercurial
|
|
|
|
#
|
2007-06-19 10:51:34 +04:00
|
|
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
2006-08-12 23:30:02 +04:00
|
|
|
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
|
2005-05-04 01:16:10 +04:00
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2005-05-04 01:16:10 +04:00
|
|
|
|
2015-08-09 04:52:36 +03:00
|
|
|
from __future__ import absolute_import
|
2014-10-07 00:35:02 +04:00
|
|
|
|
2015-08-09 04:52:36 +03:00
|
|
|
import errno
|
2016-06-10 07:12:33 +03:00
|
|
|
import hashlib
|
2015-08-09 04:52:36 +03:00
|
|
|
import os
|
|
|
|
import shutil
|
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from .node import nullid
|
|
|
|
|
|
|
|
from . import (
|
|
|
|
bookmarks,
|
|
|
|
bundlerepo,
|
|
|
|
cmdutil,
|
2016-03-11 22:35:42 +03:00
|
|
|
destutil,
|
2015-08-09 04:52:36 +03:00
|
|
|
discovery,
|
|
|
|
error,
|
|
|
|
exchange,
|
|
|
|
extensions,
|
|
|
|
httppeer,
|
|
|
|
localrepo,
|
|
|
|
lock,
|
2017-12-06 21:56:45 +03:00
|
|
|
logexchange,
|
2015-08-09 04:52:36 +03:00
|
|
|
merge as mergemod,
|
|
|
|
node,
|
|
|
|
phases,
|
2018-03-21 23:36:22 +03:00
|
|
|
progress,
|
2015-08-09 04:52:36 +03:00
|
|
|
repoview,
|
|
|
|
scmutil,
|
|
|
|
sshpeer,
|
|
|
|
statichttprepo,
|
|
|
|
ui as uimod,
|
|
|
|
unionrepo,
|
|
|
|
url,
|
|
|
|
util,
|
|
|
|
verify as verifymod,
|
2017-03-02 15:27:42 +03:00
|
|
|
vfs as vfsmod,
|
2015-08-09 04:52:36 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
release = lock.release
|
2005-05-04 01:16:10 +04:00
|
|
|
|
2016-06-24 12:32:38 +03:00
|
|
|
# shared features: feature names written (one per line) to .hg/shared by
# postshare() to indicate extra pieces of data shared with the source repo
sharedbookmarks = 'bookmarks'
|
|
|
|
|
2006-07-31 18:11:12 +04:00
|
|
|
def _local(path):
    """Return the repo module for a local path.

    A path naming a plain file is treated as a bundle (bundlerepo);
    anything else is handled by localrepo.
    """
    expanded = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(expanded):
        return bundlerepo
    return localrepo
|
2006-06-21 20:14:36 +04:00
|
|
|
|
2012-07-13 23:46:53 +04:00
|
|
|
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names (from a url#branch fragment) into revisions.

    ``branches`` is the (hashbranch, branches) pair produced by parseurl().
    ``revs`` is an optional collection of already-requested revisions.

    Returns a (revs, checkout) pair: ``revs`` augmented with the heads of
    the named branches, and ``checkout`` the suggested revision to update
    to (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested at all: pass revs through unchanged
        x = revs or None
        # revs may be a smartset (has first()) or a plain sequence
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        # without branchmap we can only treat hashbranch as a revision
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # resolve a branch name to its heads, appending them to the
        # enclosing ``revs`` list; returns False if the branch is unknown
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            # reversed so the newest head lands first in revs
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it names a revision directly
            revs.append(hashbranch)
    return revs, revs[0]
|
|
|
|
|
2011-03-31 07:02:09 +04:00
|
|
|
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    # a #fragment on the URL names the branch; strip it from the url
    branch = u.fragment or None
    if branch is not None:
        u.fragment = None
    return bytes(u), (branch, branches or [])
|
2007-08-16 01:10:36 +04:00
|
|
|
|
2011-06-14 01:25:18 +04:00
|
|
|
# map of URL scheme -> module (or callable returning a module) providing
# the instance()/islocal() entry points for that kind of repository;
# consulted by _peerlookup()
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
|
2006-06-21 20:14:36 +04:00
|
|
|
|
2011-06-13 23:53:23 +04:00
|
|
|
def _peerlookup(path):
    """Return the module-like object handling ``path``'s URL scheme.

    The returned object provides ``instance()`` (and usually
    ``islocal()``); unknown schemes fall back to the 'file' handler.
    """
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        # 'file' maps to a callable (_local) that picks the right module;
        # the other entries are modules and raise TypeError when called
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
|
2006-08-04 00:24:41 +04:00
|
|
|
|
2006-07-28 21:46:25 +04:00
|
|
|
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the scheme handler has no islocal() -> treat as non-local
        return False
|
|
|
|
|
2012-10-18 08:30:08 +04:00
|
|
|
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    u = util.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path)
    # binary mode: callers get raw bytes either way
    return util.posixfile(u.localpath(), 'rb')
|
|
|
|
|
2014-03-28 20:20:07 +04:00
|
|
|
# a list of (ui, repo) functions called for wire peer initialization;
# run by _peerorrepo() on objects whose local() is falsy
wirepeersetupfuncs = []
|
|
|
|
|
2017-04-30 07:39:47 +03:00
|
|
|
def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    # instantiate via the scheme-specific handler resolved from path
    obj = _peerlookup(path).instance(ui, path, create)
    # prefer the object's own ui if it has one (carries repo config)
    ui = getattr(obj, "ui", ui)
    # caller-supplied setup runs before extension reposetup hooks
    for f in presetupfuncs or []:
        f(ui, obj)
    # give every loaded extension a chance to decorate the new object
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    # wire peers (non-local objects) get their own initialization hooks
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
|
|
|
|
|
2017-04-30 07:39:47 +03:00
|
|
|
def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = obj.local()
    if not repo:
        # the path resolved to a remote peer: callers wanted a local repo
        raise error.Abort(_("repository '%s' is not local") %
                          (path or obj.url()))
    # hand back the 'visible' view (hidden changesets filtered out)
    return repo.filtered('visible')
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2011-07-05 15:28:55 +04:00
|
|
|
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    # configure a ui suitable for remote interaction, then wrap the
    # resulting repo-or-peer object as a peer
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remui, path, create)
    return obj.peer()
|
2011-06-10 20:43:38 +04:00
|
|
|
|
2006-07-28 21:46:25 +04:00
|
|
|
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    # the destination name is the last path component of the source URL;
    # a bare root or empty path yields ''
    urlpath = util.url(source).path
    if urlpath:
        return os.path.basename(os.path.normpath(urlpath))
    return ''
|
2006-08-03 22:07:57 +04:00
|
|
|
|
2017-02-13 16:05:24 +03:00
|
|
|
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a path/URL (bytes) or an existing repo/peer object.
    The repository created at ``dest`` shares its store with ``source``.

    ``update`` may be a boolean or a revision to update to (see
    _postshareupdate). ``bookmarks`` controls whether bookmarks are
    shared. ``defaultpath`` overrides the [paths] default recorded in the
    new repo's hgrc. ``relative`` records the path to the shared store
    relative to the new repository instead of as an absolute path.

    Returns the newly created repository object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    # Paths are bytes in this codebase (see islocal() and clone()); the
    # previous isinstance(source, str) check would never match a bytes
    # path on Python 3, sending it down the repo-object branch. On
    # Python 2 str is bytes, so this is behavior-identical there.
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        # a missing requires file is fine; anything else is a real error
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r
|
2009-06-14 03:01:47 +04:00
|
|
|
|
2017-10-18 04:48:56 +03:00
|
|
|
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        # copystore() returns a lock on the freshly copied store (or None)
        destlock = copystore(ui, repo, repo.path)

        # rename rather than delete: sharedpath.old is left behind
        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        # drop both the absolute- and relative-share requirements
        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    # invalidate before rerunning __init__
    repo.unfiltered().invalidate(clearfilecache=True)
    repo.unfiltered().invalidatedirstate()
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()
|
|
|
|
|
2016-10-03 08:34:40 +03:00
|
|
|
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # record the source as the default path so pull/push work naturally
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        hgrcfp = destrepo.vfs("hgrc", "w", text=True)
        hgrcfp.write("[paths]\n")
        hgrcfp.write("default = %s\n" % default)
        hgrcfp.close()

    with destrepo.wlock():
        if bookmarks:
            # flag bookmarks as a shared feature (see sharedbookmarks)
            sharedfp = destrepo.vfs('shared', 'w')
            sharedfp.write(sharedbookmarks + '\n')
            sharedfp.close()
|
2014-12-13 22:32:46 +03:00
|
|
|
|
2016-02-21 04:41:59 +03:00
|
|
|
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        # caller passed an explicit revision rather than a boolean
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
|
|
|
|
|
2011-08-11 02:03:16 +04:00
|
|
|
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    Files are hardlinked when util.copyfiles decides that is possible,
    otherwise copied.

    returns destlock
    '''
    destlock = None
    try:
        with progress.bar(ui, _('linking')) as prog:
            hardlink = None
            # running count of files linked/copied (for the debug message)
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phase roots of a publishing repo are not propagated
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, num = util.copyfiles(srcvfs.join(f),
                                                   dstvfs.join(f),
                                                   hardlink, num, prog)
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        # don't leak the destination lock on failure
        release(destlock)
        raise
|
|
|
|
|
2015-07-09 02:19:09 +03:00
|
|
|
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a (srcpeer, destpeer) pair, like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
|
|
|
|
|
2017-05-25 12:55:00 +03:00
|
|
|
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfile = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(srcfile):
        return
    # create the destination cache directory lazily, on first copy
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfile, os.path.join(dstcachedir, fname))
|
|
|
|
|
2017-05-25 12:59:07 +03:00
|
|
|
def _cachetocopy(srcrepo):
    """return the list of cache file valuable to copy during a clone"""
    # In local clones we're copying all nodes, not just served
    # ones. Therefore copy all branch caches over.
    files = ['branch2']
    files.extend('branch2-%s' % f for f in repoview.filtertable)
    files.extend(['rbc-names-v1', 'rbc-revs-v1'])
    files.append('tags2')
    files.extend('tags2-%s' % f for f in repoview.filtertable)
    files.append('hgtagsfnodes1')
    return files
|
|
|
|
|
2011-06-14 18:33:46 +04:00
|
|
|
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
|
2015-07-09 02:19:09 +03:00
|
|
|
update=True, stream=False, branch=None, shareopts=None):
|
2006-07-12 03:18:53 +04:00
|
|
|
"""Make a copy of an existing repository.
|
|
|
|
|
|
|
|
Create a copy of an existing repository in a new directory. The
|
|
|
|
source and destination are URLs, as passed to the repository
|
2012-07-13 23:46:53 +04:00
|
|
|
function. Returns a pair of repository peers, the source and
|
2006-07-12 03:18:53 +04:00
|
|
|
newly created destination.
|
|
|
|
|
|
|
|
The location of the source is added to the new repository's
|
|
|
|
.hg/hgrc file, as the default to be used for future pulls and
|
|
|
|
pushes.
|
|
|
|
|
|
|
|
If an exception is raised, the partly cloned/updated destination
|
|
|
|
repository will be deleted.
|
2006-07-12 19:28:00 +04:00
|
|
|
|
2006-07-28 21:46:25 +04:00
|
|
|
Arguments:
|
|
|
|
|
|
|
|
source: repository object or URL
|
2006-07-12 03:18:53 +04:00
|
|
|
|
|
|
|
dest: URL of destination repository to create (defaults to base
|
|
|
|
name of source repository)
|
|
|
|
|
2014-12-13 01:02:56 +03:00
|
|
|
pull: always pull from source repository, even in local case or if the
|
|
|
|
server prefers streaming
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2006-07-16 03:06:35 +04:00
|
|
|
stream: stream raw data uncompressed from repository (fast over
|
|
|
|
LAN, slow over WAN)
|
2006-07-15 01:51:36 +04:00
|
|
|
|
2006-07-12 03:18:53 +04:00
|
|
|
rev: revision to clone up to (implies pull=True)
|
|
|
|
|
|
|
|
update: update working directory after clone completes, if
|
2008-04-12 09:19:52 +04:00
|
|
|
destination is local repository (True means update to default rev,
|
|
|
|
anything else is treated as a revision)
|
2010-02-07 17:23:46 +03:00
|
|
|
|
|
|
|
branch: branches to clone
|
2015-07-09 02:19:09 +03:00
|
|
|
|
|
|
|
shareopts: dict of options to control auto sharing behavior. The "pool" key
|
|
|
|
activates auto sharing mode and defines the directory for stores. The
|
|
|
|
"mode" key determines how to construct the directory name of the shared
|
|
|
|
repository. "identity" means the name is derived from the node of the first
|
|
|
|
changeset in the repository. "remote" means the name is derived from the
|
|
|
|
remote's path/URL. Defaults to "identity."
|
2006-07-12 03:18:53 +04:00
|
|
|
"""
|
2007-06-02 03:40:14 +04:00
|
|
|
|
2017-06-20 21:16:18 +03:00
|
|
|
if isinstance(source, bytes):
|
2008-02-13 23:37:38 +03:00
|
|
|
origsource = ui.expandpath(source)
|
2010-02-07 17:23:46 +03:00
|
|
|
source, branch = parseurl(origsource, branch)
|
2012-07-13 23:46:53 +04:00
|
|
|
srcpeer = peer(ui, peeropts, source)
|
2006-07-28 21:46:25 +04:00
|
|
|
else:
|
2012-07-13 23:46:53 +04:00
|
|
|
srcpeer = source.peer() # in case we were called with a localrepo
|
2010-07-02 10:12:50 +04:00
|
|
|
branch = (None, branch or [])
|
2012-07-13 23:46:53 +04:00
|
|
|
origsource = source = srcpeer.url()
|
|
|
|
rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
|
2006-07-28 21:46:25 +04:00
|
|
|
|
2006-07-12 03:18:53 +04:00
|
|
|
if dest is None:
|
2006-07-28 21:46:25 +04:00
|
|
|
dest = defaultdest(source)
|
2014-03-21 19:46:12 +04:00
|
|
|
if dest:
|
|
|
|
ui.status(_("destination directory: %s\n") % dest)
|
2009-08-12 21:06:12 +04:00
|
|
|
else:
|
|
|
|
dest = ui.expandpath(dest)
|
2006-07-28 21:46:25 +04:00
|
|
|
|
2011-07-01 19:37:09 +04:00
|
|
|
dest = util.urllocalpath(dest)
|
|
|
|
source = util.urllocalpath(source)
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2012-07-06 13:45:27 +04:00
|
|
|
if not dest:
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("empty destination path is not valid"))
|
2014-06-21 13:14:09 +04:00
|
|
|
|
2017-03-02 15:27:42 +03:00
|
|
|
destvfs = vfsmod.vfs(dest, expandpath=True)
|
2014-06-21 13:14:09 +04:00
|
|
|
if destvfs.lexists():
|
|
|
|
if not destvfs.isdir():
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("destination '%s' already exists") % dest)
|
2014-06-21 13:19:49 +04:00
|
|
|
elif destvfs.listdir():
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("destination '%s' is not empty") % dest)
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2015-07-09 02:19:09 +03:00
|
|
|
shareopts = shareopts or {}
|
|
|
|
sharepool = shareopts.get('pool')
|
|
|
|
sharenamemode = shareopts.get('mode')
|
2015-08-13 09:07:07 +03:00
|
|
|
if sharepool and islocal(dest):
|
2015-07-09 02:19:09 +03:00
|
|
|
sharepath = None
|
|
|
|
if sharenamemode == 'identity':
|
|
|
|
# Resolve the name from the initial changeset in the remote
|
|
|
|
# repository. This returns nullid when the remote is empty. It
|
|
|
|
# raises RepoLookupError if revision 0 is filtered or otherwise
|
|
|
|
# not available. If we fail to resolve, sharing is not enabled.
|
|
|
|
try:
|
|
|
|
rootnode = srcpeer.lookup('0')
|
|
|
|
if rootnode != node.nullid:
|
|
|
|
sharepath = os.path.join(sharepool, node.hex(rootnode))
|
|
|
|
else:
|
|
|
|
ui.status(_('(not using pooled storage: '
|
|
|
|
'remote appears to be empty)\n'))
|
|
|
|
except error.RepoLookupError:
|
|
|
|
ui.status(_('(not using pooled storage: '
|
|
|
|
'unable to resolve identity of remote)\n'))
|
|
|
|
elif sharenamemode == 'remote':
|
2016-06-10 07:12:33 +03:00
|
|
|
sharepath = os.path.join(
|
|
|
|
sharepool, hashlib.sha1(source).hexdigest())
|
2015-07-09 02:19:09 +03:00
|
|
|
else:
|
2016-06-14 12:53:55 +03:00
|
|
|
raise error.Abort(_('unknown share naming mode: %s') %
|
|
|
|
sharenamemode)
|
2015-07-09 02:19:09 +03:00
|
|
|
|
|
|
|
if sharepath:
|
|
|
|
return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
|
|
|
|
dest, pull=pull, rev=rev, update=update,
|
|
|
|
stream=stream)
|
|
|
|
|
2012-10-06 03:10:56 +04:00
|
|
|
srclock = destlock = cleandir = None
|
2012-07-13 23:46:53 +04:00
|
|
|
srcrepo = srcpeer.local()
|
2007-07-22 01:02:10 +04:00
|
|
|
try:
|
2011-05-19 10:20:26 +04:00
|
|
|
abspath = origsource
|
|
|
|
if islocal(origsource):
|
2011-07-01 19:37:09 +04:00
|
|
|
abspath = os.path.abspath(util.urllocalpath(origsource))
|
2011-05-19 10:20:26 +04:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
if islocal(dest):
|
2012-10-06 03:10:56 +04:00
|
|
|
cleandir = dest
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
copy = False
|
2012-07-13 23:52:37 +04:00
|
|
|
if (srcrepo and srcrepo.cancopy() and islocal(dest)
|
2012-09-03 16:05:19 +04:00
|
|
|
and not phases.hassecret(srcrepo)):
|
2007-07-22 01:02:10 +04:00
|
|
|
copy = not pull and not rev
|
2006-12-10 02:06:43 +03:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
if copy:
|
|
|
|
try:
|
|
|
|
# we use a lock here because if we race with commit, we
|
|
|
|
# can end up with extra data in the cloned revlogs that's
|
|
|
|
# not pointed to by changesets, thus causing verify to
|
|
|
|
# fail
|
2011-05-27 14:42:36 +04:00
|
|
|
srclock = srcrepo.lock(wait=False)
|
2009-01-12 20:09:14 +03:00
|
|
|
except error.LockError:
|
2007-07-22 01:02:10 +04:00
|
|
|
copy = False
|
|
|
|
|
|
|
|
if copy:
|
2011-05-27 14:42:36 +04:00
|
|
|
srcrepo.hook('preoutgoing', throw=True, source='clone')
|
2011-10-29 20:02:23 +04:00
|
|
|
hgdir = os.path.realpath(os.path.join(dest, ".hg"))
|
2007-07-22 01:02:10 +04:00
|
|
|
if not os.path.exists(dest):
|
|
|
|
os.mkdir(dest)
|
2009-04-01 06:21:53 +04:00
|
|
|
else:
|
|
|
|
# only clean up directories we create ourselves
|
2012-10-06 03:10:56 +04:00
|
|
|
cleandir = hgdir
|
2007-12-02 22:37:30 +03:00
|
|
|
try:
|
2011-05-27 14:42:36 +04:00
|
|
|
destpath = hgdir
|
|
|
|
util.makedir(destpath, notindexed=True)
|
2015-06-24 08:20:08 +03:00
|
|
|
except OSError as inst:
|
2007-12-02 22:37:30 +03:00
|
|
|
if inst.errno == errno.EEXIST:
|
2012-10-06 03:10:56 +04:00
|
|
|
cleandir = None
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("destination '%s' already exists")
|
2007-12-02 22:37:30 +03:00
|
|
|
% dest)
|
|
|
|
raise
|
2008-08-14 05:18:44 +04:00
|
|
|
|
2011-08-11 02:03:16 +04:00
|
|
|
destlock = copystore(ui, srcrepo, destpath)
|
2014-09-27 00:55:53 +04:00
|
|
|
# copy bookmarks over
|
2017-03-09 03:53:24 +03:00
|
|
|
srcbookmarks = srcrepo.vfs.join('bookmarks')
|
2014-09-27 00:55:53 +04:00
|
|
|
dstbookmarks = os.path.join(destpath, 'bookmarks')
|
|
|
|
if os.path.exists(srcbookmarks):
|
|
|
|
util.copyfile(srcbookmarks, dstbookmarks)
|
2007-07-22 01:02:10 +04:00
|
|
|
|
2012-10-04 00:19:53 +04:00
|
|
|
dstcachedir = os.path.join(destpath, 'cache')
|
2017-05-25 12:59:07 +03:00
|
|
|
for cache in _cachetocopy(srcrepo):
|
|
|
|
_copycache(srcrepo, dstcachedir, cache)
|
2012-10-04 00:19:53 +04:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
# we need to re-init the repo after manually copying the data
|
|
|
|
# into it
|
2012-07-29 01:28:36 +04:00
|
|
|
destpeer = peer(srcrepo, peeropts, dest)
|
2011-05-27 14:42:36 +04:00
|
|
|
srcrepo.hook('outgoing', source='clone',
|
2010-09-02 14:08:13 +04:00
|
|
|
node=node.hex(node.nullid))
|
2006-07-12 03:18:53 +04:00
|
|
|
else:
|
2007-12-02 22:37:30 +03:00
|
|
|
try:
|
2012-10-04 21:46:43 +04:00
|
|
|
destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
|
|
|
|
# only pass ui when no srcrepo
|
2015-06-24 08:20:08 +03:00
|
|
|
except OSError as inst:
|
2007-12-02 22:37:30 +03:00
|
|
|
if inst.errno == errno.EEXIST:
|
2012-10-06 03:10:56 +04:00
|
|
|
cleandir = None
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("destination '%s' already exists")
|
2007-12-02 22:37:30 +03:00
|
|
|
% dest)
|
|
|
|
raise
|
2007-07-22 01:02:10 +04:00
|
|
|
|
|
|
|
revs = None
|
|
|
|
if rev:
|
2012-07-13 23:46:53 +04:00
|
|
|
if not srcpeer.capable('lookup'):
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("src repository does not support "
|
2009-07-17 22:52:21 +04:00
|
|
|
"revision lookup and so doesn't "
|
|
|
|
"support clone by revision"))
|
2012-07-13 23:46:53 +04:00
|
|
|
revs = [srcpeer.lookup(r) for r in rev]
|
2009-05-15 00:00:56 +04:00
|
|
|
checkout = revs[0]
|
2015-11-12 03:47:49 +03:00
|
|
|
local = destpeer.local()
|
|
|
|
if local:
|
2014-12-13 01:02:56 +03:00
|
|
|
if not stream:
|
|
|
|
if pull:
|
|
|
|
stream = False
|
|
|
|
else:
|
|
|
|
stream = None
|
2015-11-12 03:47:49 +03:00
|
|
|
# internal config: ui.quietbookmarkmove
|
2017-03-17 00:18:50 +03:00
|
|
|
overrides = {('ui', 'quietbookmarkmove'): True}
|
|
|
|
with local.ui.configoverride(overrides, 'clone'):
|
2015-11-12 03:47:49 +03:00
|
|
|
exchange.pull(local, srcpeer, revs,
|
|
|
|
streamclonerequested=stream)
|
2012-07-13 23:46:53 +04:00
|
|
|
elif srcrepo:
|
2014-09-27 02:15:49 +04:00
|
|
|
exchange.push(srcrepo, destpeer, revs=revs,
|
|
|
|
bookmarks=srcrepo._bookmarks.keys())
|
2007-07-22 01:02:10 +04:00
|
|
|
else:
|
2015-10-08 22:55:45 +03:00
|
|
|
raise error.Abort(_("clone from remote to remote not supported")
|
|
|
|
)
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2012-10-06 03:10:56 +04:00
|
|
|
cleandir = None
|
2006-07-12 03:18:53 +04:00
|
|
|
|
2012-07-13 23:46:53 +04:00
|
|
|
destrepo = destpeer.local()
|
|
|
|
if destrepo:
|
2014-10-07 00:35:02 +04:00
|
|
|
template = uimod.samplehgrcs['cloned']
|
2017-08-02 18:45:02 +03:00
|
|
|
fp = destrepo.vfs("hgrc", "wb")
|
2011-11-22 22:06:42 +04:00
|
|
|
u = util.url(abspath)
|
|
|
|
u.passwd = None
|
2017-08-02 18:45:02 +03:00
|
|
|
defaulturl = bytes(u)
|
|
|
|
fp.write(util.tonativeeol(template % defaulturl))
|
2007-07-22 01:02:10 +04:00
|
|
|
fp.close()
|
|
|
|
|
2014-03-19 05:45:14 +04:00
|
|
|
destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
|
2009-06-15 11:45:38 +04:00
|
|
|
|
2017-12-06 04:10:27 +03:00
|
|
|
if ui.configbool('experimental', 'remotenames'):
|
2017-12-06 21:56:45 +03:00
|
|
|
logexchange.pullremotenames(destrepo, srcpeer)
|
2017-12-06 04:10:27 +03:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
if update:
|
2008-04-12 09:19:52 +04:00
|
|
|
if update is not True:
|
2012-08-08 19:04:02 +04:00
|
|
|
checkout = srcpeer.lookup(update)
|
2012-10-26 14:36:15 +04:00
|
|
|
uprev = None
|
2012-10-29 17:02:30 +04:00
|
|
|
status = None
|
2012-10-26 14:36:15 +04:00
|
|
|
if checkout is not None:
|
2007-08-27 09:21:58 +04:00
|
|
|
try:
|
2012-10-26 14:36:15 +04:00
|
|
|
uprev = destrepo.lookup(checkout)
|
2009-08-31 19:58:33 +04:00
|
|
|
except error.RepoLookupError:
|
2015-09-25 01:52:11 +03:00
|
|
|
if update is not True:
|
|
|
|
try:
|
|
|
|
uprev = destrepo.lookup(update)
|
|
|
|
except error.RepoLookupError:
|
|
|
|
pass
|
2012-10-26 14:36:15 +04:00
|
|
|
if uprev is None:
|
|
|
|
try:
|
|
|
|
uprev = destrepo._bookmarks['@']
|
2012-10-26 16:37:03 +04:00
|
|
|
update = '@'
|
2012-10-29 17:02:30 +04:00
|
|
|
bn = destrepo[uprev].branch()
|
|
|
|
if bn == 'default':
|
|
|
|
status = _("updating to bookmark @\n")
|
|
|
|
else:
|
2014-03-31 21:46:03 +04:00
|
|
|
status = (_("updating to bookmark @ on branch %s\n")
|
2015-09-25 01:47:23 +03:00
|
|
|
% bn)
|
2012-10-26 14:36:15 +04:00
|
|
|
except KeyError:
|
|
|
|
try:
|
|
|
|
uprev = destrepo.branchtip('default')
|
|
|
|
except error.RepoLookupError:
|
|
|
|
uprev = destrepo.lookup('tip')
|
2012-10-29 17:02:30 +04:00
|
|
|
if not status:
|
|
|
|
bn = destrepo[uprev].branch()
|
|
|
|
status = _("updating to branch %s\n") % bn
|
|
|
|
destrepo.ui.status(status)
|
2011-05-27 14:42:36 +04:00
|
|
|
_update(destrepo, uprev)
|
2012-10-02 11:26:42 +04:00
|
|
|
if update in destrepo._bookmarks:
|
2015-04-14 08:27:01 +03:00
|
|
|
bookmarks.activate(destrepo, update)
|
2007-07-22 01:02:10 +04:00
|
|
|
finally:
|
2012-01-19 00:56:52 +04:00
|
|
|
release(srclock, destlock)
|
2012-10-06 03:10:56 +04:00
|
|
|
if cleandir is not None:
|
|
|
|
shutil.rmtree(cleandir, True)
|
2012-07-13 23:46:53 +04:00
|
|
|
if srcpeer is not None:
|
|
|
|
srcpeer.close()
|
2013-06-09 00:37:08 +04:00
|
|
|
return srcpeer, destpeer
|
2006-08-04 00:24:41 +04:00
|
|
|
|
2015-12-15 02:13:25 +03:00
|
|
|
def _showstats(repo, stats, quietempty=False):
    """Print the file-count statistics tuple produced by an update/merge.

    When quietempty is true and every counter in stats is zero, nothing
    is printed.
    """
    if quietempty and not any(stats):
        return
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n") % stats
    repo.ui.status(msg)
|
2006-10-10 12:39:44 +04:00
|
|
|
|
2017-02-13 23:58:37 +03:00
|
|
|
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Changes are clobbered when overwrite is set, merged otherwise.

    Returns stats (see pydoc mercurial.merge.applyupdates)."""
    wclabels = ['working copy', 'destination']
    return mergemod.update(repo, node, False, overwrite,
                           labels=wclabels,
                           updatecheck=updatecheck)
|
2012-10-24 20:45:22 +04:00
|
|
|
|
2017-02-13 23:58:37 +03:00
|
|
|
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolvedcount = stats[3]
    if unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolvedcount > 0
|
2006-08-04 00:24:41 +04:00
|
|
|
|
2008-12-28 21:59:42 +03:00
|
|
|
# naming conflict in clone(): a local variable named 'update' shadows this
# function there, so keep a module-level alias for internal callers.
_update = update
|
|
|
|
|
2015-12-15 01:08:14 +03:00
|
|
|
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # an interrupted graft is meaningless after a forced update; drop its state
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    unresolvedcount = stats[3]
    return unresolvedcount > 0
|
2006-08-04 00:24:41 +04:00
|
|
|
|
2016-03-11 22:35:42 +03:00
|
|
|
# naming conflict in updatetotally(): a parameter named 'clean' shadows this
# function there, so keep a module-level alias for internal callers.
_clean = clean
|
|
|
|
|
2017-02-13 23:58:37 +03:00
|
|
|
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly a bookmark
            # to move / a name to activate) from the repo state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # bail out before touching anything; after this check the
                # working directory is known clean, so no further check needed
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # bookmark handling after the working-directory update
        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination was named by something other than a bookmark:
            # drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
|
|
|
|
|
2016-10-07 18:51:50 +03:00
|
|
|
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                            labels=labels)
    _showstats(repo, stats)
    unresolvedcount = stats[3]
    if unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolvedcount > 0
|
2006-08-08 07:54:33 +04:00
|
|
|
|
2010-10-14 23:36:00 +04:00
|
|
|
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # fetch the incoming changesets into a (possibly temporary) bundle repo;
    # cleanupfn tears down whatever getremotechanges created
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always dispose of the temporary bundle/peer resources
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
|
|
|
|
|
|
|
|
def incoming(ui, repo, source, opts):
    """Show changesets in source that are not in repo.

    Returns 0 if incoming changes were found, 1 otherwise (including
    the subrepo results when --subrepos is given).
    """
    def subreporecurse():
        # recurse into subrepos; overall result is 0 if any of them
        # (or the main repo) reported incoming changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
|
2010-09-13 15:09:30 +04:00
|
|
|
|
2010-10-15 07:21:51 +04:00
|
|
|
def _outgoing(ui, repo, dest, opts):
    """Compute changesets in repo that are missing from dest.

    Returns a (missing-nodes, other-peer) pair; the list is empty when
    there is nothing to push.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
|
2010-10-15 07:21:51 +04:00
|
|
|
|
|
|
|
def outgoing(ui, repo, dest, opts):
    """Show changesets in repo that are not in dest.

    Returns 0 if outgoing changes were found, 1 otherwise (including
    the subrepo results when --subrepos is given).
    """
    def recurse():
        # recurse into subrepos; overall result is 0 if any of them
        # (or the main repo) reported outgoing changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        # run the hooks even when nothing is outgoing
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
|
2010-09-13 15:09:24 +04:00
|
|
|
|
2018-02-16 06:10:45 +03:00
|
|
|
def verify(repo, revs=None):
    """verify the consistency of a repository

    If revs is None, verify everything in the repository.

    Otherwise, revs is a smartset that specifies revisions to verify
    and some checks requiring knowledge about the entire repo will be skipped.
    """
    ret = verifymod.verify(repo, revs)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    if not repo.ui.configbool("verify", "skipmanifests"):
        # pathto() is needed for -R case
        revs = repo.revs("filelog(%s)",
                         util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

        if revs:
            repo.ui.status(_('checking subrepo links\n'))
            for rev in revs:
                ctx = repo[rev]
                try:
                    for subpath in ctx.substate:
                        try:
                            # keep any failure sticky: once ret is truthy it
                            # stays truthy across remaining subrepos
                            ret = (ctx.sub(subpath, allowcreate=False).verify()
                                   or ret)
                        except error.RepoError as e:
                            repo.ui.warn(('%s: %s\n') % (rev, e))
                except Exception:
                    # .hgsubstate parsing itself blew up; report and continue
                    repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                                 node.short(ctx.node()))

    return ret
|
2010-06-01 20:18:57 +04:00
|
|
|
|
|
|
|
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for optname in 'ssh', 'remotecmd':
        value = opts.get(optname) or src.config('ui', optname)
        if value:
            dst.setconfig("ui", optname, value, 'copied')

    # copy bundle-specific options
    mainreporoot = src.config('bundle', 'mainreporoot')
    if mainreporoot:
        dst.setconfig('bundle', 'mainreporoot', mainreporoot, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
|
2015-08-23 04:54:34 +03:00
|
|
|
|
|
|
|
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Entries are (repo attribute holding the directory, filename);
# see cachedlocalrepo._repostate below, which stats each of them.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
|
|
|
|
|
|
|
|
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        # remember the repoview filter so refreshed/copied instances get
        # the same view (and its caches) instead of the default one
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Snapshot (mtime, size) for each file of interest (see 'foi');
        # returns (state-tuple, newest mtime seen).
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (e.g. no bookmarks yet); fall back to
                # the containing directory so we still track something
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the freshness snapshot so the copy does not consider
        # itself immediately stale
        c._state = self._state
        c.mtime = self.mtime
        return c
|