# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
|
2006-12-15 05:25:19 +03:00
|
|
|
from i18n import _
|
2010-06-17 01:04:46 +04:00
|
|
|
import repo, changegroup, subrepo, discovery, pushkey
|
2011-02-10 22:46:27 +03:00
|
|
|
import changelog, dirstate, filelog, manifest, context, bookmarks
|
2009-05-14 17:35:46 +04:00
|
|
|
import lock, transaction, store, encoding
|
2011-04-20 21:54:57 +04:00
|
|
|
import scmutil, util, extensions, hook, error
|
2010-03-11 19:43:44 +03:00
|
|
|
import match as matchmod
|
|
|
|
import merge as mergemod
|
|
|
|
import tags as tagsmod
|
2010-04-12 23:37:21 +04:00
|
|
|
import url as urlmod
|
2009-04-22 04:01:22 +04:00
|
|
|
from lock import release
|
2010-06-07 22:03:32 +04:00
|
|
|
import weakref, errno, os, time, inspect
|
2009-04-30 05:47:15 +04:00
|
|
|
propertycache = util.propertycache
|
2009-04-22 04:01:22 +04:00
|
|
|
|
2006-07-14 22:17:22 +04:00
|
|
|
class localrepository(repo.repository):
    # Wire/protocol capabilities advertised by this repository class.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # On-disk revlog format requirements this class can read/write.
    supportedformats = set(('revlogv1', 'parentdelta'))
    # Full set of supported entries for the .hg/requires file
    # (formats plus store-layout features).
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
|
2006-06-16 03:37:23 +04:00
|
|
|
|
2009-04-27 01:50:43 +04:00
|
|
|
    def __init__(self, baseui, path=None, create=0):
        """Open (or, when create is true, initialize) the repository at path.

        baseui is copied so per-repo configuration (.hg/hgrc) does not leak
        back into the caller's ui.  Raises error.RepoError when the repo is
        missing (or already exists with create), and error.RequirementError
        for unsupported .hg/requires entries.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.path_auditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # load per-repository configuration and any extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc is fine; defaults apply
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                # notindexed: keep desktop indexers (e.g. Windows search)
                # out of .hg
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            # refuse to open repos needing features we don't implement
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        # honor .hg/sharedpath (shared-store repositories); fall back to
        # our own .hg when the file is absent
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        # branch-head cache (filled lazily by updatebranchcache)
        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        # filter-pattern and data-filter caches for encode/decode filters
        self.filterpats = {}
        self._datafilters = {}
        # weakrefs to the active transaction / locks, if any
        self._transref = self._lockref = self._wlockref = None
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-09-15 12:44:10 +04:00
|
|
|
def _applyrequirements(self, requirements):
|
|
|
|
self.requirements = requirements
|
|
|
|
self.sopener.options = {}
|
|
|
|
if 'parentdelta' in requirements:
|
|
|
|
self.sopener.options['parentdelta'] = 1
|
|
|
|
|
|
|
|
def _writerequirements(self):
|
|
|
|
reqfile = self.opener("requires", "w")
|
|
|
|
for r in self.requirements:
|
|
|
|
reqfile.write("%s\n" % r)
|
|
|
|
reqfile.close()
|
|
|
|
|
2010-09-03 14:58:51 +04:00
|
|
|
def _checknested(self, path):
|
|
|
|
"""Determine if path is a legal nested repository."""
|
|
|
|
if not path.startswith(self.root):
|
|
|
|
return False
|
|
|
|
subpath = path[len(self.root) + 1:]
|
|
|
|
|
|
|
|
# XXX: Checking against the current working copy is wrong in
|
|
|
|
# the sense that it can reject things like
|
|
|
|
#
|
|
|
|
# $ hg cat -r 10 sub/x.txt
|
|
|
|
#
|
|
|
|
# if sub/ is no longer a subrepository in the working copy
|
|
|
|
# parent revision.
|
|
|
|
#
|
|
|
|
# However, it can of course also allow things that would have
|
|
|
|
# been rejected before, such as the above cat command if sub/
|
|
|
|
# is a subrepository now, but was a normal directory before.
|
|
|
|
# The old path auditor would have rejected by mistake since it
|
|
|
|
# panics when it sees sub/.hg/.
|
|
|
|
#
|
2010-09-07 17:31:56 +04:00
|
|
|
# All in all, checking against the working copy seems sensible
|
|
|
|
# since we want to prevent access to nested repositories on
|
|
|
|
# the filesystem *now*.
|
|
|
|
ctx = self[None]
|
2010-09-03 14:58:51 +04:00
|
|
|
parts = util.splitpath(subpath)
|
|
|
|
while parts:
|
|
|
|
prefix = os.sep.join(parts)
|
|
|
|
if prefix in ctx.substate:
|
|
|
|
if prefix == subpath:
|
|
|
|
return True
|
|
|
|
else:
|
|
|
|
sub = ctx.sub(prefix)
|
|
|
|
return sub.checknested(subpath[len(prefix) + 1:])
|
|
|
|
else:
|
|
|
|
parts.pop()
|
|
|
|
return False
|
|
|
|
|
2011-02-10 22:46:27 +03:00
|
|
|
    @util.propertycache
    def _bookmarks(self):
        # Lazily-loaded bookmark name -> node mapping (cached on the instance).
        return bookmarks.read(self)
|
|
|
|
|
|
|
|
    @util.propertycache
    def _bookmarkcurrent(self):
        # Lazily-loaded name of the active bookmark (cached on the instance).
        return bookmarks.readcurrent(self)
|
2010-09-03 14:58:51 +04:00
|
|
|
|
2009-04-30 05:47:15 +04:00
|
|
|
    @propertycache
    def changelog(self):
        """The repository changelog, honoring pending data during hooks."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction in this repo is in progress (we are inside a
            # pretxn* hook): include the not-yet-committed revisions
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        # let other revlogs default to the changelog's format version
        self.sopener.options['defversion'] = c.version
        return c
|
|
|
|
|
|
|
|
    @propertycache
    def manifest(self):
        # Lazily-opened manifest revlog (cached on the instance).
        return manifest.manifest(self.sopener)
|
|
|
|
|
|
|
|
@propertycache
|
|
|
|
def dirstate(self):
|
2010-11-22 21:43:31 +03:00
|
|
|
warned = [0]
|
|
|
|
def validate(node):
|
|
|
|
try:
|
|
|
|
r = self.changelog.rev(node)
|
|
|
|
return node
|
|
|
|
except error.LookupError:
|
|
|
|
if not warned[0]:
|
|
|
|
warned[0] = True
|
|
|
|
self.ui.warn(_("warning: ignoring unknown"
|
2010-11-24 15:25:12 +03:00
|
|
|
" working parent %s!\n") % short(node))
|
2010-11-22 21:43:31 +03:00
|
|
|
return nullid
|
|
|
|
|
|
|
|
return dirstate.dirstate(self.opener, self.ui, self.root, validate)
|
2006-04-29 02:50:22 +04:00
|
|
|
|
2008-06-26 23:35:46 +04:00
|
|
|
def __getitem__(self, changeid):
|
2009-05-20 02:52:46 +04:00
|
|
|
if changeid is None:
|
2008-06-26 23:35:46 +04:00
|
|
|
return context.workingctx(self)
|
|
|
|
return context.changectx(self, changeid)
|
|
|
|
|
2009-11-24 15:32:19 +03:00
|
|
|
def __contains__(self, changeid):
|
|
|
|
try:
|
|
|
|
return bool(self.lookup(changeid))
|
|
|
|
except error.RepoLookupError:
|
|
|
|
return False
|
|
|
|
|
2008-06-26 23:35:50 +04:00
|
|
|
def __nonzero__(self):
|
|
|
|
return True
|
|
|
|
|
|
|
|
def __len__(self):
|
|
|
|
return len(self.changelog)
|
|
|
|
|
|
|
|
def __iter__(self):
|
|
|
|
for i in xrange(len(self)):
|
|
|
|
yield i
|
|
|
|
|
2006-07-26 00:50:32 +04:00
|
|
|
def url(self):
|
|
|
|
return 'file:' + self.root
|
|
|
|
|
2006-02-15 02:28:06 +03:00
|
|
|
    def hook(self, name, throw=False, **args):
        # Run the named hook with this repo's ui; when throw is true a
        # failing hook raises instead of returning a status.
        return hook.hook(self.ui, self, name, throw, **args)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-07-12 19:59:20 +04:00
|
|
|
tag_disallowed = ':\r\n'
|
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
def _tag(self, names, node, message, local, user, date, extra={}):
|
2008-03-15 01:38:56 +03:00
|
|
|
if isinstance(names, str):
|
|
|
|
allchars = names
|
|
|
|
names = (names,)
|
|
|
|
else:
|
|
|
|
allchars = ''.join(names)
|
2007-02-27 23:58:40 +03:00
|
|
|
for c in self.tag_disallowed:
|
2008-03-15 01:38:56 +03:00
|
|
|
if c in allchars:
|
2007-02-27 23:58:40 +03:00
|
|
|
raise util.Abort(_('%r cannot be used in a tag name') % c)
|
|
|
|
|
2010-04-19 12:41:12 +04:00
|
|
|
branches = self.branchmap()
|
2008-03-15 01:38:56 +03:00
|
|
|
for name in names:
|
|
|
|
self.hook('pretag', throw=True, node=hex(node), tag=name,
|
|
|
|
local=local)
|
2010-04-19 12:41:12 +04:00
|
|
|
if name in branches:
|
|
|
|
self.ui.warn(_("warning: tag %s conflicts with existing"
|
|
|
|
" branch name\n") % name)
|
2007-02-27 23:58:40 +03:00
|
|
|
|
2008-03-15 01:38:56 +03:00
|
|
|
def writetags(fp, names, munge, prevtags):
|
2008-02-04 02:03:46 +03:00
|
|
|
fp.seek(0, 2)
|
2007-07-17 07:15:03 +04:00
|
|
|
if prevtags and prevtags[-1] != '\n':
|
|
|
|
fp.write('\n')
|
2008-03-15 01:38:56 +03:00
|
|
|
for name in names:
|
2008-06-14 02:29:10 +04:00
|
|
|
m = munge and munge(name) or name
|
2009-07-16 18:39:41 +04:00
|
|
|
if self._tagtypes and name in self._tagtypes:
|
|
|
|
old = self._tags.get(name, nullid)
|
2008-06-14 02:29:10 +04:00
|
|
|
fp.write('%s %s\n' % (hex(old), m))
|
|
|
|
fp.write('%s %s\n' % (hex(node), m))
|
2007-07-17 07:15:03 +04:00
|
|
|
fp.close()
|
2007-07-22 01:02:09 +04:00
|
|
|
|
2007-07-17 07:15:03 +04:00
|
|
|
prevtags = ''
|
2007-02-27 23:58:40 +03:00
|
|
|
if local:
|
2007-07-17 07:15:03 +04:00
|
|
|
try:
|
|
|
|
fp = self.opener('localtags', 'r+')
|
2009-03-23 15:13:06 +03:00
|
|
|
except IOError:
|
2007-07-17 07:15:03 +04:00
|
|
|
fp = self.opener('localtags', 'a')
|
|
|
|
else:
|
|
|
|
prevtags = fp.read()
|
|
|
|
|
2007-02-27 23:58:40 +03:00
|
|
|
# local tags are stored in the current charset
|
2008-03-15 01:38:56 +03:00
|
|
|
writetags(fp, names, None, prevtags)
|
|
|
|
for name in names:
|
|
|
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
2007-02-27 23:58:40 +03:00
|
|
|
return
|
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
try:
|
|
|
|
fp = self.wfile('.hgtags', 'rb+')
|
|
|
|
except IOError:
|
|
|
|
fp = self.wfile('.hgtags', 'ab')
|
2007-02-27 23:58:40 +03:00
|
|
|
else:
|
2009-05-14 22:20:40 +04:00
|
|
|
prevtags = fp.read()
|
2007-07-17 07:15:03 +04:00
|
|
|
|
|
|
|
# committed tags are stored in UTF-8
|
2009-04-03 23:51:48 +04:00
|
|
|
writetags(fp, names, encoding.fromlocal, prevtags)
|
2007-07-17 07:15:03 +04:00
|
|
|
|
2010-12-24 17:23:01 +03:00
|
|
|
fp.close()
|
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
if '.hgtags' not in self.dirstate:
|
2010-06-07 22:03:32 +04:00
|
|
|
self[None].add(['.hgtags'])
|
2007-02-27 23:58:40 +03:00
|
|
|
|
2010-03-11 19:43:44 +03:00
|
|
|
m = matchmod.exact(self.root, '', ['.hgtags'])
|
2009-06-01 23:11:32 +04:00
|
|
|
tagnode = self.commit(message, user, date, extra=extra, match=m)
|
2007-02-27 23:58:40 +03:00
|
|
|
|
2008-03-15 01:38:56 +03:00
|
|
|
for name in names:
|
|
|
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
2007-02-27 23:58:40 +03:00
|
|
|
|
|
|
|
return tagnode
|
|
|
|
|
2008-03-15 01:38:56 +03:00
|
|
|
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # a global tag commits .hgtags itself; refuse if the user has
            # uncommitted .hgtags changes that would be swept up with it
            # (check modified/added/removed/deleted/unknown status lists)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
|
2006-07-12 19:59:20 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def tags(self):
|
|
|
|
'''return a mapping of tag to node'''
|
2009-07-16 18:39:41 +04:00
|
|
|
if self._tags is None:
|
|
|
|
(self._tags, self._tagtypes) = self._findtags()
|
2009-07-16 18:39:41 +04:00
|
|
|
|
2009-07-16 18:39:41 +04:00
|
|
|
return self._tags
|
2009-07-16 18:39:41 +04:00
|
|
|
|
|
|
|
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        # global tags from .hgtags first, then .hg/localtags overrides
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-12-09 10:32:05 +03:00
|
|
|
def tagtype(self, tagname):
|
|
|
|
'''
|
|
|
|
return the type of the given tag. result can be:
|
|
|
|
|
|
|
|
'local' : a local tag
|
|
|
|
'global' : a global tag
|
|
|
|
None : tag does not exist
|
|
|
|
'''
|
|
|
|
|
|
|
|
self.tags()
|
2007-12-29 21:49:48 +03:00
|
|
|
|
2009-07-16 18:39:41 +04:00
|
|
|
return self._tagtypes.get(tagname)
|
2007-12-09 10:32:05 +03:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def tagslist(self):
|
|
|
|
'''return a list of tags ordered by revision'''
|
|
|
|
l = []
|
2009-01-12 11:16:03 +03:00
|
|
|
for t, n in self.tags().iteritems():
|
2011-04-07 18:16:17 +04:00
|
|
|
r = self.changelog.rev(n)
|
2006-01-12 09:57:58 +03:00
|
|
|
l.append((r, t, n))
|
2009-04-27 01:50:44 +04:00
|
|
|
return [(t, n) for r, t, n in sorted(l)]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def nodetags(self, node):
|
|
|
|
'''return the tags associated with a node'''
|
|
|
|
if not self.nodetagscache:
|
|
|
|
self.nodetagscache = {}
|
2009-01-12 11:16:03 +03:00
|
|
|
for t, n in self.tags().iteritems():
|
2006-01-12 09:57:58 +03:00
|
|
|
self.nodetagscache.setdefault(n, []).append(t)
|
2010-04-26 23:58:36 +04:00
|
|
|
for tags in self.nodetagscache.itervalues():
|
|
|
|
tags.sort()
|
2005-08-28 01:21:25 +04:00
|
|
|
return self.nodetagscache.get(node, [])
|
|
|
|
|
2011-02-11 21:36:15 +03:00
|
|
|
def nodebookmarks(self, node):
|
|
|
|
marks = []
|
|
|
|
for bookmark, n in self._bookmarks.iteritems():
|
|
|
|
if n == node:
|
|
|
|
marks.append(bookmark)
|
|
|
|
return sorted(marks)
|
|
|
|
|
2008-02-15 21:06:36 +03:00
|
|
|
    def _branchtags(self, partial, lrev):
        # Bring the partial branch-head cache (valid through revision lrev)
        # up to tip and write it back to disk.
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
|
|
|
|
|
2010-08-29 01:57:39 +04:00
|
|
|
    def updatebranchcache(self):
        """Refresh self._branchcache so it reflects the current tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (first call, or history was
            # stripped): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # extend the in-memory cache incrementally from oldtip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
|
2006-12-07 19:35:43 +03:00
|
|
|
|
2010-08-29 01:57:39 +04:00
|
|
|
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # make sure the cache reflects the current tip before handing it out
        self.updatebranchcache()
        return self._branchcache
|
2009-01-15 05:47:38 +03:00
|
|
|
|
|
|
|
def branchtags(self):
|
|
|
|
'''return a dict where branch names map to the tipmost head of
|
2009-01-15 05:47:38 +03:00
|
|
|
the branch, open heads come before closed'''
|
|
|
|
bt = {}
|
2009-10-31 02:31:08 +03:00
|
|
|
for bn, heads in self.branchmap().iteritems():
|
2010-02-08 16:52:28 +03:00
|
|
|
tip = heads[-1]
|
|
|
|
for h in reversed(heads):
|
2009-01-15 05:47:38 +03:00
|
|
|
if 'close' not in self.changelog.read(h)[5]:
|
2010-02-08 16:52:28 +03:00
|
|
|
tip = h
|
2009-01-15 05:47:38 +03:00
|
|
|
break
|
2010-02-08 16:52:28 +03:00
|
|
|
bt[bn] = tip
|
2009-01-15 05:47:38 +03:00
|
|
|
return bt
|
|
|
|
|
2006-10-24 06:32:56 +04:00
|
|
|
    def _readbranchcache(self):
        """Load .hg/cache/branchheads.

        Returns (partial, last, lrev): a branch -> [heads] dict plus the
        tip node/rev the cache was valid for, or ({}, nullid, nullrev)
        when the cache is missing or stale.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tiphex> <tiprev>" the cache was written at
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                # cache stores labels in UTF-8; convert to local encoding
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # the cache is advisory: any parse problem just means we
            # rebuild from scratch (deliberately broad catch)
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
|
2006-10-18 03:31:18 +04:00
|
|
|
|
2006-10-24 06:32:56 +04:00
|
|
|
    def _writebranchcache(self, branches, tip, tiprev):
        """Best-effort write of the branch-head cache to disk.

        Failures are ignored: the cache is purely an optimization.
        """
        try:
            # atomictemp so readers never see a half-written cache
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    # labels stored in UTF-8 on disk
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass
|
2006-10-18 03:31:18 +04:00
|
|
|
|
2010-03-26 19:02:23 +03:00
|
|
|
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs from ctxgen into the branch -> [heads] dict
        partial, pruning entries that are no longer true heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # bound the reachability walk at the oldest candidate head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                # drop heads that latest can reach (they are ancestors)
                bheads = [b for b in bheads if b not in reachable]
                partial[branch] = bheads
|
2006-10-24 06:32:56 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
    def lookup(self, key):
        """Resolve key to a changelog node.

        Precedence: integer rev, '.', 'null', 'tip', exact changelog match,
        bookmark, tag, branch, then unambiguous node-hex prefix.  Raises
        error.RepoLookupError when nothing matches (error.Abort if key is a
        dirstate parent missing from the changelog).
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-04-13 03:33:25 +04:00
|
|
|
def lookupbranch(self, key, remote=None):
|
|
|
|
repo = remote or self
|
|
|
|
if key in repo.branchmap():
|
|
|
|
return key
|
|
|
|
|
|
|
|
repo = (remote and remote.local()) and remote or self
|
|
|
|
return repo[key].branch()
|
|
|
|
|
2011-03-22 11:22:21 +03:00
|
|
|
def known(self, nodes):
|
|
|
|
nm = self.changelog.nodemap
|
|
|
|
return [(n in nm) for n in nodes]
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
    def local(self):
        # localrepository is always a local (on-disk) repository
        return True
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
|
|
|
|
|
|
|
|
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
|
|
|
|
|
|
|
|
def file(self, f):
|
2006-01-12 09:57:58 +03:00
|
|
|
if f[0] == '/':
|
|
|
|
f = f[1:]
|
2007-03-23 03:52:38 +03:00
|
|
|
return filelog.filelog(self.sopener, f)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-06-26 02:35:20 +04:00
|
|
|
    def changectx(self, changeid):
        # legacy alias for repo[changeid]
        return self[changeid]
|
2006-10-03 10:21:46 +04:00
|
|
|
|
2006-09-30 00:48:16 +04:00
|
|
|
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory
        return self[changeid].parents()
|
2006-09-30 00:48:16 +04:00
|
|
|
|
2006-06-29 02:08:10 +04:00
|
|
|
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
    def getcwd(self):
        # current working directory, relative to the repo root
        return self.dirstate.getcwd()
|
|
|
|
|
2007-06-09 06:49:12 +04:00
|
|
|
    def pathto(self, f, cwd=None):
        # repo-relative path f expressed relative to cwd
        return self.dirstate.pathto(f, cwd)
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
|
|
|
|
|
2007-03-24 05:40:25 +03:00
|
|
|
    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
|
|
|
|
|
2010-07-23 19:28:20 +04:00
|
|
|
    def _loadfilter(self, filter):
        """Build (and cache) the list of (matcher, filterfn, params) triples
        for the given filter config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a previously-configured pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter whose name
                # prefixes the command; the remainder becomes its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
|
2005-09-15 11:59:16 +04:00
|
|
|
|
2010-10-10 21:10:16 +04:00
|
|
|
def _filter(self, filterpats, filename, data):
|
|
|
|
for mf, fn, cmd in filterpats:
|
2005-09-15 11:59:16 +04:00
|
|
|
if mf(filename):
|
2009-09-19 03:15:38 +04:00
|
|
|
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
|
2007-12-22 07:21:17 +03:00
|
|
|
data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
|
2005-09-15 11:59:16 +04:00
|
|
|
break
|
|
|
|
|
|
|
|
return data
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-10-10 20:58:45 +04:00
|
|
|
    @propertycache
    def _encodefilterpats(self):
        # compiled [encode] filter patterns, loaded lazily
        return self._loadfilter('encode')
|
|
|
|
|
|
|
|
    @propertycache
    def _decodefilterpats(self):
        # compiled [decode] filter patterns, loaded lazily
        return self._loadfilter('decode')
|
|
|
|
|
2008-01-28 23:39:47 +03:00
|
|
|
    def adddatafilter(self, name, filter):
        # register an in-process data filter callable under the given name
        self._datafilters[name] = filter
|
|
|
|
|
2006-12-30 05:04:31 +03:00
|
|
|
def wread(self, filename):
|
|
|
|
if self._link(filename):
|
|
|
|
data = os.readlink(self.wjoin(filename))
|
|
|
|
else:
|
|
|
|
data = self.wopener(filename, 'r').read()
|
2010-10-10 20:58:45 +04:00
|
|
|
return self._filter(self._encodefilterpats, filename, data)
|
2005-09-15 11:59:16 +04:00
|
|
|
|
2006-12-30 05:04:31 +03:00
|
|
|
def wwrite(self, filename, data, flags):
|
2010-10-10 20:58:45 +04:00
|
|
|
data = self._filter(self._decodefilterpats, filename, data)
|
2008-08-11 06:55:06 +04:00
|
|
|
if 'l' in flags:
|
|
|
|
self.wopener.symlink(data, filename)
|
|
|
|
else:
|
|
|
|
self.wopener(filename, 'w').write(data)
|
|
|
|
if 'x' in flags:
|
|
|
|
util.set_flags(self.wjoin(filename), False, True)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-12-30 05:04:31 +03:00
|
|
|
    def wwritedata(self, filename, data):
        # apply decode filters to data without writing anything to disk
        return self._filter(self._decodefilterpats, filename, data)
|
2006-12-30 05:04:31 +03:00
|
|
|
|
2010-04-10 02:23:35 +04:00
|
|
|
def transaction(self, desc):
|
2009-04-15 21:54:22 +04:00
|
|
|
tr = self._transref and self._transref() or None
|
|
|
|
if tr and tr.running():
|
|
|
|
return tr.nest()
|
2006-02-28 21:24:54 +03:00
|
|
|
|
2008-01-16 20:32:25 +03:00
|
|
|
# abort here if the journal already exists
|
|
|
|
if os.path.exists(self.sjoin("journal")):
|
2010-01-25 09:05:27 +03:00
|
|
|
raise error.RepoError(
|
|
|
|
_("abandoned transaction found - run hg recover"))
|
2008-01-16 20:32:25 +03:00
|
|
|
|
2006-06-01 21:08:29 +04:00
|
|
|
# save dirstate for rollback
|
2005-08-28 01:21:25 +04:00
|
|
|
try:
|
|
|
|
ds = self.opener("dirstate").read()
|
|
|
|
except IOError:
|
|
|
|
ds = ""
|
|
|
|
self.opener("journal.dirstate", "w").write(ds)
|
2010-11-25 00:56:32 +03:00
|
|
|
self.opener("journal.branch", "w").write(
|
|
|
|
encoding.fromlocal(self.dirstate.branch()))
|
2010-04-13 04:11:14 +04:00
|
|
|
self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-12-05 13:28:21 +03:00
|
|
|
renames = [(self.sjoin("journal"), self.sjoin("undo")),
|
2008-01-08 00:26:12 +03:00
|
|
|
(self.join("journal.dirstate"), self.join("undo.dirstate")),
|
2010-04-10 02:23:35 +04:00
|
|
|
(self.join("journal.branch"), self.join("undo.branch")),
|
|
|
|
(self.join("journal.desc"), self.join("undo.desc"))]
|
2006-10-24 02:12:20 +04:00
|
|
|
tr = transaction.transaction(self.ui.warn, self.sopener,
|
2008-02-09 23:38:54 +03:00
|
|
|
self.sjoin("journal"),
|
|
|
|
aftertrans(renames),
|
2008-08-14 05:18:42 +04:00
|
|
|
self.store.createmode)
|
2007-07-22 01:02:10 +04:00
|
|
|
self._transref = weakref.ref(tr)
|
2006-02-28 21:24:54 +03:00
|
|
|
return tr
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def recover(self):
|
2009-04-22 04:01:22 +04:00
|
|
|
lock = self.lock()
|
2007-07-22 01:02:10 +04:00
|
|
|
try:
|
|
|
|
if os.path.exists(self.sjoin("journal")):
|
|
|
|
self.ui.status(_("rolling back interrupted transaction\n"))
|
2010-01-25 09:05:27 +03:00
|
|
|
transaction.rollback(self.sopener, self.sjoin("journal"),
|
|
|
|
self.ui.warn)
|
2007-07-22 01:02:10 +04:00
|
|
|
self.invalidate()
|
|
|
|
return True
|
|
|
|
else:
|
|
|
|
self.ui.warn(_("no interrupted transaction available\n"))
|
|
|
|
return False
|
|
|
|
finally:
|
2009-04-22 04:01:22 +04:00
|
|
|
lock.release()
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-04-10 02:23:37 +04:00
|
|
|
def rollback(self, dryrun=False):
|
2007-07-22 01:02:10 +04:00
|
|
|
wlock = lock = None
|
2007-07-22 01:02:10 +04:00
|
|
|
try:
|
2007-07-22 01:02:10 +04:00
|
|
|
wlock = self.wlock()
|
|
|
|
lock = self.lock()
|
2007-07-22 01:02:10 +04:00
|
|
|
if os.path.exists(self.sjoin("undo")):
|
2010-04-10 02:23:37 +04:00
|
|
|
try:
|
2010-04-13 04:11:14 +04:00
|
|
|
args = self.opener("undo.desc", "r").read().splitlines()
|
|
|
|
if len(args) >= 3 and self.ui.verbose:
|
2011-02-10 11:03:06 +03:00
|
|
|
desc = _("repository tip rolled back to revision %s"
|
2010-04-13 04:21:30 +04:00
|
|
|
" (undo %s: %s)\n") % (
|
2010-05-14 02:36:45 +04:00
|
|
|
int(args[0]) - 1, args[1], args[2])
|
2010-04-13 04:11:14 +04:00
|
|
|
elif len(args) >= 2:
|
2011-02-10 11:03:06 +03:00
|
|
|
desc = _("repository tip rolled back to revision %s"
|
|
|
|
" (undo %s)\n") % (
|
2010-05-14 02:36:45 +04:00
|
|
|
int(args[0]) - 1, args[1])
|
2010-04-13 04:11:14 +04:00
|
|
|
except IOError:
|
2010-04-10 02:23:37 +04:00
|
|
|
desc = _("rolling back unknown transaction\n")
|
|
|
|
self.ui.status(desc)
|
|
|
|
if dryrun:
|
|
|
|
return
|
2010-01-25 09:05:27 +03:00
|
|
|
transaction.rollback(self.sopener, self.sjoin("undo"),
|
|
|
|
self.ui.warn)
|
2007-07-22 01:02:10 +04:00
|
|
|
util.rename(self.join("undo.dirstate"), self.join("dirstate"))
|
2011-02-10 22:46:27 +03:00
|
|
|
if os.path.exists(self.join('undo.bookmarks')):
|
|
|
|
util.rename(self.join('undo.bookmarks'),
|
|
|
|
self.join('bookmarks'))
|
2008-02-09 21:39:01 +03:00
|
|
|
try:
|
|
|
|
branch = self.opener("undo.branch").read()
|
|
|
|
self.dirstate.setbranch(branch)
|
|
|
|
except IOError:
|
2011-04-19 15:25:19 +04:00
|
|
|
self.ui.warn(_("named branch could not be reset, "
|
|
|
|
"current branch is still: %s\n")
|
2010-11-25 00:56:32 +03:00
|
|
|
% self.dirstate.branch())
|
2007-07-22 01:02:10 +04:00
|
|
|
self.invalidate()
|
|
|
|
self.dirstate.invalidate()
|
2009-07-16 18:39:41 +04:00
|
|
|
self.destroyed()
|
2011-02-22 02:58:54 +03:00
|
|
|
parents = tuple([p.rev() for p in self.parents()])
|
|
|
|
if len(parents) > 1:
|
|
|
|
self.ui.status(_("working directory now based on "
|
|
|
|
"revisions %d and %d\n") % parents)
|
|
|
|
else:
|
|
|
|
self.ui.status(_("working directory now based on "
|
|
|
|
"revision %d\n") % parents)
|
2007-07-22 01:02:10 +04:00
|
|
|
else:
|
|
|
|
self.ui.warn(_("no rollback information available\n"))
|
2010-05-16 02:48:49 +04:00
|
|
|
return 1
|
2007-07-22 01:02:10 +04:00
|
|
|
finally:
|
2009-04-22 04:01:22 +04:00
|
|
|
release(lock, wlock)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-02-21 22:59:27 +03:00
|
|
|
def invalidatecaches(self):
|
2009-07-16 18:39:41 +04:00
|
|
|
self._tags = None
|
|
|
|
self._tagtypes = None
|
2006-02-22 09:26:29 +03:00
|
|
|
self.nodetagscache = None
|
2009-10-31 02:27:50 +03:00
|
|
|
self._branchcache = None # in UTF-8
|
2008-02-15 21:06:36 +03:00
|
|
|
self._branchcachetip = None
|
2006-02-22 09:26:29 +03:00
|
|
|
|
2010-02-21 22:59:27 +03:00
|
|
|
def invalidate(self):
|
2011-03-03 06:57:20 +03:00
|
|
|
for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
|
2010-02-21 22:59:27 +03:00
|
|
|
if a in self.__dict__:
|
|
|
|
delattr(self, a)
|
|
|
|
self.invalidatecaches()
|
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
|
2005-08-28 01:21:25 +04:00
|
|
|
try:
|
2006-10-24 02:12:20 +04:00
|
|
|
l = lock.lock(lockname, 0, releasefn, desc=desc)
|
2009-01-12 20:09:14 +03:00
|
|
|
except error.LockHeld, inst:
|
2005-11-12 02:34:13 +03:00
|
|
|
if not wait:
|
2006-03-28 21:01:07 +04:00
|
|
|
raise
|
2006-11-20 21:55:59 +03:00
|
|
|
self.ui.warn(_("waiting for lock on %s held by %r\n") %
|
|
|
|
(desc, inst.locker))
|
2006-03-28 21:01:07 +04:00
|
|
|
# default to 600 seconds timeout
|
2006-10-24 02:12:20 +04:00
|
|
|
l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
|
2006-03-28 21:01:07 +04:00
|
|
|
releasefn, desc=desc)
|
2006-02-20 00:39:09 +03:00
|
|
|
if acquirefn:
|
|
|
|
acquirefn()
|
|
|
|
return l
|
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def lock(self, wait=True):
|
2009-08-05 16:42:57 +04:00
|
|
|
'''Lock the repository store (.hg/store) and return a weak reference
|
|
|
|
to the lock. Use this before modifying the store (e.g. committing or
|
|
|
|
stripping). If you are opening a transaction, get a lock as well.)'''
|
2009-04-22 04:01:22 +04:00
|
|
|
l = self._lockref and self._lockref()
|
|
|
|
if l is not None and l.held:
|
|
|
|
l.lock()
|
|
|
|
return l
|
2007-07-22 01:02:10 +04:00
|
|
|
|
2011-01-28 15:38:34 +03:00
|
|
|
l = self._lock(self.sjoin("lock"), wait, self.store.write,
|
|
|
|
self.invalidate, _('repository %s') % self.origroot)
|
2007-07-22 01:02:10 +04:00
|
|
|
self._lockref = weakref.ref(l)
|
|
|
|
return l
|
2006-02-20 00:39:09 +03:00
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def wlock(self, wait=True):
|
2009-08-05 16:42:57 +04:00
|
|
|
'''Lock the non-store parts of the repository (everything under
|
|
|
|
.hg except .hg/store) and return a weak reference to the lock.
|
|
|
|
Use this before modifying files in .hg.'''
|
2009-04-22 04:01:22 +04:00
|
|
|
l = self._wlockref and self._wlockref()
|
|
|
|
if l is not None and l.held:
|
|
|
|
l.lock()
|
|
|
|
return l
|
2007-07-22 01:02:10 +04:00
|
|
|
|
|
|
|
l = self._lock(self.join("wlock"), wait, self.dirstate.write,
|
|
|
|
self.dirstate.invalidate, _('working directory of %s') %
|
|
|
|
self.origroot)
|
|
|
|
self._wlockref = weakref.ref(l)
|
|
|
|
return l
|
2005-11-12 02:34:13 +03:00
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
|
2006-10-09 04:57:45 +04:00
|
|
|
"""
|
2006-10-09 23:02:01 +04:00
|
|
|
commit an individual file as part of a larger transaction
|
2006-10-09 04:57:45 +04:00
|
|
|
"""
|
2006-10-09 23:02:01 +04:00
|
|
|
|
2009-04-28 20:14:49 +04:00
|
|
|
fname = fctx.path()
|
|
|
|
text = fctx.data()
|
|
|
|
flog = self.file(fname)
|
|
|
|
fparent1 = manifest1.get(fname, nullid)
|
2009-05-14 22:20:40 +04:00
|
|
|
fparent2 = fparent2o = manifest2.get(fname, nullid)
|
2006-02-18 02:23:53 +03:00
|
|
|
|
2006-10-09 04:57:45 +04:00
|
|
|
meta = {}
|
2009-04-28 20:14:49 +04:00
|
|
|
copy = fctx.renamed()
|
|
|
|
if copy and copy[0] != fname:
|
2007-01-31 00:09:08 +03:00
|
|
|
# Mark the new revision of this file as a copy of another
|
2007-06-06 22:22:52 +04:00
|
|
|
# file. This copy data will effectively act as a parent
|
|
|
|
# of this new revision. If this is a merge, the first
|
2007-01-31 00:09:08 +03:00
|
|
|
# parent will be the nullid (meaning "look up the copy data")
|
|
|
|
# and the second one will be the other parent. For example:
|
|
|
|
#
|
|
|
|
# 0 --- 1 --- 3 rev1 changes file foo
|
|
|
|
# \ / rev2 renames foo to bar and changes it
|
|
|
|
# \- 2 -/ rev3 should have bar with all changes and
|
|
|
|
# should record that bar descends from
|
|
|
|
# bar in rev2 and foo in rev1
|
|
|
|
#
|
|
|
|
# this allows this merge to succeed:
|
|
|
|
#
|
|
|
|
# 0 --- 1 --- 3 rev4 reverts the content change from rev2
|
|
|
|
# \ / merging rev3 and rev4 should use bar@rev2
|
|
|
|
# \- 2 --- 4 as the merge base
|
|
|
|
#
|
2008-08-11 03:01:03 +04:00
|
|
|
|
2009-04-28 20:14:49 +04:00
|
|
|
cfname = copy[0]
|
|
|
|
crev = manifest1.get(cfname)
|
|
|
|
newfparent = fparent2
|
2008-08-11 03:01:03 +04:00
|
|
|
|
|
|
|
if manifest2: # branch merge
|
2009-04-28 20:14:49 +04:00
|
|
|
if fparent2 == nullid or crev is None: # copied on remote side
|
|
|
|
if cfname in manifest2:
|
|
|
|
crev = manifest2[cfname]
|
|
|
|
newfparent = fparent1
|
2008-08-11 03:01:03 +04:00
|
|
|
|
2008-08-11 03:01:03 +04:00
|
|
|
# find source in nearest ancestor if we've lost track
|
2009-04-28 20:14:49 +04:00
|
|
|
if not crev:
|
2009-09-19 03:15:38 +04:00
|
|
|
self.ui.debug(" %s: searching for copy revision for %s\n" %
|
2009-04-28 20:14:49 +04:00
|
|
|
(fname, cfname))
|
2010-11-16 02:04:55 +03:00
|
|
|
for ancestor in self[None].ancestors():
|
2009-04-28 20:14:49 +04:00
|
|
|
if cfname in ancestor:
|
|
|
|
crev = ancestor[cfname].filenode()
|
2008-08-11 03:38:43 +04:00
|
|
|
break
|
2008-08-11 03:01:03 +04:00
|
|
|
|
2010-11-16 02:04:55 +03:00
|
|
|
if crev:
|
|
|
|
self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
|
|
|
|
meta["copy"] = cfname
|
|
|
|
meta["copyrev"] = hex(crev)
|
|
|
|
fparent1, fparent2 = nullid, newfparent
|
|
|
|
else:
|
|
|
|
self.ui.warn(_("warning: can't find ancestor for '%s' "
|
|
|
|
"copied from '%s'!\n") % (fname, cfname))
|
|
|
|
|
2009-04-28 20:14:49 +04:00
|
|
|
elif fparent2 != nullid:
|
2006-02-18 02:23:53 +03:00
|
|
|
# is one parent an ancestor of the other?
|
2009-04-28 20:14:49 +04:00
|
|
|
fparentancestor = flog.ancestor(fparent1, fparent2)
|
|
|
|
if fparentancestor == fparent1:
|
|
|
|
fparent1, fparent2 = fparent2, nullid
|
|
|
|
elif fparentancestor == fparent2:
|
|
|
|
fparent2 = nullid
|
2006-02-18 02:23:53 +03:00
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
# is the file changed?
|
|
|
|
if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
|
|
|
|
changelist.append(fname)
|
|
|
|
return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
|
2006-02-18 02:23:53 +03:00
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
# are just the flags changed during merge?
|
2010-02-05 18:02:27 +03:00
|
|
|
if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
|
2009-05-14 22:20:40 +04:00
|
|
|
changelist.append(fname)
|
|
|
|
|
|
|
|
return fparent1
|
2005-11-12 02:34:13 +03:00
|
|
|
|
2009-06-01 23:11:32 +04:00
|
|
|
def commit(self, text="", user=None, date=None, match=None, force=False,
|
|
|
|
editor=False, extra={}):
|
2009-05-19 13:39:12 +04:00
|
|
|
"""Add a new revision to current repository.
|
|
|
|
|
2009-06-01 23:11:32 +04:00
|
|
|
Revision information is gathered from the working directory,
|
|
|
|
match can be used to filter the committed files. If editor is
|
|
|
|
supplied, it is called to get a commit message.
|
2009-05-19 13:39:12 +04:00
|
|
|
"""
|
2009-06-02 06:51:00 +04:00
|
|
|
|
2009-06-05 01:21:03 +04:00
|
|
|
def fail(f, msg):
|
|
|
|
raise util.Abort('%s: %s' % (f, msg))
|
|
|
|
|
|
|
|
if not match:
|
2010-03-11 19:43:44 +03:00
|
|
|
match = matchmod.always(self.root, '')
|
2009-06-05 01:21:03 +04:00
|
|
|
|
|
|
|
if not force:
|
|
|
|
vdirs = []
|
|
|
|
match.dir = vdirs.append
|
|
|
|
match.bad = fail
|
|
|
|
|
2009-05-14 22:20:40 +04:00
|
|
|
wlock = self.wlock()
|
|
|
|
try:
|
2009-06-15 11:45:38 +04:00
|
|
|
wctx = self[None]
|
2010-04-21 03:34:12 +04:00
|
|
|
merge = len(wctx.parents()) > 1
|
2009-05-14 22:20:40 +04:00
|
|
|
|
2010-04-21 03:34:12 +04:00
|
|
|
if (not force and merge and match and
|
2009-05-19 02:36:24 +04:00
|
|
|
(match.files() or match.anypats())):
|
2009-05-14 22:20:40 +04:00
|
|
|
raise util.Abort(_('cannot partially commit a merge '
|
|
|
|
'(do not specify files or patterns)'))
|
|
|
|
|
2009-06-01 23:11:32 +04:00
|
|
|
changes = self.status(match=match, clean=force)
|
|
|
|
if force:
|
|
|
|
changes[0].extend(changes[6]) # mq may commit unchanged files
|
2008-06-19 00:52:25 +04:00
|
|
|
|
2009-06-15 11:45:38 +04:00
|
|
|
# check subrepos
|
|
|
|
subs = []
|
2010-02-22 03:19:59 +03:00
|
|
|
removedsubs = set()
|
|
|
|
for p in wctx.parents():
|
|
|
|
removedsubs.update(s for s in p.substate if match(s))
|
2009-06-15 11:45:38 +04:00
|
|
|
for s in wctx.substate:
|
2010-02-22 03:19:59 +03:00
|
|
|
removedsubs.discard(s)
|
2009-06-15 11:45:38 +04:00
|
|
|
if match(s) and wctx.sub(s).dirty():
|
|
|
|
subs.append(s)
|
2010-07-01 20:20:13 +04:00
|
|
|
if (subs or removedsubs):
|
|
|
|
if (not match('.hgsub') and
|
|
|
|
'.hgsub' in (wctx.modified() + wctx.added())):
|
2010-07-01 20:22:56 +04:00
|
|
|
raise util.Abort(_("can't commit subrepos without .hgsub"))
|
2010-07-01 20:20:13 +04:00
|
|
|
if '.hgsubstate' not in changes[0]:
|
|
|
|
changes[0].insert(0, '.hgsubstate')
|
2009-06-15 11:45:38 +04:00
|
|
|
|
2011-02-16 00:25:48 +03:00
|
|
|
if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
|
|
|
|
changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
|
|
|
|
if changedsubs:
|
|
|
|
raise util.Abort(_("uncommitted changes in subrepo %s")
|
|
|
|
% changedsubs[0])
|
|
|
|
|
2009-06-02 06:51:00 +04:00
|
|
|
# make sure all explicit patterns are matched
|
|
|
|
if not force and match.files():
|
2009-06-02 07:13:08 +04:00
|
|
|
matched = set(changes[0] + changes[1] + changes[2])
|
2009-06-02 06:51:00 +04:00
|
|
|
|
|
|
|
for f in match.files():
|
2009-06-15 11:45:38 +04:00
|
|
|
if f == '.' or f in matched or f in wctx.substate:
|
2009-06-02 06:51:00 +04:00
|
|
|
continue
|
|
|
|
if f in changes[3]: # missing
|
|
|
|
fail(f, _('file not found!'))
|
|
|
|
if f in vdirs: # visited directory
|
|
|
|
d = f + '/'
|
2009-06-02 07:13:08 +04:00
|
|
|
for mf in matched:
|
|
|
|
if mf.startswith(d):
|
|
|
|
break
|
|
|
|
else:
|
2009-06-02 06:51:00 +04:00
|
|
|
fail(f, _("no match under directory!"))
|
|
|
|
elif f not in self.dirstate:
|
|
|
|
fail(f, _("file not tracked!"))
|
|
|
|
|
2010-04-21 03:34:12 +04:00
|
|
|
if (not force and not extra.get("close") and not merge
|
2009-05-19 02:36:24 +04:00
|
|
|
and not (changes[0] or changes[1] or changes[2])
|
2010-04-21 03:34:12 +04:00
|
|
|
and wctx.branch() == wctx.p1().branch()):
|
2009-05-14 22:20:40 +04:00
|
|
|
return None
|
|
|
|
|
2010-03-11 19:43:44 +03:00
|
|
|
ms = mergemod.mergestate(self)
|
2008-08-14 05:18:40 +04:00
|
|
|
for f in changes[0]:
|
|
|
|
if f in ms and ms[f] == 'u':
|
|
|
|
raise util.Abort(_("unresolved merge conflicts "
|
2011-03-06 17:21:50 +03:00
|
|
|
"(see hg help resolve)"))
|
2009-05-19 02:36:24 +04:00
|
|
|
|
2010-04-21 03:18:31 +04:00
|
|
|
cctx = context.workingctx(self, text, user, date, extra, changes)
|
2009-05-19 02:36:24 +04:00
|
|
|
if editor:
|
2009-07-01 10:05:24 +04:00
|
|
|
cctx._text = editor(self, cctx, subs)
|
2009-11-25 05:08:40 +03:00
|
|
|
edited = (text != cctx._text)
|
2009-06-15 11:45:38 +04:00
|
|
|
|
|
|
|
# commit subs
|
2010-02-22 03:19:59 +03:00
|
|
|
if subs or removedsubs:
|
2009-06-15 11:45:38 +04:00
|
|
|
state = wctx.substate.copy()
|
2010-08-31 18:36:31 +04:00
|
|
|
for s in sorted(subs):
|
2010-05-02 01:05:22 +04:00
|
|
|
sub = wctx.sub(s)
|
|
|
|
self.ui.status(_('committing subrepository %s\n') %
|
2010-10-19 05:55:28 +04:00
|
|
|
subrepo.subrelpath(sub))
|
2010-05-02 01:05:22 +04:00
|
|
|
sr = sub.commit(cctx._text, user, date)
|
2009-06-15 11:45:38 +04:00
|
|
|
state[s] = (state[s][0], sr)
|
2010-12-17 15:38:15 +03:00
|
|
|
subrepo.writestate(self, state)
|
2009-06-15 11:45:38 +04:00
|
|
|
|
2009-11-25 05:08:39 +03:00
|
|
|
# Save commit message in case this transaction gets rolled back
|
2009-11-27 19:50:52 +03:00
|
|
|
# (e.g. by a pretxncommit hook). Leave the content alone on
|
|
|
|
# the assumption that the user will use the same editor again.
|
|
|
|
msgfile = self.opener('last-message.txt', 'wb')
|
|
|
|
msgfile.write(cctx._text)
|
2009-11-25 05:08:39 +03:00
|
|
|
msgfile.close()
|
|
|
|
|
2010-04-21 03:34:12 +04:00
|
|
|
p1, p2 = self.dirstate.parents()
|
|
|
|
hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
|
2009-11-25 05:08:40 +03:00
|
|
|
try:
|
2010-02-17 17:43:21 +03:00
|
|
|
self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
|
2009-11-25 05:08:40 +03:00
|
|
|
ret = self.commitctx(cctx, True)
|
|
|
|
except:
|
|
|
|
if edited:
|
|
|
|
msgfn = self.pathto(msgfile.name[len(self.root)+1:])
|
|
|
|
self.ui.write(
|
|
|
|
_('note: commit message saved in %s\n') % msgfn)
|
|
|
|
raise
|
2009-05-14 22:24:39 +04:00
|
|
|
|
2011-02-10 22:46:27 +03:00
|
|
|
# update bookmarks, dirstate and mergestate
|
2011-03-15 01:50:28 +03:00
|
|
|
bookmarks.update(self, p1, ret)
|
2009-05-14 22:24:39 +04:00
|
|
|
for f in changes[0] + changes[1]:
|
|
|
|
self.dirstate.normal(f)
|
|
|
|
for f in changes[2]:
|
|
|
|
self.dirstate.forget(f)
|
|
|
|
self.dirstate.setparents(ret)
|
2009-05-19 02:36:24 +04:00
|
|
|
ms.reset()
|
2008-06-19 00:52:26 +04:00
|
|
|
finally:
|
2009-05-14 22:20:40 +04:00
|
|
|
wlock.release()
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-02-17 17:43:21 +03:00
|
|
|
self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
|
|
|
|
return ret
|
|
|
|
|
2009-05-19 02:36:24 +04:00
|
|
|
def commitctx(self, ctx, error=False):
|
2008-10-11 15:07:29 +04:00
|
|
|
"""Add a new revision to current repository.
|
2009-05-14 22:21:20 +04:00
|
|
|
Revision information is passed via the context argument.
|
2008-10-11 15:07:29 +04:00
|
|
|
"""
|
2008-06-19 02:14:23 +04:00
|
|
|
|
2009-05-14 22:21:20 +04:00
|
|
|
tr = lock = None
|
2010-11-01 19:26:08 +03:00
|
|
|
removed = list(ctx.removed())
|
2009-05-14 22:24:26 +04:00
|
|
|
p1, p2 = ctx.p1(), ctx.p2()
|
|
|
|
m1 = p1.manifest().copy()
|
|
|
|
m2 = p2.manifest()
|
2009-05-14 22:21:20 +04:00
|
|
|
user = ctx.user()
|
2009-05-14 22:21:20 +04:00
|
|
|
|
|
|
|
lock = self.lock()
|
|
|
|
try:
|
2010-04-10 02:23:35 +04:00
|
|
|
tr = self.transaction("commit")
|
2007-07-22 23:53:57 +04:00
|
|
|
trp = weakref.proxy(tr)
|
2007-07-22 01:02:10 +04:00
|
|
|
|
|
|
|
# check in files
|
|
|
|
new = {}
|
2008-06-19 00:52:26 +04:00
|
|
|
changed = []
|
2008-06-26 23:35:50 +04:00
|
|
|
linkrev = len(self)
|
2009-05-14 22:21:20 +04:00
|
|
|
for f in sorted(ctx.modified() + ctx.added()):
|
2007-07-22 01:02:10 +04:00
|
|
|
self.ui.note(f + "\n")
|
|
|
|
try:
|
2009-05-14 22:20:40 +04:00
|
|
|
fctx = ctx[f]
|
|
|
|
new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
|
|
|
|
changed)
|
|
|
|
m1.set(f, fctx.flags())
|
2010-02-12 00:15:42 +03:00
|
|
|
except OSError, inst:
|
|
|
|
self.ui.warn(_("trouble committing %s!\n") % f)
|
|
|
|
raise
|
|
|
|
except IOError, inst:
|
|
|
|
errcode = getattr(inst, 'errno', errno.ENOENT)
|
|
|
|
if error or errcode and errcode != errno.ENOENT:
|
2007-07-22 01:02:10 +04:00
|
|
|
self.ui.warn(_("trouble committing %s!\n") % f)
|
|
|
|
raise
|
|
|
|
else:
|
2009-05-19 02:36:24 +04:00
|
|
|
removed.append(f)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
# update manifest
|
|
|
|
m1.update(new)
|
2009-05-19 02:36:24 +04:00
|
|
|
removed = [f for f in sorted(removed) if f in m1 or f in m2]
|
|
|
|
drop = [f for f in removed if f in m1]
|
|
|
|
for f in drop:
|
|
|
|
del m1[f]
|
2009-05-14 22:24:26 +04:00
|
|
|
mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
|
2009-05-19 02:36:24 +04:00
|
|
|
p2.manifestnode(), (new, drop))
|
2007-07-22 01:02:10 +04:00
|
|
|
|
2009-05-19 02:36:24 +04:00
|
|
|
# update changelog
|
2009-02-17 04:35:07 +03:00
|
|
|
self.changelog.delayupdate()
|
2009-05-19 02:36:24 +04:00
|
|
|
n = self.changelog.add(mn, changed + removed, ctx.description(),
|
|
|
|
trp, p1.node(), p2.node(),
|
2009-05-14 22:21:20 +04:00
|
|
|
user, ctx.date(), ctx.extra().copy())
|
2009-02-17 04:35:07 +03:00
|
|
|
p = lambda: self.changelog.writepending() and self.root or ""
|
2010-02-17 17:43:21 +03:00
|
|
|
xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
|
2007-07-22 01:02:10 +04:00
|
|
|
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
|
2009-02-17 04:35:07 +03:00
|
|
|
parent2=xp2, pending=p)
|
|
|
|
self.changelog.finalize(trp)
|
2007-07-22 01:02:10 +04:00
|
|
|
tr.close()
|
|
|
|
|
2009-10-31 02:27:50 +03:00
|
|
|
if self._branchcache:
|
2010-08-29 01:57:39 +04:00
|
|
|
self.updatebranchcache()
|
2007-07-22 01:02:10 +04:00
|
|
|
return n
|
|
|
|
finally:
|
2010-05-27 19:47:40 +04:00
|
|
|
if tr:
|
|
|
|
tr.release()
|
2009-05-14 22:20:40 +04:00
|
|
|
lock.release()
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2009-07-16 18:39:41 +04:00
|
|
|
def destroyed(self):
|
|
|
|
'''Inform the repository that nodes have been destroyed.
|
|
|
|
Intended for use by strip and rollback, so there's a common
|
|
|
|
place for anything that has to be done after destroying history.'''
|
|
|
|
# XXX it might be nice if we could take the list of destroyed
|
|
|
|
# nodes, but I don't see an easy way for rollback() to do that
|
2009-07-16 18:39:42 +04:00
|
|
|
|
|
|
|
# Ensure the persistent tag cache is updated. Doing it now
|
|
|
|
# means that the tag cache only has to worry about destroyed
|
|
|
|
# heads immediately after a strip/rollback. That in turn
|
|
|
|
# guarantees that "cachetip == currenttip" (comparing both rev
|
|
|
|
# and node) always means no nodes have been added or destroyed.
|
|
|
|
|
|
|
|
# XXX this is suboptimal when qrefresh'ing: we strip the current
|
|
|
|
# head, refresh the tag cache, then immediately add a new head.
|
|
|
|
# But I think doing it this way is necessary for the "instant
|
|
|
|
# tag cache retrieval" case to work.
|
2010-02-21 22:59:27 +03:00
|
|
|
self.invalidatecaches()
|
2009-07-16 18:39:41 +04:00
|
|
|
|
2008-05-12 20:37:08 +04:00
|
|
|
def walk(self, match, node=None):
|
2006-10-27 20:24:10 +04:00
|
|
|
'''
|
|
|
|
walk recursively through the directory tree or a given
|
|
|
|
changeset, finding all files matched by the match
|
|
|
|
function
|
|
|
|
'''
|
2008-06-28 04:25:48 +04:00
|
|
|
return self[node].walk(match)
|
2006-10-27 20:24:10 +04:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
def status(self, node1='.', node2=None, match=None,
|
2010-09-03 14:58:51 +04:00
|
|
|
ignored=False, clean=False, unknown=False,
|
|
|
|
listsubrepos=False):
|
2006-07-21 03:21:07 +04:00
|
|
|
"""return status of files between two nodes or node and working directory
|
2006-01-12 13:32:07 +03:00
|
|
|
|
|
|
|
If node1 is None, use the first dirstate parent instead.
|
|
|
|
If node2 is None, compare node1 with working directory.
|
|
|
|
"""
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
def mfmatches(ctx):
|
|
|
|
mf = ctx.manifest().copy()
|
2005-08-28 01:21:25 +04:00
|
|
|
for fn in mf.keys():
|
|
|
|
if not match(fn):
|
|
|
|
del mf[fn]
|
|
|
|
return mf
|
|
|
|
|
2008-10-13 00:21:08 +04:00
|
|
|
if isinstance(node1, context.changectx):
|
|
|
|
ctx1 = node1
|
|
|
|
else:
|
|
|
|
ctx1 = self[node1]
|
|
|
|
if isinstance(node2, context.changectx):
|
|
|
|
ctx2 = node2
|
|
|
|
else:
|
|
|
|
ctx2 = self[node2]
|
|
|
|
|
2008-11-27 18:07:17 +03:00
|
|
|
working = ctx2.rev() is None
|
2008-07-12 03:46:02 +04:00
|
|
|
parentworking = working and ctx1 == self['.']
|
2010-03-11 19:43:44 +03:00
|
|
|
match = match or matchmod.always(self.root, self.getcwd())
|
2008-06-26 23:35:50 +04:00
|
|
|
listignored, listclean, listunknown = ignored, clean, unknown
|
2006-07-21 03:21:07 +04:00
|
|
|
|
2008-10-13 00:21:08 +04:00
|
|
|
# load earliest manifest first for caching reasons
|
|
|
|
if not working and ctx2.rev() < ctx1.rev():
|
|
|
|
ctx2.manifest()
|
|
|
|
|
2008-10-09 01:22:10 +04:00
|
|
|
if not parentworking:
|
|
|
|
def bad(f, msg):
|
|
|
|
if f not in ctx1:
|
|
|
|
self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
|
|
|
|
match.bad = bad
|
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
if working: # we need to scan the working dir
|
2010-05-28 23:41:11 +04:00
|
|
|
subrepos = []
|
|
|
|
if '.hgsub' in self.dirstate:
|
|
|
|
subrepos = ctx1.substate.keys()
|
2010-01-01 02:19:30 +03:00
|
|
|
s = self.dirstate.status(match, subrepos, listignored,
|
|
|
|
listclean, listunknown)
|
2008-07-12 03:46:02 +04:00
|
|
|
cmp, modified, added, removed, deleted, unknown, ignored, clean = s
|
|
|
|
|
|
|
|
# check for any possibly clean files
|
|
|
|
if parentworking and cmp:
|
|
|
|
fixup = []
|
|
|
|
# do a full compare of any files that might have changed
|
2009-05-14 22:20:40 +04:00
|
|
|
for f in sorted(cmp):
|
2008-07-12 03:46:02 +04:00
|
|
|
if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
|
2010-07-27 18:40:46 +04:00
|
|
|
or ctx1[f].cmp(ctx2[f])):
|
2008-07-12 03:46:02 +04:00
|
|
|
modified.append(f)
|
|
|
|
else:
|
|
|
|
fixup.append(f)
|
2006-02-25 15:44:40 +03:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
# update dirstate for files that are actually clean
|
|
|
|
if fixup:
|
2010-07-25 05:05:38 +04:00
|
|
|
if listclean:
|
|
|
|
clean += fixup
|
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
try:
|
2009-05-28 10:29:40 +04:00
|
|
|
# updating the dirstate is optional
|
|
|
|
# so we don't wait on the lock
|
2009-05-27 16:16:13 +04:00
|
|
|
wlock = self.wlock(False)
|
2007-07-22 01:02:10 +04:00
|
|
|
try:
|
2008-07-12 03:46:02 +04:00
|
|
|
for f in fixup:
|
|
|
|
self.dirstate.normal(f)
|
2009-05-27 16:16:13 +04:00
|
|
|
finally:
|
|
|
|
wlock.release()
|
|
|
|
except error.LockError:
|
|
|
|
pass
|
2008-07-12 03:46:02 +04:00
|
|
|
|
|
|
|
if not parentworking:
|
|
|
|
mf1 = mfmatches(ctx1)
|
|
|
|
if working:
|
2006-01-12 13:32:07 +03:00
|
|
|
# we are comparing working dir against non-parent
|
|
|
|
# generate a pseudo-manifest for the working dir
|
2008-07-12 03:46:02 +04:00
|
|
|
mf2 = mfmatches(self['.'])
|
2008-07-12 03:46:02 +04:00
|
|
|
for f in cmp + modified + added:
|
2008-07-12 03:46:02 +04:00
|
|
|
mf2[f] = None
|
2008-07-22 22:00:22 +04:00
|
|
|
mf2.set(f, ctx2.flags(f))
|
2006-01-12 14:22:28 +03:00
|
|
|
for f in removed:
|
2006-01-12 13:32:07 +03:00
|
|
|
if f in mf2:
|
|
|
|
del mf2[f]
|
2008-07-12 03:46:02 +04:00
|
|
|
else:
|
|
|
|
# we are comparing two revisions
|
|
|
|
deleted, unknown, ignored = [], [], []
|
|
|
|
mf2 = mfmatches(ctx2)
|
2007-04-24 22:05:39 +04:00
|
|
|
|
2006-07-21 03:21:07 +04:00
|
|
|
modified, added, clean = [], [], []
|
2008-07-22 22:03:19 +04:00
|
|
|
for fn in mf2:
|
2008-01-20 16:39:25 +03:00
|
|
|
if fn in mf1:
|
2011-04-11 22:44:22 +04:00
|
|
|
if (fn not in deleted and
|
|
|
|
(mf1.flags(fn) != mf2.flags(fn) or
|
|
|
|
(mf1[fn] != mf2[fn] and
|
|
|
|
(mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
|
2006-01-12 13:32:07 +03:00
|
|
|
modified.append(fn)
|
2008-06-26 23:35:50 +04:00
|
|
|
elif listclean:
|
2006-07-21 03:21:07 +04:00
|
|
|
clean.append(fn)
|
2006-01-12 13:32:07 +03:00
|
|
|
del mf1[fn]
|
2011-04-11 22:44:22 +04:00
|
|
|
elif fn not in deleted:
|
2006-01-12 13:32:07 +03:00
|
|
|
added.append(fn)
|
2006-01-12 14:22:28 +03:00
|
|
|
removed = mf1.keys()
|
|
|
|
|
2008-07-22 22:03:19 +04:00
|
|
|
r = modified, added, removed, deleted, unknown, ignored, clean
|
2010-09-03 14:58:51 +04:00
|
|
|
|
|
|
|
if listsubrepos:
|
2010-09-07 18:34:07 +04:00
|
|
|
for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
|
2010-09-03 14:58:51 +04:00
|
|
|
if working:
|
|
|
|
rev2 = None
|
|
|
|
else:
|
|
|
|
rev2 = ctx2.substate[subpath][1]
|
|
|
|
try:
|
|
|
|
submatch = matchmod.narrowmatcher(subpath, match)
|
|
|
|
s = sub.status(rev2, match=submatch, ignored=listignored,
|
|
|
|
clean=listclean, unknown=listunknown,
|
|
|
|
listsubrepos=True)
|
|
|
|
for rfiles, sfiles in zip(r, s):
|
|
|
|
rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
|
|
|
|
except error.LookupError:
|
|
|
|
self.ui.status(_("skipping missing subrepository: %s\n")
|
|
|
|
% subpath)
|
|
|
|
|
2011-02-03 12:31:17 +03:00
|
|
|
for l in r:
|
|
|
|
l.sort()
|
2008-07-22 22:03:19 +04:00
|
|
|
return r
|
2006-07-21 03:21:07 +04:00
|
|
|
|
2009-06-11 03:11:49 +04:00
|
|
|
def heads(self, start=None):
|
2005-11-16 14:08:25 +03:00
|
|
|
heads = self.changelog.heads(start)
|
|
|
|
# sort the output in rev descending order
|
2010-12-03 13:30:45 +03:00
|
|
|
return sorted(heads, key=self.changelog.rev, reverse=True)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2009-06-03 15:42:55 +04:00
|
|
|
def branchheads(self, branch=None, start=None, closed=False):
|
2009-09-23 17:51:36 +04:00
|
|
|
'''return a (possibly filtered) list of heads for the given branch
|
|
|
|
|
|
|
|
Heads are returned in topological order, from newest to oldest.
|
|
|
|
If branch is None, use the dirstate branch.
|
|
|
|
If start is not None, return only heads reachable from start.
|
|
|
|
If closed is True, return heads that are marked as closed as well.
|
|
|
|
'''
|
2008-06-26 23:35:46 +04:00
|
|
|
if branch is None:
|
|
|
|
branch = self[None].branch()
|
2009-10-31 02:31:08 +03:00
|
|
|
branches = self.branchmap()
|
2007-06-19 19:37:43 +04:00
|
|
|
if branch not in branches:
|
|
|
|
return []
|
2009-01-15 05:47:38 +03:00
|
|
|
# the cache returns heads ordered lowest to highest
|
2009-09-23 17:51:36 +04:00
|
|
|
bheads = list(reversed(branches[branch]))
|
2007-06-19 19:37:43 +04:00
|
|
|
if start is not None:
|
2009-01-15 05:47:38 +03:00
|
|
|
# filter out the heads that cannot be reached from startrev
|
2009-09-23 17:51:36 +04:00
|
|
|
fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
|
|
|
|
bheads = [h for h in bheads if h in fbheads]
|
2009-01-15 05:47:38 +03:00
|
|
|
if not closed:
|
2009-01-19 14:59:56 +03:00
|
|
|
bheads = [h for h in bheads if
|
2009-01-15 05:47:38 +03:00
|
|
|
('close' not in self.changelog.read(h)[5])]
|
2009-01-15 05:47:38 +03:00
|
|
|
return bheads
|
2007-06-19 19:37:43 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def branches(self, nodes):
|
2006-01-12 09:57:58 +03:00
|
|
|
if not nodes:
|
|
|
|
nodes = [self.changelog.tip()]
|
2005-08-28 01:21:25 +04:00
|
|
|
b = []
|
|
|
|
for n in nodes:
|
|
|
|
t = n
|
2006-05-24 03:01:39 +04:00
|
|
|
while 1:
|
2005-08-28 01:21:25 +04:00
|
|
|
p = self.changelog.parents(n)
|
|
|
|
if p[1] != nullid or p[0] == nullid:
|
|
|
|
b.append((t, n, p[0], p[1]))
|
|
|
|
break
|
|
|
|
n = p[0]
|
|
|
|
return b
|
|
|
|
|
|
|
|
def between(self, pairs):
|
|
|
|
r = []
|
|
|
|
|
|
|
|
for top, bottom in pairs:
|
|
|
|
n, l, i = top, [], 0
|
|
|
|
f = 1
|
|
|
|
|
2009-01-25 19:16:45 +03:00
|
|
|
while n != bottom and n != nullid:
|
2005-08-28 01:21:25 +04:00
|
|
|
p = self.changelog.parents(n)[0]
|
|
|
|
if i == f:
|
|
|
|
l.append(n)
|
|
|
|
f = f * 2
|
|
|
|
n = p
|
|
|
|
i += 1
|
|
|
|
|
|
|
|
r.append(l)
|
|
|
|
|
|
|
|
return r
|
|
|
|
|
2007-07-22 01:02:10 +04:00
|
|
|
def pull(self, remote, heads=None, force=False):
    """Pull changes from a remote repository into this one.

    heads: optional list of remote heads to limit the pull to; None
    means pull everything.  force: forwarded to discovery, allowing a
    pull from an unrelated repository.  Returns 0 when nothing was
    fetched, otherwise the integer result of addchangegroup().
    """
    lock = self.lock()
    try:
        # 'getbundle' lets us send the common set directly instead of
        # a list of roots to fetch
        usecommon = remote.capable('getbundle')
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force, commononly=usecommon)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            # choose a transfer method: getbundle if available, else a
            # full changegroup, else changegroupsubset for partial pulls
            if usecommon:
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            # addchangegroup takes ownership of the lock and releases it;
            # NOTE(review): the finally below releases again -- presumably
            # lock.release() tolerates this, confirm against lock module
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    return result
|
|
|
|
|
2011-02-01 00:16:33 +03:00
|
|
|
def checkpush(self, force, revs):
    """Hook point for pre-push validation.

    Does nothing by default.  Extensions may override this to run
    additional checks before pushing, and overridden push commands
    may call it explicitly.
    """
    pass
|
|
|
|
|
2010-05-21 17:22:29 +04:00
|
|
|
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    # give extensions a chance to veto before doing any work
    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        # only lock the remote when we will write via addchangegroup
        lock = remote.lock()
    try:
        # prepush computes the changegroup to send (or None when there
        # is nothing to push) plus the remote head count used as the
        # default return value
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # after the changesets are transferred, fast-forward any remote
    # bookmark that our local bookmark of the same name supersedes
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            # nr: remote node (hex string), nl: local node (hex string)
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                # only advance the remote bookmark along descendants
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
|
|
|
|
|
2007-12-30 21:46:13 +03:00
|
|
|
def changegroupinfo(self, nodes, source):
    """Report how many changesets are being bundled.

    The count is shown in verbose mode or when bundling to a file;
    with --debug, every node is listed as well.
    """
    wantcount = self.ui.verbose or source == 'bundle'
    if wantcount:
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug("list of changesets:\n")
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
|
|
|
|
|
2011-03-20 03:16:57 +03:00
|
|
|
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup of all nodes that are descendants of any
    of the bases and ancestors of any of the heads.

    Returns a chunkbuffer object whose read() method yields successive
    changegroup chunks.  Working out which manifest and file nodes
    must accompany the changesets is non-trivial and is handled by
    _changegroupsubset.
    """
    changelog = self.changelog
    csets, bases, heads = changelog.nodesbetween(bases or [nullid], heads)
    # everything at or below the bases is assumed to be already known
    baserevs = [changelog.rev(n) for n in bases]
    common = set(changelog.ancestors(*baserevs))
    return self._changegroupsubset(common, csets, heads, source)
|
|
|
|
|
|
|
|
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but bounded by a common set: bundle the
    ancestors of heads minus the ancestors of common.

    heads defaults to the local heads; common defaults to [nullid].
    Nodes in common that are unknown locally are dropped first, since
    the current discovery protocol may hand us nodes we do not have.
    """
    changelog = self.changelog
    if not common:
        knowncommon = [nullid]
    else:
        nodemap = changelog.nodemap
        knowncommon = [node for node in common if node in nodemap]
    wantedheads = heads or changelog.heads()
    base, missing = changelog.findcommonmissing(knowncommon, wantedheads)
    return self._changegroupsubset(base, missing, wantedheads, source)
|
|
|
|
|
|
|
|
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup of csets, assuming the receiver already has
    every revision in commonrevs.

    commonrevs: set of changelog revision numbers known to the
    receiver.  csets: changelog nodes to send.  heads: requested
    heads, used to decide whether the fast path applies.  Returns an
    unbundle10 object that streams the changegroup.
    """

    cl = self.changelog
    mf = self.manifest
    mfs = {} # needed manifests
    fnodes = {} # needed file nodes
    changedfiles = set()
    # single-element lists act as mutable cells shared with the
    # lookup() closure below (no 'nonlocal' in this Python)
    fstate = ['', {}]   # [current filename, {filenode: linked clnode}]
    count = [0]         # progress counter

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        # sending everything up to our own heads: no pruning needed
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        for n in missing:
            if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                yield n

    def lookup(revlog, x):
        # callback invoked by the bundler for each node: returns the
        # changelog node to link the entry to, and opportunistically
        # records which manifests/filenodes still need sending
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[x]
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                unit=_('files'), total=len(changedfiles))
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)

    def gengroup():
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        for chunk in cl.group(csets, bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        count[0] = 0
        for chunk in mf.group(prune(mf, mfs), bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # manifests are no longer needed once their filenodes are known
        mfs.clear()

        # Go through all our files in order sorted by name.
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})
            first = True

            for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                          bundler):
                if first:
                    # an immediate close chunk means the group is
                    # empty: emit no header for this file at all
                    if chunk == bundler.close():
                        break
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    first = False
                yield chunk
        # Signal that no more groups are left.
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if csets:
        self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-02-17 19:26:21 +03:00
|
|
|
def changegroup(self, basenodes, source):
    """Bundle everything between basenodes and the current heads.

    Delegates to changegroupsubset() so the head set is pinned at
    call time, avoiding a race (issue1320).
    """
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
|
|
|
|
|
2009-11-07 14:28:30 +03:00
|
|
|
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    cl = self.changelog
    mf = self.manifest
    mfs = {}                # changelog node for each manifest node seen
    changedfiles = set()
    # single-element lists act as mutable cells shared with lookup()
    fstate = ['']           # [current filename] for progress display
    count = [0]             # progress counter

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    revset = set([cl.rev(n) for n in nodes])

    # yield every node of a revlog whose linkrev falls in the outgoing
    # changeset set
    def gennodelst(log):
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookup(revlog, x):
        # bundler callback: return the changelog node each entry links
        # to, collecting changed files along the way
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
            return x
        elif revlog == mf:
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                total=len(changedfiles), unit=_('files'))
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files

        for chunk in cl.group(nodes, bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for chunk in mf.group(gennodelst(mf), bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            first = True
            for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                if first:
                    # an immediate close chunk means an empty group:
                    # skip the file header entirely
                    if chunk == bundler.close():
                        break
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    first = False
                yield chunk
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-06-25 22:47:28 +04:00
|
|
|
def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.
    If lock is not None, the function takes ownership of the lock
    and releases it after the changegroup is added.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # addgroup callback: new changesets link to themselves
    def csmap(x):
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    # addgroup callback for manifests/files: map a changelog node to
    # its revision number
    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
    try:
        # a weakref proxy keeps the transaction collectable if an
        # exception escapes while revlogs still reference it
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # small callable passed to addgroup to report per-chunk progress
        class prog(object):
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        if (cl.addgroup(source, csmap, trp) is None
            and not emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        # count distinct files touched, for the file-progress total
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        pr.total = efiles
        source.callback = None

        # one (filename, group) pair per touched file until an empty
        # chunk signals the end of the stream
        while 1:
            f = source.chunk()
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if fl.addgroup(source, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # tick off file nodes that the manifests promised
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # anything still listed in needfiles must already exist locally,
        # otherwise the incoming data is incomplete
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        # compute the head-count delta; closed heads don't count
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and 'close' in self[h].extra():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))

        if changesets > 0:
            # let pretxnchangegroup hooks see the pending changelog
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        tr.release()
        if lock:
            lock.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.updatebranchcache()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
|
2006-02-28 21:24:54 +03:00
|
|
|
|
clone: only use stream when we understand the revlog format
This patch fixes issues with stream cloning in the presence of parentdelta,
lwcopy and similar additions that change the interpretation of the revlog
format, or the format itself.
Currently, the stream capability is sent like this:
stream=<version of changelog>
But the client doesn't actually check the version number; also, it only checks
the changelog and it doesn't capture the interpretation-changes and
flag-changes in parentdelta and lwcopy.
This patch removes the 'stream' capability whenever we use a non-basic revlog
format, to prevent old clients from receiving incorrect data. In those cases,
a new capability called 'streamreqs' is added instead. Instead of a revlog
version, it comes with a list of revlog-format relevant requirements, which
are a subset of the repository requirements, excluding things that are not
relevant for stream.
New clients use this to determine whether or not they can stream. Old clients
only look for the 'stream' capability, as always. New servers will still send
this when serving old repositories.
2010-09-15 13:06:22 +04:00
|
|
|
def stream_in(self, remote, requirements):
    """Clone by copying raw store files streamed from the remote.

    Protocol: the remote first sends a status line (0 = ok, 1 =
    forbidden, 2 = remote locking failed), then a "<files> <bytes>"
    summary line, then for each file a "<name>\\0<size>" header
    followed by <size> bytes of raw data.  On success the local
    requirements are updated to match the streamed-in format and the
    repo caches are invalidated.  Returns local head count + 1.
    """
    lock = self.lock()
    try:
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        # guard against a zero/negative clock delta in the rate display
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        # drop cached state so the freshly written store is re-read
        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
|
2006-08-08 01:27:09 +04:00
|
|
|
|
2006-07-15 01:51:36 +04:00
|
|
|
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull); None (the
           default) means all revisions
    stream: use streaming clone if possible'''
    # the default used to be a mutable list literal (heads=[]); use
    # None and normalize to keep the same downstream behavior without
    # the shared-default pitfall
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    return self.pull(remote, heads)
|
|
|
|
|
2010-06-17 01:04:46 +04:00
|
|
|
def pushkey(self, namespace, key, old, new):
    '''Attempt to move *key* in *namespace* from value *old* to *new*.

    All semantics live in the pushkey protocol module; this method
    only forwards the request and reports its success indicator.
    '''
    result = pushkey.push(self, namespace, key, old, new)
    return result
|
|
|
|
|
|
|
|
def listkeys(self, namespace):
    '''Return the key/value pairs published under *namespace*.

    Enumeration is handled entirely by the pushkey protocol module.
    '''
    keys = pushkey.list(self, namespace)
    return keys
|
|
|
|
|
2011-03-22 09:38:32 +03:00
|
|
|
def debugwireargs(self, one, two, three=None, four=None):
    '''used to test argument passing over the wire'''
    # echo every argument back, space separated, so the test harness
    # can verify positional and keyword transport both survived
    values = (one, two, three, four)
    return "%s %s %s %s" % values
|
|
|
|
|
2006-02-28 21:24:54 +03:00
|
|
|
# used to avoid circular references so destructors work
|
2006-12-05 13:28:21 +03:00
|
|
|
def aftertrans(files):
    '''Return a callback that performs the renames queued in *files*.

    A plain function (rather than a bound method) is used to avoid
    circular references, so destructors work.  The (src, dest) pairs
    are copied eagerly; the rename work happens only when the returned
    callback runs.
    '''
    pending = [tuple(entry) for entry in files]

    def renamer():
        for source, target in pending:
            util.rename(source, target)
    return renamer
|
|
|
|
|
2006-07-31 18:11:12 +04:00
|
|
|
def instance(ui, path, create):
    '''Instantiate a local repository object for *path*.

    *path* may be a plain filesystem path or a file:// style URL;
    it is normalized to a local path before the repository is opened.
    '''
    repopath = urlmod.localpath(path)
    return localrepository(ui, repopath, create)
|
2006-10-01 21:26:33 +04:00
|
|
|
|
2006-07-31 18:11:12 +04:00
|
|
|
def islocal(path):
    '''Report whether this repository type is local.  Always True here,
    regardless of *path*: this module only ever handles on-disk repos.
    '''
    return True
|