2006-06-29 02:07:46 +04:00
|
|
|
# context.py - changeset and file context objects for mercurial
|
|
|
|
#
|
2007-06-19 10:51:34 +04:00
|
|
|
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
|
2006-06-29 02:07:46 +04:00
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-06-29 02:07:46 +04:00
|
|
|
|
2012-04-08 21:38:07 +04:00
|
|
|
from node import nullid, nullrev, short, hex, bin
|
2006-12-15 05:25:19 +03:00
|
|
|
from i18n import _
|
2014-04-08 01:17:48 +04:00
|
|
|
import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
|
2011-06-19 01:52:51 +04:00
|
|
|
import match as matchmod
|
2010-06-07 22:03:32 +04:00
|
|
|
import os, errno, stat
|
2012-08-28 22:52:04 +04:00
|
|
|
import obsolete as obsmod
|
2013-01-03 21:51:16 +04:00
|
|
|
import repoview
|
2014-02-05 00:27:49 +04:00
|
|
|
import fileset
|
2014-05-30 01:12:59 +04:00
|
|
|
import revlog
|
2006-09-18 07:20:44 +04:00
|
|
|
|
2009-04-27 01:50:44 +04:00
|
|
|
propertycache = util.propertycache
|
2008-11-14 14:44:26 +03:00
|
|
|
|
2013-07-14 04:59:21 +04:00
|
|
|
class basectx(object):
    """A basectx object represents the common logic for its children:

    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __new__(cls, repo, changeid='', *args, **kwargs):
        # If we are handed an existing context, hand it straight back
        # instead of allocating a new object; subclasses' __init__ detect
        # this case and return early.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        # default to the null revision; subclasses overwrite these
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # short hex of the node, e.g. "1e4e1b8f71e0"
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            # contexts of different classes never compare equal, even when
            # they point at the same revision
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # "path in ctx" tests membership in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path in this changeset
        return self.filectx(key)

    def __iter__(self):
        # iterate tracked file names in sorted (deterministic) order
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        if match.always():
            # match-all: a plain copy is cheapest
            return self.manifest().copy()

        files = match.files()
        # exact file lists can use the manifest's native intersection
        if (match.matchfn == match.exact or
            (not match.anypats() and util.all(fn in self for fn in files))):
            return self.manifest().intersectfiles(files)

        # general case: copy and drop non-matching entries
        mf = self.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    def _matchstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """return a match-all matcher when *match* is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _prestatus(self, other, s, match, listignored, listclean, listunknown):
        """provide a hook to allow child objects to preprocess status results

        For example, this allows other contexts, such as workingctx, to query
        the dirstate before comparing the manifests.
        """
        # load earliest manifest first for caching reasons
        if self.rev() < other.rev():
            self.manifest()
        return s

    def _poststatus(self, other, s, match, listignored, listclean, listunknown):
        """provide a hook to allow child objects to postprocess status results

        For example, this allows other contexts, such as workingctx, to filter
        suspect symlinks in the case of FAT32 and NTFS filesytems.
        """
        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        Compares the (match-filtered) manifests of *other* (mf1, the "from"
        side) and *self* (mf2, the "to" side); entries left in mf1 at the
        end are the removed files.
        """
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        # deleted/unknown/ignored come from the pre-computed status list
        deleted, unknown, ignored = s[3], s[4], s[5]
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                # present on both sides: modified if flags differ or the
                # nodes differ (falling back to a content compare when the
                # mf2 node is null, i.e. not yet committed)
                if (fn not in deleted and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                # consume the entry so only removed files remain in mf1
                del mf1[fn]
            elif fn not in deleted:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return [modified, added, removed, deleted, unknown, ignored, clean]

    @propertycache
    def substate(self):
        # parsed .hgsub/.hgsubstate for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the recorded revision of the subrepo at *subpath*."""
        return self.substate[subpath][1]

    def rev(self):
        """Return the revision number of this changeset."""
        return self._rev
    def node(self):
        """Return the binary node id of this changeset."""
        return self._node
    def hex(self):
        """Return the full hex node id of this changeset."""
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        """Return the phase of this changeset as a string."""
        return phases.phasenames[self.phase()]
    def mutable(self):
        """True unless this changeset is public (immutable)."""
        return self.phase() > phases.public

    def getfileset(self, expr):
        """Evaluate fileset expression *expr* against this context."""
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the first parent context."""
        return self._parents[0]

    def p2(self):
        """Return the second parent context, or the null context if none."""
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        """Return (filenode, flags) for *path*, raising ManifestLookupError
        when the file is not in this changeset.

        Uses whichever manifest data is already cached (full manifest or
        manifest delta) before falling back to a targeted manifest lookup.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        """Return the filelog node for *path* in this changeset."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for *path*; '' when missing."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        """Return the subrepo object mounted at *path*."""
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        # NOTE(review): mutable default argument 'pats=[]' — it is only
        # passed through, never mutated here, but callers must not rely on
        # the shared default object.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            # coerce revision identifiers into a context
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        # directory-membership structure built from the manifest
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        # committed contexts are never dirty; workingctx overrides this
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # NOTE(review): the local name 'reversed' shadows the builtin; left
        # unchanged here to keep the code byte-identical.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        # r is [modified, added, removed, deleted, unknown, ignored, clean]
        r = [[], [], [], [], [], [], []]
        match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
                                  listunknown)
        r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)
        r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
                             listunknown)

        if reversed:
            # reverse added and removed
            r[1], r[2] = r[2], r[1]

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # prefix subrepo paths into our result lists
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        # we return a tuple to signify that this list isn't changing
        return scmutil.status(*r)
|
|
|
|
2013-11-07 07:09:15 +04:00
|
|
|
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build a memctx whose file contents are served from *store*.

    *store* must expose getfile(path) -> (data, (islink, isexec), copied);
    a None data value means the path is absent. When *branch* is set it is
    recorded in the changeset extras.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # path has no content in the store
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {'branch': encoding.fromlocal(branch)} if branch else {}
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
2013-07-14 04:59:21 +04:00
|
|
|
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # fast path: integer revision number
            if isinstance(changeid, int):
                try:
                    self._node = repo.changelog.node(changeid)
                except IndexError:
                    raise error.RepoLookupError(
                        _("unknown revision '%s'") % changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                # normalize longs to strings and fall through to the
                # string-based lookup below
                changeid = str(changeid)
            if changeid == '.':
                # first parent of the working directory
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # possibly a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except LookupError:
                    pass

            # decimal revision number as a string
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    # negative revs count back from tip
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except (ValueError, OverflowError, IndexError):
                pass

            # full 40-character hex node id
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except (TypeError, LookupError):
                    pass

            # bookmark, tag, or branch name, in that order of precedence
            if changeid in repo._bookmarks:
                self._node = repo._bookmarks[changeid]
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid in repo._tagscache.tags:
                self._node = repo._tagscache.tags[changeid]
                self._rev = repo.changelog.rev(self._node)
                return
            try:
                self._node = repo.branchtip(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except error.RepoLookupError:
                pass

            # unambiguous hex prefix
            self._node = repo.changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message readable when changeid was binary
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except Exception:
            # NOTE(review): this handler is a no-op passthrough (re-raises
            # unconditionally); kept byte-identical in this doc-only pass.
            raise
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # not fully initialized yet; fall back to identity
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # raw changelog entry tuple for this revision
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # delta against the parent manifest; cheaper than a full read
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # drop the null second parent
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        """Return the raw changelog entry tuple."""
        return self._changeset
    def manifestnode(self):
        """Return the manifest node recorded in this changeset."""
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        """True if this changeset closes its branch."""
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """Yield a context for each ancestor revision."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Yield a context for each descendant revision."""
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # multiple candidates: honor merge.preferancestor first
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        """Yield the names of files in this changeset matching *match*,
        reporting match.bad for requested files that do not exist."""
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                # NOTE(review): 'raise StopIteration' inside a generator is
                # Python 2 idiom; under PEP 479 (Python 3.7+) this becomes
                # RuntimeError — would need 'return' if ported.
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)
2013-08-11 00:10:26 +04:00
|
|
|
class basefilectx(object):
|
|
|
|
"""A filecontext object represents the common logic for its children:
|
|
|
|
filectx: read-only access to a filerevision that is already present
|
|
|
|
in the repo,
|
|
|
|
workingfilectx: a filecontext that represents files from the working
|
|
|
|
directory,
|
|
|
|
memfilectx: a filecontext that represents files in-memory."""
|
|
|
|
    def __new__(cls, repo, path, *args, **kwargs):
        # no instance reuse here (unlike basectx); plain allocation
        return super(basefilectx, cls).__new__(cls)
|
|
|
|
|
2013-08-12 07:40:59 +04:00
|
|
|
    @propertycache
    def _filelog(self):
        # revlog holding this file's history
        return self._repo.file(self._path)
|
|
|
|
|
2013-08-12 07:44:06 +04:00
|
|
|
    @propertycache
    def _changeid(self):
        """Changelog revision this file revision belongs to.

        Prefers explicitly supplied data, then the owning changectx, and
        only falls back to the filelog linkrev as a last resort.
        """
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)
|
|
|
|
|
2013-08-12 07:44:19 +04:00
|
|
|
    @propertycache
    def _filenode(self):
        """Binary node of this file revision in its filelog."""
        if '_fileid' in self.__dict__:
            # resolve whatever identifier the caller supplied
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
|
|
|
|
|
2013-08-12 07:44:36 +04:00
|
|
|
    @propertycache
    def _filerev(self):
        # filelog-local revision number of this file revision
        return self._filelog.rev(self._filenode)
|
|
|
|
|
2013-08-12 07:44:51 +04:00
|
|
|
    @propertycache
    def _repopath(self):
        # repo-relative path; subclasses may differ from _path
        return self._path
|
|
|
|
|
2013-08-12 07:45:13 +04:00
|
|
|
    def __nonzero__(self):
        """Truthy iff the file exists in this context."""
        try:
            # resolving the filenode raises when the file is absent
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False
|
|
|
|
|
2013-08-12 07:45:30 +04:00
|
|
|
    def __str__(self):
        # "path@shortnode", e.g. "foo.c@1e4e1b8f71e0"
        return "%s@%s" % (self.path(), self._changectx)
|
2013-08-12 07:45:30 +04:00
|
|
|
|
2013-08-12 07:46:54 +04:00
|
|
|
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))
|
|
|
|
|
2013-08-12 07:47:39 +04:00
|
|
|
    def __hash__(self):
        try:
            # hash on (path, node), matching __eq__ below
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)
|
|
|
|
|
2013-08-12 07:49:03 +04:00
|
|
|
    def __eq__(self, other):
        try:
            # same concrete class, same path, same file revision
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False
|
|
|
|
|
2013-08-12 07:49:40 +04:00
|
|
|
    def __ne__(self, other):
        return not (self == other)
|
|
|
|
|
2013-08-12 07:50:15 +04:00
|
|
|
    def filerev(self):
        """Return this file revision's filelog-local revision number."""
        return self._filerev
|
2013-08-12 07:50:37 +04:00
|
|
|
    def filenode(self):
        """Return this file revision's binary filelog node."""
        return self._filenode
|
2013-08-12 07:51:04 +04:00
|
|
|
    def flags(self):
        """Return this file's flags ('l', 'x' or '') in its changeset."""
        return self._changectx.flags(self._path)
|
2013-08-12 07:51:18 +04:00
|
|
|
    def filelog(self):
        """Return the filelog (revlog) backing this file."""
        return self._filelog
|
2013-08-12 07:51:30 +04:00
|
|
|
    def rev(self):
        """Return the changelog revision associated with this file context."""
        return self._changeid
|
2013-08-12 07:51:41 +04:00
|
|
|
    def linkrev(self):
        """Return the changelog revision this filelog entry points at."""
        return self._filelog.linkrev(self._filerev)
|
2013-08-12 07:51:53 +04:00
|
|
|
    def node(self):
        """Return the binary node of the owning changeset."""
        return self._changectx.node()
|
2013-08-12 07:53:23 +04:00
|
|
|
    def hex(self):
        """Return the hex node of the owning changeset."""
        return self._changectx.hex()
|
2013-08-12 07:53:47 +04:00
|
|
|
    def user(self):
        # delegate to the owning changeset
        return self._changectx.user()
|
2013-08-12 07:53:56 +04:00
|
|
|
    def date(self):
        # delegate to the owning changeset
        return self._changectx.date()
|
2013-08-12 07:54:12 +04:00
|
|
|
    def files(self):
        # delegate to the owning changeset
        return self._changectx.files()
|
2013-08-12 07:54:22 +04:00
|
|
|
    def description(self):
        # delegate to the owning changeset
        return self._changectx.description()
|
2013-08-12 07:54:31 +04:00
|
|
|
    def branch(self):
        # delegate to the owning changeset
        return self._changectx.branch()
|
2013-08-12 07:54:39 +04:00
|
|
|
    def extra(self):
        # delegate to the owning changeset
        return self._changectx.extra()
|
2013-08-12 07:54:48 +04:00
|
|
|
    def phase(self):
        # delegate to the owning changeset
        return self._changectx.phase()
|
2013-08-12 07:54:58 +04:00
|
|
|
    def phasestr(self):
        # delegate to the owning changeset
        return self._changectx.phasestr()
|
2013-08-12 07:55:09 +04:00
|
|
|
    def manifest(self):
        # delegate to the owning changeset
        return self._changectx.manifest()
|
2013-08-12 07:56:02 +04:00
|
|
|
    def changectx(self):
        """Return the changeset context that owns this file context."""
        return self._changectx
|
2013-08-12 07:50:15 +04:00
|
|
|
|
2013-08-12 07:56:18 +04:00
|
|
|
    def path(self):
        """Return the repo-relative path of this file."""
        return self._path
|
|
|
|
|
2013-08-12 07:56:30 +04:00
|
|
|
    def isbinary(self):
        """True if this file's data looks binary; False when unreadable."""
        try:
            return util.binary(self.data())
        except IOError:
            # unreadable data is treated as not binary (best effort)
            return False
|
2014-07-26 05:11:47 +04:00
|
|
|
    def isexec(self):
        """True if this file carries the executable flag."""
        return 'x' in self.flags()
|
|
|
|
    def islink(self):
        """True if this file is a symlink."""
        return 'l' in self.flags()
|
2013-08-12 07:56:30 +04:00
|
|
|
|
2013-08-12 07:56:53 +04:00
|
|
|
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Size is used as a cheap pre-filter: only fall through to a real
        # content comparison when the sizes could plausibly match.  When
        # fctx is uncommitted (filerev is None) and encode filters are
        # active, sizes are not directly comparable, except for the fixed
        # 4-byte metadata overhead case below.
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True
|
|
|
|
|
2013-08-12 07:57:21 +04:00
|
|
|
def parents(self):
|
2014-08-15 06:37:46 +04:00
|
|
|
_path = self._path
|
2013-08-12 07:57:21 +04:00
|
|
|
fl = self._filelog
|
2014-08-15 06:37:46 +04:00
|
|
|
pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
|
2013-08-12 07:57:21 +04:00
|
|
|
|
|
|
|
r = self._filelog.renamed(self._filenode)
|
|
|
|
if r:
|
|
|
|
pl[0] = (r[0], r[1], None)
|
|
|
|
|
|
|
|
return [filectx(self._repo, p, fileid=n, filelog=l)
|
|
|
|
for p, n, l in pl if n != nullid]
|
|
|
|
|
2013-08-12 07:59:10 +04:00
|
|
|
    def p1(self):
        """Return the first parent file context."""
        return self.parents()[0]
|
|
|
|
|
2013-08-12 08:00:11 +04:00
|
|
|
def p2(self):
|
|
|
|
p = self.parents()
|
|
|
|
if len(p) == 2:
|
|
|
|
return p[1]
|
|
|
|
return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
|
|
|
|
|
2011-11-18 15:04:31 +04:00
|
|
|
    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # decorate() pairs each line of a revision's text with its
        # attribution record; the shape of the record depends on the
        # requested linenumber mode (see docstring above)
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        # pair() pushes a parent's attributions down onto the unchanged
        # lines of its child
        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filenode())
        else:
            base = self

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}       # revision -> finished annotation
        pcache = {}     # revision -> parents (cleared when consumed)
        needed = {base: 1}  # refcount of pending consumers per revision
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                # bump each parent's refcount exactly once per revision:
                # only on the first visit of f (issue3841 — a parent may
                # already be in hist yet still be needed by f)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                # f may be revisited after being annotated (merges in
                # legacy filelogs); reuse the stored result in that case
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    # drop a parent's annotation once its last consumer
                    # (tracked in needed) has used it
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))
2013-08-12 08:05:50 +04:00
|
|
|
def ancestors(self, followfirst=False):
|
|
|
|
visit = {}
|
|
|
|
c = self
|
|
|
|
cut = followfirst and 1 or None
|
|
|
|
while True:
|
|
|
|
for parent in c.parents()[:cut]:
|
|
|
|
visit[(parent.rev(), parent.node())] = parent
|
|
|
|
if not visit:
|
|
|
|
break
|
|
|
|
c = visit.pop(max(visit))
|
|
|
|
yield c
|
|
|
|
|
2013-08-12 08:03:33 +04:00
|
|
|
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # whichever of these is provided seeds the corresponding
        # propertycache; the others are derived lazily
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.RepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        """Return the file content.

        For a censored node: return "" when censor.policy is "ignore",
        otherwise abort with a hint.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint="set censor.policy to ignore errors")

    def size(self):
        # size as recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points to another changeset: only report the copy when
        # neither parent of this changeset already holds this file node
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
2013-09-18 03:34:45 +04:00
|
|
|
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/changes only pre-seed the propertycaches when given;
        # otherwise they are computed lazily
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "1a2b3c4d5e6f+" — first parent plus a dirty marker
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status"""

        man = self._parents[0].manifest().copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man:
                    return man
                return man2
        else:
            getman = lambda f: man

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        # tag added files with "a" and modified files with "m"
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # flags unreadable from the filesystem: leave as-is
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def unknown(self):
        return self._status.unknown
    def ignored(self):
        return self._status.ignored
    def clean(self):
        return self._status.clean
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # union of the parents' tags
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest's flags when it has been computed
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()
2013-09-18 03:34:45 +04:00
|
|
|
class workingctx(committablectx):
|
2013-08-15 00:34:18 +04:00
|
|
|
"""A workingctx object makes access to data related to
|
|
|
|
the current working directory convenient.
|
|
|
|
date - any valid date string or (unixtime, offset), or None.
|
|
|
|
user - username string, or None.
|
|
|
|
extra - a dictionary of extra values, or None.
|
|
|
|
changes - a list of file lists as returned by localrepo.status()
|
|
|
|
or None to use the repository status.
|
|
|
|
"""
|
|
|
|
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # everything is handled by the committablectx base class
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
2011-05-01 17:29:50 +04:00
|
|
|
def __iter__(self):
|
|
|
|
d = self._repo.dirstate
|
|
|
|
for f in d:
|
|
|
|
if d[f] != 'r':
|
|
|
|
yield f
|
|
|
|
|
2014-07-04 08:01:37 +04:00
|
|
|
def __contains__(self, key):
|
|
|
|
return self._repo.dirstate[key] not in "?r"
|
|
|
|
|
2009-04-24 20:47:15 +04:00
|
|
|
    @propertycache
    def _parents(self):
        # dirstate always stores two parent nodes; drop the second when it
        # is null so single-parent working directories report one parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
2006-12-25 20:57:55 +03:00
|
|
|
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
2012-04-23 14:12:04 +04:00
|
|
|
def dirty(self, missing=False, merge=True, branch=True):
|
2009-06-05 01:21:55 +04:00
|
|
|
"check whether a working directory is modified"
|
2010-05-02 01:05:21 +04:00
|
|
|
# check subrepos first
|
2012-12-12 05:38:14 +04:00
|
|
|
for s in sorted(self.substate):
|
2010-05-02 01:05:21 +04:00
|
|
|
if self.sub(s).dirty():
|
|
|
|
return True
|
|
|
|
# check current working dir
|
2012-04-23 14:12:04 +04:00
|
|
|
return ((merge and self.p2()) or
|
|
|
|
(branch and self.branch() != self.p1().branch()) or
|
2009-06-05 01:21:55 +04:00
|
|
|
self.modified() or self.added() or self.removed() or
|
|
|
|
(missing and self.deleted()))
|
|
|
|
|
2010-09-13 15:09:20 +04:00
|
|
|
    def add(self, list, prefix=""):
        """Schedule the given files for addition.

        Returns the sublist of files that were rejected (missing, or not
        regular files/symlinks).  prefix is prepended to names in messages
        only, for subrepo-aware reporting.
        """
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # refuse names that are not portable across platforms
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: huge files are expensive to track
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified, or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: re-adding resurrects the entry
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()
2012-01-18 04:10:59 +04:00
|
|
|
    def forget(self, files, prefix=""):
        """Stop tracking the given files without deleting them.

        Returns the sublist of files that were rejected (not tracked).
        prefix is used only to qualify names in warning messages.
        """
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # tracked in a parent: mark as removed
                    self._repo.dirstate.remove(f)
                else:
                    # scheduled for addition only: drop the entry entirely
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()
|
|
|
    def undelete(self, list):
        """Restore files scheduled for removal from a parent revision.

        File content is rewritten into the working directory and the
        dirstate entry reset to normal.
        """
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # prefer the first parent's version; fall back to the
                    # second parent when the file only exists there
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()
|
|
|
|
    def copy(self, source, dest):
        """Record that dest was copied from source.

        dest must already exist in the working directory as a regular file
        or symlink; otherwise a warning is issued and nothing is recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                # make sure dest is tracked before recording the copy
                if self._repo.dirstate[dest] in '?r':
                    self._repo.dirstate.add(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()
|
2014-03-08 01:32:37 +04:00
|
|
|
def _filtersuspectsymlink(self, files):
|
|
|
|
if not files or self._repo.dirstate._checklink:
|
|
|
|
return files
|
|
|
|
|
|
|
|
# Symlink placeholders may get non-symlink-like contents
|
|
|
|
# via user error or dereferencing by NFS or Samba servers,
|
|
|
|
# so we filter out any placeholders that don't look like a
|
|
|
|
# symlink
|
|
|
|
sane = []
|
|
|
|
for f in files:
|
|
|
|
if self.flags(f) == 'l':
|
|
|
|
d = self[f].data()
|
|
|
|
if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
|
|
|
|
self._repo.ui.debug('ignoring suspect symlink placeholder'
|
|
|
|
' "%s"\n' % f)
|
|
|
|
continue
|
|
|
|
sane.append(f)
|
|
|
|
return sane
|
|
|
|
|
2014-03-12 03:10:00 +04:00
|
|
|
    def _checklookup(self, files):
        """Recheck files whose dirstate entry could not decide cleanliness.

        Returns (modified, fixup): *modified* really changed; *fixup* proved
        clean, and their dirstate entries are opportunistically refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                # couldn't get the lock: skip the refresh, it's best-effort
                pass
        return modified, fixup
|
2014-04-16 00:43:30 +04:00
|
|
|
    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        # start from the first parent's matching manifest and overlay the
        # working directory changes from status tuple *s*
        mf = self._repo['.']._manifestmatches(match, s)
        modified, added, removed = s[0:3]
        for f in modified + added:
            # node unknown for uncommitted files: record None with the
            # current on-disk flags
            mf[f] = None
            mf.setflag(f, self.flags(f))
        for f in removed:
            if f in mf:
                del mf[f]
        return mf
|
2014-04-22 07:12:59 +04:00
|
|
|
def _prestatus(self, other, s, match, listignored, listclean, listunknown):
|
|
|
|
"""override the parent hook with a dirstate query
|
|
|
|
|
|
|
|
We use this prestatus hook to populate the status with information from
|
|
|
|
the dirstate.
|
|
|
|
"""
|
2014-04-24 01:06:42 +04:00
|
|
|
# doesn't need to call super; if that changes, be aware that super
|
|
|
|
# calls self.manifest which would slow down the common case of calling
|
|
|
|
# status against a workingctx's parent
|
2014-04-22 07:12:59 +04:00
|
|
|
return self._dirstatestatus(match, listignored, listclean, listunknown)
|
|
|
|
|
2014-04-22 21:59:22 +04:00
|
|
|
def _poststatus(self, other, s, match, listignored, listclean, listunknown):
|
|
|
|
"""override the parent hook with a filter for suspect symlinks
|
|
|
|
|
|
|
|
We use this poststatus hook to filter out symlinks that might have
|
|
|
|
accidentally ended up with the entire contents of the file they are
|
|
|
|
susposed to be linking to.
|
|
|
|
"""
|
|
|
|
s[0] = self._filtersuspectsymlink(s[0])
|
2014-10-05 08:05:41 +04:00
|
|
|
self._status = scmutil.status(*s)
|
2014-04-22 21:59:22 +04:00
|
|
|
return s
|
|
|
|
|
2014-04-22 22:14:51 +04:00
|
|
|
def _dirstatestatus(self, match=None, ignored=False, clean=False,
|
|
|
|
unknown=False):
|
|
|
|
'''Gets the status from the dirstate -- internal use only.'''
|
|
|
|
listignored, listclean, listunknown = ignored, clean, unknown
|
|
|
|
match = match or matchmod.always(self._repo.root, self._repo.getcwd())
|
|
|
|
subrepos = []
|
|
|
|
if '.hgsub' in self:
|
|
|
|
subrepos = sorted(self.substate)
|
2014-10-04 08:44:10 +04:00
|
|
|
cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
|
|
|
|
listclean, listunknown)
|
|
|
|
modified, added, removed, deleted, unknown, ignored, clean = s
|
2014-04-22 22:14:51 +04:00
|
|
|
|
|
|
|
# check for any possibly clean files
|
|
|
|
if cmp:
|
|
|
|
modified2, fixup = self._checklookup(cmp)
|
|
|
|
modified += modified2
|
|
|
|
|
|
|
|
# update dirstate for files that are actually clean
|
|
|
|
if fixup and listclean:
|
|
|
|
clean += fixup
|
|
|
|
|
|
|
|
return [modified, added, removed, deleted, unknown, ignored, clean]
|
|
|
|
|
2014-04-24 17:34:44 +04:00
|
|
|
def _buildstatus(self, other, s, match, listignored, listclean,
|
2014-05-30 01:09:16 +04:00
|
|
|
listunknown):
|
2014-04-24 17:34:44 +04:00
|
|
|
"""build a status with respect to another context
|
|
|
|
|
|
|
|
This includes logic for maintaining the fast path of status when
|
|
|
|
comparing the working directory against its parent, which is to skip
|
|
|
|
building a new manifest if self (working directory) is not comparing
|
|
|
|
against its parent (repo['.']).
|
|
|
|
"""
|
|
|
|
if other != self._repo['.']:
|
|
|
|
s = super(workingctx, self)._buildstatus(other, s, match,
|
|
|
|
listignored, listclean,
|
|
|
|
listunknown)
|
|
|
|
return s
|
|
|
|
|
2014-04-24 17:32:28 +04:00
|
|
|
def _matchstatus(self, other, s, match, listignored, listclean,
|
|
|
|
listunknown):
|
|
|
|
"""override the match method with a filter for directory patterns
|
|
|
|
|
|
|
|
We use inheritance to customize the match.bad method only in cases of
|
|
|
|
workingctx since it belongs only to the working directory when
|
|
|
|
comparing against the parent changeset.
|
|
|
|
|
|
|
|
If we aren't comparing against the working directory's parent, then we
|
|
|
|
just use the default match object sent to us.
|
|
|
|
"""
|
|
|
|
superself = super(workingctx, self)
|
|
|
|
match = superself._matchstatus(other, s, match, listignored, listclean,
|
|
|
|
listunknown)
|
|
|
|
if other != self._repo['.']:
|
|
|
|
def bad(f, msg):
|
|
|
|
# 'f' may be a directory pattern from 'match.files()',
|
|
|
|
# so 'f not in ctx1' is not enough
|
|
|
|
if f not in other and f not in other.dirs():
|
|
|
|
self._repo.ui.warn('%s: %s\n' %
|
|
|
|
(self._repo.dirstate.pathto(f), msg))
|
|
|
|
match.bad = bad
|
|
|
|
return match
|
|
|
|
|
2014-05-28 02:55:35 +04:00
|
|
|
def status(self, other='.', match=None, listignored=False,
|
|
|
|
listclean=False, listunknown=False, listsubrepos=False):
|
|
|
|
# yet to be determined: what to do if 'other' is a 'workingctx' or a
|
|
|
|
# 'memctx'?
|
|
|
|
s = super(workingctx, self).status(other, match, listignored, listclean,
|
|
|
|
listunknown, listsubrepos)
|
|
|
|
# calling 'super' subtly reveresed the contexts, so we flip the results
|
|
|
|
# (s[1] is 'added' and s[2] is 'removed')
|
2014-05-28 02:04:48 +04:00
|
|
|
s = list(s)
|
2014-05-28 02:55:35 +04:00
|
|
|
s[1], s[2] = s[2], s[1]
|
2014-10-14 09:52:27 +04:00
|
|
|
return scmutil.status(*s)
|
2014-03-12 03:28:09 +04:00
|
|
|
|
2013-09-18 03:34:45 +04:00
|
|
|
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # repo: the owning localrepo
        # path: normalized file path relative to the repository root
        # filelog/ctx: optional precomputed values; see below
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # only assign these attributes when values were supplied, so that
        # subclasses' lazy fallbacks (e.g. propertycache _changectx on
        # workingfilectx) can kick in when they were not
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists" (truthy)
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "path not present in this changeset"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copied file: (source path, source node, None) -- the None
            # filelog makes filectx look up the source's own filelog
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        # additional changeset parents (merge case)
        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (node == nullid)
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file cannot have descendants yet
        return []
2013-09-18 03:34:45 +04:00
|
|
|
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily build the owning working-directory context when one was
        # not handed to __init__ (see committablefilectx.__init__)
        return workingctx(self._repo)

    def data(self):
        # file content as read from disk via the repo (wread applies any
        # configured filters/decoding)
        return self._repo.wread(self._path)
    def renamed(self):
        """return (source path, source node) if this file was recorded as
        copied in the dirstate, or None; the node comes from the first
        parent's manifest (nullid when the source is absent there)"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        # lstat rather than stat: do not follow symlinks
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """return (unixtime, tzoffset) from the on-disk mtime, falling back
        to the changeset's own date when the file is gone from disk"""
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            # only a missing file is acceptable; propagate anything else
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
2013-08-16 00:00:03 +04:00
|
|
|
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # an in-memory commit has no revision/node until committed
        self._rev = None
        self._node = None
        # a None parent means "missing"; substitute the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # all touched files are reported as 'modified'
        self._status = scmutil.status(files, [], [], [], [], [], [])
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            # filectxfn is assumed to be a mapping path -> filectx-like
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        # default the branch to 'default' when not supplied in extra
        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        # optionally run the commit message through an editor and persist
        # the result for recovery
        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # NOTE(review): only entries already present in p1's manifest are
        # rehashed here; files added by this memctx are not inserted and
        # removed files are not dropped -- confirm callers expect that
        for f, fnode in man.iteritems():
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            # recompute the node from the in-memory data and parent nodes
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        return man
2013-08-16 00:23:36 +04:00
|
|
|
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # manifest-style flags string: 'l' for symlink, 'x' for executable
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # nullid: the copy source's node is not known in-memory
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        # (source path, nullid) when copied, else None
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        # NOTE(review): this delegates deletion to the owning context via
        # __delitem__ -- confirm the memctx in use actually supports it
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        # in-memory file: just replace the buffered data; 'flags' is
        # accepted for interface parity but not applied here
        self._data = data
|