2009-04-26 03:13:08 +04:00
|
|
|
# dirstate.py - working directory tracking for mercurial
|
|
|
|
#
|
|
|
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-03-07 00:23:26 +03:00
|
|
|
from node import nullid
|
2006-12-15 05:25:19 +03:00
|
|
|
from i18n import _
|
2011-04-21 15:18:52 +04:00
|
|
|
import scmutil, util, ignore, osutil, parsers, encoding
|
2009-04-28 19:40:46 +04:00
|
|
|
import struct, os, stat, errno
|
2009-10-31 18:56:58 +03:00
|
|
|
import cStringIO
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
_format = ">cllll"
|
2009-04-30 05:47:18 +04:00
|
|
|
propertycache = util.propertycache
|
2007-06-18 22:24:34 +04:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
def _finddirs(path):
|
2008-09-13 04:57:07 +04:00
|
|
|
pos = path.rfind('/')
|
|
|
|
while pos != -1:
|
2008-07-12 03:46:02 +04:00
|
|
|
yield path[:pos]
|
2008-09-13 04:57:07 +04:00
|
|
|
pos = path.rfind('/', 0, pos)
|
2008-07-12 03:46:02 +04:00
|
|
|
|
2008-10-15 03:14:29 +04:00
|
|
|
def _incdirs(dirs, path):
    '''Add one reference for each ancestor directory of path to dirs.

    Stops at the first ancestor already present, since all of its own
    ancestors are then already counted.
    '''
    for d in _finddirs(path):
        if d in dirs:
            dirs[d] += 1
            return
        dirs[d] = 1
|
|
|
|
|
|
|
|
def _decdirs(dirs, path):
    '''Drop one reference for each ancestor directory of path from dirs.

    Mirror of _incdirs: once an ancestor still has other references,
    the remaining ancestors do too, so stop there.
    '''
    for d in _finddirs(path):
        count = dirs[d]
        if count > 1:
            dirs[d] = count - 1
            return
        del dirs[d]
|
|
|
|
|
2005-11-19 09:48:47 +03:00
|
|
|
class dirstate(object):
|
2006-06-04 04:25:27 +04:00
|
|
|
|
2010-11-22 21:43:31 +03:00
|
|
|
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        # hook run over parent nodes before they are returned to callers
        self._validate = validate
        self._root = root
        # root with a trailing separator, so _join() is a plain concat
        self._rootdir = os.path.join(root, '')
        # True when in-memory state differs from the on-disk dirstate
        self._dirty = False
        # True when the parents were changed in memory (setparents)
        self._dirtypl = False
        # most recent mtime handed out by normal(); None until then
        self._lastnormaltime = None
        self._ui = ui
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2009-04-30 05:47:18 +04:00
|
|
|
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map directly, which replaces this
        # propertycache entry; the return feeds the first access
        self._read()
        return self._map
|
|
|
|
|
|
|
|
    @propertycache
    def _copymap(self):
        # map of destination -> source for pending copies; populated
        # (like _map) as a side effect of _read()
        self._read()
        return self._copymap
|
|
|
|
|
|
|
|
@propertycache
|
|
|
|
def _foldmap(self):
|
|
|
|
f = {}
|
|
|
|
for name in self._map:
|
|
|
|
f[os.path.normcase(name)] = name
|
|
|
|
return f
|
|
|
|
|
|
|
|
@propertycache
|
|
|
|
def _branch(self):
|
|
|
|
try:
|
2011-05-02 12:11:18 +04:00
|
|
|
return self._opener.read("branch").strip() or "default"
|
2009-04-30 05:47:18 +04:00
|
|
|
except IOError:
|
|
|
|
return "default"
|
|
|
|
|
|
|
|
    @propertycache
    def _pl(self):
        '''The working directory parents as a pair of binary node ids.

        Read from the first 40 bytes (2 x 20-byte nodes) of the
        dirstate file; a missing file means both parents are null.
        '''
        try:
            fp = self._opener("dirstate")
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # file exists but is truncated: refuse to guess
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        # no dirstate file (or zero-length read): null parents
        return [nullid, nullid]
|
|
|
|
|
|
|
|
    @propertycache
    def _dirs(self):
        # refcount map of directories that contain tracked (non-removed)
        # entries; kept incrementally in sync by _addpath/_droppath
        dirs = {}
        for f, s in self._map.iteritems():
            if s[0] != 'r':
                _incdirs(dirs, f)
        return dirs
|
|
|
|
|
|
|
|
@propertycache
|
|
|
|
def _ignore(self):
|
|
|
|
files = [self._join('.hgignore')]
|
|
|
|
for name, path in self._ui.configitems("ui"):
|
|
|
|
if name == 'ignore' or name.startswith('ignore.'):
|
2009-10-19 23:19:28 +04:00
|
|
|
files.append(util.expandpath(path))
|
2009-04-30 05:47:18 +04:00
|
|
|
return ignore.ignore(self._root, files, self._ui.warn)
|
|
|
|
|
|
|
|
@propertycache
|
|
|
|
def _slash(self):
|
|
|
|
return self._ui.configbool('ui', 'slash') and os.sep != '/'
|
|
|
|
|
|
|
|
    @propertycache
    def _checklink(self):
        # True if the filesystem under the repo root supports symlinks
        return util.checklink(self._root)
|
|
|
|
|
|
|
|
    @propertycache
    def _checkexec(self):
        # True if the filesystem under the repo root honors the exec bit
        return util.checkexec(self._root)
|
|
|
|
|
|
|
|
    @propertycache
    def _checkcase(self):
        # True on case-folding filesystems -- note the inversion:
        # util.checkcase reports whether the filesystem IS case sensitive
        return not util.checkcase(self._join('.hg'))
|
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
    def _join(self, f):
        '''Return the absolute path of repo-relative path f.'''
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-06-26 22:46:34 +04:00
|
|
|
    def flagfunc(self, fallback):
        '''Return a callable mapping a tracked path to its flags:
        'l' (symlink), 'x' (executable) or '' (neither).

        Flags the filesystem can report reliably are read from disk;
        the rest are taken from fallback (a callable with the same
        contract, e.g. a manifest flags lookup).
        '''
        if self._checklink:
            if self._checkexec:
                # both symlink and exec bit are trustworthy: stat the disk
                def f(x):
                    p = self._join(x)
                    if os.path.islink(p):
                        return 'l'
                    if util.isexec(p):
                        return 'x'
                    return ''
                return f
            # symlinks only: take the exec bit from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # exec bit only: take the symlink flag from the fallback
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        # neither is reliable: defer entirely to the fallback
        return fallback
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def getcwd(self):
|
|
|
|
cwd = os.getcwd()
|
2010-01-25 09:05:27 +03:00
|
|
|
if cwd == self._root:
|
|
|
|
return ''
|
2007-06-18 22:24:34 +04:00
|
|
|
# self._root ends with a path separator if self._root is '/' or 'C:\'
|
|
|
|
rootsep = self._root
|
2008-01-09 15:30:35 +03:00
|
|
|
if not util.endswithsep(rootsep):
|
2007-03-16 06:22:58 +03:00
|
|
|
rootsep += os.sep
|
|
|
|
if cwd.startswith(rootsep):
|
|
|
|
return cwd[len(rootsep):]
|
|
|
|
else:
|
|
|
|
# we're outside the repo. return an absolute path.
|
|
|
|
return cwd
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-09 06:49:12 +04:00
|
|
|
def pathto(self, f, cwd=None):
|
|
|
|
if cwd is None:
|
|
|
|
cwd = self.getcwd()
|
2007-06-18 22:24:34 +04:00
|
|
|
path = util.pathto(self._root, cwd, f)
|
2007-06-09 06:49:12 +04:00
|
|
|
if self._slash:
|
2008-01-09 15:30:13 +03:00
|
|
|
return util.normpath(path)
|
2007-06-09 06:49:12 +04:00
|
|
|
return path
|
2007-06-09 06:49:12 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def __getitem__(self, key):
|
2009-10-01 23:36:45 +04:00
|
|
|
'''Return the current state of key (a filename) in the dirstate.
|
2009-12-28 01:24:05 +03:00
|
|
|
|
2009-10-01 23:36:45 +04:00
|
|
|
States are:
|
|
|
|
n normal
|
|
|
|
m needs merging
|
|
|
|
r marked for removal
|
|
|
|
a marked for addition
|
|
|
|
? not tracked
|
|
|
|
'''
|
2007-07-22 01:02:09 +04:00
|
|
|
return self._map.get(key, ("?",))[0]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
    def __contains__(self, key):
        # a file is "in" the dirstate if it has an entry in any state
        return key in self._map
|
|
|
|
|
|
|
|
def __iter__(self):
|
2009-04-27 01:50:44 +04:00
|
|
|
for x in sorted(self._map):
|
2007-06-18 22:24:34 +04:00
|
|
|
yield x
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
    def parents(self):
        # both working directory parents, run through the validate hook
        return [self._validate(p) for p in self._pl]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2011-04-05 00:52:55 +04:00
|
|
|
    def p1(self):
        # first working directory parent, validated
        return self._validate(self._pl[0])
|
|
|
|
|
|
|
|
    def p2(self):
        # second working directory parent, validated (null when not merging)
        return self._validate(self._pl[1])
|
|
|
|
|
2007-03-14 02:50:02 +03:00
|
|
|
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
|
2007-03-14 02:50:02 +03:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def setparents(self, p1, p2=nullid):
|
2007-07-22 01:44:38 +04:00
|
|
|
self._dirty = self._dirtypl = True
|
2007-06-18 22:24:34 +04:00
|
|
|
self._pl = p1, p2
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-03-14 02:50:02 +03:00
|
|
|
    def setbranch(self, branch):
        '''Set the current branch and persist it to the 'branch' file.

        Raises util.Abort for names that would collide with revision
        identifiers.
        '''
        if branch in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % branch)
        # stored internally (and on disk) in the repo-local encoding
        self._branch = encoding.fromlocal(branch)
        self._opener.write("branch", self._branch + '\n')
|
2007-03-14 02:50:02 +03:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
    def _read(self):
        '''Parse the on-disk dirstate file into _map and _copymap.'''
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener.read("dirstate")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            # no dirstate file yet: leave both maps empty
            return
        if not st:
            return

        # the C parser fills the maps in place and returns the parents
        p = parsers.parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            # don't clobber parents changed in memory via setparents()
            self._pl = p
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
def invalidate(self):
|
2010-12-02 05:43:06 +03:00
|
|
|
for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
|
|
|
|
"_ignore"):
|
2007-07-20 02:43:25 +04:00
|
|
|
if a in self.__dict__:
|
|
|
|
delattr(self, a)
|
2011-03-23 13:22:29 +03:00
|
|
|
self._lastnormaltime = None
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = False
|
2007-04-24 23:02:51 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def copy(self, source, dest):
|
2009-12-28 01:24:05 +03:00
|
|
|
"""Mark dest as a copy of source. Unmark dest if source is None."""
|
2008-06-15 15:01:03 +04:00
|
|
|
if source == dest:
|
|
|
|
return
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2009-01-04 23:32:40 +03:00
|
|
|
if source is not None:
|
|
|
|
self._copymap[dest] = source
|
|
|
|
elif dest in self._copymap:
|
|
|
|
del self._copymap[dest]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def copied(self, file):
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._copymap.get(file, None)
|
2006-09-26 02:53:17 +04:00
|
|
|
|
|
|
|
    def copies(self):
        # expose the raw destination -> source copy map
        return self._copymap
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
    def _droppath(self, f):
        # keep the _dirs refcount cache in sync when f leaves the tracked
        # set; entries in state '?' or 'r' were never counted, and the
        # cache is only updated if it has already been materialized
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)
|
2007-06-18 22:24:34 +04:00
|
|
|
|
2008-07-12 03:46:02 +04:00
|
|
|
    def _addpath(self, f, check=False):
        '''Common bookkeeping before f enters the dirstate.

        When check is true (or f is currently marked removed), validate
        the filename and refuse names that clash with a tracked
        directory or with a tracked file on f's ancestor path.
        '''
        oldstate = self[f]
        if check or oldstate == "r":
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    # d is a tracked directory, so no ancestor of d can
                    # be a tracked file: nothing more to check
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        # update the _dirs refcount cache if it has been materialized
        if oldstate in "?r" and "_dirs" in self.__dict__:
            _incdirs(self._dirs, f)
|
2007-11-05 20:05:44 +03:00
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
    def normal(self, f):
        '''Mark a file normal and clean.'''
        self._dirty = True
        self._addpath(f)
        s = os.lstat(self._join(f))
        mtime = int(s.st_mtime)
        self._map[f] = ('n', s.st_mode, s.st_size, mtime)
        if f in self._copymap:
            del self._copymap[f]
        # NOTE: relies on Python 2 ordering (int > None) when
        # _lastnormaltime has not been set yet
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
|
dirstate: avoid a race with multiple commits in the same process
(issue2264, issue2516)
The race happens when two commits in a row change the same file
without changing its size, *if* those two commits happen in the same
second in the same process while holding the same repo lock. For
example:
commit 1:
M a
M b
commit 2: # same process, same second, same repo lock
M b # modify b without changing its size
M c
This first manifested in transplant, which is the most common way to
do multiple commits in the same process. But it can manifest in any
script or extension that does multiple commits under the same repo
lock. (Thus, the test script tests both transplant and a custom script.)
The problem was that dirstate.status() failed to notice the change to
b when localrepo is about to do the second commit, meaning that change
gets left in the working directory. In the context of transplant, that
means either a crash ("RuntimeError: nothing committed after
transplant") or a silently inaccurate transplant, depending on whether
any other files were modified by the second transplanted changeset.
The fix is to make status() work a little harder when we have
previously marked files as clean (state 'normal') in the same process.
Specifically, dirstate.normal() adds files to self._lastnormal, and
other state-changing methods remove them. Then dirstate.status() puts
any files in self._lastnormal into state 'lookup', which will make
localrepository.status() read file contents to see if it has really
changed. So we pay a small performance penalty for the second (and
subsequent) commits in the same process, without affecting the common
case. Anything that does lots of status updates and checks in the
same process could suffer a performance hit.
Incidentally, there is a simpler fix: call dirstate.normallookup() on
every file updated by commit() at the end of the commit. The trouble
with that solution is that it imposes a performance penalty on the
common case: it means the next status-dependent hg command after every
"hg commit" will be a little bit slower. The patch here is more
complex, but only affects performance for the uncommon case.
2011-03-21 00:41:09 +03:00
|
|
|
|
merge: forcefully mark files that we get from the second parent as dirty
After a hg merge, we want to include in the commit all the files that we
got from the second parent, so that we have the correct file-level
history. To make them visible to hg commit, we try to mark them as dirty.
Unfortunately, right now we can't really mark them as dirty[1] - the
best we can do is to mark them as needing a full comparison of their
contents, but they will still be considered clean if they happen to be
identical to the version in the first parent.
This changeset extends the dirstate format in a compatible way, so that
we can mark a file as dirty:
Right now we use a negative file size to indicate we don't have valid
stat data for this entry. In practice, this size is always -1.
This patch uses -2 to indicate that the entry is dirty. Older versions
of hg won't choke on this dirstate, but they may happily mark the file
as clean after a full comparison, destroying all of our hard work.
The patch adds a dirstate.normallookup method with the semantics of the
current normaldirty, and changes normaldirty to forcefully mark the
entry as dirty.
This should fix issue522.
[1] - well, we could put them in state 'm', but that state has a
different meaning.
2007-08-23 08:48:29 +04:00
|
|
|
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() cleared the copy record;
                    # re-establish it
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # already merged / already from the other parent:
                # nothing to do
                return
        self._dirty = True
        self._addpath(f)
        # size -1 / mtime -1 mark the entry 'unset': status() must
        # compare file contents instead of trusting stat data
        self._map[f] = ('n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
|
|
|
|
|
2010-04-20 13:17:01 +04:00
|
|
|
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        self._dirty = True
        self._addpath(f)
        # size -2 flags the entry as coming from the second parent;
        # older hg versions tolerate it but may not preserve it
        self._map[f] = ('n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]
|
|
|
|
|
|
|
|
def add(self, f):
|
2009-12-28 01:24:05 +03:00
|
|
|
'''Mark a file added.'''
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2008-07-12 03:46:02 +04:00
|
|
|
self._addpath(f, True)
|
2008-10-13 00:21:08 +04:00
|
|
|
self._map[f] = ('a', 0, -1, -1)
|
2007-07-22 01:02:09 +04:00
|
|
|
if f in self._copymap:
|
|
|
|
del self._copymap[f]
|
|
|
|
|
|
|
|
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state in the size field so that
            # normallookup() can restore it later
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            # no merge state to preserve: forget any copy record too
            del self._copymap[f]
|
|
|
|
|
|
|
|
def merge(self, f):
|
2009-12-28 01:24:05 +03:00
|
|
|
'''Mark a file merged.'''
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2007-07-22 01:02:09 +04:00
|
|
|
s = os.lstat(self._join(f))
|
2008-07-12 03:46:02 +04:00
|
|
|
self._addpath(f)
|
2008-10-18 13:26:06 +04:00
|
|
|
self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
|
2007-07-22 01:02:09 +04:00
|
|
|
if f in self._copymap:
|
|
|
|
del self._copymap[f]
|
|
|
|
|
2011-05-27 02:15:35 +04:00
|
|
|
    def drop(self, f):
        '''Drop a file from the dirstate'''
        self._dirty = True
        self._droppath(f)
        # unlike remove(), this forgets the entry entirely (state '?')
        del self._map[f]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2011-03-22 19:59:43 +03:00
|
|
|
    def _normalize(self, path, isknown):
        '''Resolve path to the case spelling recorded in the dirstate,
        falling back to the on-disk spelling, then to path itself.'''
        normed = os.path.normcase(path)
        folded = self._foldmap.get(normed, None)
        if folded is None:
            if isknown or not os.path.lexists(os.path.join(self._root, path)):
                # came from a disk walk or does not exist: take as given
                folded = path
            else:
                # ask the filesystem for the real spelling and cache it
                folded = self._foldmap.setdefault(normed,
                                util.fspath(path, self._root))
        return folded
|
|
|
|
|
|
|
|
def normalize(self, path, isknown=False):
|
|
|
|
'''
|
|
|
|
normalize the case of a pathname when on a casefolding filesystem
|
|
|
|
|
|
|
|
isknown specifies whether the filename came from walking the
|
|
|
|
disk, to avoid extra filesystem access
|
|
|
|
|
|
|
|
The normalized case is determined based on the following precedence:
|
|
|
|
|
|
|
|
- version of name already stored in the dirstate
|
|
|
|
- version of name stored on disk
|
|
|
|
- version provided via command arguments
|
|
|
|
'''
|
|
|
|
|
|
|
|
if self._checkcase:
|
|
|
|
return self._normalize(path, isknown)
|
|
|
|
return path
|
2008-06-06 22:23:29 +04:00
|
|
|
|
2007-08-06 06:04:56 +04:00
|
|
|
def clear(self):
|
|
|
|
self._map = {}
|
2007-11-05 20:05:44 +03:00
|
|
|
if "_dirs" in self.__dict__:
|
2010-02-08 17:36:34 +03:00
|
|
|
delattr(self, "_dirs")
|
2007-08-06 06:04:56 +04:00
|
|
|
self._copymap = {}
|
|
|
|
self._pl = [nullid, nullid]
|
2011-03-23 13:22:29 +03:00
|
|
|
self._lastnormaltime = None
|
2007-08-06 08:00:10 +04:00
|
|
|
self._dirty = True
|
2007-08-06 06:04:56 +04:00
|
|
|
|
2006-02-20 21:04:56 +03:00
|
|
|
def rebuild(self, parent, files):
|
2007-08-06 06:04:56 +04:00
|
|
|
self.clear()
|
2006-07-16 12:14:17 +04:00
|
|
|
for f in files:
|
2008-06-26 23:35:50 +04:00
|
|
|
if 'x' in files.flags(f):
|
2008-10-13 00:21:08 +04:00
|
|
|
self._map[f] = ('n', 0777, -1, 0)
|
2006-02-20 21:04:56 +03:00
|
|
|
else:
|
2008-10-13 00:21:08 +04:00
|
|
|
self._map[f] = ('n', 0666, -1, 0)
|
2007-06-18 22:24:34 +04:00
|
|
|
self._pl = (parent, nullid)
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
    def write(self):
        '''Serialize _pl, _map and _copymap to the dirstate file.

        No-op when nothing is dirty. Writes atomically via a temp file.
        '''
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = int(util.fstat(st).st_mtime)

        # build the whole image in memory first, then write it in one go
        cs = cStringIO.StringIO()
        copymap = self._copymap
        pack = struct.pack
        write = cs.write
        # header: both parent nodes, 20 raw bytes each
        write("".join(self._pl))
        for f, e in self._map.iteritems():
            if e[0] == 'n' and e[3] == now:
                # The file was last modified "simultaneously" with the current
                # write to dirstate (i.e. within the same second for file-
                # systems with a granularity of 1 sec). This commonly happens
                # for at least a couple of files on 'update'.
                # The user could change the file without changing its size
                # within the same second. Invalidate the file's stat data in
                # dirstate, forcing future 'status' calls to compare the
                # contents of the file. This prevents mistakenly treating such
                # files as clean.
                e = (e[0], 0, -1, -1) # mark entry as 'unset'
                self._map[f] = e

            if f in copymap:
                # copies are stored inline as "dest\0source"
                f = "%s\0%s" % (f, copymap[f])
            e = pack(_format, e[0], e[1], e[2], e[3], len(f))
            write(e)
            write(f)
        st.write(cs.getvalue())
        st.rename()
        self._lastnormaltime = None
        self._dirty = self._dirtypl = False
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-02-08 23:07:55 +03:00
|
|
|
def _dirignore(self, f):
|
2008-04-05 20:15:04 +04:00
|
|
|
if f == '.':
|
|
|
|
return False
|
2008-02-08 23:07:55 +03:00
|
|
|
if self._ignore(f):
|
|
|
|
return True
|
2008-07-12 03:46:02 +04:00
|
|
|
for p in _finddirs(f):
|
|
|
|
if self._ignore(p):
|
2008-02-08 23:07:55 +03:00
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
2010-01-01 02:19:30 +03:00
|
|
|
def walk(self, match, subrepos, unknown, ignored):
|
2006-10-27 08:54:24 +04:00
|
|
|
'''
|
2009-10-01 23:36:45 +04:00
|
|
|
Walk recursively through the directory tree, finding all files
|
|
|
|
matched by match.
|
2006-10-27 08:54:24 +04:00
|
|
|
|
2009-10-01 23:36:45 +04:00
|
|
|
Return a dict mapping filename to stat-like object (either
|
|
|
|
mercurial.osutil.stat instance or return value of os.stat()).
|
2006-10-27 08:54:24 +04:00
|
|
|
'''
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-05-12 20:37:07 +04:00
|
|
|
def fwarn(f, msg):
|
2008-09-11 00:54:28 +04:00
|
|
|
self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
|
2008-05-12 20:37:07 +04:00
|
|
|
return False
|
|
|
|
|
2009-06-01 02:54:18 +04:00
|
|
|
def badtype(mode):
|
2009-05-08 09:54:00 +04:00
|
|
|
kind = _('unknown')
|
2010-01-25 09:05:27 +03:00
|
|
|
if stat.S_ISCHR(mode):
|
|
|
|
kind = _('character device')
|
|
|
|
elif stat.S_ISBLK(mode):
|
|
|
|
kind = _('block device')
|
|
|
|
elif stat.S_ISFIFO(mode):
|
|
|
|
kind = _('fifo')
|
|
|
|
elif stat.S_ISSOCK(mode):
|
|
|
|
kind = _('socket')
|
|
|
|
elif stat.S_ISDIR(mode):
|
|
|
|
kind = _('directory')
|
2009-06-01 02:54:18 +04:00
|
|
|
return _('unsupported file type (type is %s)') % kind
|
2008-07-22 22:03:23 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
ignore = self._ignore
|
2008-02-08 23:07:55 +03:00
|
|
|
dirignore = self._dirignore
|
2007-03-11 05:00:54 +03:00
|
|
|
if ignored:
|
|
|
|
ignore = util.never
|
2008-02-08 23:07:55 +03:00
|
|
|
dirignore = util.never
|
2008-09-02 17:08:26 +04:00
|
|
|
elif not unknown:
|
|
|
|
# if unknown and ignored are False, skip step 2
|
|
|
|
ignore = util.always
|
|
|
|
dirignore = util.always
|
2006-10-27 21:09:33 +04:00
|
|
|
|
2008-07-22 22:03:31 +04:00
|
|
|
matchfn = match.matchfn
|
2009-06-01 02:54:18 +04:00
|
|
|
badfn = match.bad
|
2008-07-22 22:03:24 +04:00
|
|
|
dmap = self._map
|
2007-07-26 21:02:58 +04:00
|
|
|
normpath = util.normpath
|
2007-10-06 02:01:06 +04:00
|
|
|
listdir = osutil.listdir
|
2007-07-26 21:02:58 +04:00
|
|
|
lstat = os.lstat
|
2008-07-22 22:03:23 +04:00
|
|
|
getkind = stat.S_IFMT
|
2008-07-22 22:03:20 +04:00
|
|
|
dirkind = stat.S_IFDIR
|
2008-07-22 22:03:23 +04:00
|
|
|
regkind = stat.S_IFREG
|
|
|
|
lnkkind = stat.S_IFLNK
|
2008-07-22 22:03:24 +04:00
|
|
|
join = self._join
|
2008-07-22 22:03:10 +04:00
|
|
|
work = []
|
|
|
|
wadd = work.append
|
|
|
|
|
2009-06-01 02:54:18 +04:00
|
|
|
exact = skipstep3 = False
|
|
|
|
if matchfn == match.exact: # match.exact
|
2009-06-01 02:54:18 +04:00
|
|
|
exact = True
|
2009-06-01 02:54:18 +04:00
|
|
|
dirignore = util.always # skip step 2
|
|
|
|
elif match.files() and not match.anypats(): # match.match, no patterns
|
|
|
|
skipstep3 = True
|
2009-05-14 21:54:26 +04:00
|
|
|
|
2010-11-01 22:18:42 +03:00
|
|
|
if self._checkcase:
|
|
|
|
normalize = self._normalize
|
|
|
|
skipstep3 = False
|
|
|
|
else:
|
|
|
|
normalize = lambda x, y: x
|
|
|
|
|
2010-09-11 01:53:51 +04:00
|
|
|
files = sorted(match.files())
|
|
|
|
subrepos.sort()
|
|
|
|
i, j = 0, 0
|
|
|
|
while i < len(files) and j < len(subrepos):
|
|
|
|
subpath = subrepos[j] + "/"
|
2011-01-04 14:53:11 +03:00
|
|
|
if files[i] < subpath:
|
2010-09-11 01:53:51 +04:00
|
|
|
i += 1
|
|
|
|
continue
|
2011-02-04 11:05:23 +03:00
|
|
|
while i < len(files) and files[i].startswith(subpath):
|
2010-09-11 01:53:51 +04:00
|
|
|
del files[i]
|
|
|
|
j += 1
|
|
|
|
|
2008-07-22 22:03:24 +04:00
|
|
|
if not files or '.' in files:
|
|
|
|
files = ['']
|
2010-01-01 02:19:30 +03:00
|
|
|
results = dict.fromkeys(subrepos)
|
|
|
|
results['.hg'] = None
|
2007-07-26 21:02:58 +04:00
|
|
|
|
2008-07-22 22:03:18 +04:00
|
|
|
# step 1: find all explicit files
|
2010-09-11 01:53:51 +04:00
|
|
|
for ff in files:
|
2009-06-14 00:42:16 +04:00
|
|
|
nf = normalize(normpath(ff), False)
|
2008-07-22 22:03:21 +04:00
|
|
|
if nf in results:
|
2008-07-22 22:03:10 +04:00
|
|
|
continue
|
|
|
|
|
|
|
|
try:
|
2008-07-22 22:03:24 +04:00
|
|
|
st = lstat(join(nf))
|
2008-07-22 22:03:23 +04:00
|
|
|
kind = getkind(st.st_mode)
|
|
|
|
if kind == dirkind:
|
2009-06-01 02:54:18 +04:00
|
|
|
skipstep3 = False
|
2009-05-14 12:50:45 +04:00
|
|
|
if nf in dmap:
|
2009-05-28 01:39:41 +04:00
|
|
|
#file deleted on disk but still in dirstate
|
2009-05-14 12:50:45 +04:00
|
|
|
results[nf] = None
|
2009-06-02 05:25:01 +04:00
|
|
|
match.dir(nf)
|
2008-07-22 22:03:20 +04:00
|
|
|
if not dirignore(nf):
|
|
|
|
wadd(nf)
|
2010-09-24 21:46:54 +04:00
|
|
|
elif kind == regkind or kind == lnkkind:
|
2008-07-22 22:03:23 +04:00
|
|
|
results[nf] = st
|
2008-07-22 22:03:20 +04:00
|
|
|
else:
|
2009-06-01 02:54:18 +04:00
|
|
|
badfn(ff, badtype(kind))
|
2008-07-22 22:03:23 +04:00
|
|
|
if nf in dmap:
|
2008-07-22 22:03:21 +04:00
|
|
|
results[nf] = None
|
2008-07-22 22:03:10 +04:00
|
|
|
except OSError, inst:
|
2009-06-01 02:54:18 +04:00
|
|
|
if nf in dmap: # does it exactly match a file?
|
|
|
|
results[nf] = None
|
|
|
|
else: # does it match a directory?
|
|
|
|
prefix = nf + "/"
|
|
|
|
for fn in dmap:
|
|
|
|
if fn.startswith(prefix):
|
2009-06-02 05:25:01 +04:00
|
|
|
match.dir(nf)
|
2009-06-01 02:54:18 +04:00
|
|
|
skipstep3 = False
|
2009-06-01 02:54:18 +04:00
|
|
|
break
|
2009-06-01 02:54:18 +04:00
|
|
|
else:
|
2009-06-01 02:54:18 +04:00
|
|
|
badfn(ff, inst.strerror)
|
2008-07-22 22:03:10 +04:00
|
|
|
|
2008-07-22 22:03:18 +04:00
|
|
|
# step 2: visit subdirectories
|
|
|
|
while work:
|
|
|
|
nd = work.pop()
|
2008-10-15 16:52:27 +04:00
|
|
|
skip = None
|
2008-07-22 22:03:18 +04:00
|
|
|
if nd == '.':
|
|
|
|
nd = ''
|
|
|
|
else:
|
2008-10-15 16:52:27 +04:00
|
|
|
skip = '.hg'
|
|
|
|
try:
|
|
|
|
entries = listdir(join(nd), stat=True, skip=skip)
|
|
|
|
except OSError, inst:
|
|
|
|
if inst.errno == errno.EACCES:
|
|
|
|
fwarn(nd, inst.strerror)
|
|
|
|
continue
|
|
|
|
raise
|
2008-07-22 22:03:18 +04:00
|
|
|
for f, kind, st in entries:
|
2008-10-01 01:23:08 +04:00
|
|
|
nf = normalize(nd and (nd + "/" + f) or f, True)
|
2008-07-22 22:03:21 +04:00
|
|
|
if nf not in results:
|
2008-07-22 22:03:20 +04:00
|
|
|
if kind == dirkind:
|
|
|
|
if not ignore(nf):
|
2009-06-02 05:25:01 +04:00
|
|
|
match.dir(nf)
|
2008-07-22 22:03:20 +04:00
|
|
|
wadd(nf)
|
2008-07-22 22:03:31 +04:00
|
|
|
if nf in dmap and matchfn(nf):
|
2008-07-22 22:03:21 +04:00
|
|
|
results[nf] = None
|
2008-07-22 22:03:25 +04:00
|
|
|
elif kind == regkind or kind == lnkkind:
|
|
|
|
if nf in dmap:
|
2008-07-22 22:03:31 +04:00
|
|
|
if matchfn(nf):
|
2008-07-22 22:03:25 +04:00
|
|
|
results[nf] = st
|
2008-07-22 22:03:31 +04:00
|
|
|
elif matchfn(nf) and not ignore(nf):
|
2008-07-22 22:03:21 +04:00
|
|
|
results[nf] = st
|
2008-07-22 22:03:31 +04:00
|
|
|
elif nf in dmap and matchfn(nf):
|
2008-07-22 22:03:25 +04:00
|
|
|
results[nf] = None
|
2008-07-22 22:03:18 +04:00
|
|
|
|
|
|
|
# step 3: report unseen items in the dmap hash
|
2009-06-01 02:54:18 +04:00
|
|
|
if not skipstep3 and not exact:
|
2009-05-14 21:54:26 +04:00
|
|
|
visit = sorted([f for f in dmap if f not in results and matchfn(f)])
|
|
|
|
for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
|
|
|
|
if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
|
|
|
|
st = None
|
|
|
|
results[nf] = st
|
2010-01-01 02:19:30 +03:00
|
|
|
for s in subrepos:
|
|
|
|
del results[s]
|
2008-07-22 22:03:24 +04:00
|
|
|
del results['.hg']
|
2008-07-22 22:03:21 +04:00
|
|
|
return results
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2010-01-01 02:19:30 +03:00
|
|
|
def status(self, match, subrepos, ignored, clean, unknown):
    '''Determine the status of the working copy relative to the
    dirstate and return a tuple of lists (unsure, modified, added,
    removed, deleted, unknown, ignored, clean), where:

    unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)
    modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)
    added:
        files that have been explicitly added with hg add
    removed:
        files that have been explicitly removed with hg remove
    deleted:
        files that have been deleted through other means ("missing")
    unknown:
        files not in the dirstate that are not ignored
    ignored:
        files not in the dirstate that are ignored
        (by _dirignore())
    clean:
        files that have definitely not been modified since the
        dirstate was written
    '''
    # Rebind the boolean flags under list* names so the plain names can
    # be reused below for the result lists of the same category.
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    # Local aliases: avoid repeated attribute lookups inside the walk
    # loop below, which runs once per file in the working directory.
    dmap = self._map
    ladd = lookup.append            # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append
    iadd = ignored.append
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append

    # Symlink mode bit, used to decide whether mode/mtime comparisons
    # are meaningful (see the islink => checklink expansion below).
    lnkkind = stat.S_IFLNK

    # walk() yields (filename, stat-result-or-None) for every file of
    # interest; classify each one into exactly one result list.
    for fn, st in self.walk(match, subrepos, listunknown,
                            listignored).iteritems():
        if fn not in dmap:
            # Not tracked: it is either ignored or unknown.
            if (listignored or match.exact(fn)) and self._dirignore(fn):
                if listignored:
                    iadd(fn)
            elif listunknown:
                uadd(fn)
            continue

        # Tracked file: compare the on-disk stat against the recorded
        # dirstate entry (state char, mode, size, mtime).
        state, mode, size, time = dmap[fn]

        if not st and state in "nma":
            # Tracked (normal/modified/added) but gone from disk.
            dadd(fn)
        elif state == 'n':
            # The "mode & lnkkind != lnkkind or self._checklink"
            # lines are an expansion of "islink => checklink"
            # where islink means "is this a link?" and checklink
            # means "can we check links?".
            mtime = int(st.st_mtime)
            if (size >= 0 and
                (size != st.st_size
                 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
                and (mode & lnkkind != lnkkind or self._checklink)
                or size == -2 # other parent
                or fn in self._copymap):
                # Size or exec-bit change (where checkable), a file
                # taken from the other merge parent, or a recorded
                # copy: definitely modified.
                madd(fn)
            elif (mtime != time
                  and (mode & lnkkind != lnkkind or self._checklink)):
                # Same size but different mtime: content comparison
                # needed to be sure.
                ladd(fn)
            elif mtime == self._lastnormaltime:
                # fn may have been changed in the same timeslot without
                # changing its size. This can happen if we quickly do
                # multiple commits in a single transaction.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
        elif state == 'm':
            madd(fn)
        elif state == 'a':
            aadd(fn)
        elif state == 'r':
            radd(fn)

    return (lookup, modified, added, removed, deleted, unknown, ignored,
            clean)
|