"""
dirstate.py - working directory tracking for mercurial

Copyright 2005-2007 Matt Mackall <mpm@selenic.com>

This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
|
|
|
|
|
2008-03-07 00:23:26 +03:00
|
|
|
from node import nullid
|
2006-12-15 05:25:19 +03:00
|
|
|
from i18n import _
|
2008-03-07 00:23:41 +03:00
|
|
|
import struct, os, bisect, stat, strutil, util, errno, ignore
|
2008-03-19 23:55:21 +03:00
|
|
|
import cStringIO, osutil, sys
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
_unknown = ('?', 0, 0, 0)
|
|
|
|
_format = ">cllll"
|
|
|
|
|
2005-11-19 09:48:47 +03:00
|
|
|
class dirstate(object):
|
2006-06-04 04:25:27 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def __init__(self, opener, ui, root):
|
2007-06-18 22:24:34 +04:00
|
|
|
self._opener = opener
|
|
|
|
self._root = root
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = False
|
2007-07-22 01:44:38 +04:00
|
|
|
self._dirtypl = False
|
2007-06-18 22:24:34 +04:00
|
|
|
self._ui = ui
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:33 +04:00
|
|
|
def __getattr__(self, name):
|
2007-06-18 22:24:34 +04:00
|
|
|
if name == '_map':
|
2007-06-18 22:24:34 +04:00
|
|
|
self._read()
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._map
|
|
|
|
elif name == '_copymap':
|
2007-06-18 22:24:34 +04:00
|
|
|
self._read()
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._copymap
|
2008-06-06 22:23:29 +04:00
|
|
|
elif name == '_foldmap':
|
|
|
|
_foldmap = {}
|
|
|
|
for name in self._map:
|
|
|
|
norm = os.path.normcase(os.path.normpath(name))
|
|
|
|
_foldmap[norm] = name
|
|
|
|
self._foldmap = _foldmap
|
|
|
|
return self._foldmap
|
2007-06-18 22:24:33 +04:00
|
|
|
elif name == '_branch':
|
|
|
|
try:
|
2007-06-19 10:06:37 +04:00
|
|
|
self._branch = (self._opener("branch").read().strip()
|
|
|
|
or "default")
|
2007-06-18 22:24:33 +04:00
|
|
|
except IOError:
|
|
|
|
self._branch = "default"
|
|
|
|
return self._branch
|
2007-06-18 22:24:34 +04:00
|
|
|
elif name == '_pl':
|
|
|
|
self._pl = [nullid, nullid]
|
2007-06-18 22:24:33 +04:00
|
|
|
try:
|
2007-06-18 22:24:34 +04:00
|
|
|
st = self._opener("dirstate").read(40)
|
2007-06-18 22:24:33 +04:00
|
|
|
if len(st) == 40:
|
2007-06-18 22:24:34 +04:00
|
|
|
self._pl = st[:20], st[20:40]
|
2007-06-18 22:24:33 +04:00
|
|
|
except IOError, err:
|
|
|
|
if err.errno != errno.ENOENT: raise
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._pl
|
2007-06-18 22:24:34 +04:00
|
|
|
elif name == '_dirs':
|
|
|
|
self._dirs = {}
|
2007-06-18 22:24:34 +04:00
|
|
|
for f in self._map:
|
2007-11-05 20:05:44 +03:00
|
|
|
if self[f] != 'r':
|
|
|
|
self._incpath(f)
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._dirs
|
2007-06-18 22:24:34 +04:00
|
|
|
elif name == '_ignore':
|
2007-07-22 01:02:09 +04:00
|
|
|
files = [self._join('.hgignore')]
|
2007-06-18 22:24:34 +04:00
|
|
|
for name, path in self._ui.configitems("ui"):
|
|
|
|
if name == 'ignore' or name.startswith('ignore.'):
|
|
|
|
files.append(os.path.expanduser(path))
|
2007-06-18 22:24:34 +04:00
|
|
|
self._ignore = ignore.ignore(self._root, files, self._ui.warn)
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._ignore
|
2007-06-18 22:24:34 +04:00
|
|
|
elif name == '_slash':
|
2007-06-18 22:24:34 +04:00
|
|
|
self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._slash
|
2008-06-26 22:46:34 +04:00
|
|
|
elif name == '_checklink':
|
|
|
|
self._checklink = util.checklink(self._root)
|
|
|
|
return self._checklink
|
2008-03-14 15:56:58 +03:00
|
|
|
elif name == '_checkexec':
|
|
|
|
self._checkexec = util.checkexec(self._root)
|
|
|
|
return self._checkexec
|
2008-06-26 22:58:24 +04:00
|
|
|
elif name == '_checkcase':
|
|
|
|
self._checkcase = not util.checkcase(self._join('.hg'))
|
|
|
|
return self._checkcase
|
2008-06-06 22:23:29 +04:00
|
|
|
elif name == 'normalize':
|
2008-06-26 22:58:24 +04:00
|
|
|
if self._checkcase:
|
2008-06-06 22:23:29 +04:00
|
|
|
self.normalize = self._normalize
|
|
|
|
else:
|
|
|
|
self.normalize = lambda x: x
|
|
|
|
return self.normalize
|
2007-06-18 22:24:33 +04:00
|
|
|
else:
|
|
|
|
raise AttributeError, name
|
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def _join(self, f):
|
2007-06-18 22:24:34 +04:00
|
|
|
return os.path.join(self._root, f)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-06-26 22:46:34 +04:00
|
|
|
def flagfunc(self, fallback):
|
|
|
|
if self._checklink:
|
|
|
|
if self._checkexec:
|
|
|
|
def f(x):
|
|
|
|
p = os.path.join(self._root, x)
|
|
|
|
if os.path.islink(p):
|
|
|
|
return 'l'
|
|
|
|
if util.is_exec(p):
|
|
|
|
return 'x'
|
|
|
|
return ''
|
|
|
|
return f
|
|
|
|
def f(x):
|
|
|
|
if os.path.islink(os.path.join(self._root, x)):
|
|
|
|
return 'l'
|
|
|
|
if 'x' in fallback(x):
|
|
|
|
return 'x'
|
|
|
|
return ''
|
|
|
|
return f
|
|
|
|
if self._checkexec:
|
|
|
|
def f(x):
|
|
|
|
if 'l' in fallback(x):
|
|
|
|
return 'l'
|
|
|
|
if util.is_exec(os.path.join(self._root, x)):
|
|
|
|
return 'x'
|
|
|
|
return ''
|
|
|
|
return f
|
|
|
|
return fallback
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def getcwd(self):
    """Return the cwd relative to the repo root, '' when at the root,
    or the absolute cwd when outside the repository."""
    cwd = os.getcwd()
    if cwd == self._root:
        return ''
    # self._root ends with a path separator if self._root is '/' or 'C:\'
    rootsep = self._root
    if not util.endswithsep(rootsep):
        rootsep += os.sep
    if cwd.startswith(rootsep):
        return cwd[len(rootsep):]
    # we're outside the repo. return an absolute path.
    return cwd
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-09 06:49:12 +04:00
|
|
|
def pathto(self, f, cwd=None):
    """Return f expressed relative to cwd (defaults to self.getcwd()),
    using '/' separators when the ui.slash option is set."""
    if cwd is None:
        cwd = self.getcwd()
    path = util.pathto(self._root, cwd, f)
    if self._slash:
        return util.normpath(path)
    return path
|
2007-06-09 06:49:12 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def __getitem__(self, key):
|
2007-07-22 01:02:09 +04:00
|
|
|
''' current states:
|
|
|
|
n normal
|
|
|
|
m needs merging
|
|
|
|
r marked for removal
|
|
|
|
a marked for addition
|
|
|
|
? not tracked'''
|
|
|
|
return self._map.get(key, ("?",))[0]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def __contains__(self, key):
|
2007-06-18 22:24:34 +04:00
|
|
|
return key in self._map
|
|
|
|
|
|
|
|
def __iter__(self):
|
|
|
|
a = self._map.keys()
|
|
|
|
a.sort()
|
|
|
|
for x in a:
|
|
|
|
yield x
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def parents(self):
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._pl
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-03-14 02:50:02 +03:00
|
|
|
def branch(self):
|
|
|
|
return self._branch
|
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def setparents(self, p1, p2=nullid):
    """Set the working directory parents; marks both the map and the
    parents as needing a write."""
    self._dirty = self._dirtypl = True
    self._pl = p1, p2
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-03-14 02:50:02 +03:00
|
|
|
def setbranch(self, branch):
|
|
|
|
self._branch = branch
|
2007-06-18 22:24:34 +04:00
|
|
|
self._opener("branch", "w").write(branch + '\n')
|
2007-03-14 02:50:02 +03:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
def _read(self):
|
2007-06-18 22:24:34 +04:00
|
|
|
self._map = {}
|
|
|
|
self._copymap = {}
|
2007-07-20 02:43:25 +04:00
|
|
|
if not self._dirtypl:
|
|
|
|
self._pl = [nullid, nullid]
|
2007-06-18 22:24:33 +04:00
|
|
|
try:
|
2007-06-18 22:24:34 +04:00
|
|
|
st = self._opener("dirstate").read()
|
2007-06-18 22:24:33 +04:00
|
|
|
except IOError, err:
|
|
|
|
if err.errno != errno.ENOENT: raise
|
|
|
|
return
|
|
|
|
if not st:
|
|
|
|
return
|
|
|
|
|
2007-07-20 02:43:25 +04:00
|
|
|
if not self._dirtypl:
|
|
|
|
self._pl = [st[:20], st[20: 40]]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-06-13 09:09:50 +04:00
|
|
|
# deref fields so they will be local in loop
|
2007-06-18 22:24:34 +04:00
|
|
|
dmap = self._map
|
|
|
|
copymap = self._copymap
|
2006-06-13 09:09:50 +04:00
|
|
|
unpack = struct.unpack
|
2007-06-18 22:24:34 +04:00
|
|
|
e_size = struct.calcsize(_format)
|
2007-09-24 21:41:54 +04:00
|
|
|
pos1 = 40
|
|
|
|
l = len(st)
|
|
|
|
|
|
|
|
# the inner loop
|
|
|
|
while pos1 < l:
|
|
|
|
pos2 = pos1 + e_size
|
|
|
|
e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
|
|
|
|
pos1 = pos2 + e[4]
|
|
|
|
f = st[pos2:pos1]
|
2005-08-28 01:21:25 +04:00
|
|
|
if '\0' in f:
|
|
|
|
f, c = f.split('\0')
|
2006-09-26 02:53:17 +04:00
|
|
|
copymap[f] = c
|
2007-09-24 21:41:54 +04:00
|
|
|
dmap[f] = e # we hold onto e[4] because making a subtuple is slow
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
def invalidate(self):
|
2008-06-06 22:23:29 +04:00
|
|
|
for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
|
2007-07-20 02:43:25 +04:00
|
|
|
if a in self.__dict__:
|
|
|
|
delattr(self, a)
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = False
|
2007-04-24 23:02:51 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
def copy(self, source, dest):
|
2008-06-15 15:01:03 +04:00
|
|
|
if source == dest:
|
|
|
|
return
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2007-06-18 22:24:34 +04:00
|
|
|
self._copymap[dest] = source
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def copied(self, file):
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._copymap.get(file, None)
|
2006-09-26 02:53:17 +04:00
|
|
|
|
|
|
|
def copies(self):
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._copymap
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
def _incpath(self, path):
|
2007-09-24 21:36:38 +04:00
|
|
|
c = path.rfind('/')
|
|
|
|
if c >= 0:
|
|
|
|
dirs = self._dirs
|
|
|
|
base = path[:c]
|
|
|
|
if base not in dirs:
|
|
|
|
self._incpath(base)
|
|
|
|
dirs[base] = 1
|
|
|
|
else:
|
|
|
|
dirs[base] += 1
|
2007-06-18 22:24:34 +04:00
|
|
|
|
|
|
|
def _decpath(self, path):
|
2007-11-08 00:57:28 +03:00
|
|
|
c = path.rfind('/')
|
|
|
|
if c >= 0:
|
|
|
|
base = path[:c]
|
|
|
|
dirs = self._dirs
|
|
|
|
if dirs[base] == 1:
|
|
|
|
del dirs[base]
|
|
|
|
self._decpath(base)
|
|
|
|
else:
|
|
|
|
dirs[base] -= 1
|
2006-08-19 08:03:29 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
def _incpathcheck(self, f):
    """Like _incpath, but abort when f would clash with an existing
    tracked directory or file, or contains illegal characters."""
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                         % f)
    # a tracked directory shadows the new file outright
    if f in self._dirs:
        raise util.Abort(_('directory %r already in dirstate') % f)
    # walk prefixes from longest to shortest
    for idx in strutil.rfindall(f, '/'):
        prefix = f[:idx]
        if prefix in self._dirs:
            # everything above this point is already a known directory
            break
        if prefix in self._map and self[prefix] != 'r':
            raise util.Abort(_('file %r in dirstate clashes with %r') %
                             (prefix, f))
    self._incpath(f)
|
2006-08-19 08:03:29 +04:00
|
|
|
|
2007-11-08 00:57:28 +03:00
|
|
|
def _changepath(self, f, newstate, relaxed=False):
    """Keep the _dirs bookkeeping consistent with an upcoming state
    change of f from its current state to newstate."""
    oldstate = self[f]
    tracked_before = oldstate not in "?r"
    tracked_after = newstate not in "?r"
    if tracked_before and not tracked_after:
        # f stops occupying its path
        if "_dirs" in self.__dict__:
            self._decpath(f)
        return
    if not tracked_before and tracked_after:
        if relaxed and oldstate == '?':
            # XXX
            # in relaxed mode we assume the caller knows
            # what it is doing, workaround for updating
            # dir-to-file revisions
            if "_dirs" in self.__dict__:
                self._incpath(f)
            return
        self._incpathcheck(f)
        return
|
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def normal(self, f):
    'mark a file normal and clean'
    self._dirty = True
    self._changepath(f, 'n', True)
    # record current stat data so status can skip a content compare
    st = os.lstat(self._join(f))
    self._map[f] = ('n', st.st_mode, st.st_size, st.st_mtime, 0)
    self._copymap.pop(f, None)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
merge: forcefully mark files that we get from the second parent as dirty
After a hg merge, we want to include in the commit all the files that we
got from the second parent, so that we have the correct file-level
history. To make them visible to hg commit, we try to mark them as dirty.
Unfortunately, right now we can't really mark them as dirty[1] - the
best we can do is to mark them as needing a full comparison of their
contents, but they will still be considered clean if they happen to be
identical to the version in the first parent.
This changeset extends the dirstate format in a compatible way, so that
we can mark a file as dirty:
Right now we use a negative file size to indicate we don't have valid
stat data for this entry. In practice, this size is always -1.
This patch uses -2 to indicate that the entry is dirty. Older versions
of hg won't choke on this dirstate, but they may happily mark the file
as clean after a full comparison, destroying all of our hard work.
The patch adds a dirstate.normallookup method with the semantics of the
current normaldirty, and changes normaldirty to forcefully mark the
entry as dirty.
This should fix issue522.
[1] - well, we could put them in state 'm', but that state has a
different meaning.
2007-08-23 08:48:29 +04:00
|
|
|
def normallookup(self, f):
    'mark a file normal, but possibly dirty'
    if self._pl[1] != nullid and f in self._map:
        # if there is a merge going on and the file was either
        # in state 'm' or dirty before being removed, restore that state.
        # (size -1 in 'r' meant merged; -2 meant dirty -- see remove())
        e = self._map[f]
        if e[0] == 'r' and e[2] in (-1, -2):
            source = self._copymap.get(f)
            if e[2] == -1:
                self.merge(f)
            elif e[2] == -2:
                self.normaldirty(f)
            if source:
                self.copy(source, f)
            return
        if e[0] == 'm' or (e[0] == 'n' and e[2] == -2):
            return
    self._dirty = True
    self._changepath(f, 'n', True)
    # size/time of -1 force a full content comparison in status()
    self._map[f] = ('n', 0, -1, -1, 0)
    self._copymap.pop(f, None)
|
|
|
|
|
|
|
|
def normaldirty(self, f):
    'mark a file normal, but dirty'
    self._dirty = True
    self._changepath(f, 'n', True)
    # size -2 forcefully marks the entry dirty (see normallookup)
    self._map[f] = ('n', 0, -2, -1, 0)
    self._copymap.pop(f, None)
|
|
|
|
|
|
|
|
def add(self, f):
    'mark a file added'
    self._dirty = True
    self._changepath(f, 'a')
    self._map[f] = ('a', 0, -1, -1, 0)
    self._copymap.pop(f, None)
|
|
|
|
|
|
|
|
def remove(self, f):
    'mark a file removed'
    self._dirty = True
    self._changepath(f, 'r')
    size = 0
    if self._pl[1] != nullid and f in self._map:
        # during a merge, preserve the merged/dirty marker in the size
        # field so normallookup can restore the state later
        e = self._map[f]
        if e[0] == 'm':
            size = -1
        elif e[0] == 'n' and e[2] == -2:
            size = -2
    self._map[f] = ('r', 0, size, 0, 0)
    if size == 0 and f in self._copymap:
        del self._copymap[f]
|
|
|
|
|
|
|
|
def merge(self, f):
    'mark a file merged'
    self._dirty = True
    st = os.lstat(self._join(f))
    self._changepath(f, 'm', True)
    self._map[f] = ('m', st.st_mode, st.st_size, st.st_mtime, 0)
    self._copymap.pop(f, None)
|
|
|
|
|
|
|
|
def forget(self, f):
    'forget a file'
    self._dirty = True
    try:
        self._changepath(f, '?')
        del self._map[f]
    except KeyError:
        # forgetting an untracked file is only worth a warning
        self._ui.warn(_("not in dirstate: %s\n") % f)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-06-06 22:23:29 +04:00
|
|
|
def _normalize(self, path):
|
|
|
|
normpath = os.path.normcase(os.path.normpath(path))
|
|
|
|
if normpath in self._foldmap:
|
|
|
|
return self._foldmap[normpath]
|
|
|
|
elif os.path.exists(path):
|
|
|
|
self._foldmap[normpath] = util.fspath(path, self._root)
|
|
|
|
return self._foldmap[normpath]
|
|
|
|
else:
|
|
|
|
return path
|
|
|
|
|
2007-08-06 06:04:56 +04:00
|
|
|
def clear(self):
    """Forget everything tracked, leaving an empty dirty dirstate
    with null parents."""
    self._map = {}
    if "_dirs" in self.__dict__:
        delattr(self, "_dirs")
    self._copymap = {}
    self._pl = [nullid, nullid]
    self._dirty = True
|
2007-08-06 06:04:56 +04:00
|
|
|
|
2006-02-20 21:04:56 +03:00
|
|
|
def rebuild(self, parent, files):
|
2007-08-06 06:04:56 +04:00
|
|
|
self.clear()
|
2006-07-16 12:14:17 +04:00
|
|
|
for f in files:
|
2008-06-26 23:35:50 +04:00
|
|
|
if 'x' in files.flags(f):
|
2007-09-24 21:41:54 +04:00
|
|
|
self._map[f] = ('n', 0777, -1, 0, 0)
|
2006-02-20 21:04:56 +03:00
|
|
|
else:
|
2007-09-24 21:41:54 +04:00
|
|
|
self._map[f] = ('n', 0666, -1, 0, 0)
|
2007-06-18 22:24:34 +04:00
|
|
|
self._pl = (parent, nullid)
|
2007-07-22 01:02:09 +04:00
|
|
|
self._dirty = True
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
def write(self):
    """Serialize the dirstate to disk, if anything changed."""
    if not self._dirty:
        return
    st = self._opener("dirstate", "w", atomictemp=True)

    try:
        gran = int(self._ui.config('dirstate', 'granularity', 1))
    except ValueError:
        gran = 1
    limit = sys.maxint
    if gran > 0:
        # timestamps within `gran` seconds of the write are suspect
        limit = util.fstat(st).st_mtime - gran

    buf = cStringIO.StringIO()
    copymap = self._copymap
    pack = struct.pack
    write = buf.write
    write("".join(self._pl))
    for fname, e in self._map.iteritems():
        if fname in copymap:
            # copy source is appended after a NUL separator
            fname = "%s\0%s" % (fname, copymap[fname])
        if e[3] > limit and e[0] == 'n':
            # mtime too fresh to be trustworthy: force a lookup next time
            e = (e[0], 0, -1, -1, 0)
        write(pack(_format, e[0], e[1], e[2], e[3], len(fname)))
        write(fname)
    st.write(buf.getvalue())
    st.rename()
    self._dirty = self._dirtypl = False
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-07-22 01:02:09 +04:00
|
|
|
def _filter(self, files):
|
2005-08-28 01:21:25 +04:00
|
|
|
ret = {}
|
|
|
|
unknown = []
|
|
|
|
|
|
|
|
for x in files:
|
2005-11-14 04:59:35 +03:00
|
|
|
if x == '.':
|
2007-06-18 22:24:34 +04:00
|
|
|
return self._map.copy()
|
|
|
|
if x not in self._map:
|
2005-08-28 01:21:25 +04:00
|
|
|
unknown.append(x)
|
|
|
|
else:
|
2007-06-18 22:24:34 +04:00
|
|
|
ret[x] = self._map[x]
|
2005-08-28 01:21:25 +04:00
|
|
|
|
|
|
|
if not unknown:
|
|
|
|
return ret
|
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
b = self._map.keys()
|
2005-08-28 01:21:25 +04:00
|
|
|
b.sort()
|
|
|
|
blen = len(b)
|
|
|
|
|
|
|
|
for x in unknown:
|
2006-06-23 10:09:48 +04:00
|
|
|
bs = bisect.bisect(b, "%s%s" % (x, '/'))
|
2005-08-28 01:21:25 +04:00
|
|
|
while bs < blen:
|
|
|
|
s = b[bs]
|
2006-06-23 02:11:53 +04:00
|
|
|
if len(s) > len(x) and s.startswith(x):
|
2007-06-18 22:24:34 +04:00
|
|
|
ret[s] = self._map[s]
|
2005-08-28 01:21:25 +04:00
|
|
|
else:
|
|
|
|
break
|
|
|
|
bs += 1
|
|
|
|
return ret
|
|
|
|
|
2007-07-26 21:02:58 +04:00
|
|
|
def _supported(self, f, mode, verbose=False):
|
|
|
|
if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
|
2005-11-03 02:46:31 +03:00
|
|
|
return True
|
|
|
|
if verbose:
|
|
|
|
kind = 'unknown'
|
2007-07-26 21:02:58 +04:00
|
|
|
if stat.S_ISCHR(mode): kind = _('character device')
|
|
|
|
elif stat.S_ISBLK(mode): kind = _('block device')
|
|
|
|
elif stat.S_ISFIFO(mode): kind = _('fifo')
|
|
|
|
elif stat.S_ISSOCK(mode): kind = _('socket')
|
|
|
|
elif stat.S_ISDIR(mode): kind = _('directory')
|
2007-06-18 22:24:34 +04:00
|
|
|
self._ui.warn(_('%s: unsupported file type (type is %s)\n')
|
2007-06-19 10:06:37 +04:00
|
|
|
% (self.pathto(f), kind))
|
2005-11-03 02:46:31 +03:00
|
|
|
return False
|
|
|
|
|
2008-02-08 23:07:55 +03:00
|
|
|
def _dirignore(self, f):
|
2008-04-05 20:15:04 +04:00
|
|
|
if f == '.':
|
|
|
|
return False
|
2008-02-08 23:07:55 +03:00
|
|
|
if self._ignore(f):
|
|
|
|
return True
|
|
|
|
for c in strutil.findall(f, '/'):
|
|
|
|
if self._ignore(f[:c]):
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
2008-05-12 20:37:07 +04:00
|
|
|
def walk(self, match):
|
2008-05-12 20:37:08 +04:00
|
|
|
# filter out the src and stat
|
2008-05-12 20:37:08 +04:00
|
|
|
for src, f, st in self.statwalk(match):
|
2008-05-12 20:37:08 +04:00
|
|
|
yield f
|
2006-10-27 08:54:24 +04:00
|
|
|
|
2008-05-12 20:37:08 +04:00
|
|
|
def statwalk(self, match, unknown=True, ignored=False):
|
2006-10-27 08:54:24 +04:00
|
|
|
'''
|
|
|
|
walk recursively through the directory tree, finding all files
|
|
|
|
matched by the match function
|
|
|
|
|
|
|
|
results are yielded in a tuple (src, filename, st), where src
|
|
|
|
is one of:
|
|
|
|
'f' the file was found in the directory tree
|
|
|
|
'm' the file was only in the dirstate and not in the tree
|
2006-10-27 20:24:10 +04:00
|
|
|
|
2006-10-27 08:54:24 +04:00
|
|
|
and st is the stat result if the file was found in the directory.
|
|
|
|
'''
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-05-12 20:37:07 +04:00
|
|
|
def fwarn(f, msg):
|
|
|
|
self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
|
|
|
|
return False
|
2008-05-12 20:37:08 +04:00
|
|
|
badfn = fwarn
|
|
|
|
if hasattr(match, 'bad'):
|
|
|
|
badfn = match.bad
|
2008-05-12 20:37:07 +04:00
|
|
|
|
2005-08-28 01:21:25 +04:00
|
|
|
# walk all files by default
|
2008-05-12 20:37:08 +04:00
|
|
|
files = match.files()
|
2005-08-28 01:21:25 +04:00
|
|
|
if not files:
|
2007-03-11 04:03:22 +03:00
|
|
|
files = ['.']
|
2007-06-18 22:24:34 +04:00
|
|
|
dc = self._map.copy()
|
2006-10-27 08:54:24 +04:00
|
|
|
else:
|
2006-10-27 23:10:01 +04:00
|
|
|
files = util.unique(files)
|
2007-07-22 01:02:09 +04:00
|
|
|
dc = self._filter(files)
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2006-10-27 08:54:24 +04:00
|
|
|
def imatch(file_):
|
2007-06-18 22:24:34 +04:00
|
|
|
if file_ not in dc and self._ignore(file_):
|
2005-09-01 18:34:53 +04:00
|
|
|
return False
|
2006-02-19 21:43:03 +03:00
|
|
|
return match(file_)
|
2005-09-09 02:01:33 +04:00
|
|
|
|
2008-03-02 15:52:34 +03:00
|
|
|
# TODO: don't walk unknown directories if unknown and ignored are False
|
2007-06-18 22:24:34 +04:00
|
|
|
ignore = self._ignore
|
2008-02-08 23:07:55 +03:00
|
|
|
dirignore = self._dirignore
|
2007-03-11 05:00:54 +03:00
|
|
|
if ignored:
|
|
|
|
imatch = match
|
|
|
|
ignore = util.never
|
2008-02-08 23:07:55 +03:00
|
|
|
dirignore = util.never
|
2006-10-27 21:09:33 +04:00
|
|
|
|
2007-06-18 22:24:34 +04:00
|
|
|
# self._root may end with a path separator when self._root == '/'
|
|
|
|
common_prefix_len = len(self._root)
|
2008-01-09 15:30:35 +03:00
|
|
|
if not util.endswithsep(self._root):
|
2006-07-25 23:22:56 +04:00
|
|
|
common_prefix_len += 1
|
2007-07-26 21:02:58 +04:00
|
|
|
|
|
|
|
normpath = util.normpath
|
2007-10-06 02:01:06 +04:00
|
|
|
listdir = osutil.listdir
|
2007-07-26 21:02:58 +04:00
|
|
|
lstat = os.lstat
|
|
|
|
bisect_left = bisect.bisect_left
|
|
|
|
isdir = os.path.isdir
|
|
|
|
pconvert = util.pconvert
|
|
|
|
join = os.path.join
|
|
|
|
s_isdir = stat.S_ISDIR
|
|
|
|
supported = self._supported
|
2007-07-26 21:02:58 +04:00
|
|
|
_join = self._join
|
|
|
|
known = {'.hg': 1}
|
2007-07-26 21:02:58 +04:00
|
|
|
|
2007-07-26 21:02:58 +04:00
|
|
|
# recursion free walker, faster than os.walk.
|
2005-09-01 18:34:53 +04:00
|
|
|
def findfiles(s):
|
|
|
|
work = [s]
|
2007-07-26 21:02:58 +04:00
|
|
|
wadd = work.append
|
|
|
|
found = []
|
|
|
|
add = found.append
|
2008-05-12 20:37:08 +04:00
|
|
|
if hasattr(match, 'dir'):
|
|
|
|
match.dir(normpath(s[common_prefix_len:]))
|
2005-09-01 18:34:53 +04:00
|
|
|
while work:
|
|
|
|
top = work.pop()
|
2007-10-06 02:01:06 +04:00
|
|
|
entries = listdir(top, stat=True)
|
2005-09-01 18:34:53 +04:00
|
|
|
# nd is the top of the repository dir tree
|
2007-07-26 21:02:58 +04:00
|
|
|
nd = normpath(top[common_prefix_len:])
|
2006-04-01 05:00:09 +04:00
|
|
|
if nd == '.':
|
|
|
|
nd = ''
|
|
|
|
else:
|
2006-04-14 00:46:05 +04:00
|
|
|
# do not recurse into a repo contained in this
|
|
|
|
# one. use bisect to find .hg directory so speed
|
|
|
|
# is good on big directory.
|
2007-10-06 02:01:06 +04:00
|
|
|
names = [e[0] for e in entries]
|
2007-07-26 21:02:58 +04:00
|
|
|
hg = bisect_left(names, '.hg')
|
2006-04-01 05:00:09 +04:00
|
|
|
if hg < len(names) and names[hg] == '.hg':
|
2007-07-26 21:02:58 +04:00
|
|
|
if isdir(join(top, '.hg')):
|
2006-04-01 05:00:09 +04:00
|
|
|
continue
|
2007-10-06 02:01:06 +04:00
|
|
|
for f, kind, st in entries:
|
2007-07-26 21:02:58 +04:00
|
|
|
np = pconvert(join(nd, f))
|
|
|
|
if np in known:
|
2005-09-01 18:34:53 +04:00
|
|
|
continue
|
2007-07-26 21:02:58 +04:00
|
|
|
known[np] = 1
|
2007-07-26 21:02:58 +04:00
|
|
|
p = join(top, f)
|
2005-09-09 22:46:35 +04:00
|
|
|
# don't trip over symlinks
|
2007-10-06 02:01:06 +04:00
|
|
|
if kind == stat.S_IFDIR:
|
2007-03-21 04:09:53 +03:00
|
|
|
if not ignore(np):
|
2007-07-26 21:02:58 +04:00
|
|
|
wadd(p)
|
2008-05-12 20:37:08 +04:00
|
|
|
if hasattr(match, 'dir'):
|
|
|
|
match.dir(np)
|
2007-07-26 21:02:58 +04:00
|
|
|
if np in dc and match(np):
|
2007-07-26 21:02:58 +04:00
|
|
|
add((np, 'm', st))
|
2006-10-27 08:54:24 +04:00
|
|
|
elif imatch(np):
|
2007-07-26 21:02:58 +04:00
|
|
|
if supported(np, st.st_mode):
|
2007-07-26 21:02:58 +04:00
|
|
|
add((np, 'f', st))
|
2005-11-03 02:46:31 +03:00
|
|
|
elif np in dc:
|
2007-07-26 21:02:58 +04:00
|
|
|
add((np, 'm', st))
|
|
|
|
found.sort()
|
|
|
|
return found
|
2005-10-16 02:43:40 +04:00
|
|
|
|
2005-09-01 18:34:53 +04:00
|
|
|
# step one, find all files that match our criteria
|
|
|
|
files.sort()
|
2006-10-27 23:10:01 +04:00
|
|
|
for ff in files:
|
2007-07-26 21:02:58 +04:00
|
|
|
nf = normpath(ff)
|
2007-07-26 21:02:58 +04:00
|
|
|
f = _join(ff)
|
2005-09-01 18:34:53 +04:00
|
|
|
try:
|
2007-07-26 21:02:58 +04:00
|
|
|
st = lstat(f)
|
2005-09-01 18:34:53 +04:00
|
|
|
except OSError, inst:
|
2005-12-01 19:48:29 +03:00
|
|
|
found = False
|
|
|
|
for fn in dc:
|
|
|
|
if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
|
|
|
|
found = True
|
|
|
|
break
|
|
|
|
if not found:
|
2008-05-12 20:37:07 +04:00
|
|
|
if inst.errno != errno.ENOENT:
|
|
|
|
fwarn(ff, inst.strerror)
|
|
|
|
elif badfn(ff, inst.strerror) and imatch(nf):
|
2008-05-12 20:37:08 +04:00
|
|
|
yield 'f', ff, None
|
2005-09-01 18:34:53 +04:00
|
|
|
continue
|
2007-07-26 21:02:58 +04:00
|
|
|
if s_isdir(st.st_mode):
|
2008-02-08 23:07:55 +03:00
|
|
|
if not dirignore(nf):
|
|
|
|
for f, src, st in findfiles(f):
|
|
|
|
yield src, f, st
|
2005-10-16 02:43:40 +04:00
|
|
|
else:
|
2007-07-26 21:02:58 +04:00
|
|
|
if nf in known:
|
|
|
|
continue
|
|
|
|
known[nf] = 1
|
|
|
|
if match(nf):
|
|
|
|
if supported(ff, st.st_mode, verbose=True):
|
2008-06-06 22:23:29 +04:00
|
|
|
yield 'f', self.normalize(nf), st
|
2005-11-03 02:46:31 +03:00
|
|
|
elif ff in dc:
|
2006-10-27 23:10:01 +04:00
|
|
|
yield 'm', nf, st
|
2005-09-01 18:34:53 +04:00
|
|
|
|
|
|
|
# step two run through anything left in the dc hash and yield
|
|
|
|
# if we haven't already seen it
|
|
|
|
ks = dc.keys()
|
|
|
|
ks.sort()
|
|
|
|
for k in ks:
|
2007-07-26 21:02:58 +04:00
|
|
|
if k in known:
|
|
|
|
continue
|
|
|
|
known[k] = 1
|
|
|
|
if imatch(k):
|
2005-10-28 00:29:35 +04:00
|
|
|
yield 'm', k, None
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2008-06-26 23:35:50 +04:00
|
|
|
def status(self, match, ignored, clean, unknown):
|
|
|
|
listignored, listclean, listunknown = ignored, clean, unknown
|
|
|
|
|
2006-03-30 00:58:34 +04:00
|
|
|
lookup, modified, added, unknown, ignored = [], [], [], [], []
|
2006-07-21 03:21:07 +04:00
|
|
|
removed, deleted, clean = [], [], []
|
2005-08-28 01:21:25 +04:00
|
|
|
|
2007-07-26 21:02:58 +04:00
|
|
|
_join = self._join
|
|
|
|
lstat = os.lstat
|
|
|
|
cmap = self._copymap
|
|
|
|
dmap = self._map
|
|
|
|
ladd = lookup.append
|
|
|
|
madd = modified.append
|
|
|
|
aadd = added.append
|
|
|
|
uadd = unknown.append
|
|
|
|
iadd = ignored.append
|
|
|
|
radd = removed.append
|
|
|
|
dadd = deleted.append
|
|
|
|
cadd = clean.append
|
|
|
|
|
2008-06-26 23:35:50 +04:00
|
|
|
for src, fn, st in self.statwalk(match, listunknown, listignored):
|
2008-05-12 20:37:08 +04:00
|
|
|
if fn not in dmap:
|
2008-06-26 23:35:50 +04:00
|
|
|
if (listignored or match.exact(fn)) and self._dirignore(fn):
|
|
|
|
if listignored:
|
2008-02-08 23:07:55 +03:00
|
|
|
iadd(fn)
|
2008-06-26 23:35:50 +04:00
|
|
|
elif listunknown:
|
2007-07-26 21:02:58 +04:00
|
|
|
uadd(fn)
|
2005-10-28 00:29:35 +04:00
|
|
|
continue
|
2008-05-12 20:37:08 +04:00
|
|
|
|
|
|
|
state, mode, size, time, foo = dmap[fn]
|
|
|
|
|
2005-10-28 21:57:40 +04:00
|
|
|
if src == 'm':
|
2005-11-03 02:46:31 +03:00
|
|
|
nonexistent = True
|
|
|
|
if not st:
|
|
|
|
try:
|
2007-07-26 21:02:58 +04:00
|
|
|
st = lstat(_join(fn))
|
2005-11-03 02:46:31 +03:00
|
|
|
except OSError, inst:
|
2007-11-05 20:05:44 +03:00
|
|
|
if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
|
2005-11-03 02:46:31 +03:00
|
|
|
raise
|
|
|
|
st = None
|
|
|
|
# We need to re-check that it is a valid file
|
2007-07-26 21:02:58 +04:00
|
|
|
if st and self._supported(fn, st.st_mode):
|
2005-11-03 02:46:31 +03:00
|
|
|
nonexistent = False
|
2008-05-12 20:37:08 +04:00
|
|
|
if nonexistent and state in "nma":
|
2007-07-26 21:02:58 +04:00
|
|
|
dadd(fn)
|
2005-10-28 21:57:40 +04:00
|
|
|
continue
|
2005-10-28 00:29:35 +04:00
|
|
|
# check the common case first
|
2008-05-12 20:37:08 +04:00
|
|
|
if state == 'n':
|
2005-10-28 00:29:35 +04:00
|
|
|
if not st:
|
2007-07-26 21:02:58 +04:00
|
|
|
st = lstat(_join(fn))
|
2008-03-14 15:56:58 +03:00
|
|
|
if (size >= 0 and
|
|
|
|
(size != st.st_size
|
|
|
|
or ((mode ^ st.st_mode) & 0100 and self._checkexec))
|
merge: forcefully mark files that we get from the second parent as dirty
After a hg merge, we want to include in the commit all the files that we
got from the second parent, so that we have the correct file-level
history. To make them visible to hg commit, we try to mark them as dirty.
Unfortunately, right now we can't really mark them as dirty[1] - the
best we can do is to mark them as needing a full comparison of their
contents, but they will still be considered clean if they happen to be
identical to the version in the first parent.
This changeset extends the dirstate format in a compatible way, so that
we can mark a file as dirty:
Right now we use a negative file size to indicate we don't have valid
stat data for this entry. In practice, this size is always -1.
This patch uses -2 to indicate that the entry is dirty. Older versions
of hg won't choke on this dirstate, but they may happily mark the file
as clean after a full comparison, destroying all of our hard work.
The patch adds a dirstate.normallookup method with the semantics of the
current normaldirty, and changes normaldirty to forcefully mark the
entry as dirty.
This should fix issue522.
[1] - well, we could put them in state 'm', but that state has a
different meaning.
2007-08-23 08:48:29 +04:00
|
|
|
or size == -2
|
2007-06-22 06:42:06 +04:00
|
|
|
or fn in self._copymap):
|
2007-07-26 21:02:58 +04:00
|
|
|
madd(fn)
|
2006-08-20 08:13:11 +04:00
|
|
|
elif time != int(st.st_mtime):
|
2007-07-26 21:02:58 +04:00
|
|
|
ladd(fn)
|
2008-06-26 23:35:50 +04:00
|
|
|
elif listclean:
|
2007-07-26 21:02:58 +04:00
|
|
|
cadd(fn)
|
2008-05-12 20:37:08 +04:00
|
|
|
elif state == 'm':
|
2007-07-26 21:02:58 +04:00
|
|
|
madd(fn)
|
2008-05-12 20:37:08 +04:00
|
|
|
elif state == 'a':
|
2007-07-26 21:02:58 +04:00
|
|
|
aadd(fn)
|
2008-05-12 20:37:08 +04:00
|
|
|
elif state == 'r':
|
2007-07-26 21:02:58 +04:00
|
|
|
radd(fn)
|
2005-10-28 00:29:35 +04:00
|
|
|
|
2006-07-21 03:21:07 +04:00
|
|
|
return (lookup, modified, added, removed, deleted, unknown, ignored,
|
|
|
|
clean)
|