sapling/edenscm/mercurial/dirstate.py

1689 lines
60 KiB
Python
Raw Normal View History

# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
2010-01-20 07:20:08 +03:00
# GNU General Public License version 2 or any later version.
2015-12-22 08:38:53 +03:00
from __future__ import absolute_import
import collections
import contextlib
2015-12-22 08:38:53 +03:00
import errno
import os
import stat
from . import (
encoding,
error,
hintutil,
2015-12-22 08:38:53 +03:00
match as matchmod,
pathutil,
perftrace,
policy,
pycompat,
2015-12-22 08:38:53 +03:00
scmutil,
treedirstate,
treestate,
txnutil,
2015-12-22 08:38:53 +03:00
util,
)
from .i18n import _
from .node import hex, nullid
parsers = policy.importmod(r"parsers")
2009-04-30 05:47:18 +04:00
# Convenience aliases so the rest of this module can use short names.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to truncate size/mtime to a signed 31-bit range before they are
# stored in dirstate entries (keeps values within the on-disk field width).
_rangemask = 0x7FFFFFFF
parsers: inline fields of dirstate values in C version Previously, while unpacking the dirstate we'd create 3-4 new CPython objects for most dirstate values: - the state is a single character string, which is pooled by CPython - the mode is a new object if it isn't 0 due to being in the lookup set - the size is a new object if it is greater than 255 - the mtime is a new object if it isn't -1 due to being in the lookup set - the tuple to contain them all In some cases such as regular hg status, we actually look at all the objects. In other cases like hg add, hg status for a subdirectory, or hg status with the third-party hgwatchman enabled, we look at almost none of the objects. This patch eliminates most object creation in these cases by defining a custom C struct that is exposed to Python with an interface similar to a tuple. Only when tuple elements are actually requested are the respective objects created. The gains, where they're expected, are significant. The following tests are run against a working copy with over 270,000 files. parse_dirstate becomes significantly faster: $ hg perfdirstate before: wall 0.186437 comb 0.180000 user 0.160000 sys 0.020000 (best of 35) after: wall 0.093158 comb 0.100000 user 0.090000 sys 0.010000 (best of 95) and as a result, several commands benefit: $ time hg status # with hgwatchman enabled before: 0.42s user 0.14s system 99% cpu 0.563 total after: 0.34s user 0.12s system 99% cpu 0.471 total $ time hg add new-file before: 0.85s user 0.18s system 99% cpu 1.033 total after: 0.76s user 0.17s system 99% cpu 0.931 total There is a slight regression in regular status performance, but this is fixed in an upcoming patch.
2014-05-28 01:27:41 +04:00
# Tuple-like type holding one dirstate entry (state, mode, size, mtime),
# provided by the policy-selected parsers module (C or pure Python).
dirstatetuple = parsers.dirstatetuple
class repocache(filecache):
    """A filecache for files that live inside the .hg directory."""

    def join(self, obj, fname):
        # Resolve fname relative to the repository's metadata opener.
        opener = obj._opener
        return opener.join(fname)
2007-06-18 22:24:34 +04:00
class rootcache(filecache):
    """A filecache for files that live in the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-copy root.
        return obj._join(fname)
def _getfsnow(vfs):
"""Get "now" timestamp on filesystem"""
tmpfd, tmpname = vfs.mkstemp()
try:
return os.fstat(tmpfd).st_mtime
finally:
os.close(tmpfd)
vfs.unlink(tmpname)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn=None,
        istreestate=False,
        istreedirstate=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is a callable used to canonicalize parent nodes (see
        parents()/p1()/p2()).  istreestate/istreedirstate select the
        backing map implementation.  sparsematchfn is not used in this
        constructor (kept for interface compatibility — confirm against
        the rest of the class before removing).
        """
        self._opener = opener
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False  # True when in-memory state differs from disk
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Nesting depth of open parentchange() contexts; see setparents().
        self._parentwriters = 0
        self._filename = "dirstate"
        self._pendingfilename = "%s.pending" % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        # TODO(quark): after migrating to treestate, remove legacy code.
        self._istreestate = istreestate
        self._istreedirstate = istreedirstate
        if istreestate:
            opener.makedirs("treestate")
            self._mapcls = treestate.treestatemap
        elif istreedirstate:
            self._mapcls = treedirstate.treedirstatemap
        else:
            self._mapcls = dirstatemap
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
def beginparentchange(self):
"""Marks the beginning of a set of changes that involve changing
the dirstate parents. If there is an exception during this time,
the dirstate will not be written when the wlock is released. This
prevents writing an incoherent dirstate where the parent doesn't
match the contents.
"""
self._ui.deprecwarn(
"beginparentchange is obsoleted by the " "parentchange context manager.",
"4.3",
)
self._parentwriters += 1
def endparentchange(self):
"""Marks the end of a set of changes that involve changing the
dirstate parents. Once all parent changes have been marked done,
the wlock will be free to write the dirstate on release.
"""
self._ui.deprecwarn(
"endparentchange is obsoleted by the " "parentchange context manager.",
"4.3",
)
if self._parentwriters > 0:
self._parentwriters -= 1
def pendingparentchange(self):
"""Returns true if the dirstate is in the middle of a set of changes
that modify the dirstate parent.
"""
return self._parentwriters > 0
2009-04-30 05:47:18 +04:00
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap).

        Lazily constructs the backing map (class chosen in __init__) on
        first access.
        """
        # Store on the instance explicitly so later attribute lookups hit
        # the instance dict directly instead of re-entering this property.
        self._map = self._mapcls(self._ui, self._opener, self._root)
        return self._map
@repocache("branch")
2009-04-30 05:47:18 +04:00
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
2009-04-30 05:47:18 +04:00
return "default"
    @property
    def _pl(self):
        # Working-copy parents (pair of nodes), as recorded by the map.
        return self._map.parents()
def hasdir(self, d):
return self._map.hastrackeddir(d)
@rootcache(".hgignore")
2009-04-30 05:47:18 +04:00
def _ignore(self):
# gitignore
globalignores = self._globalignorefiles()
return matchmod.gitignorematcher(self._root, "", gitignorepaths=globalignores)
2009-04-30 05:47:18 +04:00
@propertycache
def _slash(self):
return (
self._ui.plain() or self._ui.configbool("ui", "slash")
) and pycompat.ossep != "/"
2009-04-30 05:47:18 +04:00
    @propertycache
    def _checklink(self):
        # Whether the filesystem under the repo root supports symlinks.
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # Whether the filesystem preserves the executable bit.
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg dir).
        return not util.fscasesensitive(self._join(".hg"))
2009-04-30 05:47:18 +04:00
2007-07-22 01:02:09 +04:00
def _join(self, f):
# much faster than os.path.join()
# it's safe because f is always a relative path
return self._rootdir + f
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flag string.

        The returned function yields "l" for symlinks, "x" for executable
        files and "" otherwise.  When the filesystem cannot express links
        and/or exec bits, the missing flag is read from the fallback
        produced by buildfallback().
        """
        if self._checklink and self._checkexec:
            # Filesystem supports both; stat the file directly.
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return "l"
                    if util.statisexec(st):
                        return "x"
                except OSError:
                    pass
                return ""

            return f

        fallback = buildfallback()
        if self._checklink:
            # Links are real; the exec bit comes from the fallback.
            def f(x):
                if os.path.islink(self._join(x)):
                    return "l"
                if "x" in fallback(x):
                    return "x"
                return ""

            return f
        if self._checkexec:
            # The exec bit is real; the link flag comes from the fallback.
            def f(x):
                if "l" in fallback(x):
                    return "l"
                if util.isexec(self._join(x)):
                    return "x"
                return ""

            return f
        else:
            # Neither is supported: use the fallback wholesale.
            return fallback
@propertycache
def _cwd(self):
# internal config: ui.forcecwd
forcecwd = self._ui.config("ui", "forcecwd")
if forcecwd:
return forcecwd
return pycompat.getcwd()
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return ""
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # cwd is inside the repo: return the repo-relative portion.
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
if self._slash:
return util.pconvert(path)
return path
def __getitem__(self, key):
"""Return the current state of key (a filename) in the dirstate.
States are:
n normal
m needs merging
r marked for removal
a marked for addition
? not tracked
"""
return self._map.get(key, ("?",))[0]
def __contains__(self, key):
return key in self._map
def __iter__(self):
return iter(sorted(self._map))
def items(self):
completion: add a debugpathcomplete command The bash_completion code uses "hg status" to generate a list of possible completions for commands that operate on files in the working directory. In a large working directory, this can result in a single tab-completion being very slow (several seconds) as a result of checking the status of every file, even when there is no need to check status or no possible matches. The new debugpathcomplete command gains performance in a few simple ways: * Allow completion to operate on just a single directory. When used to complete the right commands, this considerably reduces the number of completions returned, at no loss in functionality. * Never check the status of files. For completions that really must know if a file is modified, it is faster to use status: hg status -nm 'glob:myprefix**' Performance: Here are the commands used by bash_completion to complete, run in the root of the mozilla-central working dir (~77,000 files) and another repo (~165,000 files): All "normal state" files (used by e.g. remove, revert): mozilla other status -nmcd 'glob:**' 1.77 4.10 sec debugpathcomplete -f -n 0.53 1.26 debugpathcomplete -n 0.17 0.41 ("-f" means "complete full paths", rather than the current directory) Tracked files matching "a": mozilla other status -nmcd 'glob:a**' 0.26 0.47 debugpathcomplete -f -n a 0.10 0.24 debugpathcomplete -n a 0.10 0.22 We should be able to further improve completion performance once the critbit work lands. Right now, our performance is limited by the need to iterate over all keys in the dirstate.
2013-03-22 03:31:28 +04:00
return self._map.iteritems()
iteritems = items
    def parents(self):
        # Both working-copy parents, canonicalized via the validate hook.
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # First working-copy parent.
        return self._validate(self._pl[0])

    def p2(self):
        # Second working-copy parent (nullid outside of a merge).
        return self._validate(self._pl[1])

    def branch(self):
        # Current branch name, converted to the local encoding.
        return encoding.tolocal(self._branch)
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError(
                "cannot set dirstate parent without "
                "calling dirstate.beginparentchange"
            )
        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        copymap = self._map.copymap
        if oldp2 != nullid and p2 == nullid:
            # Leaving a merge: entries that only make sense with two
            # parents ('m' merged, 'n' size -2 other-parent) are rewritten.
            candidatefiles = self._map.nonnormalset.union(self._map.otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue
                # Discard 'm' markers when moving away from a merge state
                if s[0] == "m":
                    source = copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == "n" and s[2] == -2:
                    source = copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
    def setbranch(self, branch):
        """Set the current branch name and persist it to .hg/branch."""
        self._branch = encoding.fromlocal(branch)
        f = self._opener("branch", "w", atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + "\n")
            f.close()
            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache["_branch"]
            if ce:
                ce.refresh()
        except:  # re-raises
            # Drop the atomic temp file so a partial write is never visible.
            f.discard()
            raise
def invalidate(self):
"""Causes the next access to reread the dirstate.
This is different from localrepo.invalidatedirstate() because it always
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
check whether the dirstate has changed before rereading it."""
for a in ("_map", "_branch", "_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
self._dirty = False
self._updatedfiles.clear()
self._parentwriters = 0
self._origpl = None
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if self._istreestate:
            # treestatemap.copymap needs to be changed via the "copy" method.
            # _updatedfiles is not used by treestatemap as it's tracked
            # internally.
            self._map.copy(source, dest)
            return
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # Only mark dest updated if a copy record was actually removed.
            self._updatedfiles.add(dest)
def copied(self, file):
if self._istreestate:
return self._map.copysource(file)
else:
return self._map.copymap.get(file, None)
def copies(self):
return self._map.copymap
def needcheck(self, file):
"""Mark file as need-check"""
if not self._istreestate:
raise error.ProgrammingError("needcheck is only supported by treestate")
changed = self._map.needcheck(file)
self._dirty |= changed
return changed
def clearneedcheck(self, file):
if not self._istreestate:
raise error.ProgrammingError("needcheck is only supported by treestate")
changed = self._map.clearneedcheck(file)
self._dirty |= changed
def setclock(self, clock):
"""Set fsmonitor clock"""
return self.setmeta("clock", clock)
def getclock(self):
"""Get fsmonitor clock"""
return self.getmeta("clock")
def setmeta(self, name, value):
"""Set metadata"""
if not self._istreestate:
raise error.ProgrammingError("setmeta is only supported by treestate")
value = value or None
if value != self.getmeta(name):
self._map.updatemetadata({name: value})
self._dirty = True
def getmeta(self, name):
"""Get metadata"""
if not self._istreestate:
raise error.ProgrammingError("getmeta is only supported by treestate")
# Normalize "" to "None"
return self._map.getmetadata().get(name) or None
    def _addpath(self, f, state, mode, size, mtime):
        """Insert or update file f in the map with the given entry fields.

        For newly-tracked files, validates the filename and refuses paths
        that collide with a tracked directory, or whose parent directory is
        itself tracked as a file.
        """
        oldstate = self[f]
        if state == "a" or oldstate == "r":
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(_("directory %r already in dirstate") % f)
            # shadows: reject f if any parent directory is tracked as a file
            for d in util.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] not in "r?":
                    raise error.Abort(_("file %r in dirstate clashes with %r") % (d, f))
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)
    def normal(self, f):
        """Mark a file normal and clean."""
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # size/mtime are masked to 31 bits to fit the dirstate entry fields.
        self._addpath(f, "n", s.st_mode, s.st_size & _rangemask, mtime & _rangemask)
        if not self._istreestate:
            self._map.copymap.pop(f, None)
            if f in self._map.nonnormalset:
                self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
    def normallookup(self, f):
        """Mark a file normal, but possibly dirty."""
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == "r" and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == "m" or entry[0] == "n" and entry[2] == -2:
                    return
        # mtime/size of -1 force a later content comparison ("possibly dirty").
        self._addpath(f, "n", 0, -1, -1)
        if not self._istreestate:
            self._map.copymap.pop(f, None)
    def otherparent(self, f):
        """Mark as coming from the other parent, always dirty."""
        if self._pl[1] == nullid:
            raise error.Abort(
                _("setting %r to other parent " "only allowed in merges") % f
            )
        if f in self and self[f] == "n":
            # merge-like: file already tracked as normal, record as merged
            self._addpath(f, "m", 0, -2, -1)
        else:
            # add-like: size -2 tags the entry as coming from the other parent
            self._addpath(f, "n", 0, -2, -1)
        if not self._istreestate:
            self._map.copymap.pop(f, None)
def add(self, f):
"""Mark a file added."""
self._addpath(f, "a", 0, -1, -1)
if not self._istreestate:
self._map.copymap.pop(f, None)
    def remove(self, f):
        """Mark a file removed."""
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state in the size field so a later
                # normallookup() can restore it
                if entry[0] == "m":  # merge
                    size = -1
                elif entry[0] == "n" and entry[2] == -2:  # other parent
                    size = -2
                    if not self._istreestate:
                        self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if not self._istreestate:
            if size == 0:
                self._map.copymap.pop(f, None)
def merge(self, f):
"""Mark a file merged."""
rebase: skip resolved but emptied revisions When rebasing, if a conflict occurs and is resolved in a way the rebased revision becomes empty, it is not skipped, unlike revisions being emptied without conflicts. The reason is: - File 'x' is merged and resolved, merge.update() marks it as 'm' in the dirstate. - rebase.concludenode() calls localrepo.commit(), which calls localrepo.status() which calls dirstate.status(). 'x' shows up as 'm' and is unconditionnally added to the modified files list, instead of being checked again. - localrepo.commit() detects 'x' as changed an create a new revision where only the manifest parents and linkrev differ. Marking 'x' as modified without checking it makes sense for regular merges. But in rebase case, the merge looks normal but the second parent is usually discarded. When this happens, 'm' files in dirstate are a bit irrelevant and should be considered 'n' possibly dirty instead. That is what the current patch does. Another approach, maybe more efficient, would be to pass another flag to merge.update() saying the 'branchmerge' is a bit of a lie and recordupdate() should call dirstate.normallookup() instead of merge(). It is also tempting to add this logic to dirstate.setparents(), moving from two to one parent is what invalidates the 'm' markers. But this is a far bigger change to make. v2: succumb to the temptation and move the logic in dirstate.setparents(). mpm suggested trying _filecommit() first but it is called by commitctx() which knows nothing about the dirstate and comes too late into the game. 
A second approach was to rewrite the 'm' state into 'n' on the fly in dirstate.status() which failed for graft in the following case: $ hg init repo $ cd repo $ echo a > a $ hg ci -qAm0 $ echo a >> a $ hg ci -m1 $ hg up 0 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg mv a b $ echo c > b $ hg ci -m2 created new head $ hg graft 1 --tool internal:local grafting revision 1 $ hg --config extensions.graphlog= glog --template '{rev} {desc|firstline}\n' @ 3 1 | o 2 2 | | o 1 1 |/ o 0 0 $ hg log -r 3 --debug --patch --git --copies changeset: 3:19cd7d1417952af13161b94c32e901769104560c tag: tip phase: draft parent: 2:b5c505595c9e9a12d5dd457919c143e05fc16fb8 parent: -1:0000000000000000000000000000000000000000 manifest: 3:3d27ce8d02241aa59b60804805edf103c5c0cda4 user: test date: Thu Jan 01 00:00:00 1970 +0000 extra: branch=default extra: source=a03df74c41413a75c0a42997fc36c2de97b26658 description: 1 Here, revision 3 is created because there is a copy record for 'b' in the dirstate and thus 'b' is considered modified. But this information is discarded at commit time since 'b' content is unchanged. I do not know if discarding this information is correct or not, but at this time we cannot represent it anyway. This patch therefore implements the last solution of moving the logic into dirstate.setparents(). It does not sound crazy as 'm' files makes no sense with only one parent. It also makes dirstate.merge() calls .lookupnormal() if there is one parent, to preserve the invariant. I am a bit concerned about introducing this kind of stateful behaviour to existing code which historically treated setparents() as a basic setter without side-effects. And doing that during the code freeze.
2012-04-22 22:06:36 +04:00
if self._pl[1] == nullid:
return self.normallookup(f)
return self.otherparent(f)
def untrack(self, f):
"""Stops tracking a file in the dirstate. This is useful during
operations that want to stop tracking a file, but still have it show up
as untracked (like hg forget)."""
oldstate = self[f]
if self._map.untrackfile(f, oldstate):
self._dirty = True
if not self._istreestate:
self._updatedfiles.add(f)
self._map.copymap.pop(f, None)
def delete(self, f):
"""Removes a file from the dirstate entirely. This is useful during
operations like update, to remove files from the dirstate that are known
to be deleted."""
oldstate = self[f]
if self._map.deletefile(f, oldstate):
self._dirty = True
if not self._istreestate:
self._updatedfiles.add(f)
self._map.copymap.pop(f, None)
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the canonical-case spelling of *path* by consulting
        the filesystem, caching the answer in *storemap*.

        path: the path as given by the caller
        normed: util.normcase(path), used as the cache key
        ignoremissing: if True, a missing path is returned unchanged
        exists: known existence of the path, or None to lstat for it
        storemap: fold map (filefoldmap or dirfoldmap) to record into
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and "/" in path:
                d, f = path.rsplit("/", 1)
                # Normalize the existing directory prefix, keep the
                # missing leaf's case as given.
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if "/" in normed:
                d, f = normed.rsplit("/", 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                # util.fspath asks the filesystem for the on-disk case
                # of the final component.
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded
        return folded
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
normed = util.normcase(path)
folded = self._map.filefoldmap.get(normed, None)
if folded is None:
if isknown:
folded = path
else:
folded = self._discoverpath(
path, normed, ignoremissing, exists, self._map.filefoldmap
)
return folded
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
normed = util.normcase(path)
folded = self._map.filefoldmap.get(normed, None)
if folded is None:
folded = self._map.dirfoldmap.get(normed, None)
if folded is None:
if isknown:
folded = path
else:
# store discovered result in dirfoldmap so that future
# normalizefile calls don't start matching directories
folded = self._discoverpath(
path, normed, ignoremissing, exists, self._map.dirfoldmap
)
return folded
def normalize(self, path, isknown=False, ignoremissing=False):
"""
normalize the case of a pathname when on a casefolding filesystem
isknown specifies whether the filename came from walking the
disk, to avoid extra filesystem access.
If ignoremissing is True, missing path are returned
unchanged. Otherwise, we try harder to normalize possibly
existing path components.
The normalized case is determined based on the following precedence:
- version of name already stored in the dirstate
- version of name stored on disk
- version provided via command arguments
"""
if self._checkcase:
return self._normalize(path, isknown, ignoremissing)
return path
def clear(self):
self._map.clear()
self._lastnormaltime = 0
self._updatedfiles.clear()
2007-08-06 08:00:10 +04:00
self._dirty = True
    def rebuild(self, parent, allfiles, changedfiles=None, exact=False):
        """Rebuild the dirstate against *parent*.

        Every file of changedfiles that is present in allfiles is marked
        normal-lookup; the rest are untracked. When changedfiles is None
        the whole dirstate is cleared and rebuilt from allfiles.
        """
        # If exact is True, then assume only changedfiles can be changed, and
        # other files cannot be possibly changed. This is used by "absorb" as
        # a hint to perform a fast path for fsmonitor and sparse.
        if changedfiles is None:
            if exact:
                raise error.ProgrammingError("exact requires changedfiles")

            # Rebuild entire dirstate
            changedfiles = allfiles
            # clear() resets _lastnormaltime; preserve it across the rebuild.
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.untrack(f)
        self._dirty = True
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # Delegates to the map, which captures the on-disk file identity.
        return self._map.identity
def write(self, tr):
2007-06-18 22:24:34 +04:00
if not self._dirty:
return
filename = self._filename
if tr:
# 'dirstate.write()' is not only for writing in-memory
# changes out, but also for dropping ambiguous timestamp.
# delayed writing re-raise "ambiguous timestamp issue".
# See also the wiki page below for detail:
# https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
# emulate dropping timestamp in 'parsers.pack_dirstate'
now = _getfsnow(self._opener)
self._map.clearambiguoustimes(self._updatedfiles, now)
# emulate that all 'dirstate.normal' results are written out
self._lastnormaltime = 0
self._updatedfiles.clear()
# delay writing in-memory changes out
tr.addfilegenerator(
"dirstate", (self._filename,), self._writedirstate, location="local"
)
return
st = self._opener(filename, "w", atomictemp=True, checkambig=True)
self._writedirstate(st)
@util.propertycache
def checkoutidentifier(self):
try:
return self._opener.read("checkoutidentifier")
except IOError as e:
if e.errno != errno.ENOENT:
raise
return ""
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # Keyed by category so re-registering replaces rather than duplicates.
        self._plchangecallbacks[category] = callback
    def _writedirstate(self, st):
        """Serialize the dirstate map to the open file-like object *st*.

        Also fires parent-change callbacks, refreshes the checkout
        identifier when p1 changed, and optionally sleeps (debug
        'dirstate.delaywrite') so that mtimes equal to 'now' are not
        recorded ambiguously.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            # if the first parent has changed then consider this a new checkout
            if self._origpl[0] != self._pl[0]:
                with self._opener("checkoutidentifier", "w", atomictemp=True) as f:
                    f.write(util.makerandomidentifier())
                util.clearcachedproperty(self, "checkoutidentifier")
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint("debug", "dirstate.delaywrite")
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == "n" and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
def _dirignore(self, f):
if f == "":
return False
dirstate: stop testing directories as files in ignore handling Summary: This is subtle. `.hgignore`'s current behavior: if `^foo$` rule exists, then directory `foo/` is ignored (ex. 'foo/bar' should be ignored). However, that imposes problems for the sparse ignore matcher, which is the "negate" of the "include" matcher. A user can write `[include]` glob patterns like: a*{b*,c*/d*}/e* The ignore matcher will be the negate of the above patterns. Then because `a1b2` does not match `a*{b*,c*/d*}/e`, the negate matcher returns "True", and the ignore matcher will ignore the directory. So even if file `a1b2/e3` should be selected, the parent directory being ignored cause the file to be ignored. That is clearly incorrect for sparse's usecase. I think the issue is fundementally a layer violation - it's the *matcher*'s responsibility to check whether one of the parent directory is matched (or ignored), not the directory walker's responsibility. This diff fixes the walker so it uses the visitdir interface, and moves back the directory check to hgignore matcher to maintain compatibility. For three matchers involved in ignore handling: - hgignore matcher: updated to do the recursive directory handling on its own - gitignore matcher: work out of box. already consider parent directories! - sparse matcher: want the new behavior `test-sparse-issues.t` is now green. With this change, the `forceincludematcher` subdir hack used in sparse is no longer necessary. Therefore removed. Besides, all ignore matchers can handle "visitdir" correctly. That is, if `visitdir('x')` returns `'all'`, then `visitdir('x/y')` will also return `'all'`. Therefore the parent directory logic in `dirstate.dirignore` becomes unnecessary and dropped. Reviewed By: DurhamG Differential Revision: D10861612 fbshipit-source-id: aa0c181ae64b361b85f08b8fecfdfe6331e9a4c2
2018-12-13 09:42:17 +03:00
visitdir = self._ignore.visitdir
if visitdir(f) == "all":
return True
return False
def _ignorefiles(self):
files = []
files += self._globalignorefiles()
return files
def _globalignorefiles(self):
files = []
for name, path in self._ui.configitems("ui"):
# A path could have an optional prefix (ex. "git:") to select file
# format
if name == "ignore" or name.startswith("ignore."):
# we need to use os.path.join here rather than self._join
# because path is arbitrary and user-specified
fullpath = os.path.join(self._rootdir, util.expandpath(path))
files.append(fullpath)
return files
def _walkexplicit(self, match):
"""Get stat data about the files explicitly specified by match.
Return a triple (results, dirsfound, dirsnotfound).
- results is a mapping from filename to stat result.
- dirsfound is a list of files found to be directories.
- dirsnotfound is a list of files that the dirstate thinks are
directories and that were not found."""
def badtype(mode):
kind = _("unknown")
2010-01-25 09:05:27 +03:00
if stat.S_ISCHR(mode):
kind = _("character device")
2010-01-25 09:05:27 +03:00
elif stat.S_ISBLK(mode):
kind = _("block device")
2010-01-25 09:05:27 +03:00
elif stat.S_ISFIFO(mode):
kind = _("fifo")
2010-01-25 09:05:27 +03:00
elif stat.S_ISSOCK(mode):
kind = _("socket")
2010-01-25 09:05:27 +03:00
elif stat.S_ISDIR(mode):
kind = _("directory")
return _("unsupported file type (type is %s)") % kind
matchedir = match.explicitdir
2009-06-01 02:54:18 +04:00
badfn = match.bad
dmap = self._map
lstat = os.lstat
getkind = stat.S_IFMT
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
dirsfound = []
foundadd = dirsfound.append
dirsnotfound = []
notfoundadd = dirsnotfound.append
if not match.isexact() and self._checkcase:
normalize = self._normalize
else:
normalize = None
files = sorted(match.files())
if not files or "" in files:
files = [""]
# constructing the foldmap is expensive, so don't do it for the
# common case where files is ['']
normalize = None
results = {".hg": None}
for ff in files:
if normalize:
nf = normalize(ff, False, True)
else:
nf = ff
if nf in results:
continue
try:
st = lstat(join(nf))
kind = getkind(st.st_mode)
if kind == dirkind:
if nf in dmap:
# file replaced by dir on disk but still in dirstate
results[nf] = None
if matchedir:
matchedir(nf)
foundadd((nf, ff))
2010-09-24 21:46:54 +04:00
elif kind == regkind or kind == lnkkind:
results[nf] = st
else:
badfn(ff, badtype(kind))
if nf in dmap:
results[nf] = None
except OSError as inst: # nf not found on disk - it is dirstate only
if nf in dmap: # does it exactly match a missing file?
2009-06-01 02:54:18 +04:00
results[nf] = None
else: # does it match a missing directory?
if self._map.hasdir(nf):
if matchedir:
matchedir(nf)
notfoundadd(nf)
else:
badfn(ff, encoding.strtolocal(inst.strerror))
# Case insensitive filesystems cannot rely on lstat() failing to detect
# a case-only rename. Prune the stat object for any file that does not
# match the case in the filesystem, if there are multiple files that
# normalize to the same path.
if match.isexact() and self._checkcase:
normed = {}
for f, st in results.iteritems():
if st is None:
continue
nc = util.normcase(f)
paths = normed.get(nc)
if paths is None:
paths = set()
normed[nc] = paths
paths.add(f)
for norm, paths in normed.iteritems():
if len(paths) > 1:
for path in paths:
folded = self._discoverpath(
path, norm, True, None, self._map.dirfoldmap
)
if path != folded:
results[path] = None
return results, dirsfound, dirsnotfound
@util.timefunction("dirstatewalk", 0, "_ui")
def walk(self, match, unknown, ignored, full=True):
"""
Walk recursively through the directory tree, finding all files
matched by match.
If full is False, maybe skip some known-clean files.
Return a dict mapping filename to stat-like object (either
mercurial.osutil.stat instance or return value of os.stat()).
"""
# full is a flag that extensions that hook into walk can use -- this
# implementation doesn't use it at all. This satisfies the contract
# because we only guarantee a "maybe".
if ignored:
ignore = util.never
dirignore = util.never
elif unknown:
ignore = self._ignore
dirignore = self._dirignore
else:
# if not unknown and not ignored, drop dir recursion and step 2
ignore = util.always
dirignore = util.always
matchfn = match.matchfn
matchalways = match.always()
matchtdir = match.traversedir
dmap = self._map
listdir = util.listdir
lstat = os.lstat
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
exact = skipstep3 = False
if match.isexact(): # match.exact
exact = True
dirignore = util.always # skip step 2
elif match.prefix(): # match.match, no patterns
skipstep3 = True
if not exact and self._checkcase:
normalize = self._normalize
normalizefile = self._normalizefile
skipstep3 = False
else:
normalize = self._normalize
normalizefile = None
# step 1: find all explicit files
results, work, dirsnotfound = self._walkexplicit(match)
skipstep3 = skipstep3 and not (work or dirsnotfound)
work = [d for d in work if not dirignore(d[0])]
# step 2: visit subdirectories
def traverse(work, alreadynormed):
wadd = work.append
while work:
nd = work.pop()
if not match.visitdir(nd):
continue
skip = None
if nd != "":
skip = ".hg"
try:
entries = listdir(join(nd), stat=True, skip=skip)
except OSError as inst:
if inst.errno in (errno.EACCES, errno.ENOENT):
match.bad(self.pathto(nd), encoding.strtolocal(inst.strerror))
continue
raise
for f, kind, st in entries:
if normalizefile:
# even though f might be a directory, we're only
# interested in comparing it to files currently in the
# dmap -- therefore normalizefile is enough
nf = normalizefile(nd and (nd + "/" + f) or f, True, True)
else:
nf = nd and (nd + "/" + f) or f
if nf not in results:
if kind == dirkind:
dirstate: stop testing directories as files in ignore handling Summary: This is subtle. `.hgignore`'s current behavior: if `^foo$` rule exists, then directory `foo/` is ignored (ex. 'foo/bar' should be ignored). However, that imposes problems for the sparse ignore matcher, which is the "negate" of the "include" matcher. A user can write `[include]` glob patterns like: a*{b*,c*/d*}/e* The ignore matcher will be the negate of the above patterns. Then because `a1b2` does not match `a*{b*,c*/d*}/e`, the negate matcher returns "True", and the ignore matcher will ignore the directory. So even if file `a1b2/e3` should be selected, the parent directory being ignored cause the file to be ignored. That is clearly incorrect for sparse's usecase. I think the issue is fundementally a layer violation - it's the *matcher*'s responsibility to check whether one of the parent directory is matched (or ignored), not the directory walker's responsibility. This diff fixes the walker so it uses the visitdir interface, and moves back the directory check to hgignore matcher to maintain compatibility. For three matchers involved in ignore handling: - hgignore matcher: updated to do the recursive directory handling on its own - gitignore matcher: work out of box. already consider parent directories! - sparse matcher: want the new behavior `test-sparse-issues.t` is now green. With this change, the `forceincludematcher` subdir hack used in sparse is no longer necessary. Therefore removed. Besides, all ignore matchers can handle "visitdir" correctly. That is, if `visitdir('x')` returns `'all'`, then `visitdir('x/y')` will also return `'all'`. Therefore the parent directory logic in `dirstate.dirignore` becomes unnecessary and dropped. Reviewed By: DurhamG Differential Revision: D10861612 fbshipit-source-id: aa0c181ae64b361b85f08b8fecfdfe6331e9a4c2
2018-12-13 09:42:17 +03:00
if not dirignore(nf):
if matchtdir:
matchtdir(nf)
wadd(nf)
if nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
elif kind == regkind or kind == lnkkind:
if nf in dmap:
if matchalways or matchfn(nf):
results[nf] = st
elif (matchalways or matchfn(nf)) and not ignore(nf):
# unknown file -- normalize if necessary
if not alreadynormed:
nf = normalize(nf, False, True)
results[nf] = st
elif nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
for nd, d in work:
# alreadynormed means that processwork doesn't have to do any
# expensive directory normalization
alreadynormed = not normalize or nd == d
traverse([d], alreadynormed)
del results[".hg"]
# step 3: visit remaining files from dmap
if not skipstep3 and not exact:
# If a dmap file is not in results yet, it was either
# a) not matching matchfn b) ignored, c) missing, or d) under a
# symlink directory.
if not results and matchalways:
visit = [f for f in dmap]
else:
visit = [f for f in dmap if f not in results and matchfn(f)]
visit.sort()
if unknown:
# unknown == True means we walked all dirs under the roots
# that wasn't ignored, and everything that matched was stat'ed
# and is already in results.
# The rest must thus be ignored or under a symlink.
pathauditor: disable cache of audited paths by default (issue5628) The initial attempt was to discard cache when appropriate, but it appears to be error prone. We had to carefully inspect all places where audit() is called e.g. without actually updating filesystem, before removing files and directories, etc. So, this patch disables the cache of audited paths by default, and enables it only for the following cases: - short-lived auditor objects - repo.vfs, repo.svfs, and repo.cachevfs, which are managed directories and considered sort of append-only (a file/directory would never be replaced with a symlink) There would be more cacheable vfs objects (e.g. mq.queue.opener), but I decided not to inspect all of them in this patch. We can make them cached later. Benchmark result: - using old clone of http://selenic.com/repo/linux-2.6/ (38319 files) - on tmpfs - run HGRCPATH=/dev/null hg up -q --time tip && hg up -q null - try 4 times and take the last three results original: real 7.480 secs (user 1.140+22.760 sys 0.150+1.690) real 8.010 secs (user 1.070+22.280 sys 0.170+2.120) real 7.470 secs (user 1.120+22.390 sys 0.120+1.910) clearcache (the other series): real 7.680 secs (user 1.120+23.420 sys 0.140+1.970) real 7.670 secs (user 1.110+23.620 sys 0.130+1.810) real 7.740 secs (user 1.090+23.510 sys 0.160+1.940) enable cache only for vfs and svfs (this series): real 8.730 secs (user 1.500+25.190 sys 0.260+2.260) real 8.750 secs (user 1.490+25.170 sys 0.250+2.340) real 9.010 secs (user 1.680+25.340 sys 0.280+2.540) remove cache function at all (for reference): real 9.620 secs (user 1.440+27.120 sys 0.250+2.980) real 9.420 secs (user 1.400+26.940 sys 0.320+3.130) real 9.760 secs (user 1.530+27.270 sys 0.250+2.970)
2017-07-26 16:10:15 +03:00
audit_path = pathutil.pathauditor(self._root, cached=True)
for nf in iter(visit):
# If a stat for the same file was already added with a
# different case, don't add one for this, since that would
# make it appear as if the file exists under both names
# on disk.
if normalizefile and normalizefile(nf, True, True) in results:
results[nf] = None
# Report ignored items in the dmap as long as they are not
# under a symlink directory.
elif audit_path.check(nf):
try:
results[nf] = lstat(join(nf))
# file was just ignored, no links, and exists
except OSError:
# file doesn't exist
results[nf] = None
else:
# It's either missing or under a symlink directory
# which we in this case report as missing
results[nf] = None
else:
# We may not have walked the full directory tree above,
# so stat and check everything we missed.
iv = iter(visit)
for st in util.statfiles([join(i) for i in visit]):
results[next(iv)] = st
return results
@perftrace.tracefunc("Status")
def status(self, match, ignored, clean, unknown):
"""Determine the status of the working copy relative to the
dirstate and return a pair of (unsure, status), where status is of type
scmutil.status and:
unsure:
files that might have been modified since the dirstate was
written, but need to be read to be sure (size is the same
but mtime differs)
status.modified:
files that have definitely been modified since the dirstate
was written (different size or mode)
status.clean:
files that have definitely not been modified since the
dirstate was written
"""
2008-06-26 23:35:50 +04:00
listignored, listclean, listunknown = ignored, clean, unknown
lookup, modified, added, unknown, ignored = [], [], [], [], []
removed, deleted, clean = [], [], []
dmap = self._map
dmap.preload()
dget = dmap.__getitem__
ladd = lookup.append # aka "unsure"
madd = modified.append
aadd = added.append
uadd = unknown.append
iadd = ignored.append
radd = removed.append
dadd = deleted.append
cadd = clean.append
mexact = match.exact
dirstate: stop testing directories as files in ignore handling Summary: This is subtle. `.hgignore`'s current behavior: if `^foo$` rule exists, then directory `foo/` is ignored (ex. 'foo/bar' should be ignored). However, that imposes problems for the sparse ignore matcher, which is the "negate" of the "include" matcher. A user can write `[include]` glob patterns like: a*{b*,c*/d*}/e* The ignore matcher will be the negate of the above patterns. Then because `a1b2` does not match `a*{b*,c*/d*}/e`, the negate matcher returns "True", and the ignore matcher will ignore the directory. So even if file `a1b2/e3` should be selected, the parent directory being ignored cause the file to be ignored. That is clearly incorrect for sparse's usecase. I think the issue is fundementally a layer violation - it's the *matcher*'s responsibility to check whether one of the parent directory is matched (or ignored), not the directory walker's responsibility. This diff fixes the walker so it uses the visitdir interface, and moves back the directory check to hgignore matcher to maintain compatibility. For three matchers involved in ignore handling: - hgignore matcher: updated to do the recursive directory handling on its own - gitignore matcher: work out of box. already consider parent directories! - sparse matcher: want the new behavior `test-sparse-issues.t` is now green. With this change, the `forceincludematcher` subdir hack used in sparse is no longer necessary. Therefore removed. Besides, all ignore matchers can handle "visitdir" correctly. That is, if `visitdir('x')` returns `'all'`, then `visitdir('x/y')` will also return `'all'`. Therefore the parent directory logic in `dirstate.dirignore` becomes unnecessary and dropped. Reviewed By: DurhamG Differential Revision: D10861612 fbshipit-source-id: aa0c181ae64b361b85f08b8fecfdfe6331e9a4c2
2018-12-13 09:42:17 +03:00
ignore = self._ignore
checkexec = self._checkexec
copymap = self._map.copymap
lastnormaltime = self._lastnormaltime
cleanmarked = False
if self._istreestate:
markclean = self._map.clearneedcheck
else:
markclean = lambda path: False
# We have seen some rare issues that a few "M" or "R" files show up
# while the files are expected to be clean. Log the reason of first few
# "M" files.
mtolog = ltolog = self._ui.configint("experimental", "samplestatus")
# We need to do full walks when either
# - we're listing all clean files, or
# - match.traversedir does something, because match.traversedir should
# be called for every dir in the working dir
full = listclean or match.traversedir is not None
for fn, st in self.walk(match, listunknown, listignored, full=full).iteritems():
try:
t = dget(fn)
# This "?" state is only tracked by treestate, emulate the old
# behavior - KeyError.
if t[0] == "?":
raise KeyError
except KeyError:
dirstate: stop testing directories as files in ignore handling Summary: This is subtle. `.hgignore`'s current behavior: if `^foo$` rule exists, then directory `foo/` is ignored (ex. 'foo/bar' should be ignored). However, that imposes problems for the sparse ignore matcher, which is the "negate" of the "include" matcher. A user can write `[include]` glob patterns like: a*{b*,c*/d*}/e* The ignore matcher will be the negate of the above patterns. Then because `a1b2` does not match `a*{b*,c*/d*}/e`, the negate matcher returns "True", and the ignore matcher will ignore the directory. So even if file `a1b2/e3` should be selected, the parent directory being ignored cause the file to be ignored. That is clearly incorrect for sparse's usecase. I think the issue is fundementally a layer violation - it's the *matcher*'s responsibility to check whether one of the parent directory is matched (or ignored), not the directory walker's responsibility. This diff fixes the walker so it uses the visitdir interface, and moves back the directory check to hgignore matcher to maintain compatibility. For three matchers involved in ignore handling: - hgignore matcher: updated to do the recursive directory handling on its own - gitignore matcher: work out of box. already consider parent directories! - sparse matcher: want the new behavior `test-sparse-issues.t` is now green. With this change, the `forceincludematcher` subdir hack used in sparse is no longer necessary. Therefore removed. Besides, all ignore matchers can handle "visitdir" correctly. That is, if `visitdir('x')` returns `'all'`, then `visitdir('x/y')` will also return `'all'`. Therefore the parent directory logic in `dirstate.dirignore` becomes unnecessary and dropped. Reviewed By: DurhamG Differential Revision: D10861612 fbshipit-source-id: aa0c181ae64b361b85f08b8fecfdfe6331e9a4c2
2018-12-13 09:42:17 +03:00
if (listignored or mexact(fn)) and ignore(fn):
2008-06-26 23:35:50 +04:00
if listignored:
iadd(fn)
else:
uadd(fn)
continue
2008-05-12 20:37:08 +04:00
# This is equivalent to 'state, mode, size, time = dmap[fn]' but not
# written like that for performance reasons. dmap[fn] is not a
# Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
# opcode has fast paths when the value to be unpacked is a tuple or
# a list, but falls back to creating a full-fledged iterator in
# general. That is much slower than simply accessing and storing the
# tuple members one by one.
state = t[0]
mode = t[1]
size = t[2]
time = t[3]
2008-05-12 20:37:08 +04:00
if not st and state in "nma":
dadd(fn)
elif state == "n":
if (
size >= 0
and (
(size != st.st_size and size != st.st_size & _rangemask)
or ((mode ^ st.st_mode) & 0o100 and checkexec)
)
or size == -2 # other parent
or fn in copymap
):
madd(fn)
if mtolog > 0:
mtolog -= 1
reasons = []
if size == -2:
reasons.append("exists in p2")
elif size != st.st_size:
reasons.append(
"size changed (%s -> %s)" % (size, st.st_size)
)
# See T39234759. Sometimes watchman returns 0 size
# (st.st_size) and we suspect it's incorrect.
# Do a double check with os.stat and log it.
if st.st_size == 0:
path = self._join(fn)
try:
reasons.append(
"os.stat size = %s" % os.stat(path).st_size
)
except Exception as ex:
reasons.append("os.stat failed (%s)" % ex)
if mode != st.st_mode:
reasons.append(
"mode changed (%s -> %s)" % (mode, st.st_mode)
)
if fn in copymap:
reasons.append("has copy information")
self._ui.log("status", "M %s: %s" % (fn, ", ".join(reasons)))
elif time != st.st_mtime and time != st.st_mtime & _rangemask:
if ltolog:
ltolog -= 1
reason = "mtime changed (%s -> %s)" % (time, st.st_mtime)
self._ui.log("status", "L %s: %s" % (fn, reason))
ladd(fn)
elif st.st_mtime == lastnormaltime:
# fn may have just been marked as normal and it may have
# changed in the same second without changing its size.
# This can happen if we quickly do multiple commits.
# Force lookup, so we don't miss such a racy file change.
if ltolog:
ltolog -= 1
reason = "mtime untrusted (%s)" % (st.st_mtime)
self._ui.log("status", "L %s: %s" % (fn, reason))
dirstate: avoid a race with multiple commits in the same process (issue2264, issue2516) The race happens when two commits in a row change the same file without changing its size, *if* those two commits happen in the same second in the same process while holding the same repo lock. For example: commit 1: M a M b commit 2: # same process, same second, same repo lock M b # modify b without changing its size M c This first manifested in transplant, which is the most common way to do multiple commits in the same process. But it can manifest in any script or extension that does multiple commits under the same repo lock. (Thus, the test script tests both transplant and a custom script.) The problem was that dirstate.status() failed to notice the change to b when localrepo is about to do the second commit, meaning that change gets left in the working directory. In the context of transplant, that means either a crash ("RuntimeError: nothing committed after transplant") or a silently inaccurate transplant, depending on whether any other files were modified by the second transplanted changeset. The fix is to make status() work a little harder when we have previously marked files as clean (state 'normal') in the same process. Specifically, dirstate.normal() adds files to self._lastnormal, and other state-changing methods remove them. Then dirstate.status() puts any files in self._lastnormal into state 'lookup', which will make localrepository.status() read file contents to see if it has really changed. So we pay a small performance penalty for the second (and subsequent) commits in the same process, without affecting the common case. Anything that does lots of status updates and checks in the same process could suffer a performance hit. Incidentally, there is a simpler fix: call dirstate.normallookup() on every file updated by commit() at the end of the commit. 
The trouble with that solution is that it imposes a performance penalty on the common case: it means the next status-dependent hg command after every "hg commit" will be a little bit slower. The patch here is more complex, but only affects performance for the uncommon case.
2011-03-21 00:41:09 +03:00
ladd(fn)
else:
cleanmarked |= markclean(fn)
if listclean:
cadd(fn)
elif state == "m":
madd(fn)
if mtolog > 0:
mtolog -= 1
self._ui.log("status", "M %s: state is 'm' (merge)" % fn)
elif state == "a":
aadd(fn)
elif state == "r":
radd(fn)
if cleanmarked:
self._dirty = True
perftrace.tracevalue("A/M/R Files", len(modified) + len(added) + len(removed))
if len(unknown) > 0:
perftrace.tracevalue("Unknown Files", len(unknown))
if len(ignored) > 0:
perftrace.tracevalue("Ignored Files", len(ignored))
return (
lookup,
scmutil.status(modified, added, removed, deleted, unknown, ignored, clean),
)
def matches(self, match):
"""
return files in the dirstate (in whatever state) filtered by match
"""
dmap = self._map
if match.always():
return dmap.keys()
files = match.files()
if match.isexact():
# fast path -- filter the other way around, since typically files is
# much smaller than dmap
return [f for f in files if f in dmap]
if match.prefix():
if self._istreestate:
# treestate has a fast path to get files inside a subdirectory.
# files are prefixes
result = set()
fastpathvalid = True
for prefix in files:
if prefix in dmap:
# prefix is a file
result.add(prefix)
elif dmap.hastrackeddir(prefix + "/"):
# prefix is a directory
result.update(dmap.keys(prefix=prefix + "/"))
else:
# unknown pattern (ex. "."), fast path is invalid
fastpathvalid = False
break
if fastpathvalid:
return sorted(result)
else:
# fast path -- all the values are known to be files, so just
# return that
if all(fn in dmap for fn in files):
return list(files)
return [f for f in dmap if match(f)]
def _actualfilename(self, tr):
if tr:
return self._pendingfilename
else:
return self._filename
    def savebackup(self, tr, backupname):
        """Save current dirstate into backup file

        ``tr`` is the active transaction (or None); ``backupname`` is the
        opener-relative path the dirstate is copied to.
        """
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, "w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                "dirstate", (self._filename,), self._writedirstate, location="local"
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location="local")

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename), self._opener.join(backupname), hardlink=True
        )
def restorebackup(self, tr, backupname):
"""Restore dirstate by backup file"""
# this "invalidate()" prevents "wlock.release()" from writing
# changes of dirstate out after restoring from backup file
self.invalidate()
filename = self._actualfilename(tr)
o = self._opener
if util.samefile(o.join(backupname), o.join(filename)):
o.unlink(backupname)
else:
o.rename(backupname, filename, checkambig=True)
    def clearbackup(self, tr, backupname):
        """Clear backup file"""
        # 'tr' is unused but kept for interface symmetry with
        # savebackup()/restorebackup().
        self._opener.unlink(backupname)
def loginfo(self, ui, prefix):
try:
parents = [hex(p) if p != nullid else "" for p in self._pl]
except Exception:
# The dirstate may be too corrupt to read. We don't want to fail
# just because of logging, so log the parents as unknown.
parents = ("unknown", "unknown")
data = {
prefix + "checkoutidentifier": self.checkoutidentifier,
prefix + "wdirparent1": parents[0],
prefix + "wdirparent2": parents[1],
}
ui.log("dirstate_info", **data)
class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `untrackfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """
    def __init__(self, ui, opener, root):
        # Create an empty, not-yet-loaded map; the actual contents are read
        # lazily via the `_map` propertycache.
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = "dirstate"
        # None means "parents not read yet"; see parents()/setparents().
        self._parents = None
        self._dirtyparents = False
        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
    @propertycache
    def _map(self):
        # Shadow the propertycache slot with a real dict first so that
        # read() can fill this same dict in place.
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        # copymap is populated as a side effect of read(); touching
        # self._map triggers that load.
        self.copymap = {}
        self._map
        return self.copymap
def clear(self):
self._map.clear()
self.copymap.clear()
self.setparents(nullid, nullid)
util.clearcachedproperty(self, "_dirs")
util.clearcachedproperty(self, "_alldirs")
util.clearcachedproperty(self, "filefoldmap")
util.clearcachedproperty(self, "dirfoldmap")
util.clearcachedproperty(self, "nonnormalset")
util.clearcachedproperty(self, "otherparentset")
    # Plain dict-style delegation to the underlying state map. Note that
    # read() rebinds __contains__/__getitem__/get on the instance to the
    # dict's own methods as a fast path.
    def iteritems(self):
        # yields (filename, (state, mode, size, mtime)) pairs
        return self._map.iteritems()

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map
    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        # Keep directory caches coherent: the file becomes tracked if it was
        # previously unknown ("?") or removed ("r").
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        # A previously-unknown file gains a map entry, so it now contributes
        # to _alldirs as well.
        if oldstate == "?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._insert_tuple(f, state, mode, size, mtime)
        # mtime == -1 marks a 'normallookup' entry; such files stay in
        # nonnormalset so status() re-examines their contents.
        if state != "n" or mtime == -1:
            self.nonnormalset.add(f)
        # size == -2 is the sentinel for "from the other parent" in a merge.
        if size == -2:
            self.otherparentset.add(f)
    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        # The file no longer counts as tracked, so drop it from _dirs
        # (unless it was already unknown or removed).
        if oldstate not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        # A previously-unknown file gains an "r" entry in the map, so it now
        # contributes to _alldirs.
        if oldstate == "?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            # "r" entries are excluded from filefoldmap; drop the stale one.
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._insert_tuple(f, "r", 0, size, 0)
        self.nonnormalset.add(f)
    def deletefile(self, f, oldstat):
        """
        Removes a file from the dirstate entirely, implying it doesn't even
        exist on disk anymore and may not be untracked.
        """
        # In the default dirstate implementation, deletefile is the same as
        # untrackfile. Subclasses (e.g. tree-based maps) may distinguish them.
        self.untrackfile(f, oldstat)
def untrackfile(self, f, oldstate):
"""
Remove a file from the dirstate, leaving it untracked. Returns True if
the file was previously recorded.
"""
exists = self._map.pop(f, None) is not None
if exists:
if oldstate != "r" and "_dirs" in self.__dict__:
self._dirs.delpath(f)
if "_alldirs" in self.__dict__:
self._alldirs.delpath(f)
if "filefoldmap" in self.__dict__:
normed = util.normcase(f)
self.filefoldmap.pop(normed, None)
self.nonnormalset.discard(f)
return exists
def clearambiguoustimes(self, files, now):
for f in files:
e = self.get(f)
if e is not None and e[0] == "n" and e[3] == now:
self._insert_tuple(f, e[0], e[1], e[2], -1)
self.nonnormalset.add(f)
    def _insert_tuple(self, f, state, mode, size, mtime):
        # dirstatetuple is the (state, mode, size, mtime) record type used
        # as the map's value.
        self._map[f] = dirstatetuple(state, mode, size, mtime)
def nonnormalentries(self):
"""Compute the nonnormal dirstate entries from the dmap"""
try:
return parsers.nonnormalotherparententries(self._map)
except AttributeError:
nonnorm = set()
otherparent = set()
for fname, e in self._map.iteritems():
if e[0] != "n" or e[3] == -1:
nonnorm.add(fname)
if e[0] == "n" and e[2] == -2:
otherparent.add(fname)
return nonnorm, otherparent
@propertycache
def filefoldmap(self):
"""Returns a dictionary mapping normalized case paths to their
non-normalized versions.
"""
try:
makefilefoldmap = parsers.make_file_foldmap
except AttributeError:
pass
else:
return makefilefoldmap(self._map, util.normcasespec, util.normcasefallback)
f = {}
normcase = util.normcase
for name, s in self._map.iteritems():
if s[0] != "r":
f[normcase(name)] = name
f["."] = "." # prevents useless util.fspath() invocation
return f
    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        # Directories containing at least one tracked file; entries in state
        # "r" are skipped.
        return util.dirs(self._map, "r")

    @propertycache
    def _alldirs(self):
        # Directories containing any entry, including removed ("r") files.
        return util.dirs(self._map)
    def _opendirstatefile(self):
        # Open the dirstate, preferring a pending (in-transaction) copy when
        # one exists; 'mode' records whether the pending copy was chosen.
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            # Refuse to mix pending and non-pending reads within the same map
            # instance -- they may describe different states.
            fp.close()
            raise error.Abort(_("working directory state may be " "changed parallelly"))
        self._pendingmode = mode
        return fp
def parents(self):
if not self._parents:
try:
fp = self._opendirstatefile()
st = fp.read(40)
fp.close()
except IOError as err:
if err.errno != errno.ENOENT:
raise
# File doesn't exist, so the current state is empty
st = ""
l = len(st)
if l == 40:
self._parents = st[:20], st[20:40]
elif l == 0:
self._parents = [nullid, nullid]
else:
raise error.Abort(_("working directory state appears " "damaged!"))
return self._parents
    def setparents(self, p1, p2):
        # Record the new working-copy parents; marking them dirty ensures
        # they are written back on the next write().
        self._parents = (p1, p2)
        self._dirtyparents = True
def read(self):
# ignore HG_PENDING because identity is used only for writing
self.identity = util.filestat.frompath(self._opener.join(self._filename))
try:
fp = self._opendirstatefile()
try:
st = fp.read()
finally:
fp.close()
except IOError as err:
if err.errno != errno.ENOENT:
raise
return
if not st:
return
if util.safehasattr(parsers, "dict_new_presized"):
# Make an estimate of the number of files in the dirstate based on
# its size. From a linear regression on a set of real-world repos,
# all over 10,000 files, the size of a dirstate entry is 85
# bytes. The cost of resizing is significantly higher than the cost
# of filling in a larger presized dict, so subtract 20% from the
# size.
#
# This heuristic is imperfect in many ways, so in a future dirstate
# format update it makes sense to just record the number of entries
# on write.
self._map = parsers.dict_new_presized(len(st) / 71)
# Python's garbage collector triggers a GC each time a certain number
# of container objects (the number being defined by
# gc.get_threshold()) are allocated. parse_dirstate creates a tuple
# for each file in the dirstate. The C version then immediately marks
# them as not to be tracked by the collector. However, this has no
# effect on when GCs are triggered, only on what objects the GC looks
# into. This means that O(number of files) GCs are unavoidable.
# Depending on when in the process's lifetime the dirstate is parsed,
# this can get very expensive. As a workaround, disable GC while
# parsing the dirstate.
#
# (we cannot decorate the function directly since it is in a C module)
parse_dirstate = util.nogc(parsers.parse_dirstate)
p = parse_dirstate(self._map, self.copymap, st)
if not self._dirtyparents:
self.setparents(*p)
# Avoid excess attribute lookups by fast pathing certain checks
self.__contains__ = self._map.__contains__
self.__getitem__ = self._map.__getitem__
self.get = self._map.get
def write(self, st, now):
st.write(parsers.pack_dirstate(self._map, self.copymap, self.parents(), now))
st.close()
self._dirtyparents = False
self.nonnormalset, self.otherparentset = self.nonnormalentries()
    @propertycache
    def nonnormalset(self):
        # nonnormalentries() computes both sets in one pass, so cache the
        # sibling otherparentset at the same time.
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        # Mirror of nonnormalset above: cache both results from one pass.
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents
    @propertycache
    def identity(self):
        # read() assigns self.identity before parsing, so touching self._map
        # (which triggers read()) populates this cache as a side effect.
        self._map
        return self.identity
@propertycache
def dirfoldmap(self):
f = {}
normcase = util.normcase
for name in self._dirs:
f[normcase(name)] = name
return f