repoview: further remove repoview references

Summary:
Since repoview has been removed, these concepts are useless, so remove them as well. The call sites are rewritten following a single mechanical pattern; see the sketch after the list below.

This includes:
- repo.unfiltered(), repo.filtered(), repo.filtername
- changelog.filteredrevs
- error.FilteredIndexError, error.FilteredLookupError,
  error.FilteredRepoLookupError
- repo.unfilteredpropertycache, repo.filteredpropertycache,
  repo.unfilteredmethod
- index.headsrevsfiltered
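
The rewrite applied across the 90 files follows one mechanical pattern.
A minimal, hypothetical sketch (not code from this commit; "repo" and
"rev" stand for any localrepository object and revision number):

    # Before: callers escaped the filtered "repoview" explicitly, and
    # access to a hidden revision raised a dedicated exception.
    unfi = repo.unfiltered()
    try:
        node = unfi.changelog.node(rev)
    except error.FilteredIndexError:
        pass  # rev was hidden by the view

    # After: there is only one view, so the escape hatch is dropped
    # along with the Filtered* exception classes.
    node = repo.changelog.node(rev)

Every hunk below is an instance of this pattern, or removes the
machinery (changelog.filteredrevs, the C-level filter checks) that
supported it.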

Reviewed By: DurhamG

Differential Revision: D22367600

fbshipit-source-id: d133b8aaa136176b4c9f7f4b0c52ee60ac888531
Jun Wu 2020-07-06 14:00:16 -07:00 committed by Facebook GitHub Bot
parent 021fa7eba5
commit dabce28285
90 changed files with 390 additions and 1040 deletions


@ -391,7 +391,7 @@ def getvfs(repo):
def clearfilecache(repo, attrname):
unfi = repo.unfiltered()
unfi = repo
if attrname in vars(unfi):
delattr(unfi, attrname)
unfi._filecache.pop(attrname, None)
@ -919,7 +919,6 @@ def perfparents(ui, repo, **opts):
count = getint(ui, "perf", "parentscount", 1000)
if len(repo.changelog) < count:
raise error.Abort("repo needs %d commits for this test" % count)
repo = repo.unfiltered()
nl = [repo.changelog.node(i) for i in xrange(count)]
def d():


@ -641,7 +641,7 @@ class fixupstate(object):
self.ui = ui or nullui()
self.opts = opts or {}
self.stack = stack
self.repo = stack[-1].repo().unfiltered()
self.repo = stack[-1].repo()
self.checkoutidentifier = self.repo.dirstate.checkoutidentifier
# following fields will be filled later


@ -188,12 +188,12 @@ def unhide(ui, repo, *revs, **opts):
"""
revs = list(revs) + opts.pop("rev", [])
with repo.lock():
revs = set(scmutil.revrange(repo.unfiltered(), revs))
revs = set(scmutil.revrange(repo, revs))
_dounhide(repo, revs)
def _dounhide(repo, revs):
unfi = repo.unfiltered()
unfi = repo
if obsolete.isenabled(repo, obsolete.createmarkersopt):
ctxs = unfi.set("not public() & ::(%ld) & obsolete()", revs)
obsolete.revive(ctxs, operation="unhide")


@ -232,7 +232,7 @@ def prune(ui, repo, *revs, **opts):
# informs that changeset have been pruned
ui.status(_("%i changesets pruned\n") % len(precs))
for ctx in repo.unfiltered().set("bookmark() and %ld", precs):
for ctx in repo.set("bookmark() and %ld", precs):
# used to be:
#
# ldest = list(repo.set('max((::%d) - obsolete())', ctx))


@ -17,7 +17,7 @@ revsetpredicate = registrar.revsetpredicate()
@revsetpredicate("_destrestack(SRC)")
def _destrestack(repo, subset, x):
"""restack destination for given single source revision"""
unfi = repo.unfiltered()
unfi = repo
obsoleted = unfi.revs("obsolete()")
getparents = unfi.changelog.parentrevs
getphase = unfi._phasecache.phase


@ -196,6 +196,6 @@ def split(ui, repo, *revs, **opts):
if torebase:
rebaseopts = {"dest": "_destrestack(SRC)", "rev": torebase}
rebase.rebase(ui, repo, **rebaseopts)
unfi = repo.unfiltered()
unfi = repo
with repo.transaction("post-split-hide"):
visibility.remove(repo, [unfi[r].node()])


@ -61,7 +61,7 @@ def unamend(ui, repo, **opts):
version, regardless of whether the changes resulted from an :hg:`amend`
operation or from another operation, such as :hg:`rebase`.
"""
unfi = repo.unfiltered()
unfi = repo
# identify the commit from which to unamend
curctx = repo["."]


@ -69,7 +69,6 @@ def _differentialhash(ui, repo, phabrev):
def _diff2o(ui, repo, rev1, rev2, *pats, **opts):
# Phabricator revs are often filtered (hidden)
repo = repo.unfiltered()
# First reconstruct textual diffs for rev1 and rev2 independently.
def changediff(node):
nodebase = repo[node].p1().node()
@ -166,14 +165,14 @@ def _diff(orig, ui, repo, *pats, **opts):
# if patterns aren't provided, restrict diff to files in both changesets
# this prevents performing a diff on rebased changes
if len(pats) == 0:
prev = set(repo.unfiltered()[rev].files())
prev = set(repo[rev].files())
curr = set(repo[targetrev].files())
pats = tuple(os.path.join(repo.root, p) for p in prev | curr)
if opts.get("since_last_submit_2o"):
return _diff2o(ui, repo, rev, targetrev, **opts)
else:
return orig(ui, repo.unfiltered(), *pats, **opts)
return orig(ui, repo, *pats, **opts)
@revsetpredicate("lastsubmitted(set)")
@ -196,6 +195,6 @@ def lastsubmitted(repo, subset, x):
lasthash = str(diffrev["hash"])
_maybepull(repo, lasthash)
resultrevs.add(repo.unfiltered()[lasthash].rev())
resultrevs.add(repo[lasthash].rev())
return subset & smartset.baseset(sorted(resultrevs))


@ -310,7 +310,7 @@ def cloudremote(repo, subset, x):
repo, [nodemod.bin(nodehex) for nodehex in args]
)
hexnodespulled = [nodemod.hex(node) for node in nodespulled]
return subset & repo.unfiltered().revs("%ls", hexnodespulled)
return subset & repo.revs("%ls", hexnodespulled)
except Exception as e:
repo.ui.status(
_("unable to pull all changesets from the remote store\n%s\n") % e,
@ -324,7 +324,7 @@ def missingcloudrevspull(repo, nodes):
This is, for example, the case for all hidden revs on new clone + cloud sync.
"""
unfi = repo.unfiltered()
unfi = repo
def obscontains(nodebin):
return bool(unfi.obsstore.successors.get(nodebin, None))
@ -341,7 +341,7 @@ def missingcloudrevspull(repo, nodes):
@revsetpredicate("backedup")
def backedup(repo, subset, x):
"""draft changesets that have been backed up to Commit Cloud"""
unfi = repo.unfiltered()
unfi = repo
state = backupstate.BackupState(repo, ccutil.getremotepath(repo, None))
backedup = unfi.revs("not public() and ::%ln", state.heads)
return smartset.filteredset(subset & repo.revs("draft()"), lambda r: r in backedup)
@ -350,7 +350,7 @@ def backedup(repo, subset, x):
@revsetpredicate("notbackedup")
def notbackedup(repo, subset, x):
"""changesets that have not yet been backed up to Commit Cloud"""
unfi = repo.unfiltered()
unfi = repo
state = backupstate.BackupState(repo, ccutil.getremotepath(repo, None))
backedup = unfi.revs("not public() and ::%ln", state.heads)
return smartset.filteredset(


@ -45,7 +45,7 @@ def _backup(
were backed up, and "failed" is a revset of the commits that could not be
backed up.
"""
unfi = repo.unfiltered()
unfi = repo
if revs is None:
# No revs specified. Back up all visible commits that are not already


@ -122,7 +122,7 @@ def pushbackupbookmarks(repo, remotepath, getconnection, backupstate):
Push a backup bundle to the server that updates the infinitepush backup
bookmarks.
"""
unfi = repo.unfiltered()
unfi = repo
# Create backup bookmarks for the heads and bookmarks of the user. We
# need to include only commits that have been successfully backed up, so


@ -47,7 +47,7 @@ class BackupState(object):
self.initfromserver()
return
heads = (nodemod.bin(head.strip()) for head in lines[2:])
hasnode = repo.unfiltered().changelog.hasnode
hasnode = repo.changelog.hasnode
self.heads = {h for h in heads if hasnode(h)}
else:
self.initfromserver()
@ -57,7 +57,7 @@ class BackupState(object):
# know are backed up.
repo = self.repo
remotepath = self.remotepath
unfi = repo.unfiltered()
unfi = repo
unknown = [
nodemod.hex(n)
for n in unfi.nodes(
@ -87,7 +87,7 @@ class BackupState(object):
@util.propertycache
def backedup(self):
unfi = self.repo.unfiltered()
unfi = self.repo
hasnode = unfi.changelog.hasnode
heads = [head for head in self.heads if hasnode(head)]
return set(unfi.nodes("not public() & ::%ln", heads))
@ -99,7 +99,7 @@ class BackupState(object):
f.write(encodeutf8("%s\n" % nodemod.hex(h)))
def update(self, newnodes, tr=None):
unfi = self.repo.unfiltered()
unfi = self.repo
# The new backed up heads are the heads of all commits we already knew
# were backed up plus the newly backed up commits.
self.heads = list(


@ -890,7 +890,7 @@ def cloudcheck(ui, repo, dest=None, **opts):
revs = ["."]
remotepath = ccutil.getremotepath(repo, dest)
unfi = repo.unfiltered()
unfi = repo
revs = scmutil.revrange(repo, revs)
nodestocheck = [repo[r].hex() for r in revs]


@ -373,7 +373,7 @@ def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state
# Pull all the new heads and any bookmark hashes we don't have. We need to
# filter cloudrefs before pull as pull doesn't check if a rev is present
# locally.
unfi = repo.unfiltered()
unfi = repo
newheads = [head for head in cloudrefs.heads if head not in unfi]
if maxage is not None and maxage >= 0:
mindate = time.time() - maxage * 86400
@ -474,7 +474,7 @@ def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state
if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
"mutation", "proxy-obsstore"
):
unfi = repo.unfiltered()
unfi = repo
# Commits that are only visible in the cloud are commits that are
# ancestors of the cloud heads but are hidden locally.
cloudvisibleonly = list(
@ -630,7 +630,7 @@ def _processremotebookmarks(repo, cloudremotebooks, lastsyncstate):
"""returns True if cloudnode should be a new state for the remote bookmark
Both cloudnode and localnode are public commits."""
unfi = repo.unfiltered()
unfi = repo
if localnode not in unfi:
# we somehow don't have the localnode in the repo, probably may want
# to fetch it
@ -697,7 +697,7 @@ def _processremotebookmarks(repo, cloudremotebooks, lastsyncstate):
remote, name = bookmarks.splitremotename(name)
return not repo._scratchbranchmatcher.match(name)
unfi = repo.unfiltered()
unfi = repo
newnodes = set(
node
for name, node in pycompat.iteritems(updates)
@ -712,7 +712,7 @@ def _updateremotebookmarks(repo, tr, updates):
protectednames = set(repo.ui.configlist("remotenames", "selectivepulldefault"))
newremotebookmarks = {}
omittedremotebookmarks = []
unfi = repo.unfiltered()
unfi = repo
# Filter out any deletions of default names. These are protected and shouldn't
# be deleted.
@ -773,7 +773,7 @@ def _mergebookmarks(repo, tr, cloudbookmarks, lastsyncstate):
Returns a list of the omitted bookmark names.
"""
unfi = repo.unfiltered()
unfi = repo
localbookmarks = _getbookmarks(repo)
omittedbookmarks = set(lastsyncstate.omittedbookmarks)
changes = []
@ -864,7 +864,6 @@ def _mergeobsmarkers(repo, tr, obsmarkers):
if obsolete.isenabled(repo, obsolete.createmarkersopt):
tr._commitcloudskippendingobsmarkers = True
repo.obsstore.add(tr, obsmarkers)
repo.filteredrevcache.clear()
@perftrace.tracefunc("Check Omissions")
@ -876,7 +875,7 @@ def _checkomissions(repo, remotepath, lastsyncstate, tr):
them manually), then remove the tracking of those heads being omitted, and
restore any bookmarks that can now be restored.
"""
unfi = repo.unfiltered()
unfi = repo
lastomittedheads = set(lastsyncstate.omittedheads)
lastomittedbookmarks = set(lastsyncstate.omittedbookmarks)
lastomittedremotebookmarks = set(lastsyncstate.omittedremotebookmarks)


@ -273,7 +273,7 @@ class mercurial_sink(common.converter_sink):
if commit.rev and commit.saverev:
extra["convert_revision"] = commit.rev
unfi = self.repo.unfiltered()
unfi = self.repo
while parents:
p1 = p2
p2 = parents.pop(0)


@ -256,7 +256,7 @@ class state_update(object):
partial=False,
metadata=None,
):
self.repo = repo.unfiltered()
self.repo = repo
self.name = name
self.oldnode = oldnode
self.newnode = newnode


@ -90,9 +90,6 @@ annotate cache greatly. Run "debugbuildlinkrevcache" before
# to avoid a file fetch if remotefilelog is used. (default: True)
forcetext = True
# use unfiltered repo for better performance.
unfilteredrepo = True
# sacrifice correctness in some corner cases for performance. it does not
# affect the correctness of the annotate cache being built. the option
# is experimental and may disappear in the future (default: False)


@ -156,10 +156,6 @@ def fastannotate(ui, repo, *pats, **opts):
if not pats:
raise error.Abort(_("at least one filename or pattern is required"))
# performance hack: filtered repo can be slow. unfilter by default.
if ui.configbool("fastannotate", "unfilteredrepo", True):
repo = repo.unfiltered()
rev = opts.get("rev", ".")
rebuild = opts.get("rebuild", False)
@ -240,10 +236,6 @@ _knownopts = set(
def _annotatewrapper(orig, ui, repo, *pats, **opts):
"""used by wrapdefault"""
# we need this hack until the obsstore has 0.0 seconds perf impact
if ui.configbool("fastannotate", "unfilteredrepo", True):
repo = repo.unfiltered()
# treat the file as text (skip the isbinary check)
if ui.configbool("fastannotate", "forcetext", True):
opts["text"] = True
@ -291,8 +283,6 @@ def debugbuildannotatecache(ui, repo, *pats, **opts):
_("you need to provide a revision"),
hint=_("set fastannotate.mainbranch or use --rev"),
)
if ui.configbool("fastannotate", "unfilteredrepo", True):
repo = repo.unfiltered()
ctx = scmutil.revsingle(repo, rev)
m = scmutil.match(ctx, pats, opts)
paths = list(ctx.walk(m))


@ -239,7 +239,7 @@ class gitnodemap(object):
for line in open(mapfile, "r"):
githexnode, hghexnode = line.split()
mapadd(bin(githexnode), bin(hghexnode))
unfi = repo.unfiltered()
unfi = repo
clnode = unfi.changelog.node
clrevision = unfi.changelog.changelogrevision
# Read git hashes from commit extras.


@ -401,7 +401,7 @@ def updateglobalrevmeta(ui, repo, *args, **opts):
"""Reads globalrevs from the latest hg commits and adds them to the
globalrev-hg mapping."""
with repo.wlock(), repo.lock():
unfi = repo.unfiltered()
unfi = repo
clnode = unfi.changelog.node
clrevision = unfi.changelog.changelogrevision
globalrevmap = _globalrevmap(unfi)


@ -141,7 +141,7 @@ def wrapupdate(
newnode = repo[node].node()
if matcher is None or matcher.always():
partial = False
distance = watchmanclient.calcdistance(repo.unfiltered(), oldnode, newnode)
distance = watchmanclient.calcdistance(repo, oldnode, newnode)
with watchmanclient.state_update(
repo,


@ -1244,7 +1244,7 @@ class GitHandler(object):
new_refs[b"refs/heads/master"] = self.map_git_get(tip)
# mapped nodes might be hidden
unfiltered = self.repo.unfiltered()
unfiltered = self.repo
for rev, rev_refs in pycompat.iteritems(exportable):
ctx = self.repo[rev]
if not rev_refs:


@ -521,7 +521,7 @@ class overlayrepo(object):
return overlayfilectx(self, path, fileid=fileid)
def unfiltered(self):
return self.handler.repo.unfiltered()
return self.handler.repo
def _makemaps(self, commits, refs):
baserev = self.handler.repo["tip"].rev()


@ -2447,7 +2447,6 @@ def sqlrefill(ui, startrev, **opts):
startrev = int(startrev)
repo = repo.unfiltered()
with repo.lock():
repo.sqlconnect()
repo.sqlwritelock()
@ -2703,7 +2702,7 @@ def sqlverify(ui, repo, *args, **opts):
rl = revlogcache.get(filepath)
if rl is None:
if filepath == "00changelog.i":
rl = repo.unfiltered().changelog
rl = repo.changelog
elif filepath == "00manifest.i":
rl = repo.manifestlog._revlog
else:
@ -2759,7 +2758,7 @@ def _sqlverify(repo, minrev, maxrev, revlogcache):
rl = revlogcache.get(path)
if rl is None:
if path == "00changelog.i":
rl = repo.unfiltered().changelog
rl = repo.changelog
elif path == "00manifest.i":
rl = repo.manifestlog._revlog
else:


@ -1289,7 +1289,7 @@ def _finishhistedit(ui, repo, state, fm):
mapping[n] = ()
# remove entries about unknown nodes
nodemap = repo.unfiltered().changelog.nodemap
nodemap = repo.changelog.nodemap
mapping = {
k: v
for k, v in mapping.items()
@ -1335,7 +1335,7 @@ def _aborthistedit(ui, repo, state):
os.remove(backupfile)
# check whether we should update away
unfi = repo.unfiltered()
unfi = repo
revs = list(unfi.revs("%ln::", leafs | tmpnodes))
if unfi.revs("parents() and (%n or %ld)", state.parentctxnode, revs):
with repo.transaction("histedit.abort") as tr:
@ -1635,7 +1635,7 @@ def adjustreplacementsfrommarkers(repo, oldreplacements):
if not obsolete.isenabled(repo, obsolete.createmarkersopt):
return oldreplacements
unfi = repo.unfiltered()
unfi = repo
nm = unfi.changelog.nodemap
obsstore = repo.obsstore
newreplacements = list(oldreplacements)
@ -1677,7 +1677,7 @@ def adjustreplacementsfrommutation(repo, oldreplacements):
state and does not account for changes that are not recorded there. This
function fixes that by adding data read from commit mutation records.
"""
unfi = repo.unfiltered()
unfi = repo
newreplacements = list(oldreplacements)
oldsuccs = [r[1] for r in oldreplacements]
# successors that have already been added to succstocheck once


@ -423,7 +423,7 @@ def _dopull(orig, ui, repo, source="default", **opts):
source, branches = hg.parseurl(ui.expandpath(source), opts.get("branch"))
scratchbookmarks = {}
unfi = repo.unfiltered()
unfi = repo
unknownnodes = []
pullbookmarks = opts.get("bookmark") or []
if opts.get("rev", None):
@ -459,21 +459,6 @@ def _dopull(orig, ui, repo, source="default", **opts):
opts["bookmark"] = realbookmarks
opts["rev"] = [rev for rev in revs if rev not in scratchbookmarks]
# Pulling revisions that were filtered results in a error.
# Let's revive them.
unfi = repo.unfiltered()
torevive = []
for rev in opts.get("rev", []):
try:
repo[rev]
except error.FilteredRepoLookupError:
torevive.append(rev)
except error.RepoLookupError:
pass
if obsolete.isenabled(repo, obsolete.createmarkersopt):
obsolete.revive([unfi[r] for r in torevive])
visibility.add(repo, [unfi[r].node() for r in torevive])
if scratchbookmarks or unknownnodes:
# Set anyincoming to True
extensions.wrapfunction(discovery, "findcommonincoming", _findcommonincoming)


@ -370,10 +370,10 @@ def gc(repo):
draftrevs = repo.revs("(not public()) & ::%ln", nodes)
else:
# non-narrow-heads: use unfiltered repo to get all drafts.
draftrevs = repo.unfiltered().revs("draft()")
draftrevs = repo.revs("draft()")
# Pass unfiltered repo in case we got unfiltered draft commits above
draftoids = {p.oid() for p in wrapper.extractpointers(repo.unfiltered(), draftrevs)}
draftoids = {p.oid() for p in wrapper.extractpointers(repo, draftrevs)}
oids = alloids - draftoids
if not oids:
return


@ -359,7 +359,6 @@ def _buildlinkrevcache(ui, repo, db, end):
# 2441406: 10G by default (assuming page size = 4K).
maxpagesize = ui.configint("linkrevcache", "maxpagesize") or 2441406
repo = repo.unfiltered()
cl = repo.changelog
idx = cl.index
ml = repo.manifestlog
@ -446,8 +445,7 @@ def debugverifylinkrevcache(ui, repo, *pats, **opts):
paths = {} # {id: name}
nodes = {} # {id: name}
repo = repo.unfiltered()
idx = repo.unfiltered().changelog.index
idx = repo.changelog.index
db = repo._linkrevcache
paths = dict(db._getdb(db._pathdbname))
@ -500,7 +498,7 @@ def _adjustlinkrev(orig, self, *args, **kwds):
srcrev = args[-1]
cache = getattr(self._repo, "_linkrevcache", None)
if cache is not None and srcrev is not None:
index = repo.unfiltered().changelog.index
index = repo.changelog.index
try:
linkrevs = set(cache.getlinkrevs(self._path, self._filenode))
except Exception:


@ -147,7 +147,7 @@ def populateresponseforphab(repo, diffnum):
# Remove it so we will bail out earlier next time.
del repo._phabstatusrevs
alldiffnumbers = [getdiffnum(repo, repo.unfiltered()[rev]) for rev in next_revs]
alldiffnumbers = [getdiffnum(repo, repo[rev]) for rev in next_revs]
okdiffnumbers = set(d for d in alldiffnumbers if d is not None)
# Make sure we always include the requested diff number
okdiffnumbers.add(diffnum)
@ -232,9 +232,7 @@ def showsyncstatus(repo, ctx, templ, **args):
return "sync"
elif count == 1:
precursors = list(obsutil.allpredecessors(repo.obsstore, [ctx.node()]))
hashes = [
repo.unfiltered()[h].hex() for h in precursors if h in repo.unfiltered()
]
hashes = [repo[h].hex() for h in precursors if h in repo]
# hashes[0] is the current
# hashes[1] is the previous
if len(hashes) > 1 and hashes[1] == remote:


@ -296,7 +296,7 @@ def diffidtonode(repo, diffid):
continue
node = bin(hexnode)
unfi = repo.unfiltered()
unfi = repo
if node in unfi:
# Find a successor.
successors = list(
@ -327,7 +327,6 @@ def diffidtonode(repo, diffid):
def _lookupname(repo, name):
repo = repo.unfiltered()
if name.startswith("D") and name[1:].isdigit():
diffid = name[1:]
node = diffidtonode(repo, diffid)
@ -354,7 +353,6 @@ def _autopullphabdiff(repo, name, rewritepullrev=False):
if not repo.ui.configbool("phrevset", "autopull"):
return
repo = repo.unfiltered()
if (
name.startswith("D")
and name[1:].isdigit()


@ -73,7 +73,7 @@ def _cleanuplanded(repo, dryrun=False):
% ex
)
return
unfi = repo.unfiltered()
unfi = repo
mutationentries = []
tohide = set()
markedcount = 0
@ -170,7 +170,7 @@ def createmarkers(pullres, repo, start, stop, fromdrafts=True):
if not tocreate:
return
unfi = repo.unfiltered()
unfi = repo
with unfi.lock(), unfi.transaction("pullcreatemarkers"):
if obsolete.isenabled(repo, obsolete.createmarkersopt):
obsolete.createmarkers(unfi, tocreate)
@ -216,7 +216,7 @@ def getmarkers(repo, landeddiffs):
def getmarkersfromdrafts(repo, landeddiffs):
tocreate = []
unfiltered = repo.unfiltered()
unfiltered = repo
for rev in unfiltered.revs("draft() - obsolete() - hidden()"):
rev = unfiltered[rev]


@ -204,7 +204,7 @@ class rebaseruntime(object):
@property
def repo(self):
if self.prepared:
return self._repo.unfiltered()
return self._repo
else:
return self._repo
@ -220,7 +220,6 @@ class rebaseruntime(object):
def _writestatus(self, f):
repo = self.repo
assert repo.filtername is None
f.write(pycompat.encodeutf8(repo[self.originalwd].hex() + "\n"))
# was "dest". we now write dest per src root below.
f.write(b"\n")
@ -247,7 +246,6 @@ class rebaseruntime(object):
"""Restore a previously stored status"""
self.prepared = True
repo = self.repo
assert repo.filtername is None
legacydest = None
collapse = False
external = nullrev
@ -1286,8 +1284,8 @@ def _definepredmap(repo, rebaseset):
Returns a map of {rev: [preds]}, where preds are the predecessors of the
rebased node that are also being rebased.
"""
clnode = repo.unfiltered().changelog.node
clrev = repo.unfiltered().changelog.rev
clnode = repo.changelog.node
clrev = repo.changelog.rev
if mutation.enabled(repo):
predmap = {
r: [
@ -1586,7 +1584,6 @@ def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
def successorrevs(unfi, rev):
"""yield revision numbers for successors of rev"""
assert unfi.filtername is None
nodemap = unfi.changelog.nodemap
node = unfi[rev].node()
if mutation.enabled(unfi):
@ -1614,7 +1611,6 @@ def defineparents(repo, rev, destmap, state, skipped, obsskipped):
block below.
"""
# use unfiltered changelog since successorrevs may return filtered nodes
assert repo.filtername is None
cl = repo.changelog
def isancestor(a, b):
@ -2112,7 +2108,6 @@ def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
obsoletenotrebased = {}
obsoletewithoutsuccessorindestination = set([])
assert repo.filtername is None
cl = repo.changelog
nodemap = cl.nodemap
for srcrev in rebaseobsrevs:


@ -340,9 +340,9 @@ def cloneshallow(orig, ui, repo, *args, **opts):
repos = []
def pull_shallow(orig, self, *args, **kwargs):
repos.append(self.unfiltered())
repos.append(self)
# set up the client hooks so the post-clone update works
setupclient(self.ui, self.unfiltered())
setupclient(self.ui, self)
if shallowrepo.requirement not in self.requirements:
self.requirements.add(shallowrepo.requirement)


@ -129,7 +129,7 @@ class remotefilectx(context.filectx):
repo = self._repo
path = self._path
fileid = self._filenode
cl = repo.unfiltered().changelog
cl = repo.changelog
mfl = repo.manifestlog
with repo.ui.timesection("scanlinkrev"), repo.ui.configoverride(
@ -272,7 +272,7 @@ class remotefilectx(context.filectx):
we get to a linkrev, we stop when we see any of the known linknodes.
"""
repo = self._repo
cl = repo.unfiltered().changelog
cl = repo.changelog
mfl = repo.manifestlog
linknode = self.getnodeinfo()[2]


@ -44,7 +44,7 @@ def wraprepo(repo):
return path
@localrepo.unfilteredpropertycache
@util.propertycache
def fileslog(self):
return remotefilelog.remotefileslog(self)
@ -74,14 +74,12 @@ def wraprepo(repo):
else:
return super(shallowrepository, self).filectx(path, changeid, fileid)
@localrepo.unfilteredmethod
def close(self):
result = super(shallowrepository, self).close()
if "fileslog" in self.__dict__:
self.fileslog.abortpending()
return result
@localrepo.unfilteredmethod
def commitpending(self):
super(shallowrepository, self).commitpending()
@ -96,7 +94,6 @@ def wraprepo(repo):
domaintenancerepack(self)
self.numtransactioncommits = 0
@localrepo.unfilteredmethod
def commitctx(self, ctx, error=False):
"""Add a new revision to current repository.
Revision information is passed via the context argument.


@ -335,7 +335,7 @@ def exfindcommonheads(orig, ui, local, remote, **kwargs):
# We only want to use this for existence checks. We don't want hidden
# commits to result in throwing an exception here.
cl = local.unfiltered().changelog
cl = local.changelog
if cl.tip() == nullid:
if srvheadhashes != [nullid]:
@ -365,7 +365,6 @@ def pullremotenames(repo, remote, bookmarks):
# they won't show up as heads on the next pull, so we
# remove them here otherwise we would require the user
# to issue a pull to refresh .hg/remotenames
repo = repo.unfiltered()
saveremotenames(repo, {path: bookmarks})
# repo.ui.paths.get(path) might be empty during clone.
@ -445,7 +444,7 @@ def exclone(orig, ui, *args, **opts):
vfs.write("bookmarks", b"")
# Invalidate bookmark caches.
repo._filecache.pop("_bookmarks", None)
repo.unfiltered().__dict__.pop("_bookmarks", None)
repo.__dict__.pop("_bookmarks", None)
# Avoid writing out bookmarks on transaction close.
tr.removefilegenerator("bookmarks")
@ -623,7 +622,7 @@ def expaths(orig, ui, repo, *args, **opts):
def exnowarnheads(orig, pushop):
heads = orig(pushop)
if pushop.to:
repo = pushop.repo.unfiltered()
repo = pushop.repo
rev = pushop.revs[0]
heads.add(repo[rev].node())
return heads
@ -775,7 +774,7 @@ def exlog(orig, ui, repo, *args, **opts):
def expushdiscoverybookmarks(pushop):
repo = pushop.repo.unfiltered()
repo = pushop.repo
remotemarks = pushop.remote.listkeys("bookmarks")
if pushop.delete:
@ -1265,7 +1264,6 @@ def displayremotebookmarks(ui, repo, opts, fm):
label = "log." + color
# it seems overkill to hide displaying hidden remote bookmarks
repo = repo.unfiltered()
useformatted = repo.ui.formatted
for name in sorted(ns.listnames(repo)):
@ -1623,7 +1621,6 @@ def upstream_revs(filt, repo, subset, x):
@revsetpredicate("upstream()")
def upstream(repo, subset, x):
"""Select changesets in an upstream repository according to remotenames."""
repo = repo.unfiltered()
upstream_names = repo.ui.configlist("remotenames", "upstream")
# override default args from hgrc with args passed in on the command line
if x:


@ -81,7 +81,7 @@ def _revive(repo, rev):
"""Brings the given rev back into the repository. Finding it in backup
bundles if necessary.
"""
unfi = repo.unfiltered()
unfi = repo
try:
ctx = unfi[rev]
except error.RepoLookupError:


@ -453,7 +453,7 @@ def _nothingtoshelvemessaging(ui, repo, pats, opts):
def _shelvecreatedcommit(ui, repo, node, name):
shelvedfile(repo, name, "oshelve").writeobsshelveinfo({"node": nodemod.hex(node)})
cmdutil.export(
repo.unfiltered(),
repo,
[node],
fp=shelvedfile(repo, name, patchextension).opener("wb"),
opts=mdiff.diffopts(git=True),
@ -537,7 +537,7 @@ def _docreatecmd(ui, repo, pats, opts):
# it might have been created previously and shelve just
# reuses it
try:
hg.update(repo.unfiltered(), parent.node())
hg.update(repo, parent.node())
except (KeyboardInterrupt, Exception):
# failed to update to the original revision, which has left us on the
# (hidden) shelve commit. Move directly to the original commit by
@ -774,7 +774,7 @@ def unshelvecontinue(ui, repo, state, opts):
try:
# if shelve is obs-based, we want rebase to be able
# to create markers to already-obsoleted commits
_repo = repo.unfiltered() if state.obsshelve else repo
_repo = repo if state.obsshelve else repo
with ui.configoverride(
{("experimental", "rebaseskipobsolete"): "off"}, "unshelve"
):
@ -834,7 +834,6 @@ def _unshelverestorecommit(ui, repo, basename, obsshelve):
if obsshelve:
md = shelvedfile(repo, basename, "oshelve").readobsshelveinfo()
shelvenode = nodemod.bin(md["node"])
repo = repo.unfiltered()
try:
shelvectx = repo[shelvenode]
except error.RepoLookupError:
@ -950,7 +949,7 @@ def _finishunshelve(repo, oldtiprev, tr, activebookmark, obsshelve):
# but it doesn't update the inmemory structures, so addchangegroup
# hooks still fire and try to operate on the missing commits.
# Clean up manually to prevent this.
repo.unfiltered().changelog.strip(oldtiprev, tr)
repo.changelog.strip(oldtiprev, tr)
_aborttransaction(repo)
@ -978,7 +977,7 @@ def _obsoleteredundantnodes(repo, tr, pctx, shelvectx, tmpwctx):
def _hidenodes(repo, nodes):
unfi = repo.unfiltered()
unfi = repo
if obsolete.isenabled(repo, obsolete.createmarkersopt):
markers = [(unfi[n], ()) for n in nodes]
obsolete.createmarkers(repo, markers)


@ -587,9 +587,7 @@ def _smartlog(ui, repo, *pats, **opts):
return
# Print it!
revdag, reserved = getdag(
ui, repo.unfiltered(), sorted(revs, reverse=True), masterrev
)
revdag, reserved = getdag(ui, repo, sorted(revs, reverse=True), masterrev)
displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
ui.pager("smartlog")
if ui.config("experimental", "graph.renderer") == "legacy":


@ -133,14 +133,14 @@ def extsetup(ui):
def _dagwalker(orig, repo, revs):
return orig(repo.unfiltered(), revs)
return orig(repo, revs)
def _updaterepo(orig, repo, node, overwrite, **opts):
"""prevents the repo from updating onto a snapshot node
"""
allowsnapshots = repo.ui.configbool("ui", "allow-checkout-snapshot")
unfi = repo.unfiltered()
unfi = repo
if not allowsnapshots and node in unfi:
ctx = unfi[node]
if "snapshotmetadataid" in ctx.extra():
@ -157,7 +157,7 @@ def _updaterepo(orig, repo, node, overwrite, **opts):
def _updateheads(orig, self, repo, newheads, tr):
"""ensures that we don't try to make the snapshot nodes visible
"""
unfi = repo.unfiltered()
unfi = repo
heads = []
for h in newheads:
if h not in unfi:
@ -179,7 +179,7 @@ def _showgraphnode(orig, repo, ctx, **args):
def _update(orig, ui, repo, node=None, rev=None, **opts):
allowsnapshots = repo.ui.configbool("ui", "allow-checkout-snapshot")
unfi = repo.unfiltered()
unfi = repo
if not allowsnapshots and node in unfi:
ctx = unfi[node]
if "snapshotmetadataid" in ctx.extra():
@ -206,13 +206,13 @@ def _handlebundle2part(orig, self, bundle, part):
def _smartlogrevset(orig, repo, subset, x):
revs = orig(repo, subset, x)
snapshotstring = revsetlang.formatspec("snapshot()")
return smartset.addset(revs, repo.unfiltered().anyrevs([snapshotstring], user=True))
return smartset.addset(revs, repo.anyrevs([snapshotstring], user=True))
def _dounhide(orig, repo, revs):
"""prevents the snapshot nodes from being visible
"""
unfi = repo.unfiltered()
unfi = repo
revs = [r for r in revs if "snapshotmetadataid" not in unfi[r].extra()]
if len(revs) > 0:
orig(repo, revs)
@ -224,7 +224,7 @@ revsetpredicate = registrar.revsetpredicate()
@revsetpredicate("snapshot")
def snapshot(repo, subset, x):
"""Snapshot changesets"""
unfi = repo.unfiltered()
unfi = repo
# get all the hex nodes of snapshots from the file
nodes = repo.snapshotlist.snapshots
return subset & unfi.revs("%ls", nodes)


@ -33,7 +33,7 @@ def getmetadatafromrevs(repo, revs):
"""get binary representation of snapshot metadata by a list of revs
"""
metadataids = set()
unfi = repo.unfiltered()
unfi = repo
for rev in revs:
# TODO(alexeyqu): move this check into a function
if rev not in unfi:


@ -134,7 +134,7 @@ def getsnapshotctx(ui, repo, args):
raise error.Abort(_("you must specify a snapshot revision id\n"))
node = args[0]
try:
cctx = repo.unfiltered()[node]
cctx = repo[node]
except error.RepoLookupError:
ui.status(_("%s is not a valid revision id\n") % node)
raise
@ -151,10 +151,10 @@ def snapshotshow(ui, repo, *args, **opts):
rev = cctx.hex()
opts["rev"] = [rev]
opts["patch"] = True
revs, expr, filematcher = cmdutil.getlogrevs(repo.unfiltered(), [], opts)
revs, expr, filematcher = cmdutil.getlogrevs(repo, [], opts)
revmatchfn = filematcher(rev) if filematcher else None
ui.pager("snapshotshow")
displayer = cmdutil.show_changeset(ui, repo.unfiltered(), opts, buffered=True)
displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
with extensions.wrappedfunction(patch, "diff", _diff), extensions.wrappedfunction(
cmdutil.changeset_printer, "_show", _show
), extensions.wrappedfunction(cmdutil.changeset_templater, "_show", _show):
@ -183,7 +183,7 @@ def _diff(orig, repo, *args, **kwargs):
if node2 is None:
# this should be the snapshot node
return
ctx2 = repo.unfiltered()[node2]
ctx2 = repo[node2]
date2 = util.datestr(ctx2.date())
node1 = kwargs.get("node1") or args[0]
if node1 is not None:
@ -257,11 +257,11 @@ def snapshotcheckout(ui, repo, *args, **opts):
with repo.wlock():
parents = [p.node() for p in cctx.parents()]
# First we check out on the 1st parent of the snapshot state
hg.update(repo.unfiltered(), parents[0], quietempty=True)
hg.update(repo, parents[0], quietempty=True)
# Then we update snapshot files in the working copy
# Here the dirstate is not updated because of the matcher
matcher = scmutil.matchfiles(repo, cctx.files(), opts)
mergemod.update(repo.unfiltered(), cctx.hex(), False, False, matcher=matcher)
mergemod.update(repo, cctx.hex(), False, False, matcher=matcher)
# Finally, we mark the modified files in the dirstate
scmutil.addremove(repo, matcher, "", opts)
# Tie the state to the 2nd parent if needed


@ -59,7 +59,7 @@ class snapshotlist(object):
self._check(repo)
def _check(self, repo):
unfi = repo.unfiltered()
unfi = repo
toremove = set()
for snapshotnode in self.snapshots:
binsnapshotnode = node.bin(snapshotnode)
@ -92,7 +92,7 @@ class snapshotlist(object):
fm = ui.formatter("snapshots", opts)
if len(self.snapshots) == 0:
ui.status(_("no snapshots created\n"))
unfi = repo.unfiltered()
unfi = repo
for snapshotnode in self.snapshots:
ctx = unfi[snapshotnode]
message = ctx.description().split("\n")[0]


@ -415,7 +415,7 @@ def _setupcommit(ui):
orig(self, node)
# Use unfiltered to avoid computing hidden commits
repo = self._repo.unfiltered()
repo = self._repo
if util.safehasattr(repo, "getsparsepatterns"):
ctx = repo[node]
@ -533,7 +533,7 @@ def _clonesparsecmd(orig, ui, repo, *args, **opts):
with self.ui.configoverride(overrides, "sparse"):
_config(
self.ui,
self.unfiltered(),
self,
pat,
{},
include=include,
@ -882,7 +882,7 @@ def _wraprepo(ui, repo):
if rev is None:
raise error.Abort(_("cannot parse sparse patterns from working copy"))
repo = self.unfiltered()
repo = self
if config is None:
if not self.localvfs.exists("sparse"):
self._warnfullcheckout()
@ -977,7 +977,7 @@ def _wraprepo(ui, repo):
)
def getrawprofile(self, profile, changeid):
repo = self.unfiltered()
repo = self
try:
simplecache = extensions.find("simplecache")
@ -1122,7 +1122,7 @@ def _wraprepo(ui, repo):
def getactiveprofiles(self):
# Use unfiltered to avoid computing hidden commits
repo = self.unfiltered()
repo = self
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
@ -1737,7 +1737,6 @@ def debugsparsematch(ui, repo, *args, **opts):
working copy.
"""
# Make it work in an edenfs checkout.
repo = repo.unfiltered()
if "eden" in repo.requirements:
_wraprepo(ui, repo)
filename = opts.get("sparse_profile")


@ -497,7 +497,7 @@ def wraprepo(repo):
try:
revset = "parents(%ld & draft() - hidden()) & public()"
with self.ui.configoverride({("devel", "legacy.revnum"): ""}):
draftparents = list(self.unfiltered().set(revset, revs))
draftparents = list(self.set(revset, revs))
if draftparents:
self.prefetchtrees([c.manifestnode() for c in draftparents])
@ -1075,7 +1075,7 @@ class basetreemanifestlog(object):
class treemanifestlog(basetreemanifestlog, manifest.manifestlog):
def __init__(self, opener, repo, treemanifest=False):
self._repo = repo.unfiltered()
self._repo = repo
basetreemanifestlog.__init__(self, self._repo)
assert treemanifest is False
cachesize = 4
@ -1102,7 +1102,7 @@ class treemanifestlog(basetreemanifestlog, manifest.manifestlog):
class treeonlymanifestlog(basetreemanifestlog):
def __init__(self, opener, repo):
self._repo = repo.unfiltered()
self._repo = repo
super(treeonlymanifestlog, self).__init__(self._repo)
self._opener = opener
self.ui = repo.ui
@ -2315,7 +2315,6 @@ def pull(orig, ui, repo, *pats, **opts):
def _postpullprefetch(ui, repo):
repo = repo.unfiltered()
ctxs = []
mfstore = repo.manifestlog.datastore


@ -689,7 +689,6 @@ def unfilteredcmd(orig, *args, **opts):
for i in [1, 2]:
if len(args) > i and util.safehasattr(args[i], "unfiltered"):
args = list(args)
args[i] = args[i].unfiltered()
args = tuple(args)
return orig(*args, **opts)


@ -177,7 +177,7 @@ def safelog(repo, command):
repo.ui.log("undologlock", "lock acquired\n")
tr = lighttransaction(repo)
with tr:
changes = log(repo.filtered("visible"), command, tr)
changes = log(repo, command, tr)
if changes and not ("undo" == command[0] or "redo" == command[0]):
_delundoredo(repo)
except error.LockUnavailable: # no write permissions
@ -539,7 +539,7 @@ def _cachedgetolddrafts(repo, nodedict):
oldlogrevstring = revsetlang.formatspec(
"(draft() & ancestors(%ls)) - %ls", oldheadslist, oldobslist
)
urepo = repo.unfiltered()
urepo = repo
cache[key] = smartset.baseset(urepo.revs(oldlogrevstring))
return cache[key]
@ -607,7 +607,7 @@ def _cachedgetoldworkingcopyparent(repo, wkpnode):
oldworkingparent = _readnode(repo, "workingparent.i", wkpnode)
oldworkingparent = filter(None, oldworkingparent.split("\n"))
oldwkprevstring = revsetlang.formatspec("%ls", oldworkingparent)
urepo = repo.unfiltered()
urepo = repo
cache[key] = smartset.baseset(urepo.revs(oldwkprevstring))
return cache[key]
@ -653,7 +653,6 @@ def showundonecommits(context, mapping, args):
def _donehexnodes(repo, reverseindex):
repo = repo.unfiltered()
revstring = revsetlang.formatspec("olddraft(%d)", reverseindex)
return list(repo.nodes(revstring))
@ -738,7 +737,6 @@ def oldworkingparenttemplate(context, mapping, args):
)
repo = mapping["ctx"]._repo
ctx = mapping["ctx"]
repo = repo.unfiltered()
revstring = revsetlang.formatspec("oldworkingcopyparent(%d)", reverseindex)
nodes = list(repo.nodes(revstring))
if ctx.node() in nodes:
@ -825,8 +823,6 @@ def undo(ui, repo, *args, **opts):
if interactive:
preview = True
repo = repo.unfiltered()
if branch and reverseindex != 1 and reverseindex != -1:
raise error.Abort(_("--branch with --index not supported"))
if relativeundo:
@ -982,7 +978,6 @@ def redo(ui, repo, *args, **opts):
with repo.wlock(), repo.lock(), repo.transaction("redo"):
cmdutil.checkunfinished(repo)
cmdutil.bailifchanged(repo)
repo = repo.unfiltered()
_undoto(ui, repo, reverseindex)
# update undredo by removing what the given undo added
_logundoredoindex(repo, shiftedindex, branch)
@ -998,8 +993,6 @@ def _undoto(ui, repo, reverseindex, keep=False, branch=None):
_("'undo --branch' is no longer supported in the current setup")
)
if repo != repo.unfiltered():
raise error.ProgrammingError(_("_undoto expects unfilterd repo"))
try:
nodedict = _readindex(repo, reverseindex)
except IndexError:
@ -1187,7 +1180,6 @@ def _findnextdelta(repo, reverseindex, branch, direction):
# copy parent change that effects the given branch
if 0 == direction: # no infinite cycles guarantee
raise error.ProgrammingError
repo = repo.unfiltered()
# current state
try:
nodedict = _readindex(repo, reverseindex)
@ -1270,7 +1262,7 @@ def smarthide(repo, revhide, revshow, local=False):
markers = []
nodes = []
for ctx in hidectxs:
unfi = repo.unfiltered()
unfi = repo
related = set()
if mutation.enabled(unfi):
related.update(mutation.allpredecessors(unfi, [ctx.node()]))
@ -1335,7 +1327,6 @@ def _preview(ui, repo, reverseindex):
opts = {}
opts["template"] = "{undopreview}"
repo = repo.unfiltered()
try:
nodedict = _readindex(repo, reverseindex)


@ -163,7 +163,7 @@ def trypull(repo, xs):
attempt.execute(repo)
attempt = other
attempt.execute(repo)
unfi = repo.unfiltered()
unfi = repo
return all(x in unfi for x in xs)
return False


@ -817,7 +817,6 @@ def summary(repo, other):
def validdest(repo, old, new):
"""Is the new bookmark destination a valid update from the old one"""
repo = repo.unfiltered()
if old == new:
# Old == new -> nothing to update.
return False
@ -1114,7 +1113,7 @@ def saveremotenames(repo, remotebookmarks, override=True):
_writesingleremotename(f, remote, nametype, rname, node)
journal = []
nm = repo.unfiltered().changelog.nodemap
nm = repo.changelog.nodemap
missingnode = False
for remote, rmbookmarks in pycompat.iteritems(remotebookmarks):
rmbookmarks = {} if rmbookmarks is None else rmbookmarks
@ -1409,7 +1408,7 @@ def updateaccessedbookmarks(repo, remotepath, bookmarks):
else:
newbookmarks[rname] = node
nodemap = repo.unfiltered().changelog.nodemap
nodemap = repo.changelog.nodemap
for rname, node in pycompat.iteritems(bookmarks):
# if the node is known locally, update the old value or add new
if bin(node) in nodemap:


@ -18,11 +18,9 @@ from .node import nullid, nullrev
def updatecache(repo):
# Just use a shortcut path that construct the branchcache directly.
partial = repo._branchcaches.get(repo.filtername)
if partial is None:
partial = branchcache()
partial = branchcache()
partial.update(repo, None)
repo._branchcaches[repo.filtername] = partial
repo._branchcaches[None] = partial
class branchcache(dict):
@ -131,6 +129,4 @@ class branchcache(dict):
tiprev = branchheads[-1]
self.tipnode = cl.node(tiprev)
self.tiprev = tiprev
repo.ui.log(
"branchcache", "perftweaks updated %s branch cache\n", repo.filtername
)
repo.ui.log("branchcache", "perftweaks updated branch cache\n")


@ -1998,7 +1998,7 @@ def handlecheckphases(op, inpart):
This is used to detect a push race.
"""
phasetonodes = phases.binarydecode(inpart)
unfi = op.repo.unfiltered()
unfi = op.repo
cl = unfi.changelog
phasecache = unfi._phasecache
msg = (
@ -2200,7 +2200,7 @@ def handlephases(op, inpart):
# type: (bundleoperation, unbundlepart) -> None
"""apply phases from bundle part to repo"""
headsbyphase = phases.binarydecode(inpart)
phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
phases.updatephases(op.repo, op.gettransaction, headsbyphase)
@parthandler("reply:pushkey", ("return", "in-reply-to"))


@ -208,14 +208,7 @@ class bundlechangelog(bundlerevlog, changelog.changelog):
# may replace this class with another that does. Same story with
# manifest and filelog classes.
# This bypasses filtering on changelog.node() and rev() because we need
# revision text of the bundle base even if it is hidden.
oldfilter = self.filteredrevs
try:
self.filteredrevs = ()
return changelog.changelog.revision(self, nodeorrev, raw=True)
finally:
self.filteredrevs = oldfilter
return changelog.changelog.revision(self, nodeorrev, raw=True)
def _loadvisibleheads(self, opener):
return visibility.bundlevisibleheads(opener)
@ -397,15 +390,15 @@ class bundlerepository(localrepo.localrepository):
return self.localvfs.open(self.tempfile, mode="rb")
@localrepo.unfilteredpropertycache
@util.propertycache
def _phasecache(self):
return bundlephasecache(self, self._phasedefaults)
@localrepo.unfilteredpropertycache
@util.propertycache
def _mutationstore(self):
return mutation.bundlemutationstore(self)
@localrepo.unfilteredpropertycache
@util.propertycache
def changelog(self):
# consume the header if it exists
self._cgunpacker.changelogheader()
@ -413,7 +406,7 @@ class bundlerepository(localrepo.localrepository):
self.manstart = self._cgunpacker.tell()
return c
@localrepo.unfilteredpropertycache
@util.propertycache
def manifestlog(self):
return super(bundlerepository, self).manifestlog
@ -421,7 +414,7 @@ class bundlerepository(localrepo.localrepository):
self._cgunpacker.seek(self.manstart)
# consume the header if it exists
self._cgunpacker.manifestheader()
linkmapper = self.unfiltered().changelog.rev
linkmapper = self.changelog.rev
m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
self.filestart = self._cgunpacker.tell()
return m
@ -444,12 +437,12 @@ class bundlerepository(localrepo.localrepository):
self.filestart = self._cgunpacker.tell()
@localrepo.unfilteredpropertycache
@util.propertycache
def manstart(self):
self.changelog
return self.manstart
@localrepo.unfilteredpropertycache
@util.propertycache
def filestart(self):
self.manifestlog
@ -474,7 +467,8 @@ class bundlerepository(localrepo.localrepository):
if f in self._cgfilespos:
self._cgunpacker.seek(self._cgfilespos[f])
linkmapper = self.unfiltered().changelog.rev
# pyre-fixme[16]: Callable `changelog` has no attribute `rev`.
linkmapper = self.changelog.rev
return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
else:
return filelog.filelog(self.svfs, f)
@ -637,11 +631,6 @@ def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None, force=Fal
localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root, fname)
# this repo contains local and other now, so filter out local again
common = repo.heads()
if localrepo:
# Part of common may be remotely filtered
# So use an unfiltered version
# The discovery process probably need cleanup to avoid that
localrepo = localrepo.unfiltered()
csets = localrepo.changelog.findmissing(common, rheads)


@ -70,7 +70,6 @@ typedef struct {
Py_ssize_t length; /* current number of elements */
PyObject* added; /* populated on demand */
PyObject* headrevs; /* cache, invalidated on changes */
PyObject* filteredrevs; /* filtered revs set */
nodetree* nt; /* base-16 trie */
unsigned ntlength; /* # nodes in use */
unsigned ntcapacity; /* # nodes allocated */
@ -416,32 +415,6 @@ static PyObject* list_copy(PyObject* list) {
return newlist;
}
static int check_filter(PyObject* filter, Py_ssize_t arg) {
if (filter) {
PyObject *arglist, *result;
int isfiltered;
arglist = Py_BuildValue("(n)", arg);
if (!arglist) {
return -1;
}
result = PyEval_CallObject(filter, arglist);
Py_DECREF(arglist);
if (!result) {
return -1;
}
/* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
* same as this function, so we can just return it directly.*/
isfiltered = PyObject_IsTrue(result);
Py_DECREF(result);
return isfiltered;
} else {
return 0;
}
}
static Py_ssize_t add_roots_get_min(
indexObject* self,
PyObject* list,
@ -726,29 +699,10 @@ static PyObject* index_headrevs(indexObject* self, PyObject* args) {
Py_ssize_t i, j, len;
char* nothead = NULL;
PyObject* heads = NULL;
PyObject* filter = NULL;
PyObject* filteredrevs = Py_None;
if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
return NULL;
}
if (self->headrevs && filteredrevs == self->filteredrevs)
if (self->headrevs)
return list_copy(self->headrevs);
Py_DECREF(self->filteredrevs);
self->filteredrevs = filteredrevs;
Py_INCREF(filteredrevs);
if (filteredrevs != Py_None) {
filter = PyObject_GetAttrString(filteredrevs, "__contains__");
if (!filter) {
PyErr_SetString(
PyExc_TypeError, "filteredrevs has no attribute __contains__");
goto bail;
}
}
len = index_length(self) - 1;
heads = PyList_New(0);
if (heads == NULL)
@ -769,26 +723,8 @@ static PyObject* index_headrevs(indexObject* self, PyObject* args) {
}
for (i = len - 1; i >= 0; i--) {
int isfiltered;
int parents[2];
/* If nothead[i] == 1, it means we've seen an unfiltered child of this
* node already, and therefore this node is not filtered. So we can skip
* the expensive check_filter step.
*/
if (nothead[i] != 1) {
isfiltered = check_filter(filter, i);
if (isfiltered == -1) {
PyErr_SetString(PyExc_TypeError, "unable to check filter");
goto bail;
}
if (isfiltered) {
nothead[i] = 1;
continue;
}
}
if (index_get_parents(self, i, parents, (int)len - 1) < 0)
goto bail;
for (j = 0; j < 2; j++) {
@ -811,11 +747,9 @@ static PyObject* index_headrevs(indexObject* self, PyObject* args) {
done:
self->headrevs = heads;
Py_XDECREF(filter);
free(nothead);
return list_copy(self->headrevs);
bail:
Py_XDECREF(filter);
Py_XDECREF(heads);
free(nothead);
return NULL;
@ -1854,7 +1788,6 @@ static int index_init(indexObject* self, PyObject* args) {
self->data = NULL;
memset(&self->buf, 0, sizeof(self->buf));
self->headrevs = NULL;
self->filteredrevs = Py_None;
Py_INCREF(Py_None);
self->nt = NULL;
self->offsets = NULL;
@ -1906,7 +1839,6 @@ static PyObject* index_nodemap(indexObject* self) {
static void index_dealloc(indexObject* self) {
_index_clearcaches(self);
Py_XDECREF(self->filteredrevs);
if (self->buf.buf) {
PyBuffer_Release(&self->buf);
memset(&self->buf, 0, sizeof(self->buf));
@ -1958,11 +1890,7 @@ static PyMethodDef index_methods[] = {
{"headrevs",
(PyCFunction)index_headrevs,
METH_VARARGS,
"get head revisions"}, /* Can do filtering since 3.2 */
{"headrevsfiltered",
(PyCFunction)index_headrevs,
METH_VARARGS,
"get filtered head revisions"}, /* Can always do filtering */
"get head revisions"},
{"deltachain",
(PyCFunction)index_deltachain,
METH_VARARGS,


@ -324,7 +324,6 @@ class cg1unpacker(object):
- fewer heads than before: -1-removed heads (-2..-n)
- number of heads stays the same: 1
"""
repo = repo.unfiltered()
def csmap(x):
repo.ui.debug("add changeset %s\n" % short(x))
@ -1102,7 +1101,6 @@ def makestream(
bundler = getbundler(version, repo, bundlecaps=bundlecaps, b2caps=b2caps)
repo = repo.unfiltered()
commonrevs = outgoing.common
csets = outgoing.missing
@ -1119,9 +1117,7 @@ def makestream(
# heads have been requested (since we then know there all linkrevs will
# be pulled by the client).
heads.sort()
fastpathlinkrev = fastpath or (
repo.filtername is None and heads == sorted(repo.heads())
)
fastpathlinkrev = fastpath or heads == sorted(repo.heads())
repo.hook("preoutgoing", throw=True, source=source)
_changegroupinfo(repo, csets, source)


@ -333,7 +333,6 @@ class changelog(revlog.revlog):
self._delayed = False
self._delaybuf = None
self._divert = False
self.filteredrevs = frozenset()
if uiconfig.configbool("format", "use-zstore-commit-data-revlog-fallback"):
self._zstorefallback = "revlog"
@ -351,32 +350,21 @@ class changelog(revlog.revlog):
# type: () -> bytes
"""filtered version of revlog.tip"""
for i in range(len(self) - 1, -2, -1):
if i not in self.filteredrevs:
# pyre-fixme[7]: Expected `bytes` but got implicit return value of
# `None`.
return self.node(i)
# pyre-fixme[7]: Expected `bytes` but got implicit return value of `None`.
return self.node(i)
def __contains__(self, rev):
"""filtered version of revlog.__contains__"""
return rev is not None and 0 <= rev < len(self) and rev not in self.filteredrevs
return rev is not None and 0 <= rev < len(self)
def __iter__(self):
"""filtered version of revlog.__iter__"""
if len(self.filteredrevs) == 0:
return revlog.revlog.__iter__(self)
def filterediter():
for i in range(len(self)):
if i not in self.filteredrevs:
yield i
return filterediter()
return revlog.revlog.__iter__(self)
def revs(self, start=0, stop=None):
"""filtered version of revlog.revs"""
for i in super(changelog, self).revs(start, stop):
if i not in self.filteredrevs:
yield i
yield i
@util.propertycache
def nodemap(self):
@ -408,45 +396,30 @@ class changelog(revlog.revlog):
heads in a consistent way, then discovery can just use references as
heads instead.
"""
return self.index.headrevsfiltered(self.filteredrevs)
return self.index.headrevs()
def strip(self, *args, **kwargs):
# XXX make something better than assert
# We can't expect proper strip behavior if we are filtered.
assert not self.filteredrevs
super(changelog, self).strip(*args, **kwargs)
def rev(self, node):
"""filtered version of revlog.rev"""
r = super(changelog, self).rev(node)
if r in self.filteredrevs:
raise error.FilteredLookupError(
hex(node), self.indexfile, _("filtered node")
)
return r
def node(self, rev):
"""filtered version of revlog.node"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).node(rev)
def linkrev(self, rev):
"""filtered version of revlog.linkrev"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).linkrev(rev)
def parentrevs(self, rev):
"""filtered version of revlog.parentrevs"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).parentrevs(rev)
def flags(self, rev):
"""filtered version of revlog.flags"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).flags(rev)
def delayupdate(self, tr):


@ -11,7 +11,6 @@ from .i18n import _
def shallowclone(source, repo):
"""clone from source into an empty shallow repo"""
repo = repo.unfiltered()
with repo.wlock(), repo.lock(), repo.transaction("clone"):
if any(


@ -1168,7 +1168,7 @@ def openrevlog(repo, cmd, file_, opts):
r = None
if repo:
if cl:
r = repo.unfiltered().changelog
r = repo.changelog
elif dir:
if "treemanifest" not in repo.requirements:
raise error.Abort(


@ -4666,7 +4666,7 @@ def phase(ui, repo, *revs, **opts):
# moving revision from public to draft may hide them
# We have to check result on an unfiltered repository
unfi = repo.unfiltered()
unfi = repo
cl = unfi.changelog
getphase = unfi._phasecache.phase
rejected = [n for n in nodes if getphase(unfi, cl.rev(n)) < targetphase]


@ -938,7 +938,6 @@ def debugdifftree(ui, repo, *pats, **opts):
Print changed paths.
"""
revs = scmutil.revrange(repo, opts.get("rev"))
repo = repo.unfiltered()
oldrev = revs.first()
newrev = revs.last()
oldctx = repo[oldrev]
@ -2071,11 +2070,11 @@ def debugobsolete(ui, repo, precursor=None, *successors, **opts):
prec = parsenodeid(precursor)
parents = None
if opts["record_parents"]:
if prec not in repo.unfiltered():
if prec not in repo:
raise error.Abort(
"cannot used --record-parents on " "unknown changesets"
)
parents = repo.unfiltered()[prec].parents()
parents = repo[prec].parents()
parents = tuple(p.node() for p in parents)
repo.obsstore.create(
tr,
@ -2087,7 +2086,7 @@ def debugobsolete(ui, repo, precursor=None, *successors, **opts):
metadata=metadata,
ui=ui,
)
unfi = repo.unfiltered()
unfi = repo
if prec in unfi:
visibility.remove(unfi, [prec])
tr.close()


@ -24,7 +24,6 @@ def debugmetalog(ui, repo, **opts):
matchdate = util.matchdate(timerange)
matchdatefuncs.append(matchdate)
repo = repo.unfiltered()
metalog = repo.svfs.metalog
metalogpath = repo.svfs.join("metalog")
roots = metalog.listroots(metalogpath)
@ -125,7 +124,6 @@ class displayer(object):
@command("debugmetalogroots", [] + cmdutil.templateopts)
def debugmetalogroots(ui, repo, **opts):
"""list roots stored in metalog"""
repo = repo.unfiltered()
metalog = repo.svfs.metalog
metalogpath = repo.svfs.join("metalog")
roots = metalog.listroots(metalogpath)


@ -22,7 +22,7 @@ from .cmdtable import command
)
def debugmutation(ui, repo, **opts):
"""display the mutation history (or future) of a commit"""
unfi = repo.unfiltered()
unfi = repo
matchdatefuncs = []
for timerange in opts.get("time_range") or []:


@ -435,7 +435,6 @@ def checknoisybranches(repo):
- Not edited locally ('predecessors(x) - x' is empty).
- Most (> 50%) changes are not authored by the current user.
"""
repo = repo.unfiltered()
ui = repo.ui
heads = repo.changelog._visibleheads.heads
noisyheads = set()


@ -223,7 +223,7 @@ class HgServer(object):
else:
self.out_file = fdopen(out_fd, "wb")
self.repo = repo.unfiltered()
self.repo = repo
try:
self.treemanifest = extensions.find("treemanifest")
@ -279,11 +279,11 @@ class HgServer(object):
flags |= START_FLAGS_TREEMANIFEST_SUPPORTED
treemanifest_paths = [
shallowutil.getlocalpackpath(
self.repo.svfs.vfs.base, constants.TREEPACK_CATEGORY
self.repo.svfs.join(""), constants.TREEPACK_CATEGORY
),
shallowutil.getcachepackpath(self.repo, constants.TREEPACK_CATEGORY),
shallowutil.getlocalpackpath(
self.repo.svfs.vfs.base, constants.FILEPACK_CATEGORY
self.repo.svfs.join(""), constants.FILEPACK_CATEGORY
),
shallowutil.getcachepackpath(self.repo, constants.FILEPACK_CATEGORY),
]


@ -368,20 +368,6 @@ class basectx(object):
return r
def _filterederror(repo, changeid):
"""build an exception to be raised about a filtered changeid
This is extracted in a function to help extensions (eg: evolve) to
experiment with various message variants."""
if repo.filtername.startswith("visible"):
msg = _("hidden revision '%s'") % changeid
hint = _("use --hidden to access hidden revisions")
return error.FilteredRepoLookupError(msg, hint=hint)
msg = _("filtered revision '%s' (not in '%s' subset)")
msg %= (changeid, repo.filtername)
return error.FilteredRepoLookupError(msg)
class changectx(basectx):
"""A changecontext object makes access to data related to a particular
changeset convenient. It represents a read-only context already present in
@ -419,7 +405,7 @@ class changectx(basectx):
# this is a hack to delay/avoid loading obsmarkers
# when we know that '.' won't be hidden
self._node = repo.dirstate.p1()
self._rev = repo.unfiltered().changelog.rev(self._node)
self._rev = repo.changelog.rev(self._node)
return
except Exception:
self._repo.ui.warn(
@ -433,8 +419,6 @@ class changectx(basectx):
self._node = changeid
self._rev = repo.changelog.rev(changeid)
return
except error.FilteredRepoLookupError:
raise
except LookupError:
# The only valid bytes changeid is a node, and if the node was not
# found above, this is now considered an unknown changeid.
@ -462,8 +446,6 @@ class changectx(basectx):
self._rev = r
self._node = repo.changelog.node(r)
return
except error.FilteredIndexError:
raise
except (ValueError, OverflowError, IndexError):
pass
@ -472,8 +454,6 @@ class changectx(basectx):
self._node = bin(changeid)
self._rev = repo.changelog.rev(self._node)
return
except error.FilteredLookupError:
raise
except (TypeError, LookupError):
pass
@ -484,22 +464,17 @@ class changectx(basectx):
return
except KeyError:
pass
except error.FilteredRepoLookupError:
raise
except error.RepoLookupError:
pass
self._node = repo.unfiltered().changelog._partialmatch(changeid)
self._node = repo.changelog._partialmatch(changeid)
if self._node is not None:
self._rev = repo.changelog.rev(self._node)
return
# lookup failed
# check if it might have come from damaged dirstate
#
# XXX we could avoid the unfiltered if we had a recognizable
# exception for filtered changeset access
if repo.local() and changeid in repo.unfiltered().dirstate.parents():
if repo.local() and changeid in repo.dirstate.parents():
msg = _("working directory has unknown parent '%s'!")
raise error.Abort(msg % short(changeid))
try:
@ -507,12 +482,6 @@ class changectx(basectx):
changeid = hex(changeid)
except TypeError:
pass
except (
error.FilteredIndexError,
error.FilteredLookupError,
error.FilteredRepoLookupError,
):
raise _filterederror(repo, changeid)
except IndexError:
pass
raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
@ -921,7 +890,7 @@ class basefilectx(object):
:inclusive: if true, the src revision will also be checked
"""
repo = self._repo
cl = repo.unfiltered().changelog
cl = repo.changelog
mfl = repo.manifestlog
# fetch the linkrev
lkr = self.linkrev()
@ -1276,26 +1245,7 @@ class filectx(basefilectx):
@propertycache
def _changectx(self):
try:
return changectx(self._repo, self._changeid)
except error.FilteredRepoLookupError:
# Linkrev may point to any revision in the repository. When the
# repository is filtered this may lead to `filectx` trying to build
# `changectx` for filtered revision. In such case we fallback to
# creating `changectx` on the unfiltered version of the repository.
# This fallback should not be an issue because `changectx` from
# `filectx` are not used in complex operations that care about
# filtering.
#
# This fallback is a cheap and dirty fix that prevent several
# crashes. It does not ensure the behavior is correct. However the
# behavior was not correct before filtering either and "incorrect
# behavior" is seen as better as "crash"
#
# Linkrevs have several serious troubles with filtering that are
# complicated to solve. Proper handling of the issue here should be
# considered when solving linkrev issue are on the table.
return changectx(self._repo.unfiltered(), self._changeid)
return changectx(self._repo, self._changeid)
def filectx(self, fileid, changeid=None):
"""opens an arbitrary revision of the file without

View File

@ -153,9 +153,7 @@ class revlogbaseddag(basedag):
rl = self._revlog
if filterunknown:
return [
r
for r in map(rl.nodemap.get, ids)
if (r is not None and r != nullrev and r not in rl.filteredrevs)
r for r in map(rl.nodemap.get, ids) if (r is not None and r != nullrev)
]
return [self._internalize(i) for i in ids]

View File

@ -67,7 +67,7 @@ def findcommonincoming(
needlargestcommonset=needlargestcommonset,
)
common, anyinc, srvheads = res
unfi = repo.unfiltered()
unfi = repo
# anyinc = True prints "no changes found". However that is not always
# true if heads is provided. Do a double check.
if anyinc is False and heads and any(head not in unfi for head in heads):
@ -167,7 +167,7 @@ def findcommonoutgoing(
og.missingheads = onlyheads or repo.heads()
elif onlyheads is None:
# use visible heads as it should be cached
og.missingheads = repo.filtered("served").heads()
og.missingheads = repo.heads()
og.excluded = [ctx.node() for ctx in repo.set("secret()")]
else:
# compute common, missing and exclude secret stuff
@ -202,7 +202,7 @@ def findcommonoutgoing(
def _nowarnheads(pushop):
# Compute newly pushed bookmarks. We don't warn about bookmarked heads.
repo = pushop.repo.unfiltered()
repo = pushop.repo
remote = pushop.remote
localbookmarks = repo._bookmarks
remotebookmarks = remote.listkeys("bookmarks")
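
The common/missing split these helpers feed is, at its core, a set difference over ancestor closures; a toy rendering with a hypothetical precomputed ancestors table:

def outgoing(ancestors, localheads, commonheads):
    # everything reachable from our heads but not from the common heads
    ours = set().union(*(ancestors[h] for h in localheads))
    known = set().union(*(ancestors[h] for h in commonheads))
    return ours - known

ancestors = {"a": {"a"}, "b": {"a", "b"}, "c": {"a", "b", "c"}}
assert outgoing(ancestors, ["c"], ["b"]) == {"c"}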

View File

@ -1179,7 +1179,6 @@ def _dispatch(req):
if repo:
ui = repo.ui
if options["hidden"]:
repo = repo.unfiltered()
repo.ui.setconfig("visibility", "all-heads", "true", "--hidden")
if repo != req.repo:
ui.atexit(repo.close)

View File

@ -591,7 +591,7 @@ def drawdag(repo, text, **opts):
# handle special comments
with repo.wlock(), repo.lock(), repo.transaction("drawdag"):
getctx = lambda x: repo.unfiltered()[committed[x.strip()]]
getctx = lambda x: repo[committed[x.strip()]]
if obsolete.isenabled(repo, obsolete.createmarkersopt):
for cmd, markers in obsmarkers:
obsrels = [(getctx(p), [getctx(s) for s in ss]) for p, ss in markers]

View File

@ -83,7 +83,7 @@ class eden_dirstate(dirstate.dirstate):
def _p1_ctx(self):
"""Return the context object for the first parent commit."""
return self._map._repo.unfiltered()[self.p1()]
return self._map._repo[self.p1()]
def _call_match_callbacks(self, match, results1, results2):
"""

View File

@ -96,10 +96,6 @@ class RevlogError(Hint, Context, Exception):
__bytes__ = _tobytes
class FilteredIndexError(IndexError):
__bytes__ = _tobytes
class LookupError(RevlogError, KeyError):
def __init__(self, name, index, message):
self.name = name
@ -124,10 +120,6 @@ class LookupError(RevlogError, KeyError):
return RevlogError.__str__(self)
class FilteredLookupError(LookupError):
pass
class ManifestLookupError(LookupError):
pass
@ -249,10 +241,6 @@ class RepoLookupError(RepoError):
pass
class FilteredRepoLookupError(RepoLookupError):
pass
class CapabilityError(RepoError):
pass
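
What remains is a plain multiple-inheritance ladder; a compact self-contained sketch of why a lookup failure stays catchable as KeyError:

class RevlogError(Exception):
    pass

class LookupError(RevlogError, KeyError):
    def __init__(self, name, index, message):
        self.name, self.index, self.message = name, index, message
        super(LookupError, self).__init__(message)

try:
    raise LookupError("deadbeef", "00changelog.i", "no node")
except KeyError as err:
    assert isinstance(err, RevlogError)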

View File

@ -392,7 +392,7 @@ class pushoperation(object):
if self.revs is None:
# not target to push, all common are relevant
return self.outgoing.commonheads
unfi = self.repo.unfiltered()
unfi = self.repo
# I want cheads = heads(::missingheads and ::commonheads)
# (missingheads is revs with secret changeset filtered out)
#
@ -574,7 +574,7 @@ def _pushdiscoveryphase(pushop):
if pushop.repo.ui.configbool("experimental", "narrow-heads"):
return
outgoing = pushop.outgoing
unfi = pushop.repo.unfiltered()
unfi = pushop.repo
remotephases = pushop.remote.listkeys("phases")
pushop.remotephases = phases.remotephasessummary(
@ -621,7 +621,7 @@ def _pushdiscoveryobsmarkers(pushop):
@pushdiscovery("bookmarks")
def _pushdiscoverybookmarks(pushop):
ui = pushop.ui
repo = pushop.repo.unfiltered()
repo = pushop.repo
remote = pushop.remote
ui.debug("checking for updated bookmarks\n")
ancestors = ()
@ -689,7 +689,7 @@ def _pushdiscoverybookmarks(pushop):
def _pushcheckoutgoing(pushop):
outgoing = pushop.outgoing
unfi = pushop.repo.unfiltered()
unfi = pushop.repo
if not outgoing.missing:
# nothing to push
scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
@ -1083,9 +1083,7 @@ def _pushchangeset(pushop):
# TODO: get bundlecaps from remote
bundlecaps = None
# create a changegroup from local
if pushop.revs is None and not (
outgoing.excluded or pushop.repo.changelog.filteredrevs
):
    if pushop.revs is None and not outgoing.excluded:
# push everything,
# use the fast path, no race possible on push
cg = changegroup.makechangegroup(
@ -1492,7 +1490,7 @@ def _pulldiscoverychangegroup(pullop):
needlargestcommonset=False,
)
common, fetch, rheads = tmp
nm = pullop.repo.unfiltered().changelog.nodemap
nm = pullop.repo.changelog.nodemap
if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
#
@ -1696,7 +1694,7 @@ def _pullapplyphases(pullop, remotephases):
# should be seen as public
pheads = pullop.pulledsubset
dheads = []
unfi = pullop.repo.unfiltered()
unfi = pullop.repo
phase = unfi._phasecache.phase
rev = unfi.changelog.nodemap.get
public = phases.public
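
After the filteredrevs term drops out, the fast-path test in _pushchangeset is just two checks; as a toy predicate:

def usefastpath(revs, excluded):
    # push everything only when no revs were requested and nothing is excluded
    return revs is None and not excluded

assert usefastpath(None, [])
assert not usefastpath(None, ["secret"])
assert not usefastpath(["tip"], [])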

View File

@ -192,7 +192,7 @@ def repository(ui, path="", create=False, presetupfuncs=None):
repo = peer.local()
if not repo:
raise error.Abort(_("repository '%s' is not local") % (path or peer.url()))
return repo.filtered("visible")
return repo
def peer(uiorrepo, opts, path, create=False):
@ -308,13 +308,13 @@ def unshare(ui, repo):
# update store, spath, svfs and sjoin of repo
# invalidate before rerunning __init__
repo.unfiltered().invalidate(clearfilecache=True)
repo.unfiltered().invalidatedirstate()
repo.unfiltered().__init__(repo.baseui, repo.root)
repo.invalidate(clearfilecache=True)
repo.invalidatedirstate()
repo.__init__(repo.baseui, repo.root)
# reinitialize zstore
if repo.ui.configbool("format", "use-zstore-commit-data"):
repo.unfiltered()._syncrevlogtozstore()
repo._syncrevlogtozstore()
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
@ -1064,7 +1064,6 @@ class cachedlocalrepo(object):
assert isinstance(repo, localrepo.localrepository)
self._repo = repo
self._state, self.mtime = self._repostate()
self._filtername = repo.filtername
def fetch(self):
"""Refresh (if necessary) and return a repository.
@ -1085,10 +1084,7 @@ class cachedlocalrepo(object):
return self._repo, False
repo = repository(self._repo.baseui, self._repo.url())
if self._filtername:
self._repo = repo.filtered(self._filtername)
else:
self._repo = repo.unfiltered()
self._repo = repo
self._state = state
self.mtime = mtime
@ -1116,10 +1112,6 @@ class cachedlocalrepo(object):
completely independent of the original.
"""
repo = repository(self._repo.baseui, self._repo.origroot)
if self._filtername:
repo = repo.filtered(self._filtername)
else:
repo = repo.unfiltered()
c = cachedlocalrepo(repo)
c._state = self._state
c.mtime = self.mtime
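
Without a filter name to restore, cachedlocalrepo's refresh logic is a plain fingerprint check; a runnable toy with illustrative names:

class CachedRepo(object):
    def __init__(self, build, fingerprint):
        self._build, self._fingerprint = build, fingerprint
        self._repo, self._state = build(), fingerprint()
    def fetch(self):
        new = self._fingerprint()
        if new == self._state:
            return self._repo, False      # still fresh
        self._repo, self._state = self._build(), new
        return self._repo, True           # rebuilt

state = {"mtime": 1}
cache = CachedRepo(build=lambda: object(), fingerprint=lambda: state["mtime"])
repo1, rebuilt = cache.fetch()
assert not rebuilt
state["mtime"] = 2
repo2, rebuilt = cache.fetch()
assert rebuilt and repo2 is not repo1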

View File

@ -143,7 +143,7 @@ class filerevnav(revnav):
:path: path of the file we generate nav for
"""
# used for iteration
self._changelog = repo.unfiltered().changelog
self._changelog = repo.changelog
# used for hex generation
self._revlog = repo.file(path)

View File

@ -87,19 +87,18 @@ _cachedfiles = set()
class _basefilecache(scmutil.filecache):
"""All filecache usage on repo are done for logic that should be unfiltered
"""
"""filecache usage on repo"""
def __get__(self, repo, type=None):
if repo is None:
return self
return super(_basefilecache, self).__get__(repo.unfiltered(), type)
return super(_basefilecache, self).__get__(repo, type)
def __set__(self, repo, value):
return super(_basefilecache, self).__set__(repo.unfiltered(), value)
return super(_basefilecache, self).__set__(repo, value)
def __delete__(self, repo):
return super(_basefilecache, self).__delete__(repo.unfiltered())
return super(_basefilecache, self).__delete__(repo)
class repofilecache(_basefilecache):
@ -139,41 +138,15 @@ def isfilecached(repo, name):
    This returns a (cachedobj-or-None, iscached) tuple.
"""
cacheentry = repo.unfiltered()._filecache.get(name, None)
cacheentry = repo._filecache.get(name, None)
if not cacheentry:
return None, False
return cacheentry.obj, True
class unfilteredpropertycache(util.propertycache):
"""propertycache that apply to unfiltered repo only"""
def __get__(self, repo, type=None):
unfi = repo.unfiltered()
if unfi is repo:
return super(unfilteredpropertycache, self).__get__(unfi)
return getattr(unfi, self.name)
class filteredpropertycache(util.propertycache):
"""propertycache that must take filtering in account"""
def cachevalue(self, obj, value):
object.__setattr__(obj, self.name, value)
def hasunfilteredcache(repo, name):
"""check if a repo has an unfilteredpropertycache value for <name>"""
return name in vars(repo.unfiltered())
def unfilteredmethod(orig):
"""decorate method that always need to be run on unfiltered version"""
def wrapper(repo, *args, **kwargs):
return orig(repo.unfiltered(), *args, **kwargs)
return wrapper
def hascache(repo, name):
"""check if a repo has an value for <name>"""
return name in vars(repo)
moderncaps = {"lookup", "branchmap", "pushkey", "known", "getbundle", "unbundle"}
@ -188,7 +161,7 @@ class localpeer(repository.peer):
if caps is None:
caps = moderncaps.copy()
self._repo = repo.filtered("served")
self._repo = repo
self._ui = repo.ui
self._caps = repo._restrictcapabilities(caps)
@ -404,7 +377,6 @@ class localrepository(object):
def __init__(self, baseui, path, create=False):
self.requirements = set()
self.storerequirements = set()
self.filtername = None
# wvfs: rooted at the repository root, used to access the working copy
self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True, cacheaudited=False)
# localvfs: rooted at .hg, used to access repo files outside of
@ -598,15 +570,6 @@ class localrepository(object):
# Maps a property name to its util.filecacheentry
self._filecache = {}
# hold sets of revision to be filtered
# should be cleared when something might have changed the filter value:
# - new changesets,
# - phase change,
# - new obsolescence marker,
# - working directory parent change,
# - bookmark changes
self.filteredrevcache = {}
# post-dirstate-status hooks
self._postdsstatus = []
@ -705,12 +668,12 @@ class localrepository(object):
def _syncrevlogtozstore(self):
"""Sync commit data from revlog to zstore"""
zstore = bindings.zstore.zstore(self.svfs.join("hgcommits/v1"))
self.unfiltered().changelog.zstore = zstore
self.changelog.zstore = zstore
if self.ui.configbool(
"format", "use-zstore-commit-data-revlog-fallback"
) or self.ui.configbool("format", "use-zstore-commit-data-server-fallback"):
revs = list(self.unfiltered().revs("not public()"))
revs = list(self.revs("not public()"))
else:
revs = self
@ -824,14 +787,12 @@ class localrepository(object):
supported = basesupported
return supported
@unfilteredmethod
def close(self):
if util.safehasattr(self, "connectionpool"):
self.connectionpool.close()
self.commitpending()
@unfilteredmethod
def commitpending(self):
# If we have any pending manifests, commit them to disk.
if "manifestlog" in self.__dict__:
@ -997,16 +958,6 @@ class localrepository(object):
source, _branches = hg.parseurl(self.ui.expandpath(source))
return self.connectionpool.get(source, opts=opts)
def unfiltered(self):
"""Return unfiltered version of the repository
Intended to be overwritten by filtered repo."""
return self
def filtered(self, name):
"""Return a filtered version of a repository"""
return self
@repofilecache(localpaths=["shared"])
def sharedfeatures(self):
"""Returns the set of enabled 'shared' features for this repo"""
@ -1112,7 +1063,7 @@ class localrepository(object):
def manifestlog(self):
return manifest.manifestlog(self.svfs, self)
@unfilteredpropertycache
@util.propertycache
def fileslog(self):
return filelog.fileslog(self)
@ -1166,9 +1117,7 @@ class localrepository(object):
if isinstance(changeid, slice):
# wdirrev isn't contiguous so the slice shouldn't include it
return [
context.changectx(self, i)
for i in range(*changeid.indices(len(self)))
if i not in self.changelog.filteredrevs
context.changectx(self, i) for i in range(*changeid.indices(len(self)))
]
try:
return context.changectx(self, changeid)
@ -1265,7 +1214,7 @@ class localrepository(object):
"""
return hook.hook(self.ui, self, name, throw, **args)
@unfilteredpropertycache
@util.propertycache
def _mutationstore(self):
return mutation.makemutationstore(self)
@ -1281,7 +1230,7 @@ class localrepository(object):
"""returns a dictionary {branch: [branchheads]} with branchheads
ordered by increasing revision number"""
branchmap.updatecache(self)
return self._branchcaches[self.filtername]
return self._branchcaches[None]
def branchtip(self, branch, ignoremissing=False):
"""return the tip node for a given branch
@ -1313,11 +1262,10 @@ class localrepository(object):
def known(self, nodes):
cl = self.changelog
nm = cl.nodemap
filtered = cl.filteredrevs
result = []
for n in nodes:
r = nm.get(n)
resp = not (r is None or r in filtered)
resp = not (r is None)
result.append(resp)
return result
@ -1340,8 +1288,7 @@ class localrepository(object):
return False
if not self.publishing():
return True
# if publishing we can't copy if there is filtered content
return not self.filtered("visible").changelog.filteredrevs
return True
def shared(self):
"""the type of shared repository (None if not shared)"""
@ -1416,11 +1363,11 @@ class localrepository(object):
return data
@unfilteredpropertycache
@util.propertycache
def _encodefilterpats(self):
return self._loadfilter("encode")
@unfilteredpropertycache
@util.propertycache
def _decodefilterpats(self):
return self._loadfilter("decode")
@ -1520,7 +1467,7 @@ class localrepository(object):
args.update(bookmarks.preparehookargs(name, old, new))
repo.hook("pretxnclose-bookmark", throw=True, txnname=desc, **args)
if hook.hashook(repo.ui, "pretxnclose-phase"):
cl = repo.unfiltered().changelog
cl = repo.changelog
for rev, (old, new) in tr.changes["phases"].items():
args = tr.hookargs.copy()
node = hex(cl.node(rev))
@ -1571,18 +1518,16 @@ class localrepository(object):
tr.hookargs["txnid"] = txnid
# Write parts of the repository store that don't participate in the
# standard transaction mechanism.
unfi = self.unfiltered()
# TODO: Consider changing 'self' to 'reporef()'.
def commitnotransaction(tr):
unfi.commitpending()
self.commitpending()
def abortnotransaction(tr):
if "manifestlog" in unfi.__dict__:
if "manifestlog" in self.__dict__:
self.manifestlog.abortpending()
if "fileslog" in unfi.__dict__:
if "fileslog" in self.__dict__:
self.fileslog.abortpending()
def writependingnotransaction(tr):
@ -1624,7 +1569,7 @@ class localrepository(object):
)
if hook.hashook(repo.ui, "txnclose-phase"):
cl = repo.unfiltered().changelog
cl = repo.changelog
phasemv = sorted(tr.changes["phases"].items())
for rev, (old, new) in phasemv:
args = tr.hookargs.copy()
@ -1665,7 +1610,6 @@ class localrepository(object):
def undofiles(self):
return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
@unfilteredmethod
def _writejournal(self, desc):
self.dirstate.savebackup(None, "journal.dirstate")
self.localvfs.writeutf8("journal.branch", "default")
@ -1711,7 +1655,6 @@ class localrepository(object):
finally:
release(dsguard, lock, wlock)
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
ui = self.ui
try:
@ -1813,7 +1756,6 @@ class localrepository(object):
return updater
@unfilteredmethod
def updatecaches(self, tr=None):
"""warm appropriate caches
@ -1827,16 +1769,14 @@ class localrepository(object):
return
def invalidatecaches(self):
self.unfiltered()._branchcaches.clear()
self._branchcaches.clear()
self.invalidatevolatilesets()
def invalidatevolatilesets(self):
self.filteredrevcache.clear()
obsolete.clearobscaches(self)
mutation.clearobsoletecache(self)
unfi = self.unfiltered()
if "_phasecache" in unfi._filecache and "_phasecache" in unfi.__dict__:
unfi._phasecache.invalidate()
if "_phasecache" in self._filecache and "_phasecache" in self.__dict__:
self._phasecache.invalidate()
def invalidatedirstate(self):
"""Invalidates the dirstate, causing the next call to dirstate
@ -1852,13 +1792,13 @@ class localrepository(object):
self.dirstate.invalidate()
return
if hasunfilteredcache(self, "dirstate"):
if hascache(self, "dirstate"):
for k in self.dirstate._filecache:
try:
delattr(self.dirstate, k)
except AttributeError:
pass
delattr(self.unfiltered(), "dirstate")
delattr(self, "dirstate")
def invalidate(self, clearfilecache=False):
"""Invalidates both store and non-store parts other than dirstate
@ -1868,7 +1808,6 @@ class localrepository(object):
(e.g. incomplete fncache causes unintentional failure, but
redundant one doesn't).
"""
unfiltered = self.unfiltered() # all file caches are stored unfiltered
for k in list(self._filecache.keys()):
# dirstate is invalidated separately in invalidatedirstate()
if k == "dirstate":
@ -1883,7 +1822,7 @@ class localrepository(object):
# TODO: Solve the problem instead of working around it.
continue
if k == "manifestlog" and "manifestlog" in unfiltered.__dict__:
if k == "manifestlog" and "manifestlog" in self.__dict__:
# The manifestlog may have uncommitted additions, let's just
# flush them to disk so we don't lose them.
self.manifestlog.commitpending()
@ -1891,15 +1830,15 @@ class localrepository(object):
if clearfilecache:
del self._filecache[k]
try:
delattr(unfiltered, k)
delattr(self, k)
except AttributeError:
pass
if "fileslog" in unfiltered.__dict__:
if "fileslog" in self.__dict__:
# The fileslog may have uncommitted additions, let's just
# flush them to disk so we don't lose them.
unfiltered.fileslog.commitpending()
del unfiltered.__dict__["fileslog"]
self.fileslog.commitpending()
del self.__dict__["fileslog"]
self.invalidatecaches()
if not self.currenttransaction():
@ -1915,7 +1854,6 @@ class localrepository(object):
self.invalidate()
self.invalidatedirstate()
@unfilteredmethod
def _refreshfilecachestats(self, tr):
"""Reload stats of cached files so that they are flagged as valid"""
for k, ce in self._filecache.items():
@ -2259,7 +2197,6 @@ class localrepository(object):
elif f not in self.dirstate:
fail(f, _("file not tracked!"))
@unfilteredmethod
def commit(
self,
text="",
@ -2375,7 +2312,6 @@ class localrepository(object):
self._afterlock(commithook)
return ret
@unfilteredmethod
def commitctx(self, ctx, error=False):
"""Add a new revision to current repository.
Revision information is passed via the context argument.
@ -2552,7 +2488,6 @@ class localrepository(object):
tr.release()
lock.release()
@unfilteredmethod
def destroying(self):
"""Inform the repository that nodes are about to be destroyed.
Intended for use by strip and rollback, so there's a common
@ -2570,7 +2505,6 @@ class localrepository(object):
if "_phasecache" in vars(self):
self._phasecache.write()
@unfilteredmethod
def destroyed(self):
"""Inform the repository that nodes have been destroyed.
Intended for use by strip and rollback, so there's a common
@ -2675,8 +2609,6 @@ class localrepository(object):
headrevs = cl.index2.headsancestors(revs)
# headrevs is already in DESC.
reverse = not reverse
elif cl.filteredrevs:
headrevs = cl.index.headrevsfiltered(cl.filteredrevs)
else:
headrevs = cl.index.headrevs()
if start is not None:
@ -2692,7 +2624,7 @@ class localrepository(object):
return list(map(self.changelog.node, headrevs))
def branchheads(self, branch=None, start=None, closed=False):
"""return a (possibly filtered) list of heads for the given branch
"""return a list of heads for the given branch
Heads are returned in topological order, from newest to oldest.
If branch is None, use the dirstate branch.
@ -2751,7 +2683,7 @@ class localrepository(object):
command.
"""
@unfilteredpropertycache
@util.propertycache
def prepushoutgoinghooks(self):
"""Return util.hooks consists of a pushop with repo, remote, outgoing
methods, which are called before pushing changesets.
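
All of the unfilteredpropertycache swaps above land on the same primitive; a minimal self-contained version of util.propertycache-style caching (toy class, assumed semantics):

class propertycache(object):
    def __init__(self, func):
        self.func, self.name = func, func.__name__
    def __get__(self, obj, type=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value   # shadow the descriptor from now on
        return value

class Repo(object):
    @propertycache
    def fileslog(self):
        return object()                   # expensive construction elided

repo = Repo()
assert repo.fileslog is repo.fileslog     # computed once, then cached
del repo.__dict__["fileslog"]             # invalidation: recompute on next access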

View File

@ -564,7 +564,7 @@ class manifestlog(object):
self._opener = opener
self._revlog = repo._constructmanifest()
self._repo = repo.unfiltered()
self._repo = repo
# A cache of the manifestctx or treemanifestctx for each directory
self._dirmancache = {}

View File

@ -259,12 +259,12 @@ class obsoletecache(object):
ispublic = getispublicfunc(repo)
if ispublic(node):
return False
obsolete = self.obsolete[repo.filtername]
obsolete = self.obsolete[None]
if node in obsolete:
return True
if self.complete[repo.filtername] or node in self.notobsolete[repo.filtername]:
if self.complete[None] or node in self.notobsolete[None]:
return False
unfi = repo.unfiltered()
unfi = repo
clhasnode = getisvisiblefunc(repo)
clrev = unfi.changelog.rev
@ -279,12 +279,12 @@ class obsoletecache(object):
if clhasnode(succ):
obsolete.add(node)
return True
self.notobsolete[repo.filtername].add(node)
self.notobsolete[None].add(node)
return False
def obsoletenodes(self, repo):
if self.complete[repo.filtername]:
return self.obsolete[repo.filtername]
if self.complete[None]:
return self.obsolete[None]
with perftrace.trace("Compute Obsolete Nodes"):
perftrace.traceflag("mutation")
@ -299,7 +299,7 @@ class obsoletecache(object):
# even if the filter for this repo includes other commits.
clhasnode = getisvisiblefunc(repo)
clrev = repo.changelog.rev
obsolete = self.obsolete[repo.filtername]
obsolete = self.obsolete[None]
for node in repo.nodes("not public()"):
succsets = successorssets(repo, node, closest=True)
if succsets != [[node]]:
@ -317,13 +317,13 @@ class obsoletecache(object):
seen.add(pred)
if clhasnode(pred) and pred != nullid:
obsolete.add(pred)
self.obsolete[repo.filtername] = frozenset(obsolete)
self.complete[repo.filtername] = True
self.obsolete[None] = frozenset(obsolete)
self.complete[None] = True
# Since we know all obsolete commits, no need to remember which ones
# are not obsolete.
if repo.filtername in self.notobsolete:
del self.notobsolete[repo.filtername]
return self.obsolete[repo.filtername]
if None in self.notobsolete:
del self.notobsolete[None]
return self.obsolete[None]
def isobsolete(repo, node):
@ -587,7 +587,7 @@ def foreground(repo, nodes):
The foreground of a commit is the transitive closure of all descendants
and successors of the commit.
"""
unfi = repo.unfiltered()
unfi = repo
nm = unfi.changelog.nodemap
foreground = set(nodes)
newnodes = set(nodes)
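
The core rule the cache stores is visible in the loop above: a draft node is obsolete when its closest successor sets are anything but itself. A toy stand-in, with a plain dict for the mutation data:

def obsoletenodes(draftnodes, succsets):
    # succsets maps node -> list of successor sets; default is "itself"
    return {n for n in draftnodes if succsets.get(n, [[n]]) != [[n]]}

assert obsoletenodes({"a", "b"}, {"a": [["a2"]]}) == {"a"}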

View File

@ -996,7 +996,7 @@ def _pushkeyescape(markers):
def listmarkers(repo):
"""List markers over pushkey"""
if not repo.obsstore:
if not getattr(repo, "obsstore", None):
return {}
return _pushkeyescape(sorted(repo.obsstore))
@ -1093,8 +1093,7 @@ def getrevs(repo, name):
"""Return the set of revision that belong to the <name> set
Such access may compute the set and cache it for future use"""
repo = repo.unfiltered()
if not repo.obsstore:
if not getattr(repo, "obsstore", None):
return frozenset()
if name not in repo.obsstore.caches:
repo.obsstore.caches[name] = cachefuncs[name](repo)
@ -1287,7 +1286,7 @@ def createmarkers(repo, relations, flag=0, date=None, metadata=None, operation=N
This function operates within a transaction of its own, but does
not take any lock on the repo.
"""
unfi = repo.unfiltered()
unfi = repo
# Change predecessors to unfiltered contexts. An X -> X marker can be used
# to revive X. Ideally it's () -> X. But predecessor must be a single node
@ -1399,7 +1398,6 @@ def createmarkers(repo, relations, flag=0, date=None, metadata=None, operation=N
metadata=localmetadata,
ui=repo.ui,
)
repo.filteredrevcache.clear()
tr.close()
finally:
tr.release()

View File

@ -227,7 +227,7 @@ def exclusivemarkers(repo, nodes):
"""
# running on a filtered repository would be dangerous as markers could be
# reported as exclusive when they are relevant for other filtered nodes.
unfi = repo.unfiltered()
unfi = repo
# shortcut to various useful item
nm = unfi.changelog.nodemap
@ -304,7 +304,6 @@ def foreground(repo, nodes):
    Beware that an obsolescence cycle may result in complex situations.
"""
repo = repo.unfiltered()
foreground = set(repo.set("%ln::", nodes))
if repo.obsstore:
# We only need this complicated logic if there is obsolescence
@ -324,7 +323,7 @@ def foreground(repo, nodes):
def getobsoleted(repo, tr):
"""return the set of pre-existing revisions obsoleted by a transaction"""
torev = repo.unfiltered().changelog.nodemap.get
torev = repo.changelog.nodemap.get
phase = repo._phasecache.phase
succsmarkers = repo.obsstore.successors.get
public = phases.public

View File

@ -136,7 +136,6 @@ def _readroots(repo, phasedefaults=None):
Return (roots, dirty) where dirty is true if roots differ from
what is being stored.
"""
repo = repo.unfiltered()
dirty = False
roots = [set() for i in allphases]
try:
@ -243,8 +242,6 @@ class phasecache(object):
revs = self._phasesets[p]
else:
revs = set.union(*[self._phasesets[p] for p in phases])
if repo.changelog.filteredrevs:
revs = revs - repo.changelog.filteredrevs
if subset is None:
return smartset.baseset(revs)
else:
@ -286,7 +283,6 @@ class phasecache(object):
def _getphaserevsnative(self, repo):
assert not self._headbased
repo = repo.unfiltered()
nativeroots = []
for phase in trackedphases:
nativeroots.append(list(map(repo.changelog.rev, self.phaseroots[phase])))
@ -294,7 +290,6 @@ class phasecache(object):
def _computephaserevspure(self, repo):
assert not self._headbased
repo = repo.unfiltered()
cl = repo.changelog
self._phasesets = [set() for phase in allphases]
roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
@ -405,7 +400,6 @@ class phasecache(object):
# head-based phases do not need to track phase of new commits
# explcitly. visible heads and remotenames track them implicitly.
return
repo = repo.unfiltered()
self._retractboundary(repo, tr, targetphase, nodes)
if tr is not None and "phases" in tr.changes:
phasetracking = tr.changes["phases"]
@ -432,8 +426,6 @@ class phasecache(object):
else:
phasetracking = tr.changes.get("phases")
repo = repo.unfiltered()
delroots = [] # set of root deleted by this path
for phase in range(targetphase + 1, len(allphases)):
# filter nodes that are not in a compatible phase already
@ -472,7 +464,6 @@ class phasecache(object):
phasetracking = None
else:
phasetracking = tr.changes.get("phases")
repo = repo.unfiltered()
if (
self._retractboundary(repo, tr, targetphase, nodes)
and phasetracking is not None
@ -502,7 +493,6 @@ class phasecache(object):
assert not self._headbased
# Be careful to preserve shallow-copied values: do not update
# phaseroots values, replace them.
repo = repo.unfiltered()
currentroots = self.phaseroots[targetphase]
finalroots = oldroots = set(currentroots)
newroots = [n for n in nodes if self.phase(repo, repo[n].rev()) < targetphase]
@ -600,7 +590,7 @@ def listphases(repo):
keys = util.sortdict()
if not repo._phasecache._headbased:
value = "%i" % draft
cl = repo.unfiltered().changelog
cl = repo.changelog
for root in repo._phasecache.phaseroots[draft]:
if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
keys[hex(root)] = value
@ -628,7 +618,6 @@ def listphases(repo):
def pushphase(repo, nhex, oldphasestr, newphasestr):
"""List phases root for serialization over pushkey"""
repo = repo.unfiltered()
with repo.lock():
currentphase = repo[nhex].phase()
newphase = abs(int(newphasestr)) # let's avoid negative index surprise
@ -684,7 +673,6 @@ def analyzeremotephases(repo, subset, roots):
Accept unknown element input
"""
repo = repo.unfiltered()
# build list from dictionary
draftroots = []
nodemap = repo.changelog.nodemap # to filter unknown nodes
@ -720,7 +708,6 @@ class remotephasessummary(object):
"""
def __init__(self, repo, remotesubset, remoteroots):
unfi = repo.unfiltered()
self._allremoteroots = remoteroots
self.publishing = remoteroots.get("publishing", False)
@ -728,7 +715,7 @@ class remotephasessummary(object):
ana = analyzeremotephases(repo, remotesubset, remoteroots)
self.publicheads, self.draftroots = ana
# Get the list of all "heads" revs draft on remote
dheads = unfi.set("heads(%ln::%ln)", self.draftroots, remotesubset)
dheads = repo.set("heads(%ln::%ln)", self.draftroots, remotesubset)
self.draftheads = [c.node() for c in dheads]
@ -737,7 +724,6 @@ def newheads(repo, heads, roots):
* `heads`: define the first subset
    * `roots`: define the second subset, which we subtract from the first"""
repo = repo.unfiltered()
revset = repo.set(
"heads((%ln + parents(%ln)) - (%ln::%ln))", heads, roots, roots, heads
)
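
With changelog.filteredrevs gone, the revset a phasecache hands back is the raw union of its phase sets; as a toy:

PUBLIC, DRAFT, SECRET = 0, 1, 2
phasesets = {PUBLIC: {0, 1, 2}, DRAFT: {3, 4}, SECRET: {5}}

def getrevset(phases):
    # union of the per-phase rev sets, no filtered-rev subtraction
    return set.union(*[phasesets[p] for p in phases])

assert getrevset([DRAFT, SECRET]) == {3, 4, 5}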

View File

@ -120,7 +120,6 @@ def strip(ui, repo, nodelist, backup=True, topic="backup"):
if backup in ["none", "strip"]:
backup = False
repo = repo.unfiltered()
repo.destroying()
cl = repo.changelog
@ -288,7 +287,7 @@ def strip(ui, repo, nodelist, backup=True, topic="backup"):
def safestriproots(ui, repo, nodes):
"""return list of roots of nodes where descendants are covered by nodes"""
torev = repo.unfiltered().changelog.rev
torev = repo.changelog.rev
revs = set(torev(n) for n in nodes)
# tostrip = wanted - unsafe = wanted - ancestors(orphaned)
# orphaned = affected - wanted
@ -367,7 +366,6 @@ def rebuildfncache(ui, repo):
Missing entries will be added. Extra entries will be removed.
"""
repo = repo.unfiltered()
if "fncache" not in repo.requirements:
ui.warn(

View File

@ -1189,8 +1189,7 @@ class revlog(object):
except RevlogError:
# parsers.c radix tree lookup gave multiple matches
# fast path: for unfiltered changelog, radix tree is accurate
if not getattr(self, "filteredrevs", None):
raise LookupError(id, self.indexfile, _("ambiguous identifier"))
raise LookupError(id, self.indexfile, _("ambiguous identifier"))
# fall through to slow path that filters hidden revisions
except (AttributeError, ValueError):
# we are pure python, or key was too short to search radix tree
@ -2419,11 +2418,6 @@ class revlog(object):
if len(destrevlog):
raise ValueError(_("destination revlog is not empty"))
if getattr(self, "filteredrevs", None):
raise ValueError(_("source revlog has filtered revisions"))
if getattr(destrevlog, "filteredrevs", None):
raise ValueError(_("destination revlog has filtered revisions"))
# lazydeltabase controls whether to reuse a cached delta, if possible.
oldlazydeltabase = destrevlog._lazydeltabase
oldamd = destrevlog._aggressivemergedeltas

View File

@ -2195,12 +2195,12 @@ def _mapbynodefunc(repo, s, f):
talking about nodes. Handles converting between rev numbers and nodes, and
filtering.
"""
cl = repo.unfiltered().changelog
cl = repo.changelog
torev = cl.rev
tonode = cl.node
nodemap = cl.nodemap
result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
return smartset.baseset(result - repo.changelog.filteredrevs)
return smartset.baseset(result)
@predicate("allprecursors(set[, depth])")
@ -2626,7 +2626,7 @@ def matchany(ui, specs, repo=None, localalias=None):
tree = revsetlang.foldconcat(tree)
tree = revsetlang.analyze(tree)
if repo is not None:
lookup = repo.unfiltered().__contains__
lookup = repo.__contains__
unknownnames = list(scanunknowns([tree], lookup))
tree = revsetlang.optimize(tree)
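
_mapbynodefunc now returns the mapped set directly; a runnable toy of the rev-to-node round-trip it performs:

def mapbynode(torev, tonode, known, revs, f):
    # revs -> nodes -> f -> nodes -> revs, dropping nodes we do not know
    return {torev(n) for n in f(tonode(r) for r in revs) if n in known}

nodes = ["n0", "n1", "n2"]
known = {n: i for i, n in enumerate(nodes)}
identity = lambda ns: list(ns)
assert mapbynode(known.__getitem__, nodes.__getitem__, known, {0, 2}, identity) == {0, 2}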

View File

@ -739,7 +739,7 @@ def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
if moves is None:
moves = {}
# Unfiltered repo is needed since nodes in replacements might be hidden.
unfi = repo.unfiltered()
unfi = repo
for oldnode, newnodes in replacements.items():
if oldnode in moves:
continue
@ -1402,7 +1402,6 @@ def trackrevnumfortests(repo, specs):
candidates.append("max(desc(%s))" % desc.split()[0])
candidates.append("%s" % ctx.hex())
repo = repo.unfiltered()
for candidate in candidates:
try:
nodes = list(repo.nodes(candidate))

View File

@ -1195,7 +1195,7 @@ def shortest(context, mapping, args):
# _partialmatch() of filtered changelog could take O(len(repo)) time,
# which would be unacceptably slow. so we look for hash collision in
# unfiltered space, which means some hashes may be slightly longer.
cl = mapping["ctx"]._repo.unfiltered().changelog
cl = mapping["ctx"]._repo.changelog
return cl.shortest(node, minlength)

View File

@ -827,7 +827,6 @@ def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
def upgraderepo(ui, repo, run=False, optimize=None):
"""Upgrade a repository in place."""
optimize = set(optimize or [])
repo = repo.unfiltered()
# Ensure the repository can be upgraded.
missingreqs = requiredsourcerequirements(repo) - repo.requirements

View File

@ -36,7 +36,7 @@ class verifier(object):
# The match argument is always None in hg core, but e.g. the narrowhg
# extension will pass in a matcher here.
def __init__(self, repo, match=None, revs=None):
self.repo = repo.unfiltered()
self.repo = repo
self.ui = repo.ui
self.match = match or scmutil.matchall(repo)
self.badrevs = set()

View File

@ -22,7 +22,7 @@ def _convertfromobsolete(repo):
{("mutation", "enabled"): False, ("visibility", "enabled"): False},
"convertfromobsolete",
):
return list(repo.unfiltered().nodes("heads((not public()) - hidden())"))
return list(repo.nodes("heads((not public()) - hidden())"))
def starttracking(repo):
@ -123,7 +123,7 @@ class visibleheads(object):
newheads = list(newheads)
# Remove heads that are not actually heads, and preserve the ordering
# in self.heads for heads that have not changed.
unfi = repo.unfiltered()
unfi = repo
if len(newheads) > 1:
hasnode = repo.changelog.nodemap.__contains__
realnewheads = list(
@ -161,7 +161,7 @@ class visibleheads(object):
self._updateheads(repo, newheads, tr)
def remove(self, repo, oldnodes, tr):
unfi = repo.unfiltered()
unfi = repo
clrev = unfi.changelog.rev
clparents = unfi.changelog.parents
phasecache = unfi._phasecache
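
remove() above walks parents to keep the head set closed; the shape of that maintenance as a toy (parents given as a plain dict; the real code also consults phases and the changelog):

def removeheads(heads, oldnodes, parents):
    # replace each removed head by its parents, preserving order
    newheads = []
    for h in heads:
        if h in oldnodes:
            newheads.extend(p for p in parents[h] if p not in newheads)
        else:
            newheads.append(h)
    return newheads

assert removeheads(["c", "x"], {"c"}, {"c": ["b"]}) == ["b", "x"]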

View File

@ -681,7 +681,7 @@ def getdispatchrepo(repo, proto, command):
extensions that need commands to operate on different repo views under
specialized circumstances.
"""
return repo.filtered("served")
return repo
def wrapstreamres(towrap, logger, start_time):
@ -872,7 +872,6 @@ def wireprotocommand(name, args=""):
@wireprotocommand("batch", "cmds *")
def batch(repo, proto, cmds, others):
repo = repo.filtered("served")
res = []
for pair in cmds.split(";"):
op, args = pair.split(" ", 1)

View File

@ -114,9 +114,6 @@ cdef class clindex(object):
def headrevs(self):
return self._origindex.headrevs()
def headrevsfiltered(self, filtered):
return self._origindex.headrevsfiltered(filtered)
def deltachain(self, rev, stoprev, generaldelta):
return self._origindex.deltachain(rev, stoprev, generaldelta)
@ -437,10 +434,7 @@ def reposetup(ui, repo):
except AttributeError:
pass
unfilteredmethod = localrepo.unfilteredmethod
class clindexrepo(repo.__class__):
@unfilteredmethod
def updatecaches(self, tr=None):
try:
self.changelog.index.updatecaches()
@ -448,7 +442,6 @@ def reposetup(ui, repo):
pass
super(clindexrepo, self).updatecaches(tr)
@unfilteredmethod
def destroying(self):
# Tell clindex to prepare for the strip. clindex will unlink
# nodemap and other caches.
@ -458,7 +451,6 @@ def reposetup(ui, repo):
pass
super(clindexrepo, self).destroying()
@unfilteredmethod
def destroyed(self):
# Force a reload of changelog. The current "self.changelog" object
# has an outdated snapshot of changelog.i. We need to read the new

View File

@ -14,6 +14,7 @@ from typing import (
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
NoReturn,
@ -41,52 +42,31 @@ import edenscm.mercurial.urllibcompat
import edenscm.mercurial.util
import edenscm.mercurial.vfs
_T = TypeVar("_T")
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_Tlocalpeer = TypeVar("_Tlocalpeer", bound=localpeer)
_Tlocalrepository = TypeVar("_Tlocalrepository", bound=localrepository)
class _basefilecache(edenscm.mercurial.scmutil.filecache):
__doc__: str
def __delete__(self, repo) -> None:
...
def __get__(self, repo, type=...) -> Any:
...
def __set__(self, repo, value) -> None:
...
def __delete__(self, repo) -> None: ...
def __get__(self, repo, type=...) -> Any: ...
def __set__(self, repo, value) -> None: ...
class filteredpropertycache(edenscm.mercurial.util.propertycache):
__doc__: str
class locallegacypeer(edenscm.mercurial.repository.legacypeer, localpeer):
__doc__: str
_caps: Any
_repo: Any
_ui: Any
def __init__(self, repo) -> None:
...
def between(self, pairs) -> Any:
...
def branches(self, nodes) -> Any:
...
def changegroup(self, basenodes, source) -> Any:
...
def changegroupsubset(self, bases, heads, source) -> Any:
...
def __init__(self, repo) -> None: ...
def between(self, pairs) -> Any: ...
def branches(self, nodes) -> Any: ...
def changegroup(self, basenodes, source) -> Any: ...
def changegroupsubset(self, bases, heads, source) -> Any: ...
class localpeer(edenscm.mercurial.repository.peer):
__doc__: str
@ -94,61 +74,26 @@ class localpeer(edenscm.mercurial.repository.peer):
_repo: Any
_ui: Any
ui: Any
def __init__(self, repo, caps=...) -> None:
...
def branchmap(self) -> Any:
...
def canpush(self) -> bool:
...
def capabilities(self) -> Set[str]:
...
def close(self) -> None:
...
def debugwireargs(self, one, two, three=..., four=..., five=...) -> str:
...
def getbundle(self, source, heads=..., common=..., bundlecaps=..., **kwargs) -> Any:
...
def heads(self, *args, **kwargs) -> list:
...
def iterbatch(self) -> edenscm.mercurial.peer.localiterbatcher:
...
def known(self, nodes) -> Any:
...
def listkeys(self, namespace) -> Any:
...
def local(self) -> Any:
...
def lookup(self, key) -> Any:
...
def peer(self: _Tlocalpeer) -> _Tlocalpeer:
...
def pushkey(self, namespace, key, old, new) -> Any:
...
def stream_out(self) -> NoReturn:
...
def unbundle(self, cg, heads, url) -> Any:
...
def url(self) -> Any:
...
def __init__(self, repo, caps=...) -> None: ...
def branchmap(self) -> Any: ...
def canpush(self) -> bool: ...
def capabilities(self) -> Set[str]: ...
def close(self) -> None: ...
def debugwireargs(self, one, two, three=..., four=..., five=...) -> str: ...
def getbundle(
self, source, heads=..., common=..., bundlecaps=..., **kwargs
) -> Any: ...
def heads(self, *args, **kwargs) -> list: ...
def iterbatch(self) -> edenscm.mercurial.peer.localiterbatcher: ...
def known(self, nodes) -> Any: ...
def listkeys(self, namespace) -> Any: ...
def local(self) -> Any: ...
def lookup(self, key) -> Any: ...
def peer(self: _Tlocalpeer) -> _Tlocalpeer: ...
def pushkey(self, namespace, key, old, new) -> Any: ...
def stream_out(self) -> NoReturn: ...
def unbundle(self, cg, heads, url) -> Any: ...
def url(self) -> Any: ...
class localrepository(object):
__doc__: str
@ -181,9 +126,9 @@ class localrepository(object):
dirstate: edenscm.mercurial.dirstate.dirstate
disableeventreporting: Callable[..., contextlib._GeneratorContextManager]
featuresetupfuncs: __builtin__.set[nothing]
fileservice: Any
fileslog: Any
filteredrevcache: Dict[nothing, nothing]
filtername: None
filterpats: Dict[Any, List[Tuple[Any, Any, Any]]]
localvfs: edenscm.mercurial.vfs.vfs
manifestlog: Any
@ -212,63 +157,28 @@ class localrepository(object):
ui: edenscm.mercurial.ui.ui
vfs: edenscm.mercurial.vfs.vfs
wvfs: edenscm.mercurial.vfs.vfs
def __bool__(self) -> bool:
...
def __contains__(self, changeid) -> bool:
...
def __getitem__(self, changeid) -> Any:
...
def __init__(self, baseui, path, create=...) -> None:
...
def __iter__(self) -> Any:
...
def __len__(self) -> int:
...
def __nonzero__(self) -> bool:
...
def _afterlock(self, callback) -> None:
...
def _applyopenerreqs(self) -> None:
...
def _buildcacheupdater(self, newtransaction) -> Callable[[Any], Any]:
...
def _constructmanifest(self) -> Any:
...
def _currentlock(self, lockref) -> Any:
...
def _dirstatevalidate(self, node: bytes) -> bytes:
...
def __bool__(self) -> bool: ...
def __contains__(self, changeid) -> bool: ...
def __getitem__(self, changeid) -> Any: ...
def __init__(self, baseui, path, create=...) -> None: ...
def __iter__(self) -> Any: ...
def __len__(self) -> int: ...
def __nonzero__(self) -> bool: ...
def _afterlock(self, callback) -> None: ...
def _applyopenerreqs(self) -> None: ...
def _buildcacheupdater(self, newtransaction) -> Callable[[Any], Any]: ...
def _constructmanifest(self) -> Any: ...
def _currentlock(self, lockref) -> Any: ...
def _dirstatevalidate(self, node: bytes) -> bytes: ...
def _featuresetup(
self, setupfuncs, basesupported: _T1
) -> Union[__builtin__.set, _T1]:
...
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist) -> Any:
...
def _filter(self, filterpats, filename, data) -> Any:
...
def _getsvfsward(self, origfunc) -> Callable:
...
def _getvfsward(self, origfunc) -> Callable:
...
) -> Union[__builtin__.set, _T1]: ...
def _filecommit(
self, fctx, manifest1, manifest2, linkrev, tr, changelist
) -> Any: ...
def _filter(self, filterpats, filename, data) -> Any: ...
def _getsvfsward(self, origfunc) -> Callable: ...
def _getvfsward(self, origfunc) -> Callable: ...
def _journalfiles(
self,
) -> Tuple[
@ -279,15 +189,9 @@ class localrepository(object):
Tuple[Any, str],
Tuple[Any, str],
Tuple[Any, str],
]:
...
def _loadextensions(self) -> None:
...
def _loadfilter(self, filter) -> List[Tuple[Any, Any, Any]]:
...
]: ...
def _loadextensions(self) -> None: ...
def _loadfilter(self, filter) -> List[Tuple[Any, Any, Any]]: ...
def _lock(
self,
vfs,
@ -298,314 +202,127 @@ class localrepository(object):
desc,
inheritchecker=...,
parentenvvar=...,
) -> edenscm.mercurial.lock.lock:
...
def _narrowheadsmigration(self) -> None:
...
def _refreshfilecachestats(repo: localrepository, *args, **kwargs) -> None:
...
def _restrictcapabilities(self, caps: Set[str]) -> Set[str]:
...
def _rollback(repo: localrepository, *args, **kwargs) -> int:
...
def _syncrevlogtozstore(self) -> None:
...
def _wlockchecktransaction(self) -> None:
...
def _writejournal(repo: localrepository, *args, **kwargs) -> None:
...
def _writerequirements(self) -> None:
...
def _writestorerequirements(self) -> None:
...
def _zstorecommitdatamigration(self) -> None:
...
def adddatafilter(self, name, filter) -> None:
...
def addpostdsstatus(self, ps, afterdirstatewrite=...) -> None:
...
def anyrevs(self, specs, user=..., localalias=...) -> Any:
...
def automigratefinish(self) -> None:
...
def automigratestart(self) -> None:
...
def between(self, pairs) -> List[List[nothing]]:
...
def branches(self, nodes) -> List[Tuple[Any, Any, Any, Any]]:
...
def branchheads(self, branch=..., start=..., closed=...) -> List[nothing]:
...
def branchmap(self) -> Any:
...
def branchtip(self, branch, ignoremissing=...) -> Any:
...
def cancopy(self) -> bool:
...
) -> edenscm.mercurial.lock.lock: ...
def _narrowheadsmigration(self) -> None: ...
def _prefetchtrees(
self,
rootdir: str,
mfnodes: Iterable[bytes],
basemfnodes: List[bytes],
directories: List[str],
depth: Optional[int] = None,
): ...
def _refreshfilecachestats(repo: localrepository, *args, **kwargs) -> None: ...
def _restrictcapabilities(self, caps: Set[str]) -> Set[str]: ...
def _rollback(repo: localrepository, *args, **kwargs) -> int: ...
def _syncrevlogtozstore(self) -> None: ...
def _wlockchecktransaction(self) -> None: ...
def _writejournal(repo: localrepository, *args, **kwargs) -> None: ...
def _writerequirements(self) -> None: ...
def _writestorerequirements(self) -> None: ...
def _zstorecommitdatamigration(self) -> None: ...
def adddatafilter(self, name, filter) -> None: ...
def addpostdsstatus(self, ps, afterdirstatewrite=...) -> None: ...
def anyrevs(self, specs, user=..., localalias=...) -> Any: ...
def automigratefinish(self) -> None: ...
def automigratestart(self) -> None: ...
def between(self, pairs) -> List[List[nothing]]: ...
def branches(self, nodes) -> List[Tuple[Any, Any, Any, Any]]: ...
def branchheads(self, branch=..., start=..., closed=...) -> List[nothing]: ...
def branchmap(self) -> Any: ...
def branchtip(self, branch, ignoremissing=...) -> Any: ...
def cancopy(self) -> bool: ...
def changectx(
self, changeid: Union[int, str, bytes, edenscm.mercurial.context.basectx]
) -> edenscm.mercurial.context.basectx:
...
def checkcommitpatterns(self, wctx, match, status, fail) -> None:
...
def checkpush(self, pushop) -> None:
...
def clearpostdsstatus(self) -> None:
...
def close(self) -> None:
...
def commit(repo: localrepository, *args, **kwargs) -> Any:
...
def commitctx(repo: localrepository, *args, **kwargs) -> Any:
...
def commitpending(repo: localrepository, *args, **kwargs) -> None:
...
def currenttransaction(self) -> Any:
...
def currentwlock(self) -> Any:
...
def debugwireargs(self, one, two, three=..., four=..., five=...) -> str:
...
def destroyed(self) -> None:
...
def destroying(self) -> None:
...
def file(self, f) -> edenscm.mercurial.filelog.filelog:
...
) -> edenscm.mercurial.context.basectx: ...
def checkcommitpatterns(self, wctx, match, status, fail) -> None: ...
def checkpush(self, pushop) -> None: ...
def clearpostdsstatus(self) -> None: ...
def close(self) -> None: ...
def commit(repo: localrepository, *args, **kwargs) -> Any: ...
def commitctx(repo: localrepository, *args, **kwargs) -> Any: ...
def commitpending(repo: localrepository, *args, **kwargs) -> None: ...
def currenttransaction(self) -> Any: ...
def currentwlock(self) -> Any: ...
def debugwireargs(self, one, two, three=..., four=..., five=...) -> str: ...
def destroyed(self) -> None: ...
def destroying(self) -> None: ...
def file(self, f) -> edenscm.mercurial.filelog.filelog: ...
def filectx(
self, path, changeid=..., fileid=...
) -> edenscm.mercurial.context.filectx:
...
def filtered(self, name) -> self:
...
def getcwd(self) -> str:
...
) -> edenscm.mercurial.context.filectx: ...
def getcwd(self) -> str: ...
def headrevs(
self, start=..., includepublic=..., includedraft=..., reverse=...
) -> List[int]:
...
def heads(self, start=..., includepublic=..., includedraft=...) -> List[bytes]:
...
def hook(self, name: str, throw: bool = ..., **args) -> Any:
...
def invalidate(self, clearfilecache=...) -> None:
...
def invalidateall(self) -> None:
...
def invalidatecaches(self) -> None:
...
def invalidatedirstate(self) -> None:
...
def invalidatevolatilesets(self) -> None:
...
def known(self, nodes) -> List[bool]:
...
def listkeys(self, namespace: str) -> Any:
...
def local(self) -> self:
...
def lock(self, wait: bool = ...) -> Any:
...
def lookup(self, key) -> Any:
...
def lookupbranch(self, key, remote=...) -> Any:
...
def nodebookmarks(self, node) -> list:
...
def nodes(self, expr, *args) -> Generator[Any, Any, None]:
...
def pathto(self, f, cwd=...) -> Any:
...
def peer(self) -> localpeer:
...
def postdsstatus(self, afterdirstatewrite=...) -> list:
...
def publishing(self) -> Any:
...
def pushkey(self, namespace, key, old, new) -> Any:
...
def recover(self) -> bool:
...
def revs(self, expr, *args) -> Any:
...
def rollback(self, dryrun=..., force=...) -> Any:
...
def savecommitmessage(self, text) -> Any:
...
def set(self, expr, *args) -> Generator[Any, Any, None]:
...
def setparents(self, p1, p2=...) -> None:
...
def shared(self) -> Optional[str]:
...
) -> List[int]: ...
def heads(self, start=..., includepublic=..., includedraft=...) -> List[bytes]: ...
def hook(self, name: str, throw: bool = ..., **args) -> Any: ...
def invalidate(self, clearfilecache=...) -> None: ...
def invalidateall(self) -> None: ...
def invalidatecaches(self) -> None: ...
def invalidatedirstate(self) -> None: ...
def invalidatevolatilesets(self) -> None: ...
def known(self, nodes) -> List[bool]: ...
def listkeys(self, namespace: str) -> Any: ...
def local(self) -> self: ...
def lock(self, wait: bool = ...) -> Any: ...
def lookup(self, key) -> Any: ...
def lookupbranch(self, key, remote=...) -> Any: ...
def nodebookmarks(self, node) -> list: ...
def nodes(self, expr, *args) -> Generator[Any, Any, None]: ...
def pathto(self, f, cwd=...) -> Any: ...
def peer(self) -> localpeer: ...
def postdsstatus(self, afterdirstatewrite=...) -> list: ...
def prefetchtrees(
self,
mfnodes: Iterable[bytes],
basemfnodes: Optional[List[bytes]] = None,
depth: Optional[int] = None,
): ...
def publishing(self) -> Any: ...
def pushkey(self, namespace, key, old, new) -> Any: ...
def recover(self) -> bool: ...
def revs(self, expr, *args) -> Any: ...
def rollback(self, dryrun=..., force=...) -> Any: ...
def savecommitmessage(self, text) -> Any: ...
def set(self, expr, *args) -> Generator[Any, Any, None]: ...
def setparents(self, p1, p2=...) -> None: ...
def shared(self) -> Optional[str]: ...
def status(
self, node1=..., node2=..., match=..., ignored=..., clean=..., unknown=...
) -> Any:
...
def transaction(self, desc, report=...) -> Any:
...
def undofiles(self) -> List[Tuple[Any, Any]]:
...
def unfiltered(self) -> self:
...
def updatecaches(repo: localrepository, *args, **kwargs) -> None:
...
def url(self) -> str:
...
def walk(self, match, node=...) -> Any:
...
def wjoin(self, f, *insidef) -> str:
...
def wlock(self, wait=...) -> Any:
...
def wread(self, filename) -> bytes:
...
def wwrite(self, filename, data, flags, backgroundclose=...) -> int:
...
def wwritedata(self, filename, data) -> Any:
...
) -> Any: ...
def transaction(self, desc, report=...) -> Any: ...
def undofiles(self) -> List[Tuple[Any, Any]]: ...
def updatecaches(repo: localrepository, *args, **kwargs) -> None: ...
def url(self) -> str: ...
def walk(self, match, node=...) -> Any: ...
def wjoin(self, f, *insidef) -> str: ...
def wlock(self, wait=...) -> Any: ...
def wread(self, filename) -> bytes: ...
def wwrite(self, filename, data, flags, backgroundclose=...) -> int: ...
def wwritedata(self, filename, data) -> Any: ...
class repofilecache(_basefilecache):
__doc__: str
def __init__(self, localpaths=..., sharedpaths=...) -> None:
...
def localjoin(self, obj, fname) -> Any:
...
def sharedjoin(self, obj, fname) -> Any:
...
def __init__(self, localpaths=..., sharedpaths=...) -> None: ...
def localjoin(self, obj, fname) -> Any: ...
def sharedjoin(self, obj, fname) -> Any: ...
class storecache(_basefilecache):
__doc__: str
def join(self, obj, fname) -> Any:
...
def join(self, obj, fname) -> Any: ...
class unfilteredpropertycache(edenscm.mercurial.util.propertycache):
__doc__: str
def __get__(self, repo, type=...) -> Any: ...
def __get__(self, repo, type=...) -> Any:
...
def aftertrans(files) -> Callable[[], Any]:
...
def hasunfilteredcache(repo, name) -> bool:
...
def instance(ui, path, create) -> localrepository:
...
def isfilecached(repo, name) -> Tuple[Any, bool]:
...
def islocal(path) -> bool:
...
def newreporequirements(repo: localrepository) -> Set[str]:
...
def newrepostorerequirements(repo: localrepository) -> Set[str]:
...
def release(*locks) -> None:
...
def undoname(fn) -> Any:
...
def unfilteredmethod(orig) -> Callable:
...
def aftertrans(files) -> Callable[[], Any]: ...
def hasunfilteredcache(repo, name) -> bool: ...
def instance(ui, path, create) -> localrepository: ...
def isfilecached(repo, name) -> Tuple[Any, bool]: ...
def islocal(path) -> bool: ...
def newreporequirements(repo: localrepository) -> Set[str]: ...
def newrepostorerequirements(repo: localrepository) -> Set[str]: ...
def release(*locks) -> None: ...
def undoname(fn) -> Any: ...
def unfilteredmethod(orig) -> Callable: ...