2013-10-16 04:20:12 +04:00
|
|
|
# __init__.py - remotefilelog extension
|
2013-05-07 03:44:04 +04:00
|
|
|
#
|
|
|
|
# Copyright 2013 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2017-05-22 03:09:08 +03:00
|
|
|
"""
|
|
|
|
The remotefilelog extension is used to leave file contents on the server and
|
|
|
|
only download them on demand as needed.
|
|
|
|
|
|
|
|
Configs:
|
|
|
|
|
|
|
|
``packs.maxchainlen`` specifies the maximum delta chain length in pack files
|
2017-07-17 13:04:22 +03:00
|
|
|
``remotefilelog.backgroundprefetch`` runs prefetch in background when True
|
2017-07-20 14:54:44 +03:00
|
|
|
``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
|
|
|
|
update, and on other commands that use them. Different from pullprefetch.
|
2017-05-22 03:09:08 +03:00
|
|
|
"""
|
2013-05-07 04:26:27 +04:00
|
|
|
|
2016-10-21 21:02:09 +03:00
|
|
|
from . import fileserverclient, remotefilelog, remotefilectx, shallowstore
|
2016-04-05 02:26:12 +03:00
|
|
|
import shallowbundle, debugcommands, remotefilelogserver, shallowverifier
|
2016-10-21 21:02:09 +03:00
|
|
|
import shallowutil, shallowrepo
|
2016-05-03 22:34:09 +03:00
|
|
|
import repack as repackmod
|
2016-10-21 21:02:09 +03:00
|
|
|
from mercurial.node import hex
|
2013-05-21 02:03:07 +04:00
|
|
|
from mercurial.i18n import _
|
|
|
|
from mercurial.extensions import wrapfunction
|
2017-02-24 00:09:59 +03:00
|
|
|
from mercurial import (
    changegroup,
    changelog,
    cmdutil,
    commands,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match,
    merge,
    patch,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    store,
    templatekw,
    util,
)
|
|
|
|
|
2016-10-21 21:02:09 +03:00
|
|
|
import os
|
2015-12-02 21:40:49 +03:00
|
|
|
import traceback
|
2013-05-07 04:26:27 +04:00
|
|
|
|
2017-02-24 00:09:59 +03:00
|
|
|
# ensures debug commands are registered
|
|
|
|
hgdebugcommands.command
|
2016-11-29 16:48:59 +03:00
|
|
|
|
2015-10-14 00:17:02 +03:00
|
|
|
try:
|
|
|
|
from mercurial import streamclone
|
|
|
|
streamclone._walkstreamfiles
|
|
|
|
hasstreamclone = True
|
2016-04-26 23:00:31 +03:00
|
|
|
except Exception:
|
2015-10-14 00:17:02 +03:00
|
|
|
hasstreamclone = False
|
|
|
|
|
2013-06-29 02:57:15 +04:00
|
|
|
cmdtable = {}
|
2017-05-22 23:38:37 +03:00
|
|
|
command = registrar.command(cmdtable)
|
2016-11-29 16:24:07 +03:00
|
|
|
testedwith = 'ships-with-fb-hgext'
|
2013-06-29 02:57:15 +04:00
|
|
|
|
2013-10-02 02:11:57 +04:00
|
|
|
repoclass = localrepo.localrepository
|
|
|
|
if util.safehasattr(repoclass, '_basesupported'):
|
2013-11-26 04:36:44 +04:00
|
|
|
repoclass._basesupported.add(shallowrepo.requirement)
|
2013-10-02 02:11:57 +04:00
|
|
|
else:
|
|
|
|
# hg <= 2.7
|
2013-11-26 04:36:44 +04:00
|
|
|
repoclass.supported.add(shallowrepo.requirement)
|
2013-10-02 02:11:57 +04:00
|
|
|
|
2013-05-21 02:03:07 +04:00
|
|
|
def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.

    Runs once per process, before any repository is loaded.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    # Add a --shallow flag so `hg clone` can opt into a shallow clone.
    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
    entry[1].append(('', 'shallow', None,
                     _("create a shallow clone which uses remote file "
                       "history")))

    extensions.wrapcommand(commands.table, 'debugindex',
                           debugcommands.debugindex)
    extensions.wrapcommand(commands.table, 'debugindexdot',
                           debugcommands.debugindexdot)
    extensions.wrapcommand(commands.table, 'log', log)
    extensions.wrapcommand(commands.table, 'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        # --all would need every filelog, which a shallow repo lacks locally.
        if shallowrepo.requirement in repo.requirements and opts.get('all'):
            raise error.Abort(_("--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, "manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find('lfs')
        except KeyError:
            # lfs extension is not enabled; nothing to wrap
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
    extensions.afterloaded('lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
|
|
|
|
|
2013-05-21 02:03:07 +04:00
|
|
|
def cloneshallow(orig, ui, repo, *args, **opts):
    """Wrapper for the clone command that, when --shallow is given, sets up
    the shallow client machinery on the new repository before the initial
    pull and makes the stream-clone path propagate the shallow requirement
    and include/exclude file patterns.
    """
    if opts.get('shallow'):
        # Repos that were converted to shallow during this clone; their
        # fileservice connections are closed in the finally block below.
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if shallowrepo.requirement not in self.requirements:
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(shallowrepo.requirement)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                if shallowrepo.requirement in remote._capabilities():
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    # Server does not speak shallow; fall back to normal
                    # stream_out.
                    return orig()
            wrapfunction(remote, 'stream_out', stream_out_shallow)
        if hasstreamclone:
            # Modern Mercurial: hook the streamclone module.
            def stream_wrap(orig, op):
                setup_streamout(op.repo, op.remote)
                return orig(op)
            wrapfunction(streamclone, 'maybeperformlegacystreamclone',
                         stream_wrap)

            def canperformstreamclone(orig, *args, **kwargs):
                supported, requirements = orig(*args, **kwargs)
                if requirements is not None:
                    requirements.add(shallowrepo.requirement)
                return supported, requirements
            wrapfunction(streamclone, 'canperformstreamclone',
                         canperformstreamclone)
        else:
            # Older Mercurial without a streamclone module: hook the
            # localrepository method instead.
            def stream_in_shallow(orig, repo, remote, requirements):
                setup_streamout(repo, remote)
                requirements.add(shallowrepo.requirement)
                return orig(repo, remote, requirements)
            wrapfunction(localrepo.localrepository, 'stream_in',
                         stream_in_shallow)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()
|
2013-05-21 02:03:07 +04:00
|
|
|
|
2017-04-27 05:55:02 +03:00
|
|
|
def debugdatashallow(orig, *args, **kwds):
    """Run the wrapped debugdata with remotefilelog's length stubbed to 1.

    debugdata needs __len__ to return a revision count; a remotefilelog
    has no local index, so pretend there is exactly one revision for the
    duration of the call, then restore the real implementation.
    """
    saved = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda self: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = saved
|
|
|
|
|
2013-05-21 02:03:07 +04:00
|
|
|
def reposetup(ui, repo):
    """Per-repository setup: register prefetch hooks and enable the
    shallow-client and/or server machinery depending on configuration."""
    if not isinstance(repo, localrepo.localrepository):
        return

    # Registered here rather than in uisetup because hook registration
    # does not take effect there.
    for hookname in ('update.prefetch', 'commit.prefetch'):
        ui.setconfig('hooks', hookname, wcpprefetch)

    serverenabled = ui.configbool('remotefilelog', 'server')
    shallowclient = shallowrepo.requirement in repo.requirements

    if serverenabled and shallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if shallowclient:
        setupclient(ui, repo)

    if serverenabled:
        remotefilelogserver.setupserver(ui, repo)
|
2013-06-28 02:14:22 +04:00
|
|
|
|
2013-05-21 02:03:07 +04:00
|
|
|
def setupclient(ui, repo):
    """Turn *repo* into a shallow (remotefilelog) repository.

    Performs the process-wide one-time setup if it has not run yet, then
    wraps this particular repo and its store.
    """
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)
|
|
|
|
|
2013-06-12 00:34:39 +04:00
|
|
|
# Guard so the process-wide client monkeypatching below runs only once,
# even when reposetup is invoked for many repositories in one process.
clientonetime = False
def onetimeclientsetup(ui):
    """Process-wide client setup.

    Monkeypatches Mercurial internals so that shallow repositories
    prefetch file contents from the server before operations that read
    them (update, merge, status, copy detection, diff, ...) and so that
    file reads/writes are routed through remotefilelog.  Runs at most
    once per process (guarded by ``clientonetime``).
    """
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # some users in core still call changegroup.cg1packer directly
    changegroup.cg1packer = shallowbundle.shallowcg1packer

    packermap = None
    if util.safehasattr(changegroup, 'packermap'):
        packermap = changegroup.packermap
    elif util.safehasattr(changegroup, '_packermap'):
        packermap = changegroup._packermap

    if packermap:
        # Mercurial >= 3.3
        packermap01 = packermap['01']
        packermap02 = packermap['02']
        # Swap in the shallow packers, keeping the original unpackers.
        packermap['01'] = (shallowbundle.shallowcg1packer,
                           packermap01[1])
        packermap['02'] = (shallowbundle.shallowcg2packer,
                           packermap02[1])
    if util.safehasattr(changegroup, '_addchangegroupfiles'):
        fn = '_addchangegroupfiles' # hg >= 3.6
    else:
        fn = 'addchangegroupfiles' # hg <= 3.5
    wrapfunction(changegroup, fn, shallowbundle.addchangegroupfiles)
    wrapfunction(changegroup, 'getchangegroup', shallowbundle.getchangegroup)

    # Wrap store creation so repos carrying the shallow requirement get a
    # shallow-aware store.
    def storewrapper(orig, requirements, path, vfstype):
        s = orig(requirements, path, vfstype)
        if shallowrepo.requirement in requirements:
            s = shallowstore.wrapstore(s)

        return s
    wrapfunction(store, 'store', storewrapper)

    extensions.wrapfunction(exchange, 'pull', exchangepull)

    # prefetch files before update
    def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
        if shallowrepo.requirement in repo.requirements:
            manifest = mctx.manifest()
            files = []
            # 'g' actions are the files that will be gotten from mctx.
            for f, args, msg in actions['g']:
                files.append((f, hex(manifest[f])))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
    wrapfunction(merge, 'applyupdates', applyupdates)

    # Prefetch merge checkunknownfiles
    def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
                          *args, **kwargs):
        if shallowrepo.requirement in repo.requirements:
            files = []
            sparsematch = repo.maybesparsematch(mctx.rev())
            for f, (m, actionargs, msg) in actions.iteritems():
                if sparsematch and not sparsematch(f):
                    continue
                if m in ('c', 'dc', 'cm'):
                    files.append((f, hex(mctx.filenode(f))))
                elif m == 'dg':
                    f2 = actionargs[0]
                    files.append((f2, hex(mctx.filenode(f2))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
    wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)

    # Prefetch files before status attempts to look at their size and contents
    def checklookup(orig, self, files):
        repo = self._repo
        if shallowrepo.requirement in repo.requirements:
            prefetchfiles = []
            for parent in self._parents:
                for f in files:
                    if f in parent:
                        prefetchfiles.append((f, hex(parent.filenode(f))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(prefetchfiles)
        return orig(self, files)
    wrapfunction(context.workingctx, '_checklookup', checklookup)

    # Prefetch the logic that compares added and removed files for renames
    def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
        if shallowrepo.requirement in repo.requirements:
            files = []
            parentctx = repo['.']
            for f in removed:
                files.append((f, hex(parentctx.filenode(f))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, matcher, added, removed, *args, **kwargs)
    wrapfunction(scmutil, '_findrenames', findrenames)

    # prefetch files before mergecopies check
    def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
        u1, u2 = orig(repo, c1, c2, *args, **kwargs)
        if shallowrepo.requirement in repo.requirements:
            m1 = c1.manifest()
            m2 = c2.manifest()
            files = []

            # Restrict each side to the sparse profile (if any) while
            # collecting the filenodes to prefetch.
            sparsematch1 = repo.maybesparsematch(c1.rev())
            if sparsematch1:
                sparseu1 = []
                for f in u1:
                    if sparsematch1(f):
                        files.append((f, hex(m1[f])))
                        sparseu1.append(f)
                u1 = sparseu1

            sparsematch2 = repo.maybesparsematch(c2.rev())
            if sparsematch2:
                sparseu2 = []
                for f in u2:
                    if sparsematch2(f):
                        files.append((f, hex(m2[f])))
                        sparseu2.append(f)
                u2 = sparseu2

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return u1, u2
    wrapfunction(copies, '_computenonoverlap', computenonoverlap)

    # prefetch files before pathcopies check
    def computeforwardmissing(orig, a, b, match=None):
        missing = list(orig(a, b, match=match))
        repo = a._repo
        if shallowrepo.requirement in repo.requirements:
            mb = b.manifest()

            files = []
            sparsematch = repo.maybesparsematch(b.rev())
            if sparsematch:
                sparsemissing = []
                for f in missing:
                    if sparsematch(f):
                        files.append((f, hex(mb[f])))
                        sparsemissing.append(f)
                missing = sparsemissing

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return missing
    wrapfunction(copies, '_computeforwardmissing', computeforwardmissing)

    # close cache miss server connection after the command has finished
    def runcommand(orig, lui, repo, *args, **kwargs):
        try:
            return orig(lui, repo, *args, **kwargs)
        finally:
            # repo can be None when running in chg:
            # - at startup, reposetup was called because serve is not norepo
            # - a norepo command like "help" is called
            if repo and shallowrepo.requirement in repo.requirements:
                repo.fileservice.close()
    wrapfunction(dispatch, 'runcommand', runcommand)

    # disappointing hacks below
    templatekw.getrenamedfn = getrenamedfn
    wrapfunction(revset, 'filelog', filelogrevset)
    revset.symbols['filelog'] = revset.filelog
    wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)

    # prevent strip from stripping remotefilelogs
    def _collectbrokencsets(orig, repo, files, striprev):
        if shallowrepo.requirement in repo.requirements:
            files = list([f for f in files if not repo.shallowmatch(f)])
        return orig(repo, files, striprev)
    wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []
    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        if isinstance(link, int):
            # An integer link means the changelog revision is not written
            # yet; buffer the file revision until changelogadd runs.
            pendingfilecommits.append(
                (self, rawtext, transaction, link, p1, p2, node, flags,
                 cachedelta, _metatuple))
            return node
        else:
            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
                        cachedelta, _metatuple=_metatuple)
    wrapfunction(remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)

    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            # The changelog grew: flush the buffered file revisions with
            # the now-known link node.
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
    wrapfunction(changelog.changelog, 'add', changelogadd)

    # changectx wrappers
    def filectx(orig, self, path, fileid=None, filelog=None):
        if fileid is None:
            fileid = self.filenode(path)
        if (shallowrepo.requirement in self._repo.requirements and
            self._repo.shallowmatch(path)):
            return remotefilectx.remotefilectx(self._repo, path,
                fileid=fileid, changectx=self, filelog=filelog)
        return orig(self, path, fileid=fileid, filelog=filelog)
    wrapfunction(context.changectx, 'filectx', filectx)

    def workingfilectx(orig, self, path, filelog=None):
        if (shallowrepo.requirement in self._repo.requirements and
            self._repo.shallowmatch(path)):
            return remotefilectx.remoteworkingfilectx(self._repo,
                path, workingctx=self, filelog=filelog)
        return orig(self, path, filelog=filelog)
    wrapfunction(context.workingctx, 'filectx', workingfilectx)

    # prefetch required revisions before a diff
    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
                copy, getfilectx, *args, **kwargs):
        if shallowrepo.requirement in repo.requirements:
            prefetch = []
            mf1 = ctx1.manifest()
            for fname in modified + added + removed:
                if fname in mf1:
                    fnode = getfilectx(fname, ctx1).filenode()
                    # fnode can be None if it's an edited working ctx file
                    if fnode:
                        prefetch.append((fname, hex(fnode)))
                if fname not in removed:
                    fnode = getfilectx(fname, ctx2).filenode()
                    if fnode:
                        prefetch.append((fname, hex(fnode)))

            repo.fileservice.prefetch(prefetch)

        return orig(repo, revs, ctx1, ctx2, modified, added, removed,
                    copy, getfilectx, *args, **kwargs)
    wrapfunction(patch, 'trydiff', trydiff)

    # Prevent verify from processing files
    # a stub for mercurial.hg.verify()
    def _verify(orig, repo):
        lock = repo.lock()
        try:
            return shallowverifier.shallowverifier(repo).verify()
        finally:
            lock.release()

    wrapfunction(hg, 'verify', _verify)

    # Hook revert prefetching at whichever entry point this hg version has.
    if util.safehasattr(cmdutil, '_revertprefetch'):
        wrapfunction(cmdutil, '_revertprefetch', _revertprefetch)
    else:
        wrapfunction(cmdutil, 'revert', revert)
|
2013-09-17 22:24:31 +04:00
|
|
|
|
2013-05-21 02:03:07 +04:00
|
|
|
def getrenamedfn(repo, endrev=None):
    """Return a rename-lookup callable for template keywords.

    The returned function maps (fn, rev) to the rename information for
    *fn* at changeset *rev*, or None when the file cannot be looked up
    there.  Results discovered while walking a file's ancestry are cached.
    """
    # fn -> {rev: rename info} cache, filled lazily per file.
    renamecache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        filecache = renamecache.setdefault(fn, {})
        if rev in filecache:
            return filecache[rev]

        try:
            fctx = repo[rev].filectx(fn)
            # Record rename info for every ancestor revision where the
            # file kept the same path, so later lookups hit the cache.
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    filecache[ancestor.rev()] = ancestor.renamed()

            return fctx.renamed()
        except error.LookupError:
            return None

    return getrenamed
|
|
|
|
|
|
|
|
def walkfilerevs(orig, repo, match, follow, revs, fncache):
    """Shallow-repo replacement for cmdutil.walkfilerevs.

    Walks the matched files' history through their filectx ancestry
    (instead of filelog revision order, which a shallow repo cannot do)
    and returns the set of linkrevs within [min(revs), max(revs)] that
    touch the matched files.  As a side effect, populates *fncache*
    (linkrev -> list of file paths).

    Raises cmdutil.FileWalkError when not following renames, signalling
    the caller to walk the changelog instead.
    """
    # Fixed `not x in y` -> `x not in y` (PEP 8 idiom).
    if shallowrepo.requirement not in repo.requirements:
        return orig(repo, match, follow, revs, fncache)

    # remotefilelog's can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    def record(linkrev, path):
        # Remember that `path` changed at `linkrev` when it falls in range.
        if minrev <= linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(path)
            wanted.add(linkrev)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        record(fctx.linkrev(), filename)
        for ancestor in fctx.ancestors():
            record(ancestor.linkrev(), ancestor.path())

    return wanted
|
|
|
|
|
|
|
|
def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    # Fixed `not x in y` -> `x not in y` (PEP 8 idiom).
    if shallowrepo.requirement not in repo.requirements:
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow path: a literal filename -- scan every revision in subset
        # for a change to one of the matched files.
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial: a real pattern -- walk the ancestry of each matched
        # working-copy file and collect the linkrevs.
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])
|
2013-05-21 02:03:07 +04:00
|
|
|
|
2016-03-03 21:40:31 +03:00
|
|
|
@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    # All cache directories to garbage collect, deduplicated.
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    # Include the repo under the current working directory, if any.
    pwd = ui.environ.get('PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            # Not a repository (or unreachable); skip it silently.
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)
|
2013-06-29 02:57:15 +04:00
|
|
|
|
2013-10-03 03:21:48 +04:00
|
|
|
def gcclient(ui, cachepath):
    """Garbage collect the client cache at *cachepath*.

    Reads the ``repos`` file inside the cache to find repos using it,
    computes the set of cache keys still worth keeping across those
    repos, rewrites the ``repos`` file with only the still-valid repo
    paths, and prunes everything else from the shared store.
    """
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn(_("no known cache at %s\n") % cachepath)
        return

    # Use a context manager so the handle is closed even on error; strip
    # only the newline (the old r[:-1] chopped a real character off a
    # final line with no trailing newline).
    with open(repospath, 'r') as reposfile:
        repos = set(line.rstrip('\n') for line in reposfile)

    # build list of useful files
    validrepos = []
    keepkeys = set()

    _analyzing = _("analyzing repositories")

    sharedcache = None

    count = 0
    for path in repos:
        ui.progress(_analyzing, count, unit="repos", total=len(repos))
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if shallowrepo.requirement not in peer._repo.requirements:
            continue

        if not util.safehasattr(peer._repo, 'name'):
            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        reponame = peer._repo.name
        # "is None" rather than truthiness: a falsy-but-valid store object
        # must not be silently replaced by a later repo's store.
        if sharedcache is None:
            sharedcache = peer._repo.sharedstore

        # We want to keep:
        # 1. All parents of draft commits
        # 2. Recent heads in the repo
        # 3. The tip commit (since it may be an old head, but we still want it)
        keepset = ("(parents(draft()) + (heads(all()) & date(-7)) + tip) & "
                   "public()")
        keep = peer._repo.revs(keepset)
        for r in keep:
            m = peer._repo[r].manifest()
            for filename, filenode in m.iteritems():
                key = fileserverclient.getcachekey(reponame, filename,
                                                   hex(filenode))
                keepkeys.add(key)

    ui.progress(_analyzing, None)

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        with open(repospath, 'w') as reposfile:
            reposfile.writelines([("%s\n" % r) for r in validrepos])
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    else:
        ui.warn(_("warning: no valid repos in repofile\n"))
|
|
|
|
|
2013-09-11 21:27:56 +04:00
|
|
|
|
|
|
|
def log(orig, ui, repo, *pats, **opts):
    """Wrapper for ``hg log`` on shallow repos.

    Forces the slow path where the shallow store would otherwise give
    wrong results, and warns when a plain file log would be slow.
    Delegates to *orig* unchanged on non-shallow repos.
    """
    if shallowrepo.requirement not in repo.requirements:
        return orig(ui, repo, *pats, **opts)

    follow = opts.get('follow')
    revs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            # 'matcher' rather than 'match': the latter shadows the
            # file-level mercurial.match import.
            matcher, pats = scmutil.matchandpats(repo['.'], pats, opts)
            isfile = not matcher.anypats()
            if isfile:
                # 'fname' rather than 'file' to avoid shadowing the builtin.
                for fname in matcher.files():
                    if not os.path.isfile(repo.wjoin(fname)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - " +
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)
|
2013-09-18 07:15:08 +04:00
|
|
|
|
2017-07-20 14:54:44 +03:00
|
|
|
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches in background revisions specified by bgprefetchrevs revset.
    Does background repack if backgroundrepack flag is set in config.
    """
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
    isshallow = shallowrepo.requirement in repo.requirements
    if not (isshallow and bgprefetchrevs):
        return

    dorepack = repo.ui.configbool('remotefilelog', 'backgroundrepack', False)

    def runprefetch():
        # Only kick off one background prefetch per repo object.
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=dorepack)

    # Defer until the lock is released so we don't block the command.
    repo._afterlock(runprefetch)
|
|
|
|
|
2014-08-07 05:50:57 +04:00
|
|
|
def pull(orig, ui, repo, *pats, **opts):
    """Wrapper for ``hg pull``: after the real pull, optionally prefetch
    file contents and/or kick off a background repack, per the
    ``remotefilelog.pullprefetch`` / ``backgroundprefetch`` /
    ``backgroundrepack`` config knobs.
    """
    result = orig(ui, repo, *pats, **opts)

    if shallowrepo.requirement not in repo.requirements:
        return result

    # prefetch if it's configured
    prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
    bgrepack = repo.ui.configbool('remotefilelog',
                                  'backgroundrepack', False)
    bgprefetch = repo.ui.configbool('remotefilelog',
                                    'backgroundprefetch', False)

    if prefetchrevset:
        ui.status(_("prefetching file contents\n"))
        revs = repo.revs(prefetchrevset)
        base = repo['.'].rev()
        if bgprefetch:
            # The background prefetch process folds the repack in itself.
            repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
        else:
            repo.prefetch(revs, base=base)
            if bgrepack:
                repackmod.backgroundrepack(repo, incremental=True)
    elif bgrepack:
        repackmod.backgroundrepack(repo, incremental=True)

    return result
|
|
|
|
|
2014-10-15 02:50:04 +04:00
|
|
|
def exchangepull(orig, repo, remote, *args, **kwargs):
    """Wrapper for exchange.pull that advertises the 'remotefilelog'
    bundle capability to the remote peer before delegating to *orig*.
    """
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        # Add our capability to whatever caps the caller supplied.
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add('remotefilelog')
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        # Wire peer: record the local repo on the peer object so the
        # _callstream wrapper (installed elsewhere) can consult the local
        # shallow/include/exclude configuration.
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        # Local peer: inject the capability directly into getbundle.
        wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)
|
|
|
|
|
2013-11-26 04:36:44 +04:00
|
|
|
def revert(orig, ui, repo, ctx, parents, *pats, **opts):
    # Prefetch, in one batch, every file the revert will touch so the
    # revert itself doesn't stall on one-at-a-time network fetches.
    # (This hook form is used by older Mercurial versions.)
    if shallowrepo.requirement in repo.requirements:
        matcher = scmutil.match(ctx, pats, opts)
        matcher.bad = lambda x, y: False
        manifest = ctx.manifest()
        tofetch = [(path, hex(manifest[path])) for path in ctx.walk(matcher)]
        repo.fileservice.prefetch(tofetch)

    return orig(ui, repo, ctx, parents, *pats, **opts)
|
|
|
|
|
2014-09-08 17:20:59 +04:00
|
|
|
def _revertprefetch(orig, repo, ctx, *files):
    # Prefetch the data the revert will need, in one batch. (This hook
    # form is used by newer Mercurial versions, which pass file groups.)
    if shallowrepo.requirement in repo.requirements:
        manifest = ctx.manifest()
        sparsematch = repo.maybesparsematch(ctx.rev())
        wanted = [(path, hex(manifest[path]))
                  for group in files
                  for path in group
                  # skip files outside the sparse profile or the manifest
                  if (not sparsematch or sparsematch(path))
                  and path in manifest]
        repo.fileservice.prefetch(wanted)
    return orig(repo, ctx, *files)
|
|
|
|
|
2014-07-24 07:37:48 +04:00
|
|
|
@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    """Dump the contents of the remotefilelog file at *path*.

    Thin CLI wrapper; the real work lives in :mod:`debugcommands`.
    """
    return debugcommands.debugremotefilelog(ui, path, **opts)
|
2013-09-18 07:15:08 +04:00
|
|
|
|
2014-07-24 07:37:48 +04:00
|
|
|
@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelogs <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    """Verify the remotefilelog files under *path*.

    Thin CLI wrapper; the real work lives in :mod:`debugcommands`.
    """
    return debugcommands.verifyremotefilelog(ui, path, **opts)
|
2014-01-16 01:41:29 +04:00
|
|
|
|
2016-05-16 20:59:09 +03:00
|
|
|
@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <path>'), norepo=True)
def debugdatapack(ui, path, **opts):
    """Dump the structure (and optionally one node) of a datapack file.

    Thin CLI wrapper; the real work lives in :mod:`debugcommands`.
    """
    return debugcommands.debugdatapack(ui, path, **opts)
|
2016-05-16 20:59:09 +03:00
|
|
|
|
|
|
|
@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    """Dump the structure of a historypack file.

    Thin CLI wrapper; **opts is accepted for the command interface but
    intentionally not forwarded (the underlying helper takes none).
    """
    return debugcommands.debughistorypack(ui, path)
|
2016-05-16 20:59:09 +03:00
|
|
|
|
2016-05-20 19:31:28 +03:00
|
|
|
@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    """Block until any in-progress background repack finishes (test aid)."""
    return debugcommands.debugwaitonrepack(repo)
|
|
|
|
|
2017-07-17 13:04:22 +03:00
|
|
|
@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    """Block until any in-progress background prefetch finishes (test aid)."""
    return debugcommands.debugwaitonprefetch(repo)
|
|
|
|
|
2015-04-16 10:38:43 +03:00
|
|
|
@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetchs file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used which is the union of dot, draft, pullprefetch and bgprefetchrev.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    if shallowrepo.requirement not in repo.requirements:
        raise error.Abort(_("repo is not shallow"))

    if not opts.get('rev'):
        # Build the default revset: working parent and drafts, plus any
        # configured pullprefetch/bgprefetchrevs revsets.
        # ('revsetspec' rather than 'revset': the latter shadows the
        # file-level mercurial.revset import.)
        revsetspec = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revsetspec.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revsetspec.append('(%s)' % bgprefetchrevs)

        opts['rev'] = ['+'.join(revsetspec)]

    revs = scmutil.revrange(repo, opts.get('rev'))

    repo.prefetch(revs, repack=opts.get('repack'), pats=pats, opts=opts)
|
2016-04-28 02:49:09 +03:00
|
|
|
|
2016-05-05 00:53:23 +03:00
|
|
|
@command('repack', [
    ('', 'background', None, _('run in a background process'), None),
    ('', 'incremental', None, _('do an incremental repack'), None),
    ], _('hg repack [OPTIONS]'))
def repack(ui, repo, *pats, **opts):
    """Repack the local remotefilelog store, inline or in the background."""
    incremental = opts.get('incremental')

    if opts.get('background'):
        repackmod.backgroundrepack(repo, incremental=incremental)
        return

    # Inline repack: pick the incremental or full variant.
    runner = repackmod.incrementalrepack if incremental else repackmod.fullrepack
    runner(repo)
|