2016-08-30 02:19:52 +03:00
|
|
|
# __init__.py
|
|
|
|
#
|
|
|
|
# Copyright 2016 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2017-03-10 01:45:23 +03:00
|
|
|
"""
|
|
|
|
The treemanifest extension is intended to aid in the transition from flat manifests to
|
|
|
|
treemanifests. It has a client portion that's used to construct trees during
|
|
|
|
client pulls and commits, and a server portion which is used to generate
|
|
|
|
tree manifests side-by-side with normal flat manifests.
|
|
|
|
|
|
|
|
Configs:
|
|
|
|
|
|
|
|
``treemanifest.server`` is used to indicate that this repo can serve
|
|
|
|
treemanifests
|
|
|
|
"""
|
2016-08-30 02:19:52 +03:00
|
|
|
|
2016-11-17 00:51:48 +03:00
|
|
|
"""allows using and migrating to tree manifests
|
|
|
|
|
|
|
|
When autocreatetrees is enabled, you can limit which bookmarks are initially
|
|
|
|
converted to trees during pull by specifying `treemanifest.allowedtreeroots`.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
allowedtreeroots = master,stable
|
|
|
|
|
2017-03-02 03:55:19 +03:00
|
|
|
Enabling `treemanifest.usecunionstore` will cause the extension to use the
|
|
|
|
native implementation of the datapack stores.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
usecunionstore = True
|
|
|
|
|
2017-04-20 07:14:03 +03:00
|
|
|
Disabling `treemanifest.demanddownload` will prevent the extension from
|
|
|
|
automatically downloading trees from the server when they don't exist locally.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
demanddownload = True
|
|
|
|
|
2017-05-01 08:05:09 +03:00
|
|
|
Setting `treemanifest.pullprefetchcount` to an integer N will cause the latest N
|
2017-04-27 20:44:33 +03:00
|
|
|
commits' manifests to be downloaded (if they aren't already).
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
pullprefetchcount = 0
|
2017-05-01 08:05:09 +03:00
|
|
|
|
|
|
|
Setting `treemanifest.sendtrees` to True will include tree packs in sent
|
|
|
|
bundles.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
sendtrees = False
|
2016-11-17 00:51:48 +03:00
|
|
|
"""
|
|
|
|
|
2016-08-30 02:19:52 +03:00
|
|
|
from mercurial import (
|
2017-04-27 20:44:33 +03:00
|
|
|
bundle2,
|
2016-09-20 02:30:17 +03:00
|
|
|
changegroup,
|
2016-08-30 02:19:52 +03:00
|
|
|
cmdutil,
|
2017-04-20 07:14:03 +03:00
|
|
|
commands,
|
2016-12-03 01:37:51 +03:00
|
|
|
error,
|
2017-04-27 20:44:34 +03:00
|
|
|
exchange,
|
2016-09-20 02:30:17 +03:00
|
|
|
extensions,
|
2017-04-20 07:14:03 +03:00
|
|
|
hg,
|
2016-08-30 02:19:52 +03:00
|
|
|
localrepo,
|
2017-03-10 01:45:23 +03:00
|
|
|
manifest,
|
2017-03-07 22:15:25 +03:00
|
|
|
mdiff,
|
2017-04-20 07:14:03 +03:00
|
|
|
phases,
|
2017-03-10 01:45:23 +03:00
|
|
|
revlog,
|
2017-04-20 07:14:03 +03:00
|
|
|
sshserver,
|
2017-03-10 01:45:23 +03:00
|
|
|
store,
|
2016-09-20 02:30:17 +03:00
|
|
|
util,
|
2017-04-19 00:42:33 +03:00
|
|
|
vfs as vfsmod,
|
2017-04-20 07:14:03 +03:00
|
|
|
wireproto,
|
2016-08-30 02:19:52 +03:00
|
|
|
)
|
2016-09-20 02:30:17 +03:00
|
|
|
from mercurial.i18n import _
|
2017-04-20 07:14:03 +03:00
|
|
|
from mercurial.node import bin, hex, nullid
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2017-04-20 07:14:04 +03:00
|
|
|
from remotefilelog.contentstore import (
|
|
|
|
manifestrevlogstore,
|
|
|
|
unioncontentstore,
|
|
|
|
)
|
|
|
|
from remotefilelog.metadatastore import (
|
|
|
|
unionmetadatastore,
|
|
|
|
)
|
2016-09-20 02:30:17 +03:00
|
|
|
from remotefilelog.datapack import datapackstore, mutabledatapack
|
2017-03-07 22:15:26 +03:00
|
|
|
from remotefilelog.historypack import historypackstore, mutablehistorypack
|
2017-04-27 20:44:33 +03:00
|
|
|
from remotefilelog import shallowrepo, shallowutil, wirepack
|
2017-04-20 07:14:04 +03:00
|
|
|
from remotefilelog.repack import _runrepack
|
2017-02-24 01:03:03 +03:00
|
|
|
import cstore
|
2016-08-30 02:19:52 +03:00
|
|
|
|
2017-03-19 05:38:45 +03:00
|
|
|
import os
|
2016-09-20 02:30:17 +03:00
|
|
|
import struct
|
|
|
|
|
2016-08-30 02:19:52 +03:00
|
|
|
cmdtable = {}
|
|
|
|
command = cmdutil.command(cmdtable)
|
|
|
|
|
2016-10-21 21:02:22 +03:00
|
|
|
PACK_CATEGORY='manifests'
|
2016-08-30 02:19:52 +03:00
|
|
|
|
2017-04-27 20:44:33 +03:00
|
|
|
TREEGROUP_PARTTYPE = 'b2x:treegroup'
|
|
|
|
RECEIVEDNODE_RECORD = 'receivednodes'
|
|
|
|
|
2016-09-20 02:30:17 +03:00
|
|
|
def extsetup(ui):
|
|
|
|
extensions.wrapfunction(changegroup.cg1unpacker, '_unpackmanifests',
|
|
|
|
_unpackmanifests)
|
2017-03-10 01:45:23 +03:00
|
|
|
extensions.wrapfunction(revlog.revlog, 'checkhash', _checkhash)
|
|
|
|
|
|
|
|
wrappropertycache(localrepo.localrepository, 'manifestlog', getmanifestlog)
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
extensions.wrapfunction(manifest.memmanifestctx, 'write', _writemanifest)
|
|
|
|
|
2017-04-27 20:44:33 +03:00
|
|
|
extensions.wrapcommand(commands.table, 'pull', pull)
|
|
|
|
|
2017-04-20 07:14:03 +03:00
|
|
|
wireproto.commands['gettreepack'] = (servergettreepack, '*')
|
2017-04-27 20:44:33 +03:00
|
|
|
wireproto.wirepeer.gettreepack = clientgettreepack
|
2017-04-20 07:14:03 +03:00
|
|
|
|
2016-08-30 02:19:52 +03:00
|
|
|
def reposetup(ui, repo):
|
|
|
|
wraprepo(repo)
|
|
|
|
|
|
|
|
def wraprepo(repo):
|
|
|
|
if not isinstance(repo, localrepo.localrepository):
|
|
|
|
return
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
repo.svfs.treemanifestserver = repo.ui.configbool('treemanifest', 'server')
|
2017-03-10 01:45:23 +03:00
|
|
|
if repo.svfs.treemanifestserver:
|
|
|
|
serverreposetup(repo)
|
|
|
|
else:
|
2017-03-10 01:45:23 +03:00
|
|
|
clientreposetup(repo)
|
|
|
|
|
|
|
|
def clientreposetup(repo):
|
2016-12-03 01:37:51 +03:00
|
|
|
repo.name = repo.ui.config('remotefilelog', 'reponame')
|
|
|
|
if not repo.name:
|
|
|
|
raise error.Abort(_("remotefilelog.reponame must be configured"))
|
|
|
|
|
2017-01-14 01:58:20 +03:00
|
|
|
try:
|
|
|
|
extensions.find('fastmanifest')
|
|
|
|
except KeyError:
|
|
|
|
raise error.Abort(_("cannot use treemanifest without fastmanifest"))
|
|
|
|
|
2016-10-21 21:02:22 +03:00
|
|
|
usecdatapack = repo.ui.configbool('remotefilelog', 'fastdatapack')
|
|
|
|
|
2016-10-21 21:02:20 +03:00
|
|
|
packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
|
2016-10-21 21:02:22 +03:00
|
|
|
|
|
|
|
localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
|
|
|
|
PACK_CATEGORY)
|
2017-04-27 20:44:34 +03:00
|
|
|
|
|
|
|
# Data store
|
2017-03-02 03:55:19 +03:00
|
|
|
if repo.ui.configbool("treemanifest", "usecunionstore"):
|
|
|
|
datastore = cstore.datapackstore(packpath)
|
|
|
|
localdatastore = cstore.datapackstore(localpackpath)
|
2017-04-20 07:14:03 +03:00
|
|
|
# TODO: can't use remotedatastore with cunionstore yet
|
2017-03-02 03:55:19 +03:00
|
|
|
repo.svfs.manifestdatastore = cstore.uniondatapackstore(
|
|
|
|
[localdatastore, datastore])
|
|
|
|
else:
|
|
|
|
datastore = datapackstore(repo.ui, packpath, usecdatapack=usecdatapack)
|
|
|
|
localdatastore = datapackstore(repo.ui, localpackpath,
|
|
|
|
usecdatapack=usecdatapack)
|
2017-04-20 07:14:03 +03:00
|
|
|
stores = [datastore, localdatastore]
|
|
|
|
remotedatastore = remotetreedatastore(repo)
|
|
|
|
if repo.ui.configbool("treemanifest", "demanddownload", True):
|
|
|
|
stores.append(remotedatastore)
|
2017-03-02 03:55:19 +03:00
|
|
|
|
2017-04-20 07:14:03 +03:00
|
|
|
repo.svfs.manifestdatastore = unioncontentstore(*stores,
|
|
|
|
writestore=localdatastore)
|
|
|
|
remotedatastore.setshared(repo.svfs.manifestdatastore)
|
2016-10-21 21:02:22 +03:00
|
|
|
|
2016-12-03 01:37:45 +03:00
|
|
|
repo.svfs.sharedmanifestdatastores = [datastore]
|
|
|
|
repo.svfs.localmanifestdatastores = [localdatastore]
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2017-04-27 20:44:34 +03:00
|
|
|
# History store
|
|
|
|
sharedhistorystore = historypackstore(repo.ui, packpath)
|
|
|
|
localhistorystore = historypackstore(repo.ui, localpackpath)
|
2017-03-07 22:15:26 +03:00
|
|
|
repo.svfs.sharedmanifesthistorystores = [
|
2017-04-27 20:44:34 +03:00
|
|
|
sharedhistorystore
|
2017-03-07 22:15:26 +03:00
|
|
|
]
|
|
|
|
repo.svfs.localmanifesthistorystores = [
|
2017-04-27 20:44:34 +03:00
|
|
|
localhistorystore,
|
2017-03-07 22:15:26 +03:00
|
|
|
]
|
2017-04-27 20:44:34 +03:00
|
|
|
repo.svfs.manifesthistorystore = unionmetadatastore(
|
|
|
|
sharedhistorystore,
|
|
|
|
localhistorystore,
|
|
|
|
writestore=localhistorystore,
|
|
|
|
)
|
2017-03-07 22:15:26 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
class treemanifestlog(manifest.manifestlog):
|
|
|
|
def __init__(self, opener):
|
|
|
|
usetreemanifest = False
|
|
|
|
cachesize = 4
|
|
|
|
|
|
|
|
opts = getattr(opener, 'options', None)
|
|
|
|
if opts is not None:
|
|
|
|
usetreemanifest = opts.get('treemanifest', usetreemanifest)
|
|
|
|
cachesize = opts.get('manifestcachesize', cachesize)
|
|
|
|
self._treeinmem = usetreemanifest
|
|
|
|
|
|
|
|
self._revlog = manifest.manifestrevlog(opener,
|
|
|
|
indexfile='00manifesttree.i')
|
|
|
|
|
|
|
|
# A cache of the manifestctx or treemanifestctx for each directory
|
|
|
|
self._dirmancache = {}
|
|
|
|
self._dirmancache[''] = util.lrucachedict(cachesize)
|
|
|
|
|
|
|
|
self.cachesize = cachesize
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def serverreposetup(repo):
|
|
|
|
extensions.wrapfunction(manifest.manifestrevlog, 'addgroup',
|
|
|
|
_addmanifestgroup)
|
|
|
|
|
2017-04-20 07:14:03 +03:00
|
|
|
def _capabilities(orig, repo, proto):
|
|
|
|
caps = orig(repo, proto)
|
|
|
|
caps.append('gettreepack')
|
|
|
|
return caps
|
|
|
|
extensions.wrapfunction(wireproto, '_capabilities', _capabilities)
|
|
|
|
|
2017-04-27 20:44:34 +03:00
|
|
|
packpath = repo.vfs.join('cache/packs/%s' % PACK_CATEGORY)
|
|
|
|
|
|
|
|
# Data store
|
|
|
|
datastore = cstore.datapackstore(packpath)
|
|
|
|
revlogstore = manifestrevlogstore(repo)
|
|
|
|
repo.svfs.manifestdatastore = unioncontentstore(datastore, revlogstore)
|
|
|
|
|
|
|
|
# History store
|
|
|
|
historystore = historypackstore(repo.ui, packpath)
|
|
|
|
repo.svfs.manifesthistorystore = unionmetadatastore(
|
|
|
|
historystore,
|
|
|
|
revlogstore,
|
|
|
|
)
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _addmanifestgroup(*args, **kwargs):
|
|
|
|
raise error.Abort(_("cannot push commits to a treemanifest transition "
|
|
|
|
"server without pushrebase"))
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def getmanifestlog(orig, self):
|
|
|
|
mfl = orig(self)
|
|
|
|
|
2017-04-20 07:14:04 +03:00
|
|
|
# The treemanifest needs its own opener with the treemanifest option set. We
|
|
|
|
# can't just set it globally because the normal repo needs to access the
|
|
|
|
# manifest without the treemanifest option set. Unfortunately, openers don't
|
|
|
|
# have nice easy copy functions, so we have to redo the appropriate creation
|
|
|
|
# based on the type of store.
|
|
|
|
repostore = self.store
|
|
|
|
if isinstance(repostore, store.fncachestore):
|
|
|
|
opener = store._fncachevfs(repostore.rawvfs,
|
|
|
|
repostore.fncache,
|
|
|
|
repostore.encode)
|
|
|
|
elif isinstance(repostore, store.encodedstore):
|
|
|
|
opener = vfsmod.filtervfs(repostore.rawvfs,
|
|
|
|
store.encodefilename)
|
|
|
|
else:
|
|
|
|
opener = vfsmod.filtervfs(repostore.rawvfs,
|
|
|
|
store.encodedir)
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
opener.options = self.svfs.options.copy()
|
|
|
|
opener.options.update({
|
|
|
|
'treemanifest': True,
|
|
|
|
})
|
2017-04-20 07:14:04 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
mfl.treemanifestlog = treemanifestlog(opener)
|
|
|
|
|
|
|
|
return mfl
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _writemanifest(orig, self, transaction, link, p1, p2, added, removed):
|
|
|
|
n = orig(self, transaction, link, p1, p2, added, removed)
|
|
|
|
|
|
|
|
if not self._manifestlog._revlog.opener.treemanifestserver:
|
|
|
|
return n
|
|
|
|
|
|
|
|
# Since we're adding the root flat manifest, let's add the corresponding
|
|
|
|
# root tree manifest.
|
|
|
|
mfl = self._manifestlog
|
|
|
|
treemfl = mfl.treemanifestlog
|
|
|
|
|
|
|
|
m = self._manifestdict
|
|
|
|
|
|
|
|
parentflat = mfl[p1].read()
|
|
|
|
diff = parentflat.diff(m)
|
|
|
|
|
|
|
|
newtree = treemfl[p1].read().copy()
|
|
|
|
added = []
|
|
|
|
removed = []
|
|
|
|
for filename, (old, new) in diff.iteritems():
|
|
|
|
if new is not None and new[0] is not None:
|
|
|
|
added.append(filename)
|
|
|
|
newtree[filename] = new[0]
|
|
|
|
newtree.setflag(filename, new[1])
|
|
|
|
else:
|
|
|
|
removed.append(filename)
|
|
|
|
del newtree[filename]
|
|
|
|
|
|
|
|
try:
|
|
|
|
treemfrevlog = treemfl._revlog
|
|
|
|
oldaddrevision = treemfrevlog.addrevision
|
|
|
|
def addusingnode(*args, **kwargs):
|
|
|
|
newkwargs = kwargs.copy()
|
|
|
|
newkwargs['node'] = n
|
|
|
|
return oldaddrevision(*args, **newkwargs)
|
|
|
|
treemfrevlog.addrevision = addusingnode
|
|
|
|
|
|
|
|
def readtree(dir, node):
|
|
|
|
return treemfl.get(dir, node).read()
|
|
|
|
treemfrevlog.add(newtree, transaction, link, p1, p2, added, removed,
|
|
|
|
readtree=readtree)
|
|
|
|
finally:
|
|
|
|
del treemfrevlog.__dict__['addrevision']
|
|
|
|
|
|
|
|
return n
|
|
|
|
|
2017-03-19 05:38:45 +03:00
|
|
|
@command('debuggentrees', [
|
|
|
|
('s', 'skip-allowed-roots', None,
|
|
|
|
_('skips the check for only generating on allowed roots')),
|
|
|
|
('', 'verify', None,
|
|
|
|
_('verify consistency of tree data')),
|
|
|
|
], _('hg debuggentrees FIRSTREV LASTREV'))
|
|
|
|
def debuggentrees(ui, repo, rev1, rev2, *args, **opts):
|
|
|
|
rev1 = repo.revs(rev1).first()
|
|
|
|
rev2 = repo.revs(rev2).last()
|
|
|
|
|
|
|
|
mfrevlog = repo.manifestlog._revlog
|
|
|
|
mfrev1 = mfrevlog.rev(repo[rev1].manifestnode())
|
|
|
|
mfrev2 = mfrevlog.rev(repo[rev2].manifestnode()) + 1
|
|
|
|
|
|
|
|
packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
|
|
|
|
if opts.get('skip_allowed_roots', False):
|
|
|
|
ui.setconfig('treemanifest', 'allowedtreeroots', None)
|
|
|
|
with mutabledatapack(repo.ui, packpath) as dpack:
|
|
|
|
with mutablehistorypack(repo.ui, packpath) as hpack:
|
|
|
|
recordmanifest(dpack, hpack, repo, mfrev1, mfrev2,
|
|
|
|
verify=opts.get('verify', False))
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
@command('backfilltree', [
    ('l', 'limit', '10000000', _(''))
    ], _('hg backfilltree [OPTIONS]'))
def backfilltree(ui, repo, *args, **opts):
    """Convert existing flat manifests into tree manifests.

    Processes at most --limit changelog revisions, holding both repo
    locks for the duration of a single 'backfilltree' transaction.
    """
    with repo.wlock(), repo.lock():
        with repo.transaction('backfilltree') as tr:
            _backfill(tr, repo, int(opts.get('limit')))
|
|
|
|
|
|
|
|
def _backfill(tr, repo, limit):
|
|
|
|
ui = repo.ui
|
|
|
|
cl = repo.changelog
|
|
|
|
mfl = repo.manifestlog
|
|
|
|
tmfl = mfl.treemanifestlog
|
|
|
|
treerevlog = tmfl._revlog
|
|
|
|
|
|
|
|
maxrev = len(treerevlog) - 1
|
|
|
|
start = treerevlog.linkrev(maxrev) + 1
|
|
|
|
end = min(len(cl), start + limit)
|
|
|
|
|
|
|
|
converting = _("converting")
|
|
|
|
|
|
|
|
ui.progress(converting, 0, total=end - start)
|
|
|
|
for i in xrange(start, end):
|
|
|
|
ctx = repo[i]
|
|
|
|
newflat = ctx.manifest()
|
|
|
|
p1 = ctx.p1()
|
|
|
|
p2 = ctx.p2()
|
|
|
|
p1node = p1.manifestnode()
|
|
|
|
p2node = p2.manifestnode()
|
|
|
|
if p1node != nullid:
|
|
|
|
if (p1node not in treerevlog.nodemap or
|
|
|
|
(p2node != nullid and p2node not in treerevlog.nodemap)):
|
|
|
|
ui.warn(_("unable to find parent nodes %s %s\n") % (hex(p1node),
|
|
|
|
hex(p2node)))
|
|
|
|
return
|
|
|
|
parentflat = mfl[p1node].read()
|
|
|
|
parenttree = tmfl[p1node].read()
|
|
|
|
else:
|
|
|
|
parentflat = manifest.manifestdict()
|
|
|
|
parenttree = manifest.treemanifest()
|
|
|
|
|
|
|
|
diff = parentflat.diff(newflat)
|
|
|
|
|
|
|
|
newtree = parenttree.copy()
|
|
|
|
added = []
|
|
|
|
removed = []
|
|
|
|
for filename, (old, new) in diff.iteritems():
|
|
|
|
if new is not None and new[0] is not None:
|
|
|
|
added.append(filename)
|
|
|
|
newtree[filename] = new[0]
|
|
|
|
newtree.setflag(filename, new[1])
|
|
|
|
else:
|
|
|
|
removed.append(filename)
|
|
|
|
del newtree[filename]
|
|
|
|
|
|
|
|
try:
|
|
|
|
oldaddrevision = treerevlog.addrevision
|
|
|
|
def addusingnode(*args, **kwargs):
|
|
|
|
newkwargs = kwargs.copy()
|
|
|
|
newkwargs['node'] = ctx.manifestnode()
|
|
|
|
return oldaddrevision(*args, **newkwargs)
|
|
|
|
treerevlog.addrevision = addusingnode
|
|
|
|
def readtree(dir, node):
|
|
|
|
return tmfl.get(dir, node).read()
|
|
|
|
treerevlog.add(newtree, tr, ctx.rev(), p1node, p2node, added,
|
|
|
|
removed, readtree=readtree)
|
|
|
|
finally:
|
|
|
|
del treerevlog.__dict__['addrevision']
|
|
|
|
|
|
|
|
ui.progress(converting, i - start, total=end - start)
|
|
|
|
|
|
|
|
ui.progress(converting, None)
|
|
|
|
|
2016-09-20 02:30:17 +03:00
|
|
|
def _unpackmanifests(orig, self, repo, *args, **kwargs):
|
2016-11-16 23:11:15 +03:00
|
|
|
mfrevlog = repo.manifestlog._revlog
|
|
|
|
oldtip = len(mfrevlog)
|
2016-09-20 02:30:17 +03:00
|
|
|
|
|
|
|
orig(self, repo, *args, **kwargs)
|
|
|
|
|
|
|
|
if (util.safehasattr(repo.svfs, "manifestdatastore") and
|
|
|
|
repo.ui.configbool('treemanifest', 'autocreatetrees')):
|
2016-10-21 21:02:20 +03:00
|
|
|
|
|
|
|
# TODO: only put in cache if pulling from main server
|
|
|
|
packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
|
2017-01-13 20:42:25 +03:00
|
|
|
with mutabledatapack(repo.ui, packpath) as dpack:
|
2017-03-07 22:15:25 +03:00
|
|
|
with mutablehistorypack(repo.ui, packpath) as hpack:
|
|
|
|
recordmanifest(dpack, hpack, repo, oldtip, len(mfrevlog))
|
2016-09-20 02:30:17 +03:00
|
|
|
|
|
|
|
# Alert the store that there may be new packs
|
|
|
|
repo.svfs.manifestdatastore.markforrefresh()
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
class InterceptedMutableDataPack(object):
    """Intercepts data pack writes, rewriting the root entry.

    The root tree ('') is stored under the flat manifest node supplied at
    construction time, with any non-null delta base remapped to the flat
    p1 node. This makes a tree manifest referencable via its flat hash.
    """
    def __init__(self, pack, node, p1node):
        self._pack = pack
        self._node = node
        self._p1node = p1node

    def add(self, name, node, deltabasenode, delta):
        if name != "":
            # Non-root entries pass through untouched.
            return self._pack.add(name, node, deltabasenode, delta)
        # Root entry: key it by the flat manifest node, and remap a
        # non-null delta base to the flat p1 node.
        base = self._p1node if deltabasenode != nullid else deltabasenode
        return self._pack.add(name, self._node, base, delta)
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
class InterceptedMutableHistoryPack(object):
    """Intercepts history pack writes, rewriting the root entry.

    The root tree ('') is recorded under the flat manifest node supplied
    at construction time, with any non-null p1 remapped to the flat p1
    node. This makes a tree manifest referencable via its flat hash.
    """
    def __init__(self, pack, node, p1node):
        self._pack = pack
        self._node = node
        self._p1node = p1node
        self.entries = []

    def add(self, filename, node, p1, p2, linknode, copyfrom):
        if filename != "":
            # Non-root entries pass through untouched.
            self._pack.add(filename, node, p1, p2, linknode, copyfrom)
            return
        # Root entry: key it by the flat manifest node, and remap a
        # non-null p1 to the flat p1 node.
        newp1 = self._p1node if p1 != nullid else p1
        self._pack.add(filename, self._node, newp1, p2, linknode, copyfrom)
|
2017-03-07 22:15:25 +03:00
|
|
|
|
2017-03-19 05:38:45 +03:00
|
|
|
def recordmanifest(datapack, historypack, repo, oldtip, newtip, verify=False):
|
2017-03-07 22:15:25 +03:00
|
|
|
cl = repo.changelog
|
2016-11-16 23:11:15 +03:00
|
|
|
mfl = repo.manifestlog
|
|
|
|
mfrevlog = mfl._revlog
|
2016-09-20 02:30:17 +03:00
|
|
|
total = newtip - oldtip
|
|
|
|
ui = repo.ui
|
|
|
|
builttrees = {}
|
|
|
|
message = _('priming tree cache')
|
|
|
|
ui.progress(message, 0, total=total)
|
|
|
|
|
2016-09-21 23:51:39 +03:00
|
|
|
refcount = {}
|
|
|
|
for rev in xrange(oldtip, newtip):
|
2016-11-16 23:11:15 +03:00
|
|
|
p1 = mfrevlog.parentrevs(rev)[0]
|
|
|
|
p1node = mfrevlog.node(p1)
|
2016-09-21 23:51:39 +03:00
|
|
|
refcount[p1node] = refcount.get(p1node, 0) + 1
|
|
|
|
|
2016-11-17 00:51:48 +03:00
|
|
|
allowedtreeroots = set()
|
|
|
|
for name in repo.ui.configlist('treemanifest', 'allowedtreeroots'):
|
|
|
|
if name in repo:
|
|
|
|
allowedtreeroots.add(repo[name].manifestnode())
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
includedentries = set()
|
2016-09-20 02:30:17 +03:00
|
|
|
for rev in xrange(oldtip, newtip):
|
|
|
|
ui.progress(message, rev - oldtip, total=total)
|
2017-01-01 05:22:38 +03:00
|
|
|
p1, p2 = mfrevlog.parentrevs(rev)
|
2016-11-16 23:11:15 +03:00
|
|
|
p1node = mfrevlog.node(p1)
|
2017-01-01 05:22:38 +03:00
|
|
|
p2node = mfrevlog.node(p2)
|
2017-03-07 22:15:25 +03:00
|
|
|
linkrev = mfrevlog.linkrev(rev)
|
|
|
|
linknode = cl.node(linkrev)
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2016-12-03 01:37:55 +03:00
|
|
|
if p1node == nullid:
|
2017-02-24 01:03:03 +03:00
|
|
|
origtree = cstore.treemanifest(repo.svfs.manifestdatastore)
|
2016-12-03 01:37:55 +03:00
|
|
|
elif p1node in builttrees:
|
2016-09-20 02:30:17 +03:00
|
|
|
origtree = builttrees[p1node]
|
|
|
|
else:
|
2016-11-16 23:11:15 +03:00
|
|
|
origtree = mfl[p1node].read()._treemanifest()
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2016-11-30 02:37:58 +03:00
|
|
|
if origtree is None:
|
2016-11-17 00:51:48 +03:00
|
|
|
if allowedtreeroots and p1node not in allowedtreeroots:
|
|
|
|
continue
|
|
|
|
|
2016-11-16 23:11:15 +03:00
|
|
|
p1mf = mfl[p1node].read()
|
2017-03-07 22:15:25 +03:00
|
|
|
p1linknode = cl.node(mfrevlog.linkrev(p1))
|
2017-02-24 01:03:03 +03:00
|
|
|
origtree = cstore.treemanifest(repo.svfs.manifestdatastore)
|
2016-09-20 02:30:17 +03:00
|
|
|
for filename, node, flag in p1mf.iterentries():
|
|
|
|
origtree.set(filename, node, flag)
|
2017-03-07 22:15:25 +03:00
|
|
|
|
|
|
|
tempdatapack = InterceptedMutableDataPack(datapack, p1node, nullid)
|
2017-04-27 20:44:34 +03:00
|
|
|
temphistorypack = InterceptedMutableHistoryPack(historypack, p1node,
|
|
|
|
nullid)
|
2017-03-07 22:15:25 +03:00
|
|
|
for nname, nnode, ntext, np1text, np1, np2 in origtree.finalize():
|
|
|
|
# No need to compute a delta, since we know the parent isn't
|
|
|
|
# already a tree.
|
|
|
|
tempdatapack.add(nname, nnode, nullid, ntext)
|
|
|
|
temphistorypack.add(nname, nnode, np1, np2, p1linknode, '')
|
2017-03-07 22:15:25 +03:00
|
|
|
includedentries.add((nname, nnode))
|
2017-03-07 22:15:25 +03:00
|
|
|
|
2016-09-20 02:30:17 +03:00
|
|
|
builttrees[p1node] = origtree
|
|
|
|
|
2016-09-21 23:51:39 +03:00
|
|
|
# Remove the tree from the cache once we've processed its final use.
|
|
|
|
# Otherwise memory explodes
|
|
|
|
p1refcount = refcount[p1node] - 1
|
|
|
|
if p1refcount == 0:
|
|
|
|
builttrees.pop(p1node, None)
|
|
|
|
refcount[p1node] = p1refcount
|
|
|
|
|
2017-01-01 05:22:38 +03:00
|
|
|
if p2node != nullid:
|
|
|
|
node = mfrevlog.node(rev)
|
|
|
|
diff = mfl[p1node].read().diff(mfl[node].read())
|
|
|
|
deletes = []
|
|
|
|
adds = []
|
|
|
|
for filename, ((anode, aflag), (bnode, bflag)) in diff.iteritems():
|
|
|
|
if bnode is None:
|
|
|
|
deletes.append(filename)
|
|
|
|
else:
|
|
|
|
adds.append((filename, bnode, bflag))
|
|
|
|
else:
|
|
|
|
# This will generally be very quick, since p1 == deltabase
|
|
|
|
delta = mfrevlog.revdiff(p1, rev)
|
|
|
|
|
|
|
|
deletes = []
|
|
|
|
adds = []
|
|
|
|
|
|
|
|
# Inspect the delta and read the added files from it
|
|
|
|
current = 0
|
|
|
|
end = len(delta)
|
|
|
|
while current < end:
|
|
|
|
try:
|
|
|
|
block = ''
|
|
|
|
# Deltas are of the form:
|
|
|
|
# <start><end><datalen><data>
|
2017-01-03 16:09:06 +03:00
|
|
|
# Where start and end say what bytes to delete, and data
|
|
|
|
# says what bytes to insert in their place. So we can just
|
|
|
|
# read <data> to figure out all the added files.
|
2017-01-01 05:22:38 +03:00
|
|
|
byte1, byte2, blocklen = struct.unpack(">lll",
|
|
|
|
delta[current:current + 12])
|
|
|
|
current += 12
|
|
|
|
if blocklen:
|
|
|
|
block = delta[current:current + blocklen]
|
|
|
|
current += blocklen
|
|
|
|
except struct.error:
|
|
|
|
raise RuntimeError("patch cannot be decoded")
|
|
|
|
|
2017-01-03 16:09:06 +03:00
|
|
|
# An individual delta block may contain multiple newline
|
|
|
|
# delimited entries.
|
2017-01-01 05:22:38 +03:00
|
|
|
for line in block.split('\n'):
|
|
|
|
if not line:
|
|
|
|
continue
|
|
|
|
fname, rest = line.split('\0')
|
|
|
|
fnode = rest[:40]
|
|
|
|
fflag = rest[40:]
|
|
|
|
adds.append((fname, bin(fnode), fflag))
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
allfiles = set(repo.changelog.readfiles(linkrev))
|
2017-01-01 05:22:38 +03:00
|
|
|
deletes = allfiles.difference(fname for fname, fnode, fflag in adds)
|
2016-09-20 02:30:17 +03:00
|
|
|
|
|
|
|
# Apply the changes on top of the parent tree
|
|
|
|
newtree = origtree.copy()
|
|
|
|
for fname in deletes:
|
|
|
|
newtree.set(fname, None, None)
|
|
|
|
|
|
|
|
for fname, fnode, fflags in adds:
|
|
|
|
newtree.set(fname, fnode, fflags)
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
tempdatapack = InterceptedMutableDataPack(datapack, mfrevlog.node(rev),
|
|
|
|
p1node)
|
2017-04-27 20:44:34 +03:00
|
|
|
temphistorypack = InterceptedMutableHistoryPack(historypack,
|
|
|
|
mfrevlog.node(rev),
|
2017-03-07 22:15:25 +03:00
|
|
|
p1node)
|
2017-03-19 05:38:45 +03:00
|
|
|
mfdatastore = repo.svfs.manifestdatastore
|
2017-03-07 22:15:25 +03:00
|
|
|
newtreeiter = newtree.finalize(origtree if p1node != nullid else None)
|
|
|
|
for nname, nnode, ntext, np1text, np1, np2 in newtreeiter:
|
2017-03-19 05:38:45 +03:00
|
|
|
if verify:
|
|
|
|
# Verify all children of the tree already exist in the store
|
|
|
|
# somewhere.
|
|
|
|
lines = ntext.split('\n')
|
|
|
|
for line in lines:
|
|
|
|
if not line:
|
|
|
|
continue
|
|
|
|
childname, nodeflag = line.split('\0')
|
|
|
|
childpath = os.path.join(nname, childname)
|
|
|
|
cnode = nodeflag[:40]
|
|
|
|
cflag = nodeflag[40:]
|
|
|
|
if (cflag == 't' and
|
|
|
|
(childpath + '/', bin(cnode)) not in includedentries and
|
|
|
|
mfdatastore.getmissing([(childpath, bin(cnode))])):
|
|
|
|
import pdb
|
|
|
|
pdb.set_trace()
|
|
|
|
pass
|
|
|
|
|
2017-03-07 22:15:25 +03:00
|
|
|
# Only use deltas if the delta base is in this same pack file
|
|
|
|
if np1 != nullid and (nname, np1) in includedentries:
|
2017-03-07 22:15:25 +03:00
|
|
|
delta = mdiff.textdiff(np1text, ntext)
|
2017-03-07 22:15:25 +03:00
|
|
|
deltabase = np1
|
2017-03-07 22:15:25 +03:00
|
|
|
else:
|
|
|
|
delta = ntext
|
2017-03-07 22:15:25 +03:00
|
|
|
deltabase = nullid
|
|
|
|
tempdatapack.add(nname, nnode, deltabase, delta)
|
2017-03-07 22:15:25 +03:00
|
|
|
temphistorypack.add(nname, nnode, np1, np2, linknode, '')
|
2017-03-07 22:15:25 +03:00
|
|
|
includedentries.add((nname, nnode))
|
2017-03-07 22:15:25 +03:00
|
|
|
|
2017-01-01 05:22:38 +03:00
|
|
|
if ui.configbool('treemanifest', 'verifyautocreate', False):
|
2016-11-17 00:51:48 +03:00
|
|
|
diff = newtree.diff(origtree)
|
2016-09-21 23:57:06 +03:00
|
|
|
for fname in deletes:
|
2017-01-01 05:22:38 +03:00
|
|
|
fdiff = diff.get(fname)
|
|
|
|
if fdiff is None:
|
2016-09-21 23:57:06 +03:00
|
|
|
import pdb
|
|
|
|
pdb.set_trace()
|
|
|
|
pass
|
2017-01-01 05:22:38 +03:00
|
|
|
else:
|
|
|
|
l, r = fdiff
|
|
|
|
if l != (None, ''):
|
|
|
|
import pdb
|
|
|
|
pdb.set_trace()
|
|
|
|
pass
|
2016-09-21 23:57:06 +03:00
|
|
|
|
|
|
|
for fname, fnode, fflags in adds:
|
2017-01-01 05:22:38 +03:00
|
|
|
fdiff = diff.get(fname)
|
|
|
|
if fdiff is None:
|
|
|
|
# Sometimes adds are no-ops, so they don't show up in the
|
|
|
|
# diff.
|
|
|
|
if origtree.get(fname) != newtree.get(fname):
|
|
|
|
import pdb
|
|
|
|
pdb.set_trace()
|
|
|
|
pass
|
|
|
|
else:
|
|
|
|
l, r = fdiff
|
|
|
|
if l != (fnode, fflags):
|
|
|
|
import pdb
|
|
|
|
pdb.set_trace()
|
|
|
|
pass
|
2016-11-16 23:11:15 +03:00
|
|
|
builttrees[mfrevlog.node(rev)] = newtree
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2016-11-16 23:11:15 +03:00
|
|
|
mfnode = mfrevlog.node(rev)
|
2016-09-21 23:51:39 +03:00
|
|
|
if refcount.get(mfnode) > 0:
|
|
|
|
builttrees[mfnode] = newtree
|
|
|
|
|
2016-09-20 02:30:17 +03:00
|
|
|
ui.progress(message, None)
|
2017-03-07 22:15:25 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _checkhash(orig, self, *args, **kwargs):
|
|
|
|
# Don't validate root hashes during the transition to treemanifest
|
|
|
|
if self.indexfile.endswith('00manifesttree.i'):
|
|
|
|
return
|
|
|
|
return orig(self, *args, **kwargs)
|
|
|
|
|
|
|
|
def wrappropertycache(cls, propname, wrapper):
    """Wraps a filecache property. These can't be wrapped using the normal
    wrapfunction. This should eventually go into upstream Mercurial.

    Walks ``cls``'s MRO looking for ``propname`` and replaces the cached
    property's underlying function with one that calls
    ``wrapper(origfn, *args, **kwargs)``.

    Raises AttributeError if no class in the MRO defines ``propname``.
    """
    assert callable(wrapper)
    for currcls in cls.__mro__:
        if propname in currcls.__dict__:
            origfn = currcls.__dict__[propname].func
            assert callable(origfn)
            def wrap(*args, **kwargs):
                return wrapper(origfn, *args, **kwargs)
            # The property object itself is mutable even though the class
            # __dict__ proxy is not.
            currcls.__dict__[propname].func = wrap
            break
    else:
        # Bug fix: report the class being wrapped; the old code formatted
        # type(currcls) which was always <class 'type'> at this point.
        raise AttributeError(_("%s has no property '%s'") %
                             (cls, propname))
|
2017-04-20 07:14:03 +03:00
|
|
|
|
|
|
|
@command('prefetchtrees', [
|
|
|
|
('r', 'rev', '', _("revs to prefetch the trees for")),
|
2017-04-20 07:14:04 +03:00
|
|
|
('', 'base', '', _("revs that are assumed to already be local")),
|
2017-04-20 07:14:03 +03:00
|
|
|
] + commands.walkopts, _('--rev REVS PATTERN..'))
|
|
|
|
def prefetchtrees(ui, repo, *args, **opts):
|
|
|
|
revs = repo.revs(opts.get('rev'))
|
2017-04-20 07:14:04 +03:00
|
|
|
baserevs = []
|
|
|
|
if opts.get('base'):
|
|
|
|
baserevs = repo.revs(opts.get('base'))
|
2017-04-20 07:14:03 +03:00
|
|
|
|
|
|
|
mfnodes = set()
|
|
|
|
for rev in revs:
|
|
|
|
mfnodes.add(repo[rev].manifestnode())
|
|
|
|
|
2017-04-20 07:14:04 +03:00
|
|
|
basemfnodes = set()
|
|
|
|
for rev in baserevs:
|
|
|
|
basemfnodes.add(repo[rev].manifestnode())
|
|
|
|
|
|
|
|
_prefetchtrees(repo, '', mfnodes, basemfnodes, [])
|
2017-04-20 07:14:03 +03:00
|
|
|
|
|
|
|
def _prefetchtrees(repo, rootdir, mfnodes, basemfnodes, directories):
|
|
|
|
# If possible, use remotefilelog's more expressive fallbackpath
|
|
|
|
if util.safehasattr(repo, 'fallbackpath'):
|
|
|
|
fallbackpath = repo.fallbackpath
|
|
|
|
else:
|
|
|
|
fallbackpath = repo.ui.config('paths', 'default')
|
|
|
|
|
|
|
|
remote = hg.peer(repo.ui, {}, fallbackpath)
|
|
|
|
if 'gettreepack' not in remote._capabilities():
|
|
|
|
raise error.Abort(_("missing gettreepack capability on remote"))
|
2017-04-27 20:44:33 +03:00
|
|
|
bundle = remote.gettreepack(rootdir, mfnodes, basemfnodes, directories)
|
|
|
|
|
|
|
|
try:
|
|
|
|
op = bundle2.processbundle(repo, bundle, None)
|
|
|
|
|
|
|
|
receivednodes = op.records[RECEIVEDNODE_RECORD]
|
|
|
|
missingnodes = set(mfnodes)
|
|
|
|
for reply in receivednodes:
|
|
|
|
missingnodes.difference_update(n for d, n
|
|
|
|
in reply
|
|
|
|
if d == rootdir)
|
|
|
|
if missingnodes:
|
|
|
|
raise error.Abort(_("unable to download %d trees (%s,...)") %
|
2017-04-27 20:44:34 +03:00
|
|
|
(len(missingnodes), list(missingnodes)[0]))
|
2017-04-27 20:44:33 +03:00
|
|
|
except bundle2.AbortFromPart as exc:
|
|
|
|
repo.ui.status(_('remote: abort: %s\n') % exc)
|
|
|
|
raise error.Abort(_('pull failed on remote'), hint=exc.hint)
|
|
|
|
except error.BundleValueError as exc:
|
|
|
|
raise error.Abort(_('missing support for %s') % exc)
|
|
|
|
|
2017-04-27 20:44:34 +03:00
|
|
|
@bundle2.parthandler(TREEGROUP_PARTTYPE, ('version', 'treecache'))
def treeparthandler(op, part):
    """Handle a received tree pack bundle2 part.

    When the `treecache` param is the string 'True', the received pack is
    written to the shared pack cache; otherwise it goes into the repo's
    permanent local pack storage.
    """
    repo = op.repo

    partversion = part.params.get('version')
    if partversion != '1':
        raise error.Abort(_("unknown treegroup bundle2 part version: %s") %
                          partversion)

    iscache = part.params.get('treecache', 'False') == 'True'
    if iscache:
        destpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
    else:
        destpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
                                                PACK_CATEGORY)

    receivedhistory, receiveddata = wirepack.receivepack(repo.ui, part,
                                                         destpath)

    # Record what arrived so callers can verify the fetch was complete.
    op.records.add(RECEIVEDNODE_RECORD, receiveddata)
def pull(orig, ui, repo, *pats, **opts):
    """Wrap the pull command to optionally prefetch recent tree manifests.

    After the wrapped pull completes, if treemanifest.pullprefetchcount is
    a non-zero integer N, download the manifests of the latest N commits
    that are not already in the local tree store.
    """
    result = orig(ui, repo, *pats, **opts)

    # prefetch if it's configured
    count = ui.configint('treemanifest', 'pullprefetchcount', None)
    if not count:
        return result

    ui.status(_("prefetching trees\n"))
    store = repo.svfs.manifestdatastore

    # Find which of the latest `count` commits' manifests are missing.
    firstrev = max(0, repo['tip'].rev() - count + 1)
    ctxs = list(repo.set('%s:', firstrev))
    wanted = (ctx.manifestnode() for ctx in ctxs)
    mfnodes = [node for key, node
               in store.getmissing(('', n) for n in wanted)]

    # Of those commits' parents, keep only the manifests we already have
    # locally — they can serve as delta bases for the fetch.
    parentctxs = repo.set('parents(roots(%ln:))',
                          (ctx.node() for ctx in ctxs))
    basemfnodes = set(ctx.manifestnode() for ctx in parentctxs)
    missingbases = list(store.getmissing(('', n) for n in basemfnodes))
    basemfnodes.difference_update(node for key, node in missingbases)

    _prefetchtrees(repo, '', mfnodes, basemfnodes, [])

    return result
def clientgettreepack(remote, rootdir, mfnodes, basemfnodes, directories):
    """Issue a gettreepack wire protocol call against *remote* and return
    a bundle2 unbundler over the compressed response stream.
    """
    args = {
        'rootdir': rootdir,
        'mfnodes': wireproto.encodelist(mfnodes),
        'basemfnodes': wireproto.encodelist(basemfnodes),
        'directories': ','.join(wireproto.escapearg(d)
                                for d in directories),
    }

    stream = remote._callcompressable("gettreepack", **args)
    return bundle2.getunbundler(remote.ui, stream)
class treememoizer(object):
    """Cache of tree objects, each kept alive only until it has been
    fetched as many times as `adduse` declared for its node.
    """
    def __init__(self, store):
        # Backing data store used to construct trees on demand.
        self._store = store
        # node -> number of remaining expected `get` calls
        self._counts = {}
        # node -> live tree object
        self._cache = {}

    def adduse(self, node):
        """Declare one additional expected `get` for *node*."""
        self._counts[node] = 1 + self._counts.get(node, 0)

    def get(self, node):
        """Return the tree for *node*, building it if necessary and
        dropping the cached reference once its expected uses run out."""
        if node in self._cache:
            tree = self._cache[node]
        else:
            tree = cstore.treemanifest(self._store, node)
            self._cache[node] = tree

        remaining = self._counts.get(node, 1) - 1
        self._counts[node] = max(remaining, 0)
        if remaining <= 0:
            # Last expected use — stop holding the tree in memory.
            del self._cache[node]

        return tree
def servergettreepack(repo, proto, args):
    """A server api for requesting a pack of tree information.

    Decodes the wire arguments, streams the requested trees back as a
    bundle2 TREEGROUP part, and forwards any error.Abort to the client
    as an 'error:abort' bundle2 part rather than dropping the connection.
    """
    # Serving trees from a shallow repo would itself require remote
    # fetches, which is not supported.
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, sshserver.sshserver):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    rootdir = args['rootdir']

    # Sort to produce a consistent output
    mfnodes = sorted(wireproto.decodelist(args['mfnodes']))
    basemfnodes = sorted(wireproto.decodelist(args['basemfnodes']))
    directories = sorted(list(wireproto.unescapearg(d) for d
                              in args['directories'].split(',') if d != ''))

    try:
        bundler = bundle2.bundle20(repo.ui)
        packstream = generatepackstream(repo, rootdir, mfnodes,
                                        basemfnodes, directories)
        part = bundler.newpart(TREEGROUP_PARTTYPE, data=packstream)
        part.addparam('version', '1')
        # Data pulled from the server goes into the client's shared
        # pack cache (see treeparthandler).
        part.addparam('treecache', 'True')

    except error.Abort as exc:
        # cleanly forward Abort error to the client
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', str(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort',
                                           manargs, advargs))
    return wireproto.streamres(gen=bundler.getchunks(), v1compressible=True)
def generatepackstream(repo, rootdir, mfnodes, basemfnodes, directories):
    """Generate a wire-format pack stream for the requested trees.

    All size/len/counts are network order unsigned ints.

    Request args:

    `rootdir` - The directory of the tree to send (including its children)
    `mfnodes` - The manifest nodes of the specified root directory to send.
    `basemfnodes` - The manifest nodes of the specified root directory that are
    already on the client.
    `directories` - The fullpath (not relative path) of directories underneath
    the rootdir that should be sent.

    Response format:

    [<fileresponse>,...]<10 null bytes>
    fileresponse = <filename len: 2 byte><filename><history><deltas>
    history = <count: 4 byte>[<history entry>,...]
    historyentry = <node: 20 byte><p1: 20 byte><p2: 20 byte>
                   <linknode: 20 byte><copyfrom len: 2 byte><copyfrom>
    deltas = <count: 4 byte>[<delta entry>,...]
    deltaentry = <node: 20 byte><deltabase: 20 byte>
                 <delta len: 8 byte><delta>
    """
    if rootdir:
        raise RuntimeError("rootdir not supported just yet: %s" %
                           rootdir)
    if directories:
        raise RuntimeError("directories arg is not supported yet ('%s')" %
                           ', '.join(directories))

    historystore = repo.svfs.manifesthistorystore
    datastore = repo.svfs.manifestdatastore

    mfnodeset = set(mfnodes)
    basemfnodeset = set(basemfnodes)

    # Count how many times we will need each comparison node, so we can keep
    # trees in memory the appropriate amount of time.
    trees = treememoizer(datastore)
    for node in mfnodes:
        p1node, p2node = historystore.getnodeinfo(rootdir, node)[:2]
        if p1node != nullid and (p1node in mfnodeset or
                                 p1node in basemfnodeset):
            trees.adduse(p1node)
        else:
            # No usable p1 — this node will be compared against every base.
            for basenode in basemfnodes:
                trees.adduse(basenode)
        if p2node != nullid and (p2node in mfnodeset or
                                 p2node in basemfnodeset):
            trees.adduse(p2node)

    for node in mfnodes:
        treemf = trees.get(node)

        p1node, p2node = historystore.getnodeinfo(rootdir, node)[:2]
        # If p1 is being sent or is already on the client, chances are
        # that's the best thing for us to delta against.
        if p1node != nullid and (p1node in mfnodeset or
                                 p1node in basemfnodeset):
            basetrees = [trees.get(p1node)]
        else:
            basetrees = [trees.get(basenode) for basenode in basemfnodes]

        if p2node != nullid and (p2node in mfnodeset or
                                 p2node in basemfnodeset):
            basetrees.append(trees.get(p2node))

        subtrees = treemf.walksubtrees(comparetrees=basetrees)
        for subname, subnode, subtext, x, x, x in subtrees:
            # Append data
            # (fix: this data/history preamble was duplicated verbatim in
            # the original; the redundant second assignment was removed)
            data = [(subnode, nullid, subtext)]

            # Append history
            # Only append first history for now, since the entire manifest
            # history is very long.
            histdata = historystore.getnodeinfo(subname, subnode)
            p1node, p2node, linknode, copyfrom = histdata
            history = [(subnode, p1node, p2node, linknode, copyfrom)]

            for chunk in wirepack.sendpackpart(subname, history, data):
                yield chunk

    yield wirepack.closepart()
class remotetreedatastore(object):
    """Content store that fetches tree data from the remote server on
    demand. Fetched data lands in an attached shared store, which then
    serves the actual read.
    """
    def __init__(self, repo):
        self._repo = repo
        self._shared = None

    def setshared(self, shared):
        """Attach the shared pack store that received fetches land in."""
        self._shared = shared

    def get(self, name, node):
        # Only look at the server if not root or is public
        if name == '':
            revlog = self._repo.manifestlog._revlog
            linkrev = revlog.linkrev(revlog.rev(node))
            if self._repo[linkrev].phase() != phases.public:
                raise KeyError((name, node))

        _prefetchtrees(self._repo, name, [node], [], [])
        self._shared.markforrefresh()
        return self._shared.get(name, node)

    def getdeltachain(self, name, node):
        # Remote content stores only hold fulltexts, so fabricate a
        # single-entry chain; nullid in the deltabasenode slot marks the
        # revision as a fulltext.
        fulltext = self.get(name, node)
        return [(name, node, None, nullid, fulltext)]

    def add(self, name, node, data):
        raise RuntimeError("cannot add to a remote store")

    def getmissing(self, keys):
        # Nothing is ever considered locally present in a remote store.
        return keys

    def markledger(self, ledger):
        # Remote data never participates in repack.
        pass
def serverrepack(repo):
    """Repack the server's tree pack files together with revlog data."""
    packdir = repo.vfs.join('cache/packs/%s' % PACK_CATEGORY)

    revlogstore = manifestrevlogstore(repo)

    # Union the on-disk packs with the revlog-backed store so the repack
    # sees every tree revision the server knows about.
    contentstore = unioncontentstore(
        datapackstore(repo.ui, packdir), revlogstore)
    metadatastore = unionmetadatastore(
        historypackstore(repo.ui, packdir), revlogstore)

    _runrepack(repo, contentstore, metadatastore, packdir, PACK_CATEGORY)
@exchange.b2partsgenerator('treepack')
def gettreepackpart(pushop, bundler):
    """add parts containing trees being pushed"""
    repo = pushop.repo
    if ('treepack' in pushop.stepsdone or
        not repo.ui.configbool('treemanifest', 'sendtrees')):
        return
    pushop.stepsdone.add('treepack')

    outgoing = pushop.outgoing
    # Manifests of every outgoing commit...
    mfnodes = [repo[node].manifestnode() for node in outgoing.missing]
    # ...deltaed against the manifests of the outgoing set's parents.
    basemfnodes = [ctx.manifestnode()
                   for ctx in repo.set('parents(roots(%ln))',
                                       outgoing.missing)]

    # Only the root directory with no subdirectory filtering is supported.
    packstream = generatepackstream(repo, '', mfnodes,
                                    basemfnodes, [])
    part = bundler.newpart(TREEGROUP_PARTTYPE, data=packstream)
    part.addparam('version', '1')
    # Pushed trees belong in the receiver's permanent local storage,
    # not its shared cache.
    part.addparam('treecache', 'False')