2016-08-30 02:19:52 +03:00
|
|
|
# __init__.py
|
|
|
|
#
|
|
|
|
# Copyright 2016 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2017-03-10 01:45:23 +03:00
|
|
|
"""
|
|
|
|
The treemanifest extension is to aid in the transition from flat manifests to
|
|
|
|
treemanifests. It has a client portion that's used to construct trees during
|
|
|
|
client pulls and commits, and a server portion which is used to generate
|
|
|
|
tree manifests side-by-side normal flat manifests.
|
|
|
|
|
|
|
|
Configs:
|
|
|
|
|
|
|
|
``treemanifest.server`` is used to indicate that this repo can serve
|
|
|
|
treemanifests
|
|
|
|
"""
|
2016-08-30 02:19:52 +03:00
|
|
|
|
2016-11-17 00:51:48 +03:00
|
|
|
"""allows using and migrating to tree manifests
|
|
|
|
|
|
|
|
When autocreatetrees is enabled, you can limit which bookmarks are initially
|
|
|
|
converted to trees during pull by specifying `treemanifest.allowedtreeroots`.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
allowedtreeroots = master,stable
|
|
|
|
|
2017-03-02 03:55:19 +03:00
|
|
|
Enabling `treemanifest.usecunionstore` will cause the extension to use the
|
|
|
|
native implementation of the datapack stores.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
usecunionstore = True
|
|
|
|
|
2017-04-20 07:14:03 +03:00
|
|
|
Disabling `treemanifest.demanddownload` will prevent the extension from
|
|
|
|
automatically downloading trees from the server when they don't exist locally.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
demanddownload = True
|
|
|
|
|
2017-05-01 08:05:09 +03:00
|
|
|
Setting `treemanifest.pullprefetchcount` to an integer N will cause the latest N
|
2017-04-27 20:44:33 +03:00
|
|
|
commits' manifests to be downloaded (if they aren't already).
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
pullprefetchcount = 0
|
2017-05-01 08:05:09 +03:00
|
|
|
|
2017-06-05 23:48:58 +03:00
|
|
|
`treemanifest.pullprefetchrevs` specifies a revset of commits whose trees should
be prefetched after a pull. Defaults to None.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
pullprefetchrevs = master + stable
|
|
|
|
|
2017-06-20 21:08:15 +03:00
|
|
|
Setting `treemanifest.repackstartrev` and `treemanifest.repackendrev` causes `hg
|
|
|
|
repack --incremental` to only repack the revlog entries in the given range. The
|
|
|
|
default values are 0 and len(changelog) - 1, respectively.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
repackstartrev = 0
|
|
|
|
repackendrev = 1000
|
2017-07-11 01:53:12 +03:00
|
|
|
|
2017-07-14 21:47:12 +03:00
|
|
|
Setting `treemanifest.treeonly` to True will force all manifest reads to use the
|
|
|
|
tree format. This is useful in the final stages of a migration to treemanifest
|
|
|
|
to prevent accesses of flat manifests.
|
|
|
|
|
|
|
|
[treemanifest]
|
|
|
|
treeonly = True
|
|
|
|
|
2016-11-17 00:51:48 +03:00
|
|
|
"""
|
|
|
|
|
2016-08-30 02:19:52 +03:00
|
|
|
from mercurial import (
|
2017-04-27 20:44:33 +03:00
|
|
|
bundle2,
|
2017-08-29 23:02:22 +03:00
|
|
|
bundlerepo,
|
2016-09-20 02:30:17 +03:00
|
|
|
changegroup,
|
2017-04-20 07:14:03 +03:00
|
|
|
commands,
|
2016-12-03 01:37:51 +03:00
|
|
|
error,
|
2017-04-27 20:44:34 +03:00
|
|
|
exchange,
|
2016-09-20 02:30:17 +03:00
|
|
|
extensions,
|
2017-04-20 07:14:03 +03:00
|
|
|
hg,
|
2016-08-30 02:19:52 +03:00
|
|
|
localrepo,
|
2017-03-10 01:45:23 +03:00
|
|
|
manifest,
|
2017-03-07 22:15:25 +03:00
|
|
|
mdiff,
|
2017-04-20 07:14:03 +03:00
|
|
|
phases,
|
2017-11-09 21:28:53 +03:00
|
|
|
policy,
|
2017-05-22 23:38:37 +03:00
|
|
|
registrar,
|
2017-05-10 23:48:34 +03:00
|
|
|
repair,
|
2017-03-10 01:45:23 +03:00
|
|
|
revlog,
|
2017-04-20 07:14:03 +03:00
|
|
|
sshserver,
|
2017-11-07 20:06:24 +03:00
|
|
|
templatekw,
|
2016-09-20 02:30:17 +03:00
|
|
|
util,
|
2017-04-20 07:14:03 +03:00
|
|
|
wireproto,
|
2016-08-30 02:19:52 +03:00
|
|
|
)
|
2016-09-20 02:30:17 +03:00
|
|
|
from mercurial.i18n import _
|
2017-04-20 07:14:03 +03:00
|
|
|
from mercurial.node import bin, hex, nullid
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2017-04-20 07:14:04 +03:00
|
|
|
from remotefilelog.contentstore import (
|
|
|
|
manifestrevlogstore,
|
|
|
|
unioncontentstore,
|
|
|
|
)
|
|
|
|
from remotefilelog.metadatastore import (
|
|
|
|
unionmetadatastore,
|
|
|
|
)
|
2017-11-09 21:28:53 +03:00
|
|
|
from remotefilelog.datapack import (
|
2017-11-09 21:28:53 +03:00
|
|
|
datapack,
|
2017-11-09 21:28:53 +03:00
|
|
|
datapackstore,
|
|
|
|
mutabledatapack,
|
|
|
|
)
|
|
|
|
from remotefilelog.historypack import (
|
2017-11-09 21:28:53 +03:00
|
|
|
historypack,
|
2017-11-09 21:28:53 +03:00
|
|
|
historypackstore,
|
|
|
|
mutablehistorypack,
|
|
|
|
)
|
2017-04-27 20:44:33 +03:00
|
|
|
from remotefilelog import shallowrepo, shallowutil, wirepack
|
2017-11-09 21:28:53 +03:00
|
|
|
from remotefilelog.repack import (
|
2017-11-09 21:28:53 +03:00
|
|
|
_computeincrementaldatapack,
|
|
|
|
_computeincrementalhistorypack,
|
2017-11-09 21:28:53 +03:00
|
|
|
_runrepack,
|
2017-11-09 21:28:53 +03:00
|
|
|
_topacks,
|
2017-11-17 02:28:07 +03:00
|
|
|
backgroundrepack,
|
2017-11-09 21:28:53 +03:00
|
|
|
)
|
2017-02-24 01:03:03 +03:00
|
|
|
import cstore
|
2016-08-30 02:19:52 +03:00
|
|
|
|
2017-03-19 05:38:45 +03:00
|
|
|
import os
|
2016-09-20 02:30:17 +03:00
|
|
|
import struct
|
2017-06-20 21:08:15 +03:00
|
|
|
import time
|
2016-09-20 02:30:17 +03:00
|
|
|
|
2017-11-09 21:28:53 +03:00
|
|
|
osutil = policy.importmod(r'osutil')

# Command registration tables populated by the @command decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

# Config item registration (declares defaults for ui.config* lookups).
configtable = {}
configitem = registrar.configitem(configtable)

configitem('treemanifest', 'sendtrees', default=False)
configitem('treemanifest', 'server', default=False)

# Pack-store category used for all manifest data/history pack paths.
PACK_CATEGORY='manifests'

TREEGROUP_PARTTYPE = 'b2x:treegroup'
# Temporary part type while we migrate the arguments
TREEGROUP_PARTTYPE2 = 'b2x:treegroup2'
# bundle2 record key under which received manifest nodes are stashed.
RECEIVEDNODE_RECORD = 'receivednodes'

# When looking for a recent manifest to consider our base during tree
# prefetches, this constant defines how far back we should search.
BASENODESEARCHMAX = 25000
|
|
|
|
|
2017-08-31 21:14:39 +03:00
|
|
|
def treeenabled(ui):
    """Return True if the treemanifest extension is enabled on this ui.

    An unset value (None) or the conventional '!' disabled marker both
    count as "not enabled".
    """
    state = ui.config('extensions', 'treemanifest')
    return state is not None and state != '!'
|
2017-08-31 21:14:39 +03:00
|
|
|
|
2017-06-19 18:02:17 +03:00
|
|
|
def uisetup(ui):
    """Install all the module-level hooks for the extension.

    Wraps changegroup unpacking, manifest writing, pull, strip, bundle2
    and wireproto entry points so tree manifests are produced/consumed
    alongside (or instead of) flat manifests.
    """
    # Intercept manifest parts of incoming changegroups (both cg1 and cg3).
    extensions.wrapfunction(changegroup.cg1unpacker, '_unpackmanifests',
                            _unpackmanifestscg1)
    extensions.wrapfunction(changegroup.cg3unpacker, '_unpackmanifests',
                            _unpackmanifestscg3)
    extensions.wrapfunction(revlog.revlog, 'checkhash', _checkhash)

    # Replace localrepo.manifestlog with a tree-aware version.
    wrappropertycache(localrepo.localrepository, 'manifestlog', getmanifestlog)

    # On a transition server, writing a flat manifest also writes a tree.
    extensions.wrapfunction(manifest.memmanifestctx, 'write', _writemanifest)

    extensions.wrapcommand(commands.table, 'pull', pull)

    # Wire protocol: server side command and client side peer method.
    wireproto.commands['gettreepack'] = (servergettreepack, '*')
    wireproto.wirepeer.gettreepack = clientgettreepack

    # Strip support: trees must be stripped/skipped alongside flat manifests.
    extensions.wrapfunction(repair, 'striptrees', striptrees)
    extensions.wrapfunction(repair, '_collectmanifest', _collectmanifest)
    extensions.wrapfunction(repair, 'stripmanifest', stripmanifest)
    extensions.wrapfunction(bundle2, '_addpartsfromopts', _addpartsfromopts)
    extensions.wrapfunction(bundlerepo.bundlerepository, '_handlebundle2part',
                            _handlebundle2part)
    extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps)
    _registerbundle2parts()

    # Template keyword: show the manifest node without a flat-revlog rev.
    extensions.wrapfunction(templatekw, 'showmanifest', showmanifest)
    templatekw.keywords['manifest'] = templatekw.showmanifest

    # Change manifest template output
    templatekw.defaulttempl['manifest'] = '{node}'
|
|
|
|
|
2017-11-07 20:06:24 +03:00
|
|
|
def showmanifest(orig, **args):
    """Same implementation as the upstream showmanifest, but without the 'rev'
    field."""
    ctx = args[r'ctx']
    templ = args[r'templ']
    mnode = ctx.manifestnode()
    if mnode is None:
        # just avoid crash, we might want to use the 'ff...' hash in future
        return None

    mhex = hex(mnode)
    newargs = args.copy()
    newargs[r'node'] = mhex
    rendered = templ('manifest', **newargs)
    return templatekw._mappable(rendered, None, rendered,
                                lambda x: {'node': mhex})
|
2017-11-07 20:06:24 +03:00
|
|
|
|
2017-08-29 23:02:22 +03:00
|
|
|
def getrepocaps(orig, repo, *args, **kwargs):
    """Advertise tree manifest support in the repo's bundle2 capabilities
    when the extension is enabled."""
    capabilities = orig(repo, *args, **kwargs)
    if treeenabled(repo.ui):
        capabilities['treemanifest'] = ('True',)
    return capabilities
|
|
|
|
|
2017-08-29 23:02:22 +03:00
|
|
|
def _collectmanifest(orig, repo, striprev):
|
|
|
|
if repo.ui.configbool("treemanifest", "treeonly"):
|
|
|
|
return []
|
|
|
|
return orig(repo, striprev)
|
|
|
|
|
|
|
|
def stripmanifest(orig, repo, striprev, tr, files):
    """Skip flat-manifest stripping in treeonly mode.

    With no flat manifest revlog in use, delegating would be pointless;
    otherwise fall through to the original implementation.
    """
    if not repo.ui.configbool("treemanifest", "treeonly"):
        orig(repo, striprev, tr, files)
|
|
|
|
|
2016-08-30 02:19:52 +03:00
|
|
|
def reposetup(ui, repo):
    # Standard extension entry point; all per-repo work lives in wraprepo so
    # it can also be invoked directly on repos created after setup.
    wraprepo(repo)
|
|
|
|
|
|
|
|
def wraprepo(repo):
    """Configure a repo for tree manifests, as server or client."""
    # Only local repositories carry the svfs/manifestlog machinery we hook.
    if not isinstance(repo, localrepo.localrepository):
        return

    # Stash the server flag on svfs so lower layers (which only see the
    # opener, not the ui) can tell whether this is a transition server.
    repo.svfs.treemanifestserver = repo.ui.configbool('treemanifest', 'server')
    if repo.svfs.treemanifestserver:
        serverreposetup(repo)
    else:
        clientreposetup(repo)
|
|
|
|
|
|
|
|
def clientreposetup(repo):
    """Client-side per-repo setup.

    Requires remotefilelog.reponame (used to locate shared cache packs) and,
    unless running treeonly, the fastmanifest extension for the hybrid
    flat/tree manifest implementation.
    """
    repo.name = repo.ui.config('remotefilelog', 'reponame')
    if not repo.name:
        raise error.Abort(_("remotefilelog.reponame must be configured"))

    if not repo.ui.configbool('treemanifest', 'treeonly'):
        # If we're not a pure-tree repo, we must be using fastmanifest to
        # provide the hybrid manifest implementation.
        try:
            extensions.find('fastmanifest')
        except KeyError:
            raise error.Abort(_("cannot use treemanifest without fastmanifest"))
|
2017-01-14 01:58:20 +03:00
|
|
|
|
2017-08-08 05:27:17 +03:00
|
|
|
def setuptreestores(repo, mfl):
    """Attach tree data/history stores to the given manifestlog.

    On a server the stores read from the repo-local cache packs plus the
    manifest revlog; on a client they layer shared cache packs, local packs
    and (optionally) a remote on-demand store.
    """
    if repo.ui.configbool("treemanifest", "server"):
        packpath = repo.vfs.join('cache/packs/%s' % PACK_CATEGORY)

        # Data store
        datastore = cstore.datapackstore(packpath)
        revlogstore = manifestrevlogstore(repo)
        mfl.datastore = unioncontentstore(datastore, revlogstore)

        # History store
        historystore = historypackstore(repo.ui, packpath)
        mfl.historystore = unionmetadatastore(
            historystore,
            revlogstore,
        )

        return

    usecdatapack = repo.ui.configbool('remotefilelog', 'fastdatapack')

    # repo.name may not be set if reposetup didn't run (e.g. bundlerepos).
    if not util.safehasattr(repo, 'name'):
        repo.name = repo.ui.config('remotefilelog', 'reponame')
    packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)

    localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
                                                 PACK_CATEGORY)

    # Data store
    if repo.ui.configbool("treemanifest", "usecunionstore"):
        # Native (C) union store over shared-cache and local packs.
        datastore = cstore.datapackstore(packpath)
        localdatastore = cstore.datapackstore(localpackpath)
        # TODO: can't use remotedatastore with cunionstore yet
        # TODO make reportmetrics work with cstore
        mfl.datastore = cstore.uniondatapackstore([localdatastore, datastore])
    else:
        datastore = datapackstore(repo.ui, packpath, usecdatapack=usecdatapack)
        localdatastore = datapackstore(repo.ui, localpackpath,
                                       usecdatapack=usecdatapack)
        stores = [datastore, localdatastore]
        remotedatastore = remotetreedatastore(repo)
        # demanddownload (default True) makes missing trees fetch from the
        # server transparently via the remote store.
        if repo.ui.configbool("treemanifest", "demanddownload", True):
            stores.append(remotedatastore)

        mfl.datastore = unioncontentstore(*stores,
                                          writestore=localdatastore)
        # The remote store needs a handle on the union store so fetched
        # trees can be written back through it.
        remotedatastore.setshared(mfl.datastore)

    mfl.shareddatastores = [datastore]
    mfl.localdatastores = [localdatastore]
    mfl.ui = repo.ui

    # History store
    sharedhistorystore = historypackstore(repo.ui, packpath)
    localhistorystore = historypackstore(repo.ui, localpackpath)
    mfl.sharedhistorystores = [
        sharedhistorystore
    ]
    mfl.localhistorystores = [
        localhistorystore,
    ]
    mfl.historystore = unionmetadatastore(
        sharedhistorystore,
        localhistorystore,
        writestore=localhistorystore,
    )
    shallowutil.reportpackmetrics(repo.ui, 'treestore', mfl.datastore,
                                  mfl.historystore)
|
2017-03-07 22:15:26 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
class treemanifestlog(manifest.manifestlog):
    """A manifestlog backed by a dedicated tree manifest revlog.

    Used on transition servers, where trees are stored side-by-side with
    flat manifests in a separate '00manifesttree.i' revlog.
    """
    def __init__(self, opener, treemanifest=False):
        # The upstream signature accepts treemanifest=True, but this class
        # is always tree-based; the flag must not be passed.
        assert treemanifest is False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
        self._treeinmem = True

        self._opener = opener
        # Separate index file keeps tree revisions apart from the flat
        # '00manifest.i' revlog.
        self._revlog = manifest.manifestrevlog(opener,
                                               indexfile='00manifesttree.i',
                                               treemanifest=True)

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self.cachesize = cachesize
|
|
|
|
|
2017-07-14 21:47:12 +03:00
|
|
|
class treeonlymanifestlog(object):
    """A manifestlog that serves only native (cstore pack-backed) trees.

    Root trees only: subdirectory reads are rejected. Freshly-written trees
    not yet flushed to packs are served from an in-memory map.
    """
    def __init__(self, opener):
        self._opener = opener
        # (dir, node) -> treemanifestctx for trees written in this process
        # but not yet available from the pack stores.
        self._memtrees = {}

    def __getitem__(self, node):
        return self.get('', node)

    def get(self, dir, node, verify=True):
        # `verify` is accepted for upstream API compatibility but unused here.
        if dir != '':
            raise RuntimeError("native tree manifestlog doesn't support "
                               "subdir reads: (%s, %s)" % (dir, hex(node)))
        # The null node always exists and is empty; no store lookup needed.
        if node == nullid:
            return treemanifestctx(self, dir, node)

        memtree = self._memtrees.get((dir, node))
        if memtree is not None:
            return memtree

        store = self.datastore

        # Probe the store up front so a missing tree surfaces as a KeyError
        # here rather than later during a lazy read().
        try:
            store.get(dir, node)
        except KeyError:
            raise KeyError("tree node not found (%s, %s)" %
                           (dir, hex(node)))

        return treemanifestctx(self, dir, node)

    def addmemtree(self, node, tree, p1, p2):
        # Register an in-memory tree (just written) so subsequent reads see
        # it before the backing pack is finalized.
        ctx = treemanifestctx(self, '', node)
        ctx._data = tree
        # Pre-populate the (normally lazy) parents property.
        ctx.parents = (p1, p2)
        self._memtrees[('', node)] = ctx

    def clearcaches(self):
        self._memtrees.clear()
|
2017-07-14 21:47:12 +03:00
|
|
|
|
|
|
|
class treemanifestctx(object):
    """Read-only context for one native (cstore-backed) tree manifest node.

    Mirrors the upstream manifestctx API (read/new/copy/parents/readdelta/
    readfast/find) on top of the pack-based data and history stores.
    """
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._node = node
        # Lazily loaded in read(); may also be injected directly for
        # in-memory trees (see treeonlymanifestlog.addmemtree).
        self._data = None

    def read(self):
        """Return the cstore.treemanifest for this node, loading it lazily."""
        if self._data is None:
            store = self._manifestlog.datastore
            self._data = cstore.treemanifest(store, self._node)
        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        """Return a new empty tree bound to the same store (root only)."""
        if dir != '':
            raise RuntimeError("native tree manifestlog doesn't support "
                               "subdir creation: '%s'" % dir)

        store = self._manifestlog.datastore
        return cstore.treemanifest(store)

    def copy(self):
        """Return a mutable in-memory copy of this tree."""
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @util.propertycache
    def parents(self):
        """(p1, p2) from the history store; a copy source zeroes out p1."""
        store = self._manifestlog.historystore
        p1, p2, linkrev, copyfrom = store.getnodeinfo(self._dir, self._node)
        if copyfrom:
            p1 = nullid
        return p1, p2

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._manifestlog.datastore
        p1, p2 = self.parents
        mf = self.read()
        if p1 == nullid:
            parentmf = cstore.treemanifest(store)
        else:
            parentmf = cstore.treemanifest(store, p1)

        if shallow:
            # This appears to only be used for changegroup creation in
            # upstream changegroup.py. Since we use pack files for all native
            # tree exchanges, we shouldn't need to implement this.
            # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is a
            # sentinel constant, not an exception; raising it would itself
            # produce a TypeError. NotImplementedError is the correct type.
            raise NotImplementedError("native trees don't support shallow "
                                      "readdelta yet")
        else:
            # Build a fresh tree holding only entries added/changed vs p1.
            md = cstore.treemanifest(store)
            for f, ((n1, fl1), (n2, fl2)) in parentmf.diff(mf).iteritems():
                if n2:
                    md[f] = n2
                    if fl2:
                        md.setflag(f, fl2)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        return self.readdelta(shallow=shallow)

    def find(self, key):
        return self.read().find(key)
|
|
|
|
|
2017-08-29 23:02:22 +03:00
|
|
|
class memtreemanifestctx(object):
    """Mutable tree manifest context used to build and write a new tree."""
    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        store = self._manifestlog.datastore
        # Start from an empty tree bound to the data store.
        self._treemanifest = cstore.treemanifest(store)

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed):
        """Finalize this tree against p1 and write new nodes to local packs.

        Pack files are attached lazily to the transaction so that every
        write within one transaction shares the same mutable data/history
        packs, closed on finalize and discarded on abort.
        """
        if not util.safehasattr(transaction, 'treedatapack'):
            mfl = self._manifestlog
            opener = mfl._opener
            ui = self._manifestlog.ui
            packpath = shallowutil.getlocalpackpath(
                opener.vfs.base,
                'manifests')
            transaction.treedatapack = mutabledatapack(
                ui,
                packpath)
            transaction.treehistpack = mutablehistorypack(
                ui,
                packpath)
            def finalize(tr):
                # Flush both packs and tell the store to pick up new files.
                tr.treedatapack.close()
                tr.treehistpack.close()
                mfl.datastore.markforrefresh()
            def abort(tr):
                tr.treedatapack.abort()
                tr.treehistpack.abort()
            def writepending(tr):
                # Flush current packs, then open fresh ones for any writes
                # that follow within the same transaction.
                finalize(tr)
                transaction.treedatapack = mutabledatapack(
                    ui,
                    packpath)
                transaction.treehistpack = mutablehistorypack(
                    ui,
                    packpath)
                # re-register to write pending changes so that a series
                # of writes are correctly flushed to the store. This
                # happens during amend.
                tr.addpending('treepack', writepending)
            transaction.addfinalize('treepack', finalize)
            transaction.addabort('treepack', abort)
            transaction.addpending('treepack', writepending)

        dpack = transaction.treedatapack
        hpack = transaction.treehistpack

        newtree = self._treemanifest
        p1tree = self._manifestlog[p1].read()
        # finalize() computes the new/changed subtree nodes relative to p1.
        newtreeiter = newtree.finalize(p1tree)

        node = None
        for nname, nnode, ntext, np1text, np1, np2 in newtreeiter:
            # Not using deltas, since there aren't any other trees in
            # this pack it could delta against.
            dpack.add(nname, nnode, revlog.nullid, ntext)
            hpack.add(nname, nnode, np1, np2, revlog.nullid, '')
            # The empty name is the root tree; its node is the return value.
            if nname == "":
                node = nnode

        if node is not None:
            # Make the just-written tree readable before packs are flushed.
            self._manifestlog.addmemtree(node, newtree, p1, p2)
        return node
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def serverreposetup(repo):
    """Server-side per-repo setup: block plain pushes and advertise trees."""
    # Plain (non-pushrebase) manifest pushes would bypass tree generation.
    extensions.wrapfunction(manifest.manifestrevlog, 'addgroup',
                            _addmanifestgroup)

    def _capabilities(orig, repo, proto):
        # Advertise the gettreepack wire command to clients.
        caps = orig(repo, proto)
        caps.append('gettreepack')
        return caps

    # The hook point was renamed upstream; support both names.
    if util.safehasattr(wireproto, '_capabilities'):
        extensions.wrapfunction(wireproto, '_capabilities', _capabilities)
    else:
        extensions.wrapfunction(wireproto, 'capabilities', _capabilities)
|
2017-04-20 07:14:03 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _addmanifestgroup(*args, **kwargs):
    """Unconditionally reject flat manifest pushes on a transition server.

    Installed over manifestrevlog.addgroup; only pushrebase-mediated pushes
    keep the flat and tree manifests consistent.
    """
    message = _("cannot push commits to a treemanifest transition "
                "server without pushrebase")
    raise error.Abort(message)
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def getmanifestlog(orig, self):
    """Property-cache wrapper producing a tree-aware manifestlog.

    treeonly repos get a pure treeonlymanifestlog; hybrid repos keep the
    upstream manifestlog and gain a side-by-side treemanifestlog whose
    stores are mirrored onto the outer object for convenience.
    """
    if not treeenabled(self.ui):
        return orig(self)

    if self.ui.configbool('treemanifest', 'treeonly'):
        mfl = treeonlymanifestlog(self.svfs)
        setuptreestores(self, mfl)
    else:
        mfl = orig(self)
        mfl.treemanifestlog = treemanifestlog(self.svfs)
        setuptreestores(self, mfl.treemanifestlog)
        # Expose the tree stores directly on the flat manifestlog too.
        mfl.datastore = mfl.treemanifestlog.datastore
        mfl.historystore = mfl.treemanifestlog.historystore

        # The individual store lists only exist on the client-side setup
        # path (setuptreestores returns early for servers).
        if util.safehasattr(mfl.treemanifestlog, 'shareddatastores'):
            mfl.shareddatastores = mfl.treemanifestlog.shareddatastores
            mfl.localdatastores = mfl.treemanifestlog.localdatastores
            mfl.sharedhistorystores = mfl.treemanifestlog.sharedhistorystores
            mfl.localhistorystores = mfl.treemanifestlog.localhistorystores

    return mfl
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _writemanifest(orig, self, transaction, link, p1, p2, added, removed):
    """After writing a flat manifest on a transition server, write the
    equivalent root tree manifest under the SAME node.

    Reusing the flat node keeps flat and tree manifests addressable by one
    hash during the migration.
    """
    n = orig(self, transaction, link, p1, p2, added, removed)

    mfl = self._manifestlog
    # Only transition servers (flag set on the opener by wraprepo) mirror
    # flat writes into the tree revlog.
    if (not util.safehasattr(mfl._revlog.opener, 'treemanifestserver') or
        not mfl._revlog.opener.treemanifestserver):
        return n

    # Since we're adding the root flat manifest, let's add the corresponding
    # root tree manifest.
    treemfl = mfl.treemanifestlog

    m = self._manifestdict

    parentflat = mfl[p1].read()
    diff = parentflat.diff(m)

    # Apply the flat diff on top of p1's tree to obtain the new tree.
    newtree = treemfl[p1].read().copy()
    added = []
    removed = []
    for filename, (old, new) in diff.iteritems():
        if new is not None and new[0] is not None:
            added.append(filename)
            newtree[filename] = new[0]
            newtree.setflag(filename, new[1])
        else:
            removed.append(filename)
            del newtree[filename]

    try:
        # Temporarily patch addrevision so the tree revision is stored
        # under the flat manifest's node `n` instead of its computed hash.
        treemfrevlog = treemfl._revlog
        oldaddrevision = treemfrevlog.addrevision
        def addusingnode(*args, **kwargs):
            newkwargs = kwargs.copy()
            newkwargs['node'] = n
            return oldaddrevision(*args, **newkwargs)
        treemfrevlog.addrevision = addusingnode

        def readtree(dir, node):
            return treemfl.get(dir, node).read()
        treemfrevlog.add(newtree, transaction, link, p1, p2, added, removed,
                         readtree=readtree)
    finally:
        # Remove the instance-level override so the class method is restored.
        del treemfrevlog.__dict__['addrevision']

    return n
|
|
|
|
|
2017-03-19 05:38:45 +03:00
|
|
|
@command('debuggentrees', [
    ('s', 'skip-allowed-roots', None,
     _('skips the check for only generating on allowed roots')),
    ('', 'verify', None,
     _('verify consistency of tree data')),
    ], _('hg debuggentrees FIRSTREV LASTREV'))
def debuggentrees(ui, repo, rev1, rev2, *args, **opts):
    """Generate tree manifest packs for a range of manifest revisions.

    FIRSTREV/LASTREV are changelog revsets; their manifest revisions bound
    the (inclusive-exclusive) range handed to recordmanifest.
    """
    rev1 = repo.revs(rev1).first()
    rev2 = repo.revs(rev2).last()

    # Map changelog revs to the corresponding flat manifest revlog revs.
    mfrevlog = repo.manifestlog._revlog
    mfrev1 = mfrevlog.rev(repo[rev1].manifestnode())
    mfrev2 = mfrevlog.rev(repo[rev2].manifestnode()) + 1

    packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
    if opts.get('skip_allowed_roots', False):
        # Clearing the config disables the allowed-roots restriction.
        ui.setconfig('treemanifest', 'allowedtreeroots', None)
    with mutabledatapack(repo.ui, packpath) as dpack:
        with mutablehistorypack(repo.ui, packpath) as hpack:
            recordmanifest(dpack, hpack, repo, mfrev1, mfrev2,
                           verify=opts.get('verify', False))
|
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
@command('backfilltree', [
    ('l', 'limit', '10000000', _(''))
    ], _('hg backfilltree [OPTIONS]'))
def backfilltree(ui, repo, *args, **opts):
    """Convert up to --limit existing flat manifests into tree manifests."""
    # Both locks plus a transaction: the backfill writes to the tree revlog.
    with repo.wlock(), repo.lock(), repo.transaction('backfilltree') as tr:
        _backfill(tr, repo, int(opts.get('limit')))
|
2017-03-10 01:45:23 +03:00
|
|
|
|
|
|
|
def _backfill(tr, repo, limit):
    """Backfill tree manifests for changelog revisions not yet converted.

    Resumes from the last converted revision (tracked via the tree revlog's
    linkrevs) and converts at most `limit` more, storing each tree under
    its flat manifest node.
    """
    ui = repo.ui
    cl = repo.changelog
    mfl = repo.manifestlog
    tmfl = mfl.treemanifestlog
    treerevlog = tmfl._revlog

    # Resume after the changelog rev linked to the newest existing tree.
    maxrev = len(treerevlog) - 1
    start = treerevlog.linkrev(maxrev) + 1
    end = min(len(cl), start + limit)

    converting = _("converting")

    ui.progress(converting, 0, total=end - start)
    for i in xrange(start, end):
        ctx = repo[i]
        newflat = ctx.manifest()
        p1 = ctx.p1()
        p2 = ctx.p2()
        p1node = p1.manifestnode()
        p2node = p2.manifestnode()
        if p1node != nullid:
            # Both parent trees must already exist or the conversion order
            # is broken; bail out rather than produce inconsistent trees.
            if (p1node not in treerevlog.nodemap or
                (p2node != nullid and p2node not in treerevlog.nodemap)):
                ui.warn(_("unable to find parent nodes %s %s\n") % (hex(p1node),
                        hex(p2node)))
                return
            parentflat = mfl[p1node].read()
            parenttree = tmfl[p1node].read()
        else:
            parentflat = manifest.manifestdict()
            parenttree = manifest.treemanifest()

        diff = parentflat.diff(newflat)

        # Apply the flat diff on top of p1's tree.
        newtree = parenttree.copy()
        added = []
        removed = []
        for filename, (old, new) in diff.iteritems():
            if new is not None and new[0] is not None:
                added.append(filename)
                newtree[filename] = new[0]
                newtree.setflag(filename, new[1])
            else:
                removed.append(filename)
                del newtree[filename]

        try:
            # Temporarily patch addrevision so the tree is stored under the
            # flat manifest node (keeps one hash addressing both forms).
            oldaddrevision = treerevlog.addrevision
            def addusingnode(*args, **kwargs):
                newkwargs = kwargs.copy()
                newkwargs['node'] = ctx.manifestnode()
                return oldaddrevision(*args, **newkwargs)
            treerevlog.addrevision = addusingnode
            def readtree(dir, node):
                return tmfl.get(dir, node).read()
            treerevlog.add(newtree, tr, ctx.rev(), p1node, p2node, added,
                           removed, readtree=readtree)
        finally:
            # Drop the instance override, restoring the class method.
            del treerevlog.__dict__['addrevision']

        ui.progress(converting, i - start, total=end - start)

    ui.progress(converting, None)
|
|
|
|
|
2017-08-29 23:02:22 +03:00
|
|
|
def _unpackmanifestscg3(orig, self, repo, *args, **kwargs):
    """cg3 manifest-unpacking wrapper.

    In treeonly mode the flat manifest chunks (and cg3 sub-tree chunks)
    are drained and discarded — trees arrive via the treegroup bundle2
    part instead of the changegroup.
    """
    if not treeenabled(repo.ui):
        return orig(self, repo, *args, **kwargs)

    if repo.ui.configbool('treemanifest', 'treeonly'):
        self.manifestheader()
        # Drain root manifest deltas without applying them.
        for delta in self.deltaiter():
            pass
        # Handle sub-tree manifests
        for chunkdata in iter(self.filelogheader, {}):
            for delta in self.deltaiter():
                pass
        return
    return orig(self, repo, *args, **kwargs)
|
|
|
|
|
|
|
|
def _unpackmanifestscg1(orig, self, repo, *args, **kwargs):
    """Wrapper for cg1 manifest unpacking.

    In tree-only mode the flat manifest group is drained and dropped.
    Otherwise the wrapped implementation runs, and if
    ``treemanifest.autocreatetrees`` is set, trees for the newly received
    flat manifest revisions are generated into the shared pack cache.
    """
    if not treeenabled(repo.ui):
        return orig(self, repo, *args, **kwargs)

    if repo.ui.configbool('treemanifest', 'treeonly'):
        # Consume and discard the flat manifest group entirely.
        self.manifestheader()
        for _chunk in self.deltaiter():
            pass
        return None

    revlog = repo.manifestlog._revlog
    tipbefore = len(revlog)

    orig(self, repo, *args, **kwargs)

    autocreate = (util.safehasattr(repo.manifestlog, "datastore") and
                  repo.ui.configbool('treemanifest', 'autocreatetrees'))
    if autocreate:
        # TODO: only put in cache if pulling from main server
        packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
        with mutabledatapack(repo.ui, packpath) as dpack, \
                mutablehistorypack(repo.ui, packpath) as hpack:
            recordmanifest(dpack, hpack, repo, tipbefore, len(revlog))

        # Alert the store that there may be new packs
        repo.manifestlog.datastore.markforrefresh()
2017-03-07 22:15:25 +03:00
|
|
|
class InterceptedMutableDataPack(object):
    """Intercepts data pack writes and rewrites the root entry's node to the
    provided node, so a tree manifest can be looked up via its flat manifest
    hash. Non-root entries pass through unchanged.
    """
    def __init__(self, pack, node, p1node):
        # Underlying mutable data pack that receives the (possibly rewritten)
        # entries.
        self._pack = pack
        # Node to substitute for the root ("") entry.
        self._node = node
        # Node to substitute for the root entry's delta base, when non-null.
        self._p1node = p1node

    def add(self, name, node, deltabasenode, delta):
        # For the root node, provide the flat manifest as the key
        if not name:
            node = self._node
            if deltabasenode != nullid:
                deltabasenode = self._p1node
        return self._pack.add(name, node, deltabasenode, delta)
2017-03-07 22:15:25 +03:00
|
|
|
class InterceptedMutableHistoryPack(object):
    """Intercepts history pack writes and rewrites the root entry's node to
    the provided node, so a tree manifest can be looked up via its flat
    manifest hash. Non-root entries pass through unchanged.
    """
    def __init__(self, pack, node, p1node):
        # Underlying mutable history pack that receives the entries.
        self._pack = pack
        # Node to substitute for the root ("") entry.
        self._node = node
        # Node to substitute for the root entry's p1, when non-null.
        self._p1node = p1node
        self.entries = []

    def add(self, filename, node, p1, p2, linknode, copyfrom):
        # For the root node, provide the flat manifest as the key
        if not filename:
            node = self._node
            if p1 != nullid:
                p1 = self._p1node
        self._pack.add(filename, node, p1, p2, linknode, copyfrom)
2017-03-19 05:38:45 +03:00
|
|
|
def recordmanifest(datapack, historypack, repo, oldtip, newtip, verify=False):
    """Convert the flat manifest revisions in [oldtip, newtip) into tree
    manifests and write them to the given mutable data/history packs.

    Trees are keyed by their flat manifest node (via the Intercepted* pack
    wrappers) so they remain addressable by the flat hash. When ``verify``
    is True, each emitted tree's children are checked for presence in the
    store and a pdb session is started on a missing child.
    """
    cl = repo.changelog
    mfl = repo.manifestlog
    mfrevlog = mfl._revlog
    total = newtip - oldtip
    ui = repo.ui
    # Cache of already-built trees, keyed by flat manifest node.
    builttrees = {}
    message = _('priming tree cache')
    ui.progress(message, 0, total=total)

    # Count how many times each p1 will be consumed, so cached trees can be
    # evicted after their final use (see below).
    refcount = {}
    for rev in xrange(oldtip, newtip):
        p1 = mfrevlog.parentrevs(rev)[0]
        p1node = mfrevlog.node(p1)
        refcount[p1node] = refcount.get(p1node, 0) + 1

    # Optional whitelist of manifest nodes that may serve as conversion
    # roots (from treemanifest.allowedtreeroots).
    allowedtreeroots = set()
    for name in repo.ui.configlist('treemanifest', 'allowedtreeroots'):
        if name in repo:
            allowedtreeroots.add(repo[name].manifestnode())

    # (name, node) pairs already written to this pack; used to decide when
    # delta bases are available locally.
    includedentries = set()
    for rev in xrange(oldtip, newtip):
        ui.progress(message, rev - oldtip, total=total)
        p1, p2 = mfrevlog.parentrevs(rev)
        p1node = mfrevlog.node(p1)
        p2node = mfrevlog.node(p2)
        linkrev = mfrevlog.linkrev(rev)
        linknode = cl.node(linkrev)

        # Obtain the parent tree: empty for a root rev, from the in-memory
        # cache when available, otherwise from the manifest log.
        if p1node == nullid:
            origtree = cstore.treemanifest(mfl.datastore)
        elif p1node in builttrees:
            origtree = builttrees[p1node]
        else:
            origtree = mfl[p1node].read()._treemanifest()

        if origtree is None:
            if allowedtreeroots and p1node not in allowedtreeroots:
                continue

            # The parent has no tree yet: build one from scratch out of the
            # parent's flat manifest and record it in the packs.
            p1mf = mfl[p1node].read()
            p1linknode = cl.node(mfrevlog.linkrev(p1))
            origtree = cstore.treemanifest(mfl.datastore)
            for filename, node, flag in p1mf.iterentries():
                origtree.set(filename, node, flag)

            tempdatapack = InterceptedMutableDataPack(datapack, p1node,
                                                      nullid)
            temphistorypack = InterceptedMutableHistoryPack(historypack,
                                                            p1node,
                                                            nullid)
            for nname, nnode, ntext, np1text, np1, np2 in origtree.finalize():
                # No need to compute a delta, since we know the parent isn't
                # already a tree.
                tempdatapack.add(nname, nnode, nullid, ntext)
                temphistorypack.add(nname, nnode, np1, np2, p1linknode, '')
                includedentries.add((nname, nnode))

            builttrees[p1node] = origtree

        # Remove the tree from the cache once we've processed its final use.
        # Otherwise memory explodes
        p1refcount = refcount[p1node] - 1
        if p1refcount == 0:
            builttrees.pop(p1node, None)
        refcount[p1node] = p1refcount

        if p2node != nullid:
            # Merge rev: compute adds/deletes via a full manifest diff,
            # since the revlog delta base may be either parent.
            node = mfrevlog.node(rev)
            diff = mfl[p1node].read().diff(mfl[node].read())
            deletes = []
            adds = []
            for filename, ((anode, aflag), (bnode, bflag)) in \
                    diff.iteritems():
                if bnode is None:
                    deletes.append(filename)
                else:
                    adds.append((filename, bnode, bflag))
        else:
            # This will generally be very quick, since p1 == deltabase
            delta = mfrevlog.revdiff(p1, rev)

            deletes = []
            adds = []

            # Inspect the delta and read the added files from it
            current = 0
            end = len(delta)
            while current < end:
                try:
                    block = ''
                    # Deltas are of the form:
                    #   <start><end><datalen><data>
                    # Where start and end say what bytes to delete, and data
                    # says what bytes to insert in their place. So we can
                    # just read <data> to figure out all the added files.
                    byte1, byte2, blocklen = struct.unpack(
                        ">lll", delta[current:current + 12])
                    current += 12
                    if blocklen:
                        block = delta[current:current + blocklen]
                        current += blocklen
                except struct.error:
                    raise RuntimeError("patch cannot be decoded")

                # An individual delta block may contain multiple newline
                # delimited entries.
                for line in block.split('\n'):
                    if not line:
                        continue
                    fname, rest = line.split('\0')
                    fnode = rest[:40]
                    fflag = rest[40:]
                    adds.append((fname, bin(fnode), fflag))

            # Deletes can't be read from the delta directly; derive them
            # from the changelog's file list for this commit.
            allfiles = set(repo.changelog.readfiles(linkrev))
            deletes = allfiles.difference(fname for fname, fnode, fflag
                                          in adds)

        # Apply the changes on top of the parent tree
        newtree = origtree.copy()
        for fname in deletes:
            newtree.set(fname, None, None)

        for fname, fnode, fflags in adds:
            newtree.set(fname, fnode, fflags)

        tempdatapack = InterceptedMutableDataPack(datapack,
                                                  mfrevlog.node(rev),
                                                  p1node)
        temphistorypack = InterceptedMutableHistoryPack(historypack,
                                                        mfrevlog.node(rev),
                                                        p1node)
        mfdatastore = mfl.datastore
        newtreeiter = newtree.finalize(origtree if p1node != nullid else None)
        for nname, nnode, ntext, np1text, np1, np2 in newtreeiter:
            if verify:
                # Verify all children of the tree already exist in the store
                # somewhere.
                lines = ntext.split('\n')
                for line in lines:
                    if not line:
                        continue
                    childname, nodeflag = line.split('\0')
                    childpath = os.path.join(nname, childname)
                    cnode = nodeflag[:40]
                    cflag = nodeflag[40:]
                    if (cflag == 't' and
                        (childpath + '/', bin(cnode)) not in
                            includedentries and
                        mfdatastore.getmissing([(childpath, bin(cnode))])):
                        # NOTE(review): intentional debugger breakpoint for
                        # the verify mode; only reached when verify=True.
                        import pdb
                        pdb.set_trace()

            # Only use deltas if the delta base is in this same pack file
            if np1 != nullid and (nname, np1) in includedentries:
                delta = mdiff.textdiff(np1text, ntext)
                deltabase = np1
            else:
                delta = ntext
                deltabase = nullid
            tempdatapack.add(nname, nnode, deltabase, delta)
            temphistorypack.add(nname, nnode, np1, np2, linknode, '')
            includedentries.add((nname, nnode))

        if ui.configbool('treemanifest', 'verifyautocreate', False):
            # Cross-check the built tree against the flat manifest diff;
            # drops into pdb on any mismatch.
            diff = newtree.diff(origtree)
            for fname in deletes:
                fdiff = diff.get(fname)
                if fdiff is None:
                    import pdb
                    pdb.set_trace()
                else:
                    l, r = fdiff
                    if l != (None, ''):
                        import pdb
                        pdb.set_trace()

            for fname, fnode, fflags in adds:
                fdiff = diff.get(fname)
                if fdiff is None:
                    # Sometimes adds are no-ops, so they don't show up in the
                    # diff.
                    if origtree.get(fname) != newtree.get(fname):
                        import pdb
                        pdb.set_trace()
                else:
                    l, r = fdiff
                    if l != (fnode, fflags):
                        import pdb
                        pdb.set_trace()
        builttrees[mfrevlog.node(rev)] = newtree

        # Keep the new tree cached only if later revisions will consume it.
        mfnode = mfrevlog.node(rev)
        if refcount.get(mfnode) > 0:
            builttrees[mfnode] = newtree

    ui.progress(message, None)
|
2017-03-07 22:15:25 +03:00
|
|
|
|
2017-03-10 01:45:23 +03:00
|
|
|
def _checkhash(orig, self, *args, **kwargs):
|
|
|
|
# Don't validate root hashes during the transition to treemanifest
|
|
|
|
if self.indexfile.endswith('00manifesttree.i'):
|
|
|
|
return
|
|
|
|
return orig(self, *args, **kwargs)
|
|
|
|
|
|
|
|
def wrappropertycache(cls, propname, wrapper):
    """Wraps a filecache property. These can't be wrapped using the normal
    wrapfunction. This should eventually go into upstream Mercurial.

    Walks ``cls``'s MRO, finds the class that declares ``propname``, and
    replaces the property's underlying ``func`` with a closure that calls
    ``wrapper(origfn, *args, **kwargs)``.

    Raises AttributeError if no class in the MRO declares the property.
    """
    assert callable(wrapper)
    for currcls in cls.__mro__:
        if propname in currcls.__dict__:
            origfn = currcls.__dict__[propname].func
            assert callable(origfn)
            def wrap(*args, **kwargs):
                return wrapper(origfn, *args, **kwargs)
            currcls.__dict__[propname].func = wrap
            break
    else:
        # Bug fix: the original reported type(currcls) (i.e. the metaclass,
        # typically `type`) in the error; report the class that actually
        # lacks the property instead.
        raise AttributeError(_("%s has no property '%s'") %
                             (cls, propname))
|
|
|
|
@command('prefetchtrees', [
    ('r', 'rev', '', _("revs to prefetch the trees for")),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('--rev REVS PATTERN..'))
def prefetchtrees(ui, repo, *args, **opts):
    """Download the tree manifests for the commits selected by --rev,
    optionally treating --base's tree as already local and optionally
    kicking off a background repack afterwards.
    """
    mfnodes = set(repo[rev].manifestnode()
                  for rev in repo.revs(opts.get('rev')))

    basemfnode = set()
    base = opts.get('base')
    if base:
        basemfnode.add(repo[base].manifestnode())

    _prefetchtrees(repo, '', mfnodes, basemfnode, [])

    # Run repack in background
    if opts.get('repack'):
        backgroundrepack(repo, incremental=True)
2017-04-20 07:14:03 +03:00
|
|
|
def _prefetchtrees(repo, rootdir, mfnodes, basemfnodes, directories):
    """Fetch the given tree manifest nodes from the fallback server via the
    ``gettreepack`` wire command and apply the resulting bundle2 parts.

    Aborts if the remote lacks the capability, if some requested nodes were
    not received, or if the remote sent an abort part.
    """
    # If possible, use remotefilelog's more expressive fallbackpath
    if util.safehasattr(repo, 'fallbackpath'):
        fallbackpath = repo.fallbackpath
    else:
        fallbackpath = repo.ui.config('paths', 'default')

    start = time.time()
    remote = hg.peer(repo.ui, {}, fallbackpath)
    if 'gettreepack' not in shallowutil.peercapabilities(remote):
        raise error.Abort(_("missing gettreepack capability on remote"))
    # Buffer the remote's ui output so it can be replayed as debug output
    # in the finally block below.
    remote.ui.pushbuffer()
    bundle = remote.gettreepack(rootdir, mfnodes, basemfnodes, directories)

    try:
        op = bundle2.processbundle(repo, bundle, None)

        # Each tree part handler records the (dir, node) pairs it received
        # under RECEIVEDNODE_RECORD; use that to detect missing nodes.
        receivednodes = op.records[RECEIVEDNODE_RECORD]
        count = 0
        missingnodes = set(mfnodes)
        for reply in receivednodes:
            missingnodes.difference_update(n for d, n
                                           in reply
                                           if d == rootdir)
            count += len(reply)
        if op.repo.ui.configbool("remotefilelog", "debug"):
            op.repo.ui.warn(_("%s trees fetched over %0.2fs\n") %
                            (count, time.time() - start))

        if missingnodes:
            raise error.Abort(_("unable to download %d trees (%s,...)") %
                              (len(missingnodes), list(missingnodes)[0]))
    except bundle2.AbortFromPart as exc:
        # The server signalled an abort mid-bundle; report (up to) the
        # first ten requested nodes in the error.
        repo.ui.debug('remote: abort: %s\n' % exc)
        hexnodes = list(hex(mfnode) for mfnode in mfnodes)
        nodestr = '\n'.join(hexnodes[:10])
        if len(hexnodes) > 10:
            nodestr += '\n...'
        raise error.Abort(_('unable to download the following trees from '
                            'the server:\n%s') % nodestr, hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)
    finally:
        # Manually destruct the peer, so we can collect any error output
        remote._cleanup()

        output = remote.ui.popbuffer()
        if output:
            repo.ui.debug(output)
|
2017-06-19 18:02:17 +03:00
|
|
|
def _registerbundle2parts():
    """Register the bundle2 part handlers and part generators used to move
    tree packs between client and server.
    """
    @bundle2.parthandler(TREEGROUP_PARTTYPE2,
                         ('version', 'cache', 'category'))
    def treeparthandler2(op, part):
        """Handles received tree packs. If `cache` is True, the received
        data goes in to the shared pack cache. Otherwise, the received data
        goes into the permanent repo local data.
        """
        repo = op.repo

        version = part.params.get('version')
        if version != '1':
            raise error.Abort(
                _("unknown treegroup bundle2 part version: %s") % version)

        category = part.params.get('category', '')
        if category != PACK_CATEGORY:
            raise error.Abort(_("invalid treegroup pack category: %s") %
                              category)

        # Treemanifest servers don't accept trees directly. They must go
        # through pushrebase, which uses its own part type and handler.
        if repo.svfs.treemanifestserver:
            return

        if part.params.get('cache', 'False') == 'True':
            packpath = shallowutil.getcachepackpath(repo, PACK_CATEGORY)
        else:
            packpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
                                                    PACK_CATEGORY)
        receivedhistory, receiveddata = wirepack.receivepack(repo.ui, part,
                                                             packpath)

        # Record what arrived so callers (e.g. _prefetchtrees) can verify
        # completeness.
        op.records.add(RECEIVEDNODE_RECORD, receiveddata)

    @bundle2.parthandler(TREEGROUP_PARTTYPE, ('version', 'treecache'))
    def treeparthandler(op, part):
        # Legacy part type: translate its params and delegate to the new
        # handler.
        treecache = part.params.pop('treecache')
        part.params['cache'] = treecache
        part.params['category'] = PACK_CATEGORY
        return treeparthandler2(op, part)

    @exchange.b2partsgenerator(TREEGROUP_PARTTYPE)
    def gettreepackpart(pushop, bundler):
        # We no longer generate old tree groups
        pass

    @exchange.b2partsgenerator(TREEGROUP_PARTTYPE2)
    def gettreepackpart2(pushop, bundler):
        """add parts containing trees being pushed"""
        if ('treepack' in pushop.stepsdone or
            not treeenabled(pushop.repo.ui)):
            return
        pushop.stepsdone.add('treepack')

        # Only add trees if we have them
        if _cansendtrees(pushop.repo, pushop.outgoing.missing):
            part = createtreepackpart(pushop.repo, pushop.outgoing,
                                      TREEGROUP_PARTTYPE2)
            bundler.addpart(part)

    @exchange.getbundle2partsgenerator(TREEGROUP_PARTTYPE2)
    def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                                  b2caps=None, heads=None, common=None,
                                  **kwargs):
        """add parts containing trees being pulled"""
        # Only send trees when the client advertises treemanifest support,
        # trees are enabled locally, this repo is not a tree server, and a
        # changegroup is actually being requested.
        if ('True' not in b2caps.get('treemanifest', []) or
            not treeenabled(repo.ui) or
            repo.svfs.treemanifestserver or
            not kwargs.get('cg', True)):
            return

        outgoing = exchange._computeoutgoing(repo, heads, common)
        if _cansendtrees(repo, outgoing.missing):
            part = createtreepackpart(repo, outgoing, TREEGROUP_PARTTYPE2)
            bundler.addpart(part)
|
|
|
def _cansendtrees(repo, nodes):
|
2017-11-03 19:24:39 +03:00
|
|
|
sendtrees = repo.ui.configbool('treemanifest', 'sendtrees')
|
|
|
|
if not sendtrees:
|
|
|
|
return False
|
|
|
|
|
2017-11-02 03:10:05 +03:00
|
|
|
mfnodes = []
|
|
|
|
for node in nodes:
|
|
|
|
mfnodes.append(('', repo[node].manifestnode()))
|
|
|
|
|
|
|
|
return not repo.manifestlog.datastore.getmissing(mfnodes)
|
2017-08-29 23:02:22 +03:00
|
|
|
|
2017-07-11 01:53:12 +03:00
|
|
|
def createtreepackpart(repo, outgoing, partname):
    """Create a bundle2 part of type ``partname`` containing a tree pack
    for the root trees of ``outgoing.missing``.
    """
    rootdir = ''
    mfnodes = []
    basemfnodes = []
    directories = []

    for node in outgoing.missing:
        mfnode = repo[node].manifestnode()
        mfnodes.append(mfnode)
    # Advertise the parents of the outgoing roots as delta bases; the
    # receiver is expected to have them already.
    basectxs = repo.set('parents(roots(%ln))', outgoing.missing)
    for basectx in basectxs:
        basemfnodes.append(basectx.manifestnode())

    packstream = generatepackstream(repo, rootdir, mfnodes,
                                    basemfnodes, directories)
    part = bundle2.bundlepart(
        partname,
        data = packstream)
    part.addparam('version', '1')
    # 'cache'=False: data goes into the repo-local store, not the shared
    # cache (see treeparthandler2).
    part.addparam('cache', 'False')
    part.addparam('category', PACK_CATEGORY)

    return part
|
2017-04-27 20:44:33 +03:00
|
|
|
def pull(orig, ui, repo, *pats, **opts):
    """Wrapper around the pull command that prefetches tree manifests for
    recently pulled and/or configured commits afterwards.

    Controlled by ``treemanifest.pullprefetchcount`` (latest N commits) and
    ``treemanifest.pullprefetchrevs`` (an explicit revset).
    """
    result = orig(ui, repo, *pats, **opts)
    if not treeenabled(repo.ui):
        return result

    repo = repo.unfiltered()

    ctxs = []
    mfstore = repo.manifestlog.datastore

    # prefetch if it's configured
    prefetchcount = ui.configint('treemanifest', 'pullprefetchcount', None)
    if prefetchcount:
        # Calculate which recent manifests we are missing
        firstrev = max(0, repo['tip'].rev() - prefetchcount + 1)
        ctxs.extend(repo.set('%s: & public()', firstrev))

    # Prefetch specific commits
    prefetchrevs = ui.config('treemanifest', 'pullprefetchrevs', None)
    if prefetchrevs:
        ctxs.extend(repo.set(prefetchrevs))

    mfnodes = None
    if ctxs:
        # Only fetch trees we don't already have.
        missingnodes = mfstore.getmissing(('', c.manifestnode())
                                          for c in ctxs)
        mfnodes = list(n for k, n in missingnodes)

    if mfnodes:
        ui.status(_("prefetching trees\n"))
        # Calculate which parents we already have
        ctxnodes = list(ctx.node() for ctx in ctxs)
        parentctxs = repo.set('parents(%ln) - %ln',
                              ctxnodes, ctxnodes)
        basemfnodes = set(ctx.manifestnode() for ctx in parentctxs)
        missingbases = list(mfstore.getmissing(('', n)
                                               for n in basemfnodes))
        basemfnodes.difference_update(n for k, n in missingbases)

        # If we have no base nodes, scan the change log looking for a
        # semi-recent manifest node to treat as the base.
        if not basemfnodes:
            basemfnodes = _findrecenttree(repo, len(repo.changelog) - 1)

        _prefetchtrees(repo, '', mfnodes, basemfnodes, [])

    return result
2017-06-14 03:31:47 +03:00
|
|
|
def _findrecenttree(repo, startrev):
    """Search outward from ``startrev`` (up to BASENODESEARCHMAX revs in
    each direction) for a public commit whose root tree manifest is already
    in the local datastore.

    Returns a single-element list containing that manifest node, or an
    empty list if none is found.
    """
    cl = repo.changelog
    mfstore = repo.manifestlog.datastore
    phasecache = repo._phasecache
    maxrev = min(len(cl) - 1, startrev + BASENODESEARCHMAX)
    minrev = max(0, startrev - BASENODESEARCHMAX)

    # Look up and down from the given rev
    phase = phasecache.phase
    walksize = max(maxrev - startrev, startrev - minrev) + 1
    for offset in xrange(0, walksize):
        revs = []
        uprev = startrev + offset
        downrev = startrev - offset
        if uprev <= maxrev:
            revs.append(uprev)
        # Bug fix: at offset 0, uprev == downrev == startrev; skip the
        # duplicate so the start rev isn't checked twice.
        if downrev >= minrev and downrev != uprev:
            revs.append(downrev)
        for rev in revs:
            # Only public revisions are considered safe bases.
            if phase(repo, rev) != phases.public:
                continue
            mfnode = cl.changelogrevision(rev).manifest
            if not mfstore.getmissing([('', mfnode)]):
                return [mfnode]

    return []
2017-04-27 20:44:33 +03:00
|
|
|
def clientgettreepack(remote, rootdir, mfnodes, basemfnodes, directories):
    """Issue the ``gettreepack`` wire command against ``remote`` and return
    a bundle2 unbundler over the compressed response stream.
    """
    opts = {
        'rootdir': rootdir,
        'mfnodes': wireproto.encodelist(mfnodes),
        'basemfnodes': wireproto.encodelist(basemfnodes),
        'directories': ','.join(wireproto.escapearg(d)
                                for d in directories),
    }

    f = remote._callcompressable("gettreepack", **opts)
    return bundle2.getunbundler(remote.ui, f)
2017-04-20 07:14:04 +03:00
|
|
|
class treememoizer(object):
    """Keeps references to trees until they've been consumed the expected
    number of times (declared via ``adduse``), then drops them so memory
    doesn't accumulate.
    """
    def __init__(self, store):
        self._store = store
        # node -> number of remaining declared uses
        self._counts = {}
        # node -> cached tree object
        self._cache = {}

    def adduse(self, node):
        """Declare one (additional) future use of ``node``'s tree."""
        self._counts[node] = self._counts.get(node, 0) + 1

    def get(self, node):
        """Return the tree for ``node``, building it on first access and
        evicting it from the cache once the declared uses are exhausted."""
        tree = self._cache.get(node)
        if tree is None:
            tree = cstore.treemanifest(self._store, node)
            self._cache[node] = tree

        remaining = self._counts.get(node, 1) - 1
        self._counts[node] = max(remaining, 0)
        if remaining <= 0:
            del self._cache[node]

        return tree
2017-04-20 07:14:03 +03:00
|
|
|
def servergettreepack(repo, proto, args):
    """A server api for requesting a pack of tree information.

    Decodes the wire arguments, streams back a bundle2 containing a tree
    pack part, and forwards any Abort raised during generation to the
    client as an 'error:abort' part instead of failing the connection.
    """
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, sshserver.sshserver):
        raise error.Abort(_('cannot fetch remote files over non-ssh '
                            'protocol'))

    rootdir = args['rootdir']

    # Sort to produce a consistent output
    mfnodes = sorted(wireproto.decodelist(args['mfnodes']))
    basemfnodes = sorted(wireproto.decodelist(args['basemfnodes']))
    directories = sorted(list(wireproto.unescapearg(d) for d
                              in args['directories'].split(',')
                              if d != ''))

    try:
        bundler = bundle2.bundle20(repo.ui)
        packstream = generatepackstream(repo, rootdir, mfnodes,
                                        basemfnodes, directories)
        part = bundler.newpart(TREEGROUP_PARTTYPE2, data=packstream)
        part.addparam('version', '1')
        # 'cache'=True: the client puts this data in its shared pack cache.
        part.addparam('cache', 'True')
        part.addparam('category', PACK_CATEGORY)

    except error.Abort as exc:
        # cleanly forward Abort error to the client
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', str(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort',
                                           manargs, advargs))
    return wireproto.streamres(gen=bundler.getchunks(),
                               v1compressible=True)
2017-04-27 20:44:33 +03:00
|
|
|
def generatepackstream(repo, rootdir, mfnodes, basemfnodes, directories):
    """Generate a wire pack stream for the requested tree manifests.

    All size/len/counts are network order unsigned ints.

    Request args:

    `rootdir` - The directory of the tree to send (including its children)
    `mfnodes` - The manifest nodes of the specified root directory to send.
    `basemfnodes` - The manifest nodes of the specified root directory that are
    already on the client.
    `directories` - The fullpath (not relative path) of directories underneath
    the rootdir that should be sent.

    Response format:

    [<fileresponse>,...]<10 null bytes>
    fileresponse = <filename len: 2 byte><filename><history><deltas>
    history = <count: 4 byte>[<history entry>,...]
    historyentry = <node: 20 byte><p1: 20 byte><p2: 20 byte>
                   <linknode: 20 byte><copyfrom len: 2 byte><copyfrom>
    deltas = <count: 4 byte>[<delta entry>,...]
    deltaentry = <node: 20 byte><deltabase: 20 byte>
                 <delta len: 8 byte><delta>
    """
    if directories:
        raise RuntimeError("directories arg is not supported yet ('%s')" %
                           ', '.join(directories))

    historystore = repo.manifestlog.historystore
    datastore = repo.manifestlog.datastore

    # If asking for a sub-tree, start from the top level tree since the native
    # treemanifest currently doesn't support sub-tree fetches.
    if rootdir != '':
        mfrevlog = repo.manifestlog.treemanifestlog._revlog.dirlog(rootdir)
        cl = repo.changelog
        topnodes = []
        for node in mfnodes:
            # Map the subdirectory manifest node back to the root manifest of
            # the commit that introduced it.
            clrev = mfrevlog.linkrev(mfrevlog.rev(node))
            topnode = cl.changelogrevision(clrev).manifest
            topnodes.append(topnode)
        mfnodes = topnodes
        rootdir = ''

        # Since the native treemanifest implementation currently doesn't support
        # sub-tree traversals, we can't do base node comparisons correctly.
        basemfnodes = []

    # Only use the first two base trees, since the current tree
    # implementation cannot handle more yet.
    basemfnodes = basemfnodes[:2]

    mfnodeset = set(mfnodes)
    basemfnodeset = set(basemfnodes)

    # Count how many times we will need each comparison node, so we can keep
    # trees in memory the appropriate amount of time.
    trees = treememoizer(datastore)
    prevmfnode = None
    for node in mfnodes:
        p1node, p2node = historystore.getnodeinfo(rootdir, node)[:2]
        if p1node != nullid and (p1node in mfnodeset or
                                 p1node in basemfnodeset):
            trees.adduse(p1node)
        elif basemfnodes:
            for basenode in basemfnodes:
                trees.adduse(basenode)
        elif prevmfnode:
            # If there are no base nodes and the parent isn't one of the
            # requested mfnodes, then pick another mfnode as a base.
            trees.adduse(prevmfnode)

        prevmfnode = node

        if p2node != nullid and (p2node in mfnodeset or
                                 p2node in basemfnodeset):
            trees.adduse(p2node)

    prevmfnode = None
    for node in mfnodes:
        treemf = trees.get(node)

        p1node, p2node = historystore.getnodeinfo(rootdir, node)[:2]
        # If p1 is being sent or is already on the client, chances are
        # that's the best thing for us to delta against.
        if p1node != nullid and (p1node in mfnodeset or
                                 p1node in basemfnodeset):
            basetrees = [trees.get(p1node)]
        elif basemfnodes:
            basetrees = [trees.get(basenode) for basenode in basemfnodes]
        elif prevmfnode:
            # If there are no base nodes and the parent isn't one of the
            # requested mfnodes, then pick another mfnode as a base.
            basetrees = [trees.get(prevmfnode)]
        else:
            basetrees = []
        prevmfnode = node

        if p2node != nullid and (p2node in mfnodeset or
                                 p2node in basemfnodeset):
            basetrees.append(trees.get(p2node))

        subtrees = treemf.walksubtrees(comparetrees=basetrees)
        for subname, subnode, subtext, _, _, _ in subtrees:
            # Append data (nullid deltabase means the entry is a fulltext).
            # Note: a duplicated copy of this data/history preamble used to
            # live here; it was removed as redundant.
            data = [(subnode, nullid, subtext)]

            # Append history
            # Only append first history for now, since the entire manifest
            # history is very long.
            histdata = historystore.getnodeinfo(subname, subnode)
            p1node, p2node, linknode, copyfrom = histdata
            history = [(subnode, p1node, p2node, linknode, copyfrom)]

            for chunk in wirepack.sendpackpart(subname, history, data):
                yield chunk

    yield wirepack.closepart()
|
2017-04-20 07:14:03 +03:00
|
|
|
|
|
|
|
class remotetreedatastore(object):
    """A content store that fetches tree data from the server on demand.

    Downloaded trees land in a shared local store (attached via
    ``setshared``); this object itself never holds data.
    """

    def __init__(self, repo):
        self._repo = repo
        # Shared local store that remote fetches are written into. Attached
        # later via setshared().
        self._shared = None

    def setshared(self, shared):
        """Attach the shared local store used to cache downloaded trees."""
        self._shared = shared

    def get(self, name, node):
        # Only look at the server if not root or is public
        basemfnodes = []
        if name == '':
            manifestlog = self._repo.manifestlog
            if util.safehasattr(manifestlog, '_revlog'):
                revlog = manifestlog._revlog
                linkrev = revlog.linkrev(revlog.rev(node))
                if self._repo[linkrev].phase() != phases.public:
                    raise KeyError((name, node))
            else:
                # TODO: improve linkrev guessing when the revlog isn't available
                linkrev = self._repo['tip'].rev()

            # Find a recent tree that we already have
            basemfnodes = _findrecenttree(self._repo, linkrev)

        _prefetchtrees(self._repo, name, [node], basemfnodes, [])
        self._shared.markforrefresh()
        return self._shared.get(name, node)

    def getdeltachain(self, name, node):
        # Remote stores only serve fulltexts, so fabricate a one-entry delta
        # chain. The nullid in the deltabasenode slot marks the revision as
        # a fulltext.
        fulltext = self.get(name, node)
        return [(name, node, None, nullid, fulltext)]

    def add(self, name, node, data):
        raise RuntimeError("cannot add to a remote store")

    def getmissing(self, keys):
        # Everything is "missing" locally; the remote store holds no data.
        return keys

    def markledger(self, ledger, options=None):
        # Nothing to contribute to repack ledgers.
        pass

    def getmetrics(self):
        return {}
|
|
|
|
|
2017-11-09 21:32:15 +03:00
|
|
|
def serverrepack(repo, incremental=False, options=None):
    """Repack the server's tree manifest data into pack files.

    With ``incremental`` set, only a computed subset of existing packs plus
    revlog entries newer than the latest already-packed tree are repacked;
    otherwise everything is.
    """
    packpath = repo.vfs.join('cache/packs/%s' % PACK_CATEGORY)

    revlogstore = manifestrevlogstore(repo)

    try:
        packdirentries = osutil.listdir(packpath, stat=True)
    except OSError:
        # The pack directory may not exist yet.
        packdirentries = []

    # Assemble the content (data) store union.
    fulldatapackstore = datapackstore(repo.ui, packpath)
    if incremental:
        contentstores = _topacks(
            packpath,
            _computeincrementaldatapack(repo.ui, packdirentries),
            datapack)
    else:
        contentstores = [fulldatapackstore]
    contentstores.append(revlogstore)
    datastore = unioncontentstore(*contentstores)

    # Assemble the history (metadata) store union.
    if incremental:
        metadatastores = _topacks(
            packpath,
            _computeincrementalhistorypack(repo.ui, packdirentries),
            historypack)
    else:
        metadatastores = [historypackstore(repo.ui, packpath)]
    metadatastores.append(revlogstore)
    histstore = unionmetadatastore(*metadatastores)

    startrev = repo.ui.configint('treemanifest', 'repackstartrev', 0)
    endrev = repo.ui.configint('treemanifest', 'repackendrev',
                               len(repo.changelog) - 1)
    if startrev == 0 and incremental:
        # Walk the tree manifest revlog backwards looking for the newest
        # node that is already packed; repack only what comes after it.
        lastpackedlinkrev = 0
        mfrevlog = repo.manifestlog.treemanifestlog._revlog
        for rev in xrange(len(mfrevlog) - 1, 0, -1):
            mfnode = mfrevlog.node(rev)
            if not fulldatapackstore.getmissing([('', mfnode)]):
                lastpackedlinkrev = mfrevlog.linkrev(rev)
                break
        startrev = lastpackedlinkrev + 1

    revlogstore.setrepacklinkrevrange(startrev, endrev)
    _runrepack(repo, datastore, histstore, packpath, PACK_CATEGORY,
               options=options)
|
2017-04-27 20:44:34 +03:00
|
|
|
|
2017-05-10 23:48:34 +03:00
|
|
|
def striptrees(orig, repo, tr, striprev, files):
    """Wrapper that also strips tree manifest revlogs during a strip.

    When trees are disabled this defers entirely to the original
    implementation.
    """
    if not treeenabled(repo.ui):
        return orig(repo, tr, striprev, files)

    if repo.ui.configbool('treemanifest', 'server'):
        rootrevlog = repo.manifestlog.treemanifestlog._revlog
        for dirname in util.dirs(files):
            # dirlog() returns an empty revlog for directories that have no
            # revlog on disk, so stripping those is a no-op.
            rootrevlog.dirlog(dirname).strip(striprev, tr)

        rootrevlog.strip(striprev, tr)
|
2017-08-29 23:02:22 +03:00
|
|
|
|
|
|
|
def _addpartsfromopts(orig, ui, repo, bundler, source, outgoing, opts):
    """Wrapper that appends a tree pack part to outgoing bundles.

    The tree part is only attached when the local repo actually has trees
    covering the outgoing commits.
    """
    orig(ui, repo, bundler, source, outgoing, opts)

    if not _cansendtrees(repo, outgoing.missing):
        return
    bundler.addpart(createtreepackpart(repo, outgoing, TREEGROUP_PARTTYPE2))
|
2017-08-29 23:02:22 +03:00
|
|
|
|
|
|
|
def _handlebundle2part(orig, self, part):
    """Wrapper that intercepts tree pack parts while applying a bundle.

    Tree parts are parsed into an in-memory wirepack store that is layered
    on top of the bundle repo's manifest stores; every other part type is
    handed to the original handler.
    """
    if part.type != TREEGROUP_PARTTYPE2:
        orig(self, part)
        return

    tempstore = wirepack.wirepackstore(part.read())

    # Layer the in-memory pack over the existing stores so lookups consult
    # the bundle's trees first.
    manifestlog = self.manifestlog
    manifestlog.datastore = unioncontentstore(
        tempstore,
        manifestlog.datastore)
    manifestlog.historystore = unionmetadatastore(
        tempstore,
        manifestlog.historystore)
|