# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from .i18n import _
from . import (
    branchmap,
    error,
    phases,
    store,
    util,
)

def canperformstreamclone(pullop, bailifbundle2supported=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bailifbundle2supported`` will cause the function to return False if
    bundle2 stream clones are supported. It should only be called by the
    legacy stream clone code path.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if 'v1' in pullop.remotebundle2caps.get('stream', []):
            bundle2supported = True
        # else
            # Server doesn't support bundle2 stream clone or doesn't support
            # the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bailifbundle2supported and bundle2supported:
        return False, None

    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    #elif not bailifbundle2supported and not bundle2supported:
    #    return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
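    #
    # An illustrative exchange (example value, not captured from a real
    # server): a remote advertising "streamreqs=generaldelta,revlogv1" is
    # parsed below into the requirement set {'generaldelta', 'revlogv1'}.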
    requirements = set()
    if remote.capable('stream'):
        requirements.add('revlogv1')
    else:
        streamreqs = remote.capable('streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but server has them '
                'disabled\n'))
            return False, None

        streamreqs = set(streamreqs.split(','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but client is missing '
                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
            pullop.repo.ui.warn(
                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                  'for more information)\n'))
            return False, None
        requirements = streamreqs

    return True, requirements

def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    supported, requirements = canperformstreamclone(pullop)
    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        rbranchmap = remote.branchmap()

    repo.ui.status(_('streaming all changes\n'))

    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
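        # For example (illustrative sets only): a local repo created with
        # {'dotencode', 'fncache', 'revlogv1', 'store'} that streams in
        # {'generaldelta', 'revlogv1'} keeps its non-format requirements
        # and adopts the remote's format-related ones.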
        repo.requirements = requirements | (
                repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if rbranchmap:
            branchmap.replacecache(repo, rbranchmap)

        repo.invalidate()

def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret changesets.
    # So don't allow this by default.
    secret = phases.hassecret(repo)
    if secret:
        return repo.ui.configbool('server', 'uncompressedallowsecret')

    return True
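
# An illustrative hgrc fragment consulted by ``allowservergeneration``
# (a sketch; the values are examples, not the defaults):
#
#   [server]
#   uncompressed = True
#   uncompressedallowsecret = False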

# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    return repo.store.walk()

def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, 'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()

def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with a header line
    indicating file count and byte size.
    """
    filecount, bytecount, it = generatev1(repo)
    yield '%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk
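
# An illustrative start of the wire stream emitted above (values invented):
#
#   1234 567890\n   <- header line: file count and total byte count
#   <stream clone v1 payload as documented in generatev1()>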

def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes form a 16-bit big endian unsigned short declaring the
    length of the requirements string, including a trailing \0. The following
    N bytes are the requirements string, which is ASCII containing a
    comma-delimited list of repo requirements that are needed to support the
    data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        seen = 0
        repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))

        for chunk in it:
            seen += len(chunk)
            repo.ui.progress(_('bundle'), seen, total=bytecount,
                             unit=_('bytes'))
            yield chunk

        repo.ui.progress(_('bundle'), None)

    return requirements, gen()

def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            handled_bytes += len(chunk)
                            repo.ui.progress(_('clone'), handled_bytes,
                                             total=bytecount, unit=_('bytes'))
                            ofp.write(chunk)

            # force @filecache properties to be reloaded from
            # streamclone-ed file at next access
            repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))

def readbundle1header(fp):
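    # Parse the header fields that follow the 4 byte "HGS1" magic (which
    # the caller has already consumed); see ``generatebundlev1()``.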
    compression = fp.read(2)
    if compression != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % compression)

    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
    requireslen = struct.unpack('>H', fp.read(2))[0]
    requires = fp.read(requireslen)

    if not requires.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(requires.rstrip('\0').split(','))

    return filecount, bytecount, requirements

def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev1(repo, fp, filecount, bytecount)

class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """
    def __init__(self, fh):
        self._fh = fh

    def apply(self, repo):
        return applybundlev1(repo, self._fh)
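
# A hedged usage sketch (hypothetical caller, not part of this module): a
# bundle reader that has already read and validated the 4 byte 'HGS1'
# magic would hand the open file object off like so:
#
#   applier = streamcloneapplier(fh)
#   applier.apply(repo)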