2010-07-15 00:25:15 +04:00
|
|
|
# wireproto.py - generic wire protocol support functions
|
|
|
|
#
|
|
|
|
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
2015-08-09 04:53:17 +03:00
|
|
|
from __future__ import absolute_import
|
2010-07-15 00:25:15 +04:00
|
|
|
|
2016-06-10 07:12:33 +03:00
|
|
|
import hashlib
|
2015-08-09 04:53:17 +03:00
|
|
|
import os
|
|
|
|
import tempfile
|
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from .node import (
|
|
|
|
bin,
|
|
|
|
hex,
|
2017-05-11 20:50:05 +03:00
|
|
|
nullid,
|
2015-08-09 04:53:17 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
from . import (
|
|
|
|
bundle2,
|
|
|
|
changegroup as changegroupmod,
|
2017-09-11 04:43:59 +03:00
|
|
|
discovery,
|
2015-08-09 04:53:17 +03:00
|
|
|
encoding,
|
|
|
|
error,
|
|
|
|
exchange,
|
|
|
|
peer,
|
|
|
|
pushkey as pushkeymod,
|
2017-02-13 17:36:38 +03:00
|
|
|
pycompat,
|
2017-08-11 06:58:28 +03:00
|
|
|
repository,
|
2015-10-03 02:05:52 +03:00
|
|
|
streamclone,
|
2015-08-09 04:53:17 +03:00
|
|
|
util,
|
|
|
|
)
|
2014-03-28 22:10:33 +04:00
|
|
|
|
2016-04-07 02:22:12 +03:00
|
|
|
urlerr = util.urlerr
|
|
|
|
urlreq = util.urlreq
|
|
|
|
|
2017-02-10 19:56:59 +03:00
|
|
|
bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
|
|
|
|
bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
|
|
|
|
'IncompatibleClient')
|
|
|
|
bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
|
2015-12-05 02:12:11 +03:00
|
|
|
|
2014-03-28 22:10:33 +04:00
|
|
|
class abstractserverproto(object):
    """Abstract class summarizing the server-side protocol API.

    Exists purely as reference and documentation; concrete protocol
    handlers provide real implementations of these methods.
    """

    def getargs(self, args):
        """Return the values for the arguments named in <args>.

        Returns a list of values, in the same order as <args>."""
        raise NotImplementedError()

    def getfile(self, fp):
        """Write the whole content of a file into a file like object.

        The file is in the form::

            (<chunk-size>\n<chunk>)+0\n

        chunk size is the ascii version of the int.
        """
        raise NotImplementedError()

    def redirect(self):
        """May set up interception for stdout and stderr.

        See also the `restore` method."""
        raise NotImplementedError()

    # If the `redirect` function does install interception, the `restore`
    # function MUST be defined. If interception is not used, this function
    # MUST NOT be defined.
    #
    # left commented here on purpose
    #
    #def restore(self):
    #    """reinstall previous stdout and stderr and return intercepted stdout
    #    """
    #    raise NotImplementedError()
|
|
|
|
|
2016-03-02 02:39:25 +03:00
|
|
|
class remoteiterbatcher(peer.iterbatcher):
    """Batcher that submits accumulated calls as a single wire request.

    Recorded calls (``self.calls``, populated by the inherited batcher
    machinery) are sent in one ``batch`` command by submit(), and their
    decoded results are streamed back from results().
    """

    def __init__(self, remote):
        super(remoteiterbatcher, self).__init__()
        # The wire peer the batched commands will be submitted through.
        self._remote = remote

    def __getattr__(self, name):
        # Validate this method is batchable, since submit() only supports
        # batchable methods.
        fn = getattr(self._remote, name)
        if not getattr(fn, 'batchable', None):
            raise error.ProgrammingError('Attempted to batch a non-batchable '
                                         'call to %r' % name)

        return super(remoteiterbatcher, self).__getattr__(name)

    def submit(self):
        """Break the batch request into many patch calls and pipeline them.

        This is mostly valuable over http where request sizes can be
        limited, but can be used in other places as well.
        """
        # 2-tuple of (command, arguments) that represents what will be
        # sent over the wire.
        requests = []

        # 4-tuple of (command, final future, @batchable generator, remote
        # future).
        results = []

        for command, args, opts, finalfuture in self.calls:
            mtd = getattr(self._remote, command)
            batchable = mtd.batchable(mtd.im_self, *args, **opts)

            # First value emitted by a @batchable generator is the
            # (wire arguments, remote future) pair.
            commandargs, fremote = next(batchable)
            assert fremote
            requests.append((command, commandargs))
            results.append((command, finalfuture, batchable, fremote))

        if requests:
            self._resultiter = self._remote._submitbatch(requests)

        self._results = results

    def results(self):
        """Yield the decoded result of each batched call, in call order."""
        for command, finalfuture, batchable, remotefuture in self._results:
            # Get the raw result, set it in the remote future, feed it
            # back into the @batchable generator so it can be decoded, and
            # set the result on the final future to this value.
            remoteresult = next(self._resultiter)
            remotefuture.set(remoteresult)
            finalfuture.set(next(batchable))

            # Verify our @batchable generators only emit 2 values.
            try:
                next(batchable)
            except StopIteration:
                pass
            else:
                raise error.ProgrammingError('%s @batchable generator emitted '
                                             'unexpected value count' % command)

            yield finalfuture.value
|
2016-03-02 02:39:25 +03:00
|
|
|
|
batching: migrate basic noop batching into peer.peer
"Real" batching only makes sense for wirepeers, but it greatly
simplifies the clients of peer instances if they can be ignorant to
actual batching capabilities of that peer. By moving the
not-really-batched batching code into peer.peer, all peer instances
now work with the batching API, thus simplifying users.
This leaves a couple of name forwards in wirepeer.py. Originally I had
planned to clean those up, but it kind of unclarifies other bits of
code that want to use batching, so I think it makes sense for the
names to stay exposed by wireproto. Specifically, almost nothing is
currently aware of peer (see largefiles.proto for an example), so
making them be aware of the peer module *and* the wireproto module
seems like some abstraction leakage. I *think* the right long-term fix
would actually be to make wireproto an implementation detail that
clients wouldn't need to know about, but I don't really know what that
would entail at the moment.
As far as I'm aware, no clients of batching in third-party extensions
will need updating, which is nice icing.
2015-08-05 21:51:34 +03:00
|
|
|
# Forward a couple of names from peer to make wireproto interactions
|
|
|
|
# slightly more sensible.
|
|
|
|
batchable = peer.batchable
|
|
|
|
future = peer.future
|
2011-06-15 00:51:26 +04:00
|
|
|
|
2010-07-16 02:52:13 +04:00
|
|
|
# list of nodes encoding / decoding
|
|
|
|
|
|
|
|
def decodelist(l, sep=' '):
    """Decode a ``sep``-joined string of hex nodes into binary nodes.

    An empty (or otherwise falsy) input decodes to an empty list.
    """
    if not l:
        return []
    return map(bin, l.split(sep))
|
2010-07-16 02:52:13 +04:00
|
|
|
|
|
|
|
def encodelist(l, sep=' '):
    """Encode a list of binary nodes as a ``sep``-joined string of hex nodes.

    Inverse of decodelist(). A TypeError from hex() (non-node entry)
    propagates to the caller unchanged — the previous ``except TypeError:
    raise`` wrapper was a no-op re-raise and has been removed.
    """
    return sep.join(map(hex, l))
|
2010-07-16 02:52:13 +04:00
|
|
|
|
2011-06-15 00:52:58 +04:00
|
|
|
# batched call argument encoding
|
|
|
|
|
|
|
|
def escapearg(plain):
    """Escape a batch-command argument or result string.

    The batch wire format reserves ':', ',', ';' and '='; each is
    rewritten to a two-character ':'-prefixed sequence. ':' must be
    handled first so the escape marker itself is never re-escaped.
    """
    for char, escape in ((':', ':c'), (',', ':o'), (';', ':s'), ('=', ':e')):
        plain = plain.replace(char, escape)
    return plain
|
2011-06-15 00:52:58 +04:00
|
|
|
|
|
|
|
def unescapearg(escaped):
    """Reverse escapearg(): restore ':', ',', ';' and '=' in a string.

    ':c' must be handled last so characters produced by the other
    replacements are never re-interpreted as escape sequences.
    """
    for escape, char in ((':e', '='), (':s', ';'), (':o', ','), (':c', ':')):
        escaped = escaped.replace(escape, char)
    return escaped
|
2011-06-15 00:52:58 +04:00
|
|
|
|
2016-08-06 23:46:28 +03:00
|
|
|
def encodebatchcmds(req):
    """Return a ``cmds`` argument value for the ``batch`` command.

    ``req`` is an iterable of (op, argsdict) pairs; each becomes an
    "<op> <k1=v1,k2=v2,...>" entry (values escaped), joined with ';'.
    """
    encoded = []
    for op, argsdict in req:
        # Old servers didn't properly unescape argument names. So prevent
        # the sending of argument names that may not be decoded properly by
        # servers.
        for argname in argsdict:
            assert escapearg(argname) == argname

        pairs = ['%s=%s' % (escapearg(k), escapearg(v))
                 for k, v in argsdict.iteritems()]
        encoded.append('%s %s' % (op, ','.join(pairs)))

    return ';'.join(encoded)
|
|
|
|
|
2014-05-22 20:53:52 +04:00
|
|
|
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is extensions responsibility to ensure
# such options are properly processed in exchange.getbundle.
#
# supported types are:
#
# :nodes: list of binary nodes
# :csv: list of comma-separated values
# :scsv: list of comma-separated values returned as a set
# :plain: string with no transformation needed.
gboptsmap = {'heads': 'nodes',
             'common': 'nodes',
             'obsmarkers': 'boolean',
             'bundlecaps': 'scsv',
             'listkeys': 'csv',
             'cg': 'boolean',
             'cbattempted': 'boolean'}
|
2014-05-22 20:53:52 +04:00
|
|
|
|
2010-07-15 01:34:57 +04:00
|
|
|
# client side
|
|
|
|
|
2017-08-11 06:58:28 +03:00
|
|
|
class wirepeer(repository.legacypeer):
    """Client-side interface for communicating with a peer repository.

    Methods commonly call wire protocol commands of the same name.

    See also httppeer.py and sshpeer.py for protocol-specific
    implementations of this interface.
    """
    # Begin of basewirepeer interface.

    def iterbatch(self):
        # Batcher that pipelines many commands into one "batch" request.
        return remoteiterbatcher(self)

    @batchable
    def lookup(self, key):
        # Resolve `key` to a binary node on the remote.
        self.requirecap('lookup', _('look up remote revision'))
        f = future()
        yield {'key': encoding.fromlocal(key)}, f
        d = f.value
        # Reply format: "<success> <data>\n" where success is "0" or "1".
        success, data = d[:-1].split(" ", 1)
        if int(success):
            yield bin(data)
        else:
            self._abort(error.RepoError(data))

    @batchable
    def heads(self):
        # Fetch the remote heads as a list of binary nodes.
        f = future()
        yield {}, f
        d = f.value
        try:
            yield decodelist(d[:-1])
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def known(self, nodes):
        # Ask which of `nodes` the remote knows; reply is one digit per node.
        f = future()
        yield {'nodes': encodelist(nodes)}, f
        d = f.value
        try:
            yield [bool(int(b)) for b in d]
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def branchmap(self):
        # Fetch the remote branch map as {branchname: [binary heads]}.
        f = future()
        yield {}, f
        d = f.value
        try:
            branchmap = {}
            # One line per branch: "<url-quoted name> <heads...>".
            for branchpart in d.splitlines():
                branchname, branchheads = branchpart.split(' ', 1)
                branchname = encoding.tolocal(urlreq.unquote(branchname))
                branchheads = decodelist(branchheads)
                branchmap[branchname] = branchheads
            yield branchmap
        except TypeError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def listkeys(self, namespace):
        # List pushkey entries in `namespace`; {} when the remote lacks
        # the pushkey capability.
        if not self.capable('pushkey'):
            yield {}, None
        f = future()
        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
        yield {'namespace': encoding.fromlocal(namespace)}, f
        d = f.value
        self.ui.debug('received listkey for "%s": %i bytes\n'
                      % (namespace, len(d)))
        yield pushkeymod.decodekeys(d)

    @batchable
    def pushkey(self, namespace, key, old, new):
        # Update a pushkey value; yields True on success, False otherwise.
        if not self.capable('pushkey'):
            yield False, None
        f = future()
        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
        yield {'namespace': encoding.fromlocal(namespace),
               'key': encoding.fromlocal(key),
               'old': encoding.fromlocal(old),
               'new': encoding.fromlocal(new)}, f
        d = f.value
        # Reply is "<result>\n<server output>"; relay the output to the user.
        d, output = d.split('\n', 1)
        try:
            d = bool(int(d))
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), d)
        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        yield d

    def stream_out(self):
        # Raw stream-clone data as a file-like object.
        return self._callstream('stream_out')

    def getbundle(self, source, **kwargs):
        """Request a bundle from the remote.

        Keyword arguments are encoded per their type declared in the
        module-level ``gboptsmap``. Returns a bundle2 unbundler when a
        'HG2*' cap is present in ``bundlecaps``, otherwise a changegroup
        v1 unpacker.
        """
        self.requirecap('getbundle', _('look up remote changes'))
        opts = {}
        bundlecaps = kwargs.get('bundlecaps')
        if bundlecaps is not None:
            kwargs['bundlecaps'] = sorted(bundlecaps)
        else:
            bundlecaps = () # kwargs could have it set to None
        for key, value in kwargs.iteritems():
            if value is None:
                continue
            keytype = gboptsmap.get(key)
            if keytype is None:
                assert False, 'unexpected'
            elif keytype == 'nodes':
                value = encodelist(value)
            elif keytype in ('csv', 'scsv'):
                value = ','.join(value)
            elif keytype == 'boolean':
                value = '%i' % bool(value)
            elif keytype != 'plain':
                raise KeyError('unknown getbundle option type %s'
                               % keytype)
            opts[key] = value
        f = self._callcompressable("getbundle", **opts)
        if any((cap.startswith('HG2') for cap in bundlecaps)):
            return bundle2.getunbundler(self.ui, f)
        else:
            return changegroupmod.cg1unpacker(f, 'UN')

    def unbundle(self, cg, heads, url):
        '''Send cg (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server as a bundle.

        When pushing a bundle10 stream, return an integer indicating the
        result of the push (see changegroup.apply()).

        When pushing a bundle20 stream, return a bundle20 stream.

        `url` is the url the client thinks it's pushing to, which is
        visible to hooks.
        '''

        if heads != ['force'] and self.capable('unbundlehash'):
            # Send a digest of the heads instead of the full list.
            heads = encodelist(['hashed',
                                hashlib.sha1(''.join(sorted(heads))).digest()])
        else:
            heads = encodelist(heads)

        if util.safehasattr(cg, 'deltaheader'):
            # this is a bundle10, do the old style call sequence
            ret, output = self._callpush("unbundle", cg, heads=heads)
            if ret == "":
                raise error.ResponseError(
                    _('push failed:'), output)
            try:
                ret = int(ret)
            except ValueError:
                raise error.ResponseError(
                    _('push failed (unexpected response):'), ret)

            for l in output.splitlines(True):
                self.ui.status(_('remote: '), l)
        else:
            # bundle2 push. Send a stream, fetch a stream.
            stream = self._calltwowaystream('unbundle', cg, heads=heads)
            ret = bundle2.getunbundler(self.ui, stream)
        return ret

    # End of basewirepeer interface.

    # Begin of baselegacywirepeer interface.

    def branches(self, nodes):
        # Legacy command: reply has one line of encoded nodes per branch.
        n = encodelist(nodes)
        d = self._call("branches", nodes=n)
        try:
            br = [tuple(decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        # Legacy command, issued in chunks to keep request sizes bounded.
        batch = 8 # avoid giant requests
        r = []
        for i in xrange(0, len(pairs), batch):
            n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
            d = self._call("between", pairs=n)
            try:
                r.extend(l and decodelist(l) or [] for l in d.splitlines())
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), d))
        return r

    def changegroup(self, nodes, kind):
        # Legacy changegroup fetch; reply is a changegroup v1 stream.
        n = encodelist(nodes)
        f = self._callcompressable("changegroup", roots=n)
        return changegroupmod.cg1unpacker(f, 'UN')

    def changegroupsubset(self, bases, heads, kind):
        # Legacy partial changegroup fetch between `bases` and `heads`.
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = encodelist(bases)
        heads = encodelist(heads)
        f = self._callcompressable("changegroupsubset",
                                   bases=bases, heads=heads)
        return changegroupmod.cg1unpacker(f, 'UN')

    # End of baselegacywirepeer interface.

    def _submitbatch(self, req):
        """run batch request <req> on the server

        Returns an iterator of the raw responses from the server.
        """
        rsp = self._callstream("batch", cmds=encodebatchcmds(req))
        # Responses are ';'-separated; read in fixed-size chunks and
        # yield each complete (unescaped) response as soon as available.
        chunk = rsp.read(1024)
        work = [chunk]
        while chunk:
            while ';' not in chunk and chunk:
                chunk = rsp.read(1024)
                work.append(chunk)
            merged = ''.join(work)
            while ';' in merged:
                one, merged = merged.split(';', 1)
                yield unescapearg(one)
            chunk = rsp.read(1024)
            work = [merged, chunk]
        yield unescapearg(''.join(work))

    def _submitone(self, op, args):
        # Issue a single, non-batched command.
        return self._call(op, **args)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts['three'] = three
        if four is not None:
            opts['four'] = four
        return self._call('debugwireargs', one=one, two=two, **opts)

    def _call(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a simple string.

        returns the server reply as a string."""
        raise NotImplementedError()

    def _callstream(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream. Note that if the
        command doesn't return a stream, _callstream behaves
        differently for ssh and http peers.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callcompressable(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        The stream may have been compressed in some implementations. This
        function takes care of the decompression. This is the only difference
        with _callstream.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callpush(self, cmd, fp, **args):
        """execute a <cmd> on server

        The command is expected to be related to a push. Push has a special
        return method.

        returns the server reply as a (ret, output) tuple. ret is either
        empty (error) or a stringified int.
        """
        raise NotImplementedError()

    def _calltwowaystream(self, cmd, fp, **args):
        """execute <cmd> on server

        The command will send a stream to the server and get a stream in reply.
        """
        raise NotImplementedError()

    def _abort(self, exception):
        """clearly abort the wire protocol connection and raise the exception
        """
        raise NotImplementedError()
|
|
|
|
|
2010-07-15 01:34:57 +04:00
|
|
|
# server side
|
|
|
|
|
2014-03-28 22:37:42 +04:00
|
|
|
# wire protocol command can either return a string or one of these classes.
|
2010-07-20 22:53:33 +04:00
|
|
|
class streamres(object):
    """wireproto reply: binary stream

    Indicates a successful call whose result is a stream, supplied either
    as a generator (``gen``) or as an object exposing ``read(size)``
    (``reader``).

    ``v1compressible`` flags whether this data can be compressed for
    "version 1" clients (technically: HTTP peers using the
    application/mercurial-0.1 media type). This flag should NOT be used on
    new commands because new clients should support a more modern
    compression mechanism.
    """
    def __init__(self, gen=None, reader=None, v1compressible=False):
        self.v1compressible = v1compressible
        self.reader = reader
        self.gen = gen
|
2010-07-20 22:53:33 +04:00
|
|
|
|
|
|
|
class pushres(object):
    """wireproto reply: success with simple integer return

    Wraps the integer result of a successful push call in ``self.res``.
    """
    def __init__(self, res):
        self.res = res
|
|
|
|
|
2010-10-11 21:45:36 +04:00
|
|
|
class pusherr(object):
    """wireproto reply: failure

    Carries the error message of a failed call in ``self.res``.
    """
    def __init__(self, res):
        self.res = res
|
|
|
|
|
2011-08-02 23:21:10 +04:00
|
|
|
class ooberror(object):
    """wireproto reply: failure of a batch of operation

    Signals that something failed during a batch call; the error text is
    kept in ``self.message``.
    """
    def __init__(self, message):
        self.message = message
|
|
|
|
|
2016-07-15 23:41:34 +03:00
|
|
|
def getdispatchrepo(repo, proto, command):
    """Return the repository view used for processing wire protocol commands.

    Exists as a dedicated hook so extensions can monkeypatch it to make
    commands operate on different repo views under specialized
    circumstances.
    """
    return repo.filtered('served')
|
|
|
|
|
2010-07-15 00:25:15 +04:00
|
|
|
def dispatch(repo, proto, command):
    """Run wire protocol ``command``, reading its arguments from ``proto``."""
    dispatchrepo = getdispatchrepo(repo, proto, command)
    handler, argspec = commands[command]
    return handler(dispatchrepo, proto, *proto.getargs(argspec))
|
2010-07-15 00:25:15 +04:00
|
|
|
|
2011-03-22 09:38:32 +03:00
|
|
|
def options(cmd, keys, others):
    """Pop the recognized ``keys`` out of ``others`` into a new dict.

    Any argument left in ``others`` afterwards is unexpected and triggers
    a warning on stderr mentioning ``cmd``.
    """
    opts = {}
    for key in keys:
        if key in others:
            opts[key] = others.pop(key)
    if others:
        util.stderr.write("warning: %s ignored unexpected arguments %s\n"
                          % (cmd, ",".join(others)))
    return opts
|
|
|
|
|
2015-12-20 22:56:24 +03:00
|
|
|
def bundle1allowed(repo, action):
    """Whether a bundle1 operation is allowed from the server.

    Priority is:

    1. server.bundle1gd.<action> (if generaldelta active)
    2. server.bundle1.<action>
    3. server.bundle1gd (if generaldelta active)
    4. server.bundle1
    """
    ui = repo.ui
    generaldelta = 'generaldelta' in repo.requirements

    if generaldelta:
        allowed = ui.configbool('server', 'bundle1gd.%s' % action, None)
        if allowed is not None:
            return allowed

    allowed = ui.configbool('server', 'bundle1.%s' % action, None)
    if allowed is not None:
        return allowed

    if generaldelta:
        allowed = ui.configbool('server', 'bundle1gd')
        if allowed is not None:
            return allowed

    return ui.configbool('server', 'bundle1')
|
2015-12-05 02:12:11 +03:00
|
|
|
|
2016-12-25 01:21:46 +03:00
|
|
|
def supportedcompengines(ui, proto, role):
    """Obtain the list of supported compression engines for a request.

    ``role`` is either util.CLIENTROLE or util.SERVERROLE. The default
    engine list/order can be overridden through configuration; invalid or
    empty overrides abort.

    Raises error.Abort when the config names unknown engines or filters
    out every usable engine.
    """
    assert role in (util.CLIENTROLE, util.SERVERROLE)

    compengines = util.compengines.supportedwireengines(role)

    # Allow config to override default list and ordering.
    if role == util.SERVERROLE:
        configengines = ui.configlist('server', 'compressionengines')
        config = 'server.compressionengines'
    else:
        # This is currently implemented mainly to facilitate testing. In most
        # cases, the server should be in charge of choosing a compression engine
        # because a server has the most to lose from a sub-optimal choice. (e.g.
        # CPU DoS due to an expensive engine or a network DoS due to poor
        # compression ratio).
        configengines = ui.configlist('experimental',
                                      'clientcompressionengines')
        config = 'experimental.clientcompressionengines'

    # No explicit config. Filter out the ones that aren't supposed to be
    # advertised and return default ordering.
    if not configengines:
        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
        return [e for e in compengines
                if getattr(e.wireprotosupport(), attr) > 0]

    # If compression engines are listed in the config, assume there is a good
    # reason for it (like server operators wanting to achieve specific
    # performance characteristics). So fail fast if the config references
    # unusable compression engines.
    validnames = set(e.name() for e in compengines)
    invalidnames = set(e for e in configengines if e not in validnames)
    if invalidnames:
        raise error.Abort(_('invalid compression engine defined in %s: %s') %
                          (config, ', '.join(sorted(invalidnames))))

    compengines = [e for e in compengines if e.name() in configengines]
    compengines = sorted(compengines,
                         key=lambda e: configengines.index(e.name()))

    if not compengines:
        # BUG FIX: the original called ', '.sorted(validnames), which raises
        # AttributeError (str has no .sorted); join the sorted names instead.
        raise error.Abort(_('%s config option does not specify any known '
                            'compression engines') % config,
                          hint=_('usable compression engines: %s') %
                          ', '.join(sorted(validnames)))

    return compengines
|
|
|
|
|
2014-03-29 01:30:11 +04:00
|
|
|
# list of commands, mapping command name -> (handler, argument spec)
commands = {}

def wireprotocommand(name, args=''):
    """Decorator registering a function as a wire protocol command.

    ``name`` is the command name and ``args`` the space-separated
    argument specification stored alongside the handler in ``commands``.
    """
    def register(func):
        commands[name] = (func, args)
        return func
    return register
|
|
|
|
|
2014-03-29 01:35:36 +04:00
|
|
|
@wireprotocommand('batch', 'cmds *')
def batch(repo, proto, cmds, others):
    """Execute several wire protocol commands in one round trip.

    ``cmds`` is a ';'-separated list of "<op> <escaped args>" entries.
    Each sub-command result is escaped and the results are joined with
    ';'; if a sub-command produces an out-of-band error, the batch stops
    and that error is returned directly.
    """
    repo = repo.filtered("served")
    results = []
    for entry in cmds.split(';'):
        op, rawargs = entry.split(' ', 1)
        vals = {}
        for arg in rawargs.split(','):
            if not arg:
                continue
            argname, argvalue = arg.split('=')
            vals[unescapearg(argname)] = unescapearg(argvalue)
        func, spec = commands[op]
        if spec:
            keys = spec.split()
            data = {}
            for k in keys:
                if k == '*':
                    # '*' collects every argument not consumed by a named key
                    data['*'] = dict((key, vals[key]) for key in vals
                                     if key not in keys)
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, ooberror):
            # abort the whole batch on an out-of-band error
            return result
        results.append(escapearg(result))
    return ';'.join(results)
|
|
|
|
|
2014-03-29 01:36:05 +04:00
|
|
|
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
    """Return the intermediate nodes for each requested '-'-separated pair.

    One encoded node list per pair, each terminated by a newline.
    """
    decoded = [decodelist(p, '-') for p in pairs.split(" ")]
    lines = [encodelist(b) + "\n" for b in repo.between(decoded)]
    return "".join(lines)
|
|
|
|
|
2014-03-29 01:36:41 +04:00
|
|
|
@wireprotocommand('branchmap')
def branchmap(repo, proto):
    """Return the branch map, one "<quoted name> <encoded nodes>" line each."""
    entries = []
    for branch, nodes in repo.branchmap().iteritems():
        quoted = urlreq.quote(encoding.fromlocal(branch))
        entries.append('%s %s' % (quoted, encodelist(nodes)))
    return '\n'.join(entries)
|
|
|
|
|
2014-03-29 01:37:02 +04:00
|
|
|
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
    """Return branch information for the requested nodes.

    One encoded node list per branch, each terminated by a newline.
    """
    lines = [encodelist(b) + "\n"
             for b in repo.branches(decodelist(nodes))]
    return "".join(lines)
|
|
|
|
|
2015-11-03 23:31:33 +03:00
|
|
|
@wireprotocommand('clonebundles', '')
def clonebundles(repo, proto):
    """Return the raw clone bundles manifest for seeding clones.

    Clients parse this response to decide which pre-generated bundle to
    fetch. Extensions may wrap this command to filter or dynamically emit
    entries depending on the request — e.g. advertising URLs for the data
    center closest to the client's IP address.
    """
    manifest = repo.vfs.tryread('clonebundles.manifest')
    return manifest
|
2014-03-11 12:38:02 +04:00
|
|
|
|
|
|
|
# Baseline wire protocol capabilities advertised for every repository.
# _capabilities() copies this list before appending dynamic entries.
wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                 'known', 'getbundle', 'unbundlehash', 'batch']
|
2014-03-13 01:46:41 +04:00
|
|
|
|
|
|
|
def _capabilities(repo, proto):
    """Compute the list of wire protocol capabilities for ``repo``.

    Extensions that want to alter the advertised capabilities should wrap
    this function rather than the ``capabilities`` command: the returned
    list is easy to mutate, and any change made here propagates to both
    the ``capabilities`` and ``hello`` commands without further action.
    """
    # Work on a copy so callers cannot mutate the module-level default list.
    caps = list(wireprotocaps)
    if streamclone.allowservergeneration(repo):
        if repo.ui.configbool('server', 'preferuncompressed'):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        if not requiredformats - {'revlogv1'}:
            # plain revlogv1 repositories get the legacy 'stream' cap
            caps.append('stream')
        else:
            # otherwise spell out the revlog format requirements so newer
            # clients can decide whether they understand the on-disk format
            caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
    if repo.ui.configbool('experimental', 'bundle2-advertise'):
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
        caps.append('bundle2=' + urlreq.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))

    if proto.name == 'http':
        caps.append('httpheader=%d' %
                    repo.ui.configint('server', 'maxhttpheaderlen'))
        if repo.ui.configbool('experimental', 'httppostargs'):
            caps.append('httppostargs')

        # FUTURE advertise 0.2rx once support is implemented
        # FUTURE advertise minrx and mintx after consulting config option
        caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')

        engines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
        if engines:
            wirenames = [urlreq.quote(e.wireprotosupport().name)
                         for e in engines]
            caps.append('compression=%s' % ','.join(wirenames))

    return caps
|
|
|
|
|
2014-04-13 21:01:00 +04:00
|
|
|
# If you are writing an extension and consider wrapping this function. Wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    """Advertise the server capabilities as a space separated string."""
    return ' '.join(_capabilities(repo, proto))
|
2010-07-15 22:56:52 +04:00
|
|
|
|
2014-03-29 01:38:19 +04:00
|
|
|
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    """Stream a version '01' changegroup of everything past ``roots``."""
    missingroots = decodelist(roots)
    outgoing = discovery.outgoing(repo, missingroots=missingroots,
                                  missingheads=repo.heads())
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
    return streamres(reader=cg, v1compressible=True)
|
2010-07-15 00:43:20 +04:00
|
|
|
|
2014-03-29 01:38:40 +04:00
|
|
|
@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    """Stream a version '01' changegroup between ``bases`` and ``heads``."""
    outgoing = discovery.outgoing(repo,
                                  missingroots=decodelist(bases),
                                  missingheads=decodelist(heads))
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
    return streamres(reader=cg, v1compressible=True)
|
2010-07-15 00:43:20 +04:00
|
|
|
|
2014-03-29 01:39:06 +04:00
|
|
|
@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    """Echo arguments back through the repository, for protocol testing.

    Optional arguments are restricted to the known 'three'/'four' set.
    """
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)
|
2011-03-22 09:38:32 +03:00
|
|
|
|
2014-03-29 01:40:07 +04:00
|
|
|
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    """Send a bundle of repository data to the client.

    Wire arguments are first decoded according to their declared type in
    ``gboptsmap`` and then handed to ``exchange.getbundlechunks``. Abort
    errors raised while generating the bundle are forwarded to the client
    rather than dropped: as an out-of-band error for bundle1-over-http,
    or as an ``error:abort`` bundle2 part when bundle2 was requested.
    """
    opts = options('getbundle', gboptsmap.keys(), others)
    # decode each wire argument according to its declared key type
    for k, v in opts.iteritems():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            opts[k] = decodelist(v)
        elif keytype == 'csv':
            opts[k] = list(v.split(','))
        elif keytype == 'scsv':
            opts[k] = set(v.split(','))
        elif keytype == 'boolean':
            # Client should serialize False as '0', which is a non-empty string
            # so it evaluates as a True bool.
            if v == '0':
                opts[k] = False
            else:
                opts[k] = bool(v)
        elif keytype != 'plain':
            raise KeyError('unknown getbundle option type %s'
                           % keytype)

    # reject bundle1 pulls when the server has disabled them
    if not bundle1allowed(repo, 'pull'):
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(bundle2required)
            raise error.Abort(bundle2requiredmain,
                              hint=bundle2requiredhint)

    try:
        if repo.ui.configbool('server', 'disablefullbundle'):
            # Check to see if this is a full clone.
            clheads = set(repo.changelog.heads())
            heads = set(opts.get('heads', set()))
            common = set(opts.get('common', set()))
            common.discard(nullid)
            if not common and clheads == heads:
                raise error.Abort(
                    _('server has pull-based clones disabled'),
                    hint=_('remove --pull if specified or upgrade Mercurial'))

        chunks = exchange.getbundlechunks(repo, 'serve', **opts)
    except error.Abort as exc:
        # cleanly forward Abort error to the client
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(str(exc) + '\n')
            raise # cannot do better for bundle1 + ssh
        # bundle2 request expect a bundle2 reply
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', str(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort',
                                           manargs, advargs))
        return streamres(gen=bundler.getchunks(), v1compressible=True)
    return streamres(gen=chunks, v1compressible=True)
|
2011-03-23 18:02:11 +03:00
|
|
|
|
2014-03-29 01:40:31 +04:00
|
|
|
@wireprotocommand('heads')
def heads(repo, proto):
    """Return the encoded list of repository head nodes, newline terminated."""
    return encodelist(repo.heads()) + "\n"
|
2010-07-15 00:25:15 +04:00
|
|
|
|
2014-03-29 01:40:44 +04:00
|
|
|
@wireprotocommand('hello')
def hello(repo, proto):
    """Describe the server in an RFC822-like key/value format.

    The only field currently emitted is "capabilities", a line of the
    form:

      capabilities: space separated list of tokens
    """
    return "capabilities: %s\n" % (capabilities(repo, proto))
|
|
|
|
|
2014-03-29 01:42:21 +04:00
|
|
|
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    """Return the encoded pushkey listing for ``namespace``."""
    entries = repo.listkeys(encoding.tolocal(namespace)).items()
    return pushkeymod.encodekeys(entries)
|
2010-07-15 00:25:15 +04:00
|
|
|
|
2014-03-29 01:42:38 +04:00
|
|
|
@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
    """Resolve ``key`` to a full hex node.

    Returns "1 <hex node>" on success and "0 <error message>" on failure,
    each terminated by a newline.
    """
    try:
        result = repo[encoding.tolocal(key)].hex()
        success = 1
    except Exception as inst:
        result = str(inst)
        success = 0
    return "%s %s\n" % (success, result)
|
|
|
|
|
2014-03-29 01:42:06 +04:00
|
|
|
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    """Return a '1'/'0' flag string, one character per requested node."""
    flags = ["1" if present else "0"
             for present in repo.known(decodelist(nodes))]
    return ''.join(flags)
|
|
|
|
|
2014-03-29 01:42:55 +04:00
|
|
|
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    """Set the value of a pushkey and report success to the client.

    The response is the integer result (0 or 1) followed by a newline;
    when the transport can capture output (it exposes ``restore``), any
    output produced while pushing is appended after the status line.
    """
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and util.escapestr(new) != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):

        # transport can buffer output: redirect it so hook/server messages
        # can be sent back to the client alongside the result
        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except error.Abort:
            # report failure instead of aborting the connection
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)
|
|
|
|
|
2014-03-29 01:43:11 +04:00
|
|
|
@wireprotocommand('stream_out')
def stream(repo, proto):
    """Stream out repository data for streaming clone.

    A server supporting streaming clone advertises the "stream"
    capability with a value representing the version and flags of the
    repository it serves; the client checks whether it understands that
    format before calling. The first line of the response is a status
    code: '0' on success (followed by the stream data), '1' when the
    server does not allow stream generation, and '2' when the repository
    lock could not be acquired.
    """
    if not streamclone.allowservergeneration(repo):
        return '1\n'

    def emit(chunks):
        # success marker first, then the raw stream data
        yield '0\n'
        for chunk in chunks:
            yield chunk

    try:
        # generatev1wireproto() may raise LockError before producing its
        # first chunk; defer all output until the lock is known to be held.
        chunks = streamclone.generatev1wireproto(repo)
        return streamres(gen=emit(chunks))
    except error.LockError:
        return '2\n'
|
2010-07-15 01:19:27 +04:00
|
|
|
|
2014-03-29 01:43:30 +04:00
|
|
|
@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
    """Apply a bundle pushed by a client to the repository.

    ``heads`` is the wire-encoded list of heads the client observed when
    it built the bundle; it is decoded and checked so that a push racing
    another push can be rejected instead of silently applied.

    Returns a wire protocol response object: ``pushres``/``pusherr`` for
    the plain (bundle1) case, ``streamres`` for a bundle2 reply stream,
    or ``ooberror`` for the HTTP bundle1-rejection special case.
    """
    their_heads = decodelist(heads)

    try:
        proto.redirect()

        # Verify the client's view of our heads is still current;
        # presumably raises PushRaced on mismatch (handled below) —
        # see exchange.check_heads.
        exchange.check_heads(repo, their_heads, 'preparing changes')

        # write bundle data to temporary file because it can be big
        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
        r = 0
        try:
            # Spool the incoming bundle to disk, then parse it.
            proto.getfile(fp)
            fp.seek(0)
            gen = exchange.readbundle(repo.ui, fp, None)
            # Reject bundle1 (cg1) pushes when server policy requires
            # bundle2.
            if (isinstance(gen, changegroupmod.cg1unpacker)
                and not bundle1allowed(repo, 'push')):
                if proto.name == 'http':
                    # need to special case http because stderr do not get to
                    # the http client on failed push so we need to abuse some
                    # other error type to make sure the message get to the
                    # user.
                    return ooberror(bundle2required)
                raise error.Abort(bundle2requiredmain,
                                  hint=bundle2requiredhint)

            r = exchange.unbundle(repo, gen, their_heads, 'serve',
                                  proto._client())
            if util.safehasattr(r, 'addpart'):
                # The return looks streamable, we are in the bundle2 case and
                # should return a stream.
                return streamres(gen=r.getchunks())
            # bundle1 case: plain integer result.
            return pushres(r)

        finally:
            # Always remove the spooled bundle, success or failure.
            fp.close()
            os.unlink(tempname)

    except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
        # handle non-bundle2 case first
        if not getattr(exc, 'duringunbundle2', False):
            # Re-raise inside a try so each exception type gets its own
            # plain except clause.
            try:
                raise
            except error.Abort:
                # The old code we moved used util.stderr directly.
                # We did not change it to minimise code change.
                # This need to be moved to something proper.
                # Feel free to do it.
                util.stderr.write("abort: %s\n" % exc)
                if exc.hint is not None:
                    util.stderr.write("(%s)\n" % exc.hint)
                return pushres(0)
            except error.PushRaced:
                return pusherr(str(exc))

        # bundle2 case: report the failure to the client as error parts
        # inside a bundle2 reply stream.
        bundler = bundle2.bundle20(repo.ui)
        # Keep any reply parts salvaged from the failed processing so the
        # client still sees output produced before the failure.
        for out in getattr(exc, '_bundle2salvagedoutput', ()):
            bundler.addpart(out)
        try:
            # Same re-raise-and-dispatch trick as above, now mapping each
            # exception type to a dedicated bundle2 error part.
            try:
                raise
            except error.PushkeyFailed as exc:
                # check client caps
                remotecaps = getattr(exc, '_replycaps', None)
                if (remotecaps is not None
                        and 'pushkey' not in remotecaps.get('error', ())):
                    # no support remote side, fallback to Abort handler.
                    raise
                part = bundler.newpart('error:pushkey')
                part.addparam('in-reply-to', exc.partid)
                # Optional details travel as advisory (non-mandatory)
                # parameters.
                if exc.namespace is not None:
                    part.addparam('namespace', exc.namespace, mandatory=False)
                if exc.key is not None:
                    part.addparam('key', exc.key, mandatory=False)
                if exc.new is not None:
                    part.addparam('new', exc.new, mandatory=False)
                if exc.old is not None:
                    part.addparam('old', exc.old, mandatory=False)
                if exc.ret is not None:
                    part.addparam('ret', exc.ret, mandatory=False)
        except error.BundleValueError as exc:
            errpart = bundler.newpart('error:unsupportedcontent')
            if exc.parttype is not None:
                errpart.addparam('parttype', exc.parttype)
            if exc.params:
                errpart.addparam('params', '\0'.join(exc.params))
        except error.Abort as exc:
            manargs = [('message', str(exc))]
            advargs = []
            if exc.hint is not None:
                advargs.append(('hint', exc.hint))
            bundler.addpart(bundle2.bundlepart('error:abort',
                                               manargs, advargs))
        except error.PushRaced as exc:
            bundler.newpart('error:pushraced', [('message', str(exc))])
        return streamres(gen=bundler.getchunks())
|