# wireproto.py - generic wire protocol support functions
#
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
2010-08-15 13:05:04 +04:00
|
|
|
import urllib, tempfile, os, sys
|
2010-07-15 00:25:15 +04:00
|
|
|
from i18n import _
|
|
|
|
from node import bin, hex
|
2010-07-15 20:24:42 +04:00
|
|
|
import changegroup as changegroupmod
|
2012-07-13 23:47:06 +04:00
|
|
|
import peer, error, encoding, util, store
|
2010-07-15 00:25:15 +04:00
|
|
|
|
2011-06-15 00:51:26 +04:00
|
|
|
# abstract batching support
|
|
|
|
|
|
|
|
class future(object):
    '''placeholder for a value to be set later'''
    def set(self, value):
        # a future may only be resolved once; a second set() indicates a
        # bug in the batching machinery
        if util.safehasattr(self, 'value'):
            raise error.RepoError("future is already set")
        self.value = value
|
|
|
|
|
|
|
class batcher(object):
    '''base class for batches of commands submittable in a single request

    All methods invoked on instances of this class are simply queued and
    return a future for the result. Once you call submit(), all the queued
    calls are performed and the results set in their respective futures.
    '''
    def __init__(self):
        # queued calls: list of (method name, args, kwargs, result future)
        self.calls = []
    def __getattr__(self, name):
        # any attribute access yields a proxy that records the call
        # instead of performing it
        def call(*args, **opts):
            resref = future()
            self.calls.append((name, args, opts, resref,))
            return resref
        return call
    def submit(self):
        # overridden by subclasses to actually perform the queued calls
        pass
|
|
|
|
|
|
|
class localbatch(batcher):
    '''performs the queued calls directly'''
    def __init__(self, local):
        batcher.__init__(self)
        self.local = local
    def submit(self):
        # no wire involved: run each queued call in order and resolve its
        # future immediately
        for queued in self.calls:
            name, args, opts, resref = queued
            method = getattr(self.local, name)
            resref.set(method(*args, **opts))
|
|
|
|
|
|
|
class remotebatch(batcher):
    '''batches the queued calls; uses as few roundtrips as possible'''
    def __init__(self, remote):
        '''remote must support _submitbatch(encbatch) and
        _submitone(op, encargs)'''
        batcher.__init__(self)
        self.remote = remote
    def submit(self):
        # req accumulates encoded batchable calls; rsp tracks the matching
        # coroutine and futures awaiting the server response
        req, rsp = [], []
        for name, args, opts, resref in self.calls:
            mtd = getattr(self.remote, name)
            batchablefn = getattr(mtd, 'batchable', None)
            if batchablefn is not None:
                # run the coroutine up to its first yield: either an
                # (encoded args, future) pair to batch, or a locally
                # computed result with no future
                batchable = batchablefn(mtd.im_self, *args, **opts)
                encargsorres, encresref = batchable.next()
                if encresref:
                    req.append((name, encargsorres,))
                    rsp.append((batchable, encresref, resref,))
                else:
                    resref.set(encargsorres)
            else:
                # non-batchable call: flush any pending batch first so
                # server-side ordering is preserved, then call directly
                if req:
                    self._submitreq(req, rsp)
                    req, rsp = [], []
                resref.set(mtd(*args, **opts))
        if req:
            self._submitreq(req, rsp)
    def _submitreq(self, req, rsp):
        # perform one wire roundtrip and feed each encoded result back
        # into its coroutine to produce the decoded value
        encresults = self.remote._submitbatch(req)
        for encres, r in zip(encresults, rsp):
            batchable, encresref, resref = r
            encresref.set(encres)
            resref.set(batchable.next())
|
|
|
|
|
|
|
def batchable(f):
    '''annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Handle locally computable results first:
        if not one:
            yield "a local result", None
        # Build list of encoded arguments suitable for your wire protocol:
        encargs = [('one', encode(one),), ('two', encode(two),)]
        # Create future for injection of encoded result:
        encresref = future()
        # Return encoded arguments and future:
        yield encargs, encresref
        # Assuming the future to be filled with the result from the batched
        # request now. Decode it:
        yield decode(encresref.value)

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    '''
    def plain(*args, **opts):
        # drive the coroutine to its first yield
        batchable = f(*args, **opts)
        encargsorres, encresref = batchable.next()
        if not encresref:
            return encargsorres # a local result in this case
        # not batched here: submit the single call over the wire and
        # resume the coroutine to decode the response
        self = args[0]
        encresref.set(self._submitone(f.func_name, encargsorres))
        return batchable.next()
    setattr(plain, 'batchable', f)
    return plain
|
|
|
|
2010-07-16 02:52:13 +04:00
|
|
|
# list of nodes encoding / decoding
|
|
|
|
|
|
|
|
def decodelist(l, sep=' '):
    '''decode a sep-joined string of hex nodes into a list of binary nodes

    An empty string decodes to the empty list.
    '''
    if not l:
        return []
    return map(bin, l.split(sep))
2010-07-16 02:52:13 +04:00
|
|
|
|
|
|
|
def encodelist(l, sep=' '):
    '''encode a list of binary nodes as a sep-joined string of hex'''
    return sep.join([hex(n) for n in l])
|
|
|
|
2011-06-15 00:52:58 +04:00
|
|
|
# batched call argument encoding
|
|
|
|
|
|
|
|
def escapearg(plain):
    '''escape a batch command argument value for wire transmission

    ':' is the escape character, so it must be doubled first; the batch
    separator characters ',', ';' and '=' are then prefixed with ':'.
    '''
    escaped = plain.replace(':', '::')
    for ch in (',', ';', '='):
        escaped = escaped.replace(ch, ':' + ch)
    return escaped
|
|
|
|
|
|
|
def unescapearg(escaped):
    '''reverse escapearg(): strip the ':' escapes added for transmission

    The escaped separators must be restored before the doubled escape
    character itself, matching the inverse order of escapearg().
    '''
    plain = escaped
    for seq, ch in ((':=', '='), (':;', ';'), (':,', ','), ('::', ':')):
        plain = plain.replace(seq, ch)
    return plain
|
|
|
|
2010-07-15 01:34:57 +04:00
|
|
|
# client side
|
|
|
|
|
2011-06-15 00:52:58 +04:00
|
|
|
def todict(**args):
    '''build a dict from keyword arguments (convenience for wire args)'''
    return args
|
|
|
|
2012-07-13 23:47:06 +04:00
|
|
|
class wirepeer(peer.peerrepository):
    '''client-side peer speaking the wire protocol to a remote repository

    Subclasses supply the transport primitives used here: _call,
    _callstream, _callpush, _abort and _decompress.
    '''

    def batch(self):
        # queue calls and submit them in as few roundtrips as possible
        return remotebatch(self)
    def _submitbatch(self, req):
        # encode the queued (op, argsdict) pairs into a single 'batch'
        # wire command; the server-side batch() performs the inverse
        cmds = []
        for op, argsdict in req:
            args = ','.join('%s=%s' % p for p in argsdict.iteritems())
            cmds.append('%s %s' % (op, args))
        rsp = self._call("batch", cmds=';'.join(cmds))
        return rsp.split(';')
    def _submitone(self, op, args):
        return self._call(op, **args)

    @batchable
    def lookup(self, key):
        self.requirecap('lookup', _('look up remote revision'))
        f = future()
        yield todict(key=encoding.fromlocal(key)), f
        d = f.value
        # response format is "<success> <data>\n"; see server-side lookup()
        success, data = d[:-1].split(" ", 1)
        if int(success):
            yield bin(data)
        self._abort(error.RepoError(data))

    @batchable
    def heads(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            yield decodelist(d[:-1])
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def known(self, nodes):
        f = future()
        yield todict(nodes=encodelist(nodes)), f
        d = f.value
        try:
            # NOTE(review): the comprehension variable shadows the future
            # 'f'; harmless here since 'f' is not used afterwards
            yield [bool(int(f)) for f in d]
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def branchmap(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            branchmap = {}
            # each line is '<quoted branch name> <hex head> <hex head> ...'
            for branchpart in d.splitlines():
                branchname, branchheads = branchpart.split(' ', 1)
                branchname = encoding.tolocal(urllib.unquote(branchname))
                branchheads = decodelist(branchheads)
                branchmap[branchname] = branchheads
            yield branchmap
        except TypeError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def branches(self, nodes):
        n = encodelist(nodes)
        d = self._call("branches", nodes=n)
        try:
            br = [tuple(decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        batch = 8 # avoid giant requests
        r = []
        for i in xrange(0, len(pairs), batch):
            n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
            d = self._call("between", pairs=n)
            try:
                r.extend(l and decodelist(l) or [] for l in d.splitlines())
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), d))
        return r

    @batchable
    def pushkey(self, namespace, key, old, new):
        if not self.capable('pushkey'):
            yield False, None
        f = future()
        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
        yield todict(namespace=encoding.fromlocal(namespace),
                     key=encoding.fromlocal(key),
                     old=encoding.fromlocal(old),
                     new=encoding.fromlocal(new)), f
        d = f.value
        # response is '<int result>\n<server output>'
        d, output = d.split('\n', 1)
        try:
            d = bool(int(d))
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), d)
        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        yield d

    @batchable
    def listkeys(self, namespace):
        if not self.capable('pushkey'):
            yield {}, None
        f = future()
        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
        yield todict(namespace=encoding.fromlocal(namespace)), f
        d = f.value
        r = {}
        # one '<key>\t<value>' entry per line
        for l in d.splitlines():
            k, v = l.split('\t')
            r[encoding.tolocal(k)] = encoding.tolocal(v)
        yield r

    def stream_out(self):
        return self._callstream('stream_out')

    def changegroup(self, nodes, kind):
        n = encodelist(nodes)
        f = self._callstream("changegroup", roots=n)
        return changegroupmod.unbundle10(self._decompress(f), 'UN')

    def changegroupsubset(self, bases, heads, kind):
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = encodelist(bases)
        heads = encodelist(heads)
        f = self._callstream("changegroupsubset",
                             bases=bases, heads=heads)
        return changegroupmod.unbundle10(self._decompress(f), 'UN')

    def getbundle(self, source, heads=None, common=None):
        self.requirecap('getbundle', _('look up remote changes'))
        # only send the optional sets the caller provided
        opts = {}
        if heads is not None:
            opts['heads'] = encodelist(heads)
        if common is not None:
            opts['common'] = encodelist(common)
        f = self._callstream("getbundle", **opts)
        return changegroupmod.unbundle10(self._decompress(f), 'UN')

    def unbundle(self, cg, heads, source):
        '''Send cg (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server as a bundle. Return an integer indicating the
        result of the push (see localrepository.addchangegroup()).'''

        # when the server supports it, send a hash of our view of its
        # heads instead of the full list, so it can detect races cheaply
        if heads != ['force'] and self.capable('unbundlehash'):
            heads = encodelist(['hashed',
                                util.sha1(''.join(sorted(heads))).digest()])
        else:
            heads = encodelist(heads)

        ret, output = self._callpush("unbundle", cg, heads=heads)
        if ret == "":
            raise error.ResponseError(
                _('push failed:'), output)
        try:
            ret = int(ret)
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), ret)

        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        return ret

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts['three'] = three
        if four is not None:
            opts['four'] = four
        return self._call('debugwireargs', one=one, two=two, **opts)
|
|
|
|
2010-07-15 01:34:57 +04:00
|
|
|
# server side
|
|
|
|
|
2010-07-20 22:53:33 +04:00
|
|
|
class streamres(object):
    '''wire command result wrapping a generator to stream to the client'''
    def __init__(self, gen):
        self.gen = gen
|
|
|
|
|
|
|
class pushres(object):
    '''wire command result wrapping a successful push result value'''
    def __init__(self, res):
        self.res = res
|
|
|
|
2010-10-11 21:45:36 +04:00
|
|
|
class pusherr(object):
    '''wire command result wrapping a push error message'''
    def __init__(self, res):
        self.res = res
|
|
|
|
2011-08-02 23:21:10 +04:00
|
|
|
class ooberror(object):
    '''wire command result wrapping an out-of-band error message'''
    def __init__(self, message):
        self.message = message
|
|
|
|
2010-07-15 00:25:15 +04:00
|
|
|
def dispatch(repo, proto, command):
    '''run the registered handler for a wire command

    Arguments are pulled from the protocol handler according to the
    command's declared argument spec.
    '''
    repo = repo.filtered("served")
    handler, spec = commands[command]
    return handler(repo, proto, *proto.getargs(spec))
2010-07-15 00:25:15 +04:00
|
|
|
|
2011-03-22 09:38:32 +03:00
|
|
|
def options(cmd, keys, others):
    '''move recognized keys out of the others dict into a new opts dict

    Unrecognized leftovers are reported on stderr (but not removed) so
    mismatched client/server argument sets are visible.
    '''
    opts = {}
    for key in keys:
        if key in others:
            opts[key] = others.pop(key)
    if others:
        sys.stderr.write("abort: %s got unexpected arguments %s\n"
                         % (cmd, ",".join(others)))
    return opts
|
|
|
|
2011-06-15 00:52:58 +04:00
|
|
|
def batch(repo, proto, cmds, others):
    '''server command: run a ';'-separated list of encoded commands

    Each entry has the form 'op key=val,key=val,...' with values escaped
    via escapearg(); the results are escaped again and joined with ';'.
    This is the inverse of the client-side _submitbatch().
    '''
    repo = repo.filtered("served")
    res = []
    for pair in cmds.split(';'):
        op, args = pair.split(' ', 1)
        vals = {}
        for a in args.split(','):
            if a:
                n, v = a.split('=')
                vals[n] = unescapearg(v)
        func, spec = commands[op]
        if spec:
            keys = spec.split()
            data = {}
            for k in keys:
                if k == '*':
                    # '*' collects all arguments not named in the spec
                    star = {}
                    for key in vals.keys():
                        if key not in keys:
                            star[key] = vals[key]
                    data['*'] = star
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, ooberror):
            # out-of-band error: abort the whole batch response
            return result
        res.append(escapearg(result))
    return ';'.join(res)
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def between(repo, proto, pairs):
    '''server command: resolve space-separated, '-'-joined node pairs

    Returns one hex-encoded node list per input pair, newline-terminated.
    '''
    decoded = [decodelist(p, '-') for p in pairs.split(" ")]
    lines = [encodelist(b) + "\n" for b in repo.between(decoded)]
    return "".join(lines)
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def branchmap(repo, proto):
    '''server command: dump the branch map, one line per branch

    Each line is '<url-quoted branch name> <hex head> <hex head> ...',
    matching what the client-side branchmap() parses.
    '''
    lines = []
    for branch, nodes in repo.branchmap().iteritems():
        quoted = urllib.quote(encoding.fromlocal(branch))
        lines.append('%s %s' % (quoted, encodelist(nodes)))
    return '\n'.join(lines)
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def branches(repo, proto, nodes):
    '''server command: branch information for the requested nodes

    Returns one newline-terminated hex node list per branch entry.
    '''
    decoded = decodelist(nodes)
    return "".join([encodelist(b) + "\n" for b in repo.branches(decoded)])
|
|
|
|
2010-07-15 22:56:52 +04:00
|
|
|
def capabilities(repo, proto):
    '''server command: advertise the space-separated capability list'''
    caps = ('lookup changegroupsubset branchmap pushkey known getbundle '
            'unbundlehash batch').split()
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        # only advertise plain 'stream' when the repo uses revlog formats
        # old clients understand; otherwise list the required formats in
        # 'streamreqs' so new clients can decide for themselves
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - set(('revlogv1',)):
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return ' '.join(caps)
|
|
|
|
2010-07-15 00:43:20 +04:00
|
|
|
def changegroup(repo, proto, roots):
    '''server command: stream a changegroup rooted at the given nodes'''
    cg = repo.changegroup(decodelist(roots), 'serve')
    return streamres(proto.groupchunks(cg))
2010-07-15 00:43:20 +04:00
|
|
|
|
|
|
|
def changegroupsubset(repo, proto, bases, heads):
    '''server command: stream a changegroup between bases and heads'''
    cg = repo.changegroupsubset(decodelist(bases), decodelist(heads),
                                'serve')
    return streamres(proto.groupchunks(cg))
2010-07-15 00:43:20 +04:00
|
|
|
|
2011-03-22 09:38:32 +03:00
|
|
|
def debugwireargs(repo, proto, one, two, others):
    '''server command: echo arguments back, for wire-protocol testing'''
    # only accept optional args from the known set
    return repo.debugwireargs(
        one, two, **options('debugwireargs', ['three', 'four'], others))
2011-03-22 09:38:32 +03:00
|
|
|
|
2011-03-23 18:02:11 +03:00
|
|
|
def getbundle(repo, proto, others):
    '''server command: stream a bundle for the given heads/common sets'''
    opts = options('getbundle', ['heads', 'common'], others)
    decoded = dict((k, decodelist(v)) for k, v in opts.iteritems())
    cg = repo.getbundle('serve', **decoded)
    return streamres(proto.groupchunks(cg))
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def heads(repo, proto):
    '''server command: the repository heads, hex-encoded on one line'''
    return encodelist(repo.heads()) + "\n"
2010-07-15 00:25:15 +04:00
|
|
|
|
2010-07-15 22:56:52 +04:00
|
|
|
def hello(repo, proto):
    '''the hello command returns a set of lines describing various
    interesting things about the server, in an RFC822-like format.
    Currently the only one defined is "capabilities", which
    consists of a line in the form:

    capabilities: space separated list of tokens
    '''
    caps = capabilities(repo, proto)
    return "capabilities: %s\n" % caps
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def listkeys(repo, proto, namespace):
    '''server command: dump a pushkey namespace as tab-separated lines'''
    entries = repo.listkeys(encoding.tolocal(namespace)).items()
    lines = ['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
             for k, v in entries]
    return '\n'.join(lines)
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def lookup(repo, proto, key):
    '''server command: resolve a key to a hex node

    Returns "1 <hexnode>\n" on success or "0 <error message>\n" on
    failure, matching what the client-side lookup() parses.
    '''
    try:
        k = encoding.tolocal(key)
        c = repo[k]
        r = c.hex()
        success = 1
    except Exception, inst:
        # report any lookup failure to the client rather than crashing
        r = str(inst)
        success = 0
    return "%s %s\n" % (success, r)
|
|
|
|
2011-05-24 19:48:16 +04:00
|
|
|
def known(repo, proto, nodes, others):
    '''server command: "1"/"0" flag string for which nodes are known'''
    flags = [b and "1" or "0" for b in repo.known(decodelist(nodes))]
    return ''.join(flags)
|
|
|
|
2010-07-15 00:33:21 +04:00
|
|
|
def pushkey(repo, proto, namespace, key, old, new):
    '''server command: update a pushkey namespace entry

    Returns '<int result>\n' or, when the protocol handler captures
    output, '<int result>\n<output>'.
    '''
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):

        # capture hook/server output so it can be relayed to the client
        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except util.Abort:
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)
|
|
|
|
2010-07-20 22:52:23 +04:00
|
|
|
def _allowstream(ui):
    # streaming of uncompressed repo data is allowed unless the server
    # configuration explicitly disables it
    return ui.configbool('server', 'uncompressed', True, untrusted=True)
|
|
|
|
2010-07-15 01:19:27 +04:00
|
|
|
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.

    The format is simple: the server writes out a line with the amount
    of files, then the total amount of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and filesize (separated by the null character), then the file contents.
    '''

    if not _allowstream(repo.ui):
        return '1\n'

    entries = []
    total_bytes = 0
    try:
        # get consistent snapshot of repo, lock during scan
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for name, ename, size in repo.store.walk():
                if size:
                    entries.append((name, size))
                    total_bytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), total_bytes))
        yield '%d %d\n' % (len(entries), total_bytes)

        sopener = repo.sopener
        oldaudit = sopener.mustaudit
        debugflag = repo.ui.debugflag
        # disable path auditing while streaming: names come from our own
        # store walk above, and auditing each open would be wasted work
        sopener.mustaudit = False

        try:
            for name, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(name), size)
                if size <= 65536:
                    # small file: read it in one go
                    fp = sopener(name)
                    try:
                        data = fp.read(size)
                    finally:
                        fp.close()
                    yield data
                else:
                    for chunk in util.filechunkiter(sopener(name), limit=size):
                        yield chunk
        # replace with "finally:" when support for python 2.4 has been dropped
        except Exception:
            sopener.mustaudit = oldaudit
            raise
        sopener.mustaudit = oldaudit

    return streamres(streamer(repo, entries, total_bytes))
2010-07-15 01:19:27 +04:00
|
|
|
|
2010-07-15 20:24:42 +04:00
|
|
|
def unbundle(repo, proto, heads):
    '''server command: receive a pushed bundle and add it to the repo

    heads is the client's view of our heads (or 'force' / a hashed
    digest); a mismatch means the repo changed under the client and the
    push is rejected with pusherr.
    '''
    their_heads = decodelist(heads)

    def check_heads():
        heads = repo.heads()
        heads_hash = util.sha1(''.join(sorted(heads))).digest()
        return (their_heads == ['force'] or their_heads == heads or
                their_heads == ['hashed', heads_hash])

    proto.redirect()

    # fail early if possible
    if not check_heads():
        return pusherr('repository changed while preparing changes - '
                       'please try again')

    # write bundle data to temporary file because it can be big
    fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
    fp = os.fdopen(fd, 'wb+')
    r = 0
    try:
        proto.getfile(fp)
        lock = repo.lock()
        try:
            if not check_heads():
                # someone else committed/pushed/unbundled while we
                # were transferring data
                return pusherr('repository changed while uploading changes - '
                               'please try again')

            # push can proceed
            fp.seek(0)
            gen = changegroupmod.readbundle(fp, None)

            try:
                r = repo.addchangegroup(gen, 'serve', proto._client())
            except util.Abort, inst:
                # report the failure to the client via captured stderr
                sys.stderr.write("abort: %s\n" % inst)
        finally:
            lock.release()
        return pushres(r)

    finally:
        fp.close()
        os.unlink(tempname)
|
|
|
|
2010-07-15 00:25:15 +04:00
|
|
|
# wire command name -> (handler, argument spec); the spec is a
# space-separated list of argument names, with '*' collecting any
# remaining arguments into a dict (see batch() and proto.getargs)
commands = {
    'batch': (batch, 'cmds *'),
    'between': (between, 'pairs'),
    'branchmap': (branchmap, ''),
    'branches': (branches, 'nodes'),
    'capabilities': (capabilities, ''),
    'changegroup': (changegroup, 'roots'),
    'changegroupsubset': (changegroupsubset, 'bases heads'),
    'debugwireargs': (debugwireargs, 'one two *'),
    'getbundle': (getbundle, '*'),
    'heads': (heads, ''),
    'hello': (hello, ''),
    'known': (known, 'nodes *'),
    'listkeys': (listkeys, 'namespace'),
    'lookup': (lookup, 'key'),
    'pushkey': (pushkey, 'namespace key old new'),
    'stream_out': (stream, ''),
    'unbundle': (unbundle, 'heads'),
}