2007-12-03 14:27:11 +03:00
|
|
|
#
|
|
|
|
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
|
|
|
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2007-12-03 14:27:11 +03:00
|
|
|
|
2015-10-31 16:07:40 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import cgi
|
protocol: send application/mercurial-0.2 responses to capable clients
With this commit, the HTTP transport now parses the X-HgProto-<N>
header to determine what media type and compression engine to use for
responses. So far, we only compress responses that are already being
compressed with zlib today (stream response types to specific
commands). We can expand things to cover additional response types
later.
The practical side-effect of this commit is that non-zlib compression
engines will be used if both ends support them. This means if both
ends have zstd support, zstd - not zlib - will be used to compress
data!
When cloning the mozilla-unified repository between a local HTTP
server and client, the benefits of non-zlib compression are quite
noticeable:
engine server CPU (s) client CPU (s) bundle size
zlib (l=6) 174.1 283.2 1,148,547,026
zstd (l=1) 99.2 267.3 1,127,513,841
zstd (l=3) 103.1 266.9 1,018,861,363
zstd (l=7) 128.3 269.7 919,190,278
zstd (l=10) 162.0 - 894,547,179
none 95.3 277.2 4,097,566,064
The default zstd compression level is 3. So if you deploy zstd
capable Mercurial to your clients and servers and CPU time on
your server is dominated by "getbundle" requests (clients cloning
and pulling) - and my experience at Mozilla tells me this is often
the case - this commit could drastically reduce your server-side
CPU usage *and* save on bandwidth costs!
Another benefit of this change is that server operators can install
*any* compression engine. While it isn't enabled by default, the
"none" compression engine can now be used to disable wire protocol
compression completely. Previously, commands like "getbundle" always
zlib compressed output, adding considerable overhead to generating
responses. If you are on a high speed network and your server is under
high load, it might be advantageous to trade bandwidth for CPU.
Although, zstd at level 1 doesn't use that much CPU, so I'm not
convinced that disabling compression wholesale is worthwhile. And, my
data seems to indicate a slow down on the client without compression.
I suspect this is due to a lack of buffering resulting in an increase
in socket read() calls and/or the fact we're transferring an extra 3 GB
of data (parsing HTTP chunked transfer and processing extra TCP packets
can add up). This is definitely worth investigating and optimizing. But
since the "none" compressor isn't enabled by default, I'm inclined to
punt on this issue.
This commit introduces tons of tests. Some of these should arguably
have been implemented on previous commits. But it was difficult to
test without the server functionality in place.
2016-12-25 01:29:32 +03:00
|
|
|
import struct
|
2015-10-31 16:07:40 +03:00
|
|
|
|
|
|
|
from .common import (
|
|
|
|
HTTP_OK,
|
|
|
|
)
|
|
|
|
|
|
|
|
from .. import (
|
|
|
|
util,
|
|
|
|
wireproto,
|
|
|
|
)
|
2016-04-10 23:55:37 +03:00
|
|
|
stringio = util.stringio

urlerr = util.urlerr
urlreq = util.urlreq

# Media type for legacy wire protocol responses (see responsetype(): these
# are either sent uncompressed or zlib compressed).
HGTYPE = 'application/mercurial-0.1'
# Media type for responses whose compression engine was negotiated via the
# X-HgProto-<N> headers; the payload starts with the engine name.
HGTYPE2 = 'application/mercurial-0.2'
# Media type used to deliver out-of-band errors (wireproto.ooberror).
HGERRTYPE = 'application/hg-error'
|
2008-01-28 16:58:03 +03:00
|
|
|
|
2016-12-25 00:46:02 +03:00
|
|
|
def decodevaluefromheaders(req, headerprefix):
    """Decode a long value from multiple HTTP request headers.

    Values too large for a single header are split across numbered
    headers (``<headerprefix>-1``, ``<headerprefix>-2``, ...). This
    reads consecutive headers starting at 1 and concatenates their
    values, stopping at the first missing index.
    """
    # WSGI exposes "X-Foo-1" as environ key "HTTP_X_FOO_1".
    envprefix = 'HTTP_%s_' % headerprefix.upper().replace('-', '_')

    pieces = []
    index = 1
    value = req.env.get(envprefix + ('%d' % index))
    while value is not None:
        pieces.append(value)
        index += 1
        value = req.env.get(envprefix + ('%d' % index))

    return ''.join(pieces)
|
|
|
|
|
2014-03-28 22:10:33 +04:00
|
|
|
class webproto(wireproto.abstractserverproto):
    """Wire protocol server implementation for the HTTP (hgweb) transport.

    Adapts an hgweb request object (``req``) to the abstract server
    interface consumed by ``wireproto.dispatch``.
    """

    def __init__(self, req, ui):
        # hgweb request object; exposes .form, .env, .read(), .respond().
        self.req = req
        self.response = ''
        self.ui = ui
        # Transport name, used for protocol capability lookups.
        self.name = 'http'

    def getargs(self, args):
        """Return the values for the space-separated keys in ``args``.

        The special key ``'*'`` collects all remaining request parameters
        (except ``cmd`` and explicitly listed keys) into a dict.
        """
        knownargs = self._args()
        data = {}
        keys = args.split()
        for k in keys:
            if k == '*':
                star = {}
                for key in knownargs.keys():
                    if key != 'cmd' and key not in keys:
                        # Form values are lists; take the first entry.
                        star[key] = knownargs[key][0]
                data['*'] = star
            else:
                data[k] = knownargs[k][0]
        return [data[k] for k in keys]

    def _args(self):
        """Collect command arguments from all supported channels.

        Arguments may arrive in the query string/form, in a POSTed body
        (announced via the X-HgArgs-Post header), or spread across
        numbered X-HgArg-<N> request headers.
        """
        args = self.req.form.copy()
        postlen = int(self.req.env.get('HTTP_X_HGARGS_POST', 0))
        if postlen:
            args.update(cgi.parse_qs(
                self.req.read(postlen), keep_blank_values=True))
            return args

        argvalue = decodevaluefromheaders(self.req, 'X-HgArg')
        args.update(cgi.parse_qs(argvalue, keep_blank_values=True))
        return args

    def getfile(self, fp):
        """Copy the request body (CONTENT_LENGTH bytes) into ``fp``."""
        length = int(self.req.env['CONTENT_LENGTH'])
        for s in util.filechunkiter(self.req, limit=length):
            fp.write(s)

    def redirect(self):
        # Capture ui output so it can later be relayed to the client
        # inside the response body (see restore()).
        self.oldio = self.ui.fout, self.ui.ferr
        self.ui.ferr = self.ui.fout = stringio()

    def restore(self):
        """Undo redirect() and return the output captured in between."""
        val = self.ui.fout.getvalue()
        self.ui.ferr, self.ui.fout = self.oldio
        return val

    def _client(self):
        # Identification string for the remote client (used for logging).
        return 'remote:%s:%s:%s' % (
            self.req.env.get('wsgi.url_scheme') or 'http',
            urlreq.quote(self.req.env.get('REMOTE_HOST', '')),
            urlreq.quote(self.req.env.get('REMOTE_USER', '')))

    def responsetype(self, v1compressible=False):
        """Determine the appropriate response type and compression settings.

        The ``v1compressible`` argument states whether the response with
        application/mercurial-0.1 media types should be zlib compressed.

        Returns a tuple of (mediatype, compengine, engineopts).
        """
        # For now, if it isn't compressible in the old world, it's never
        # compressible. We can change this to send uncompressed 0.2 payloads
        # later.
        if not v1compressible:
            return HGTYPE, None, None

        # Determine the response media type and compression engine based
        # on the request parameters.
        protocaps = decodevaluefromheaders(self.req, 'X-HgProto').split(' ')

        if '0.2' in protocaps:
            # Default as defined by wire protocol spec.
            compformats = ['zlib', 'none']
            for cap in protocaps:
                if cap.startswith('comp='):
                    compformats = cap[5:].split(',')
                    break

            # Now find an agreed upon compression format.
            for engine in wireproto.supportedcompengines(self.ui, self,
                                                         util.SERVERROLE):
                if engine.wireprotosupport().name in compformats:
                    opts = {}
                    # Server-side override of the compression level, e.g.
                    # server.zstdlevel (None when unset).
                    level = self.ui.configint('server',
                                              '%slevel' % engine.name())
                    if level is not None:
                        opts['level'] = level

                    return HGTYPE2, engine, opts

            # No mutually supported compression format. Fall back to the
            # legacy protocol.

        # Don't allow untrusted settings because disabling compression or
        # setting a very high compression level could lead to flooding
        # the server's network or CPU.
        opts = {'level': self.ui.configint('server', 'zliblevel', -1)}
        return HGTYPE, util.compengines['zlib'], opts
|
|
|
|
|
2010-07-16 00:05:04 +04:00
|
|
|
def iscmd(cmd):
    """Return whether ``cmd`` names a known wire protocol command."""
    known = wireproto.commands
    return cmd in known
|
|
|
|
|
|
|
|
def call(repo, req, cmd):
    """Dispatch wire protocol command ``cmd`` and emit the response body.

    Returns an iterable of body chunks after having called
    ``req.respond`` with the appropriate status and media type.
    """
    p = webproto(req, repo.ui)

    def genversion2(gen, compress, engine, engineopts):
        # application/mercurial-0.2 always sends a payload header
        # identifying the compression engine.
        name = engine.wireprotosupport().name
        # The header encodes the name length in a single byte.
        assert 0 < len(name) < 256
        yield struct.pack('B', len(name))
        yield name

        if compress:
            for chunk in engine.compressstream(gen, opts=engineopts):
                yield chunk
        else:
            for chunk in gen:
                yield chunk

    rsp = wireproto.dispatch(repo, p, cmd)
    if isinstance(rsp, str):
        # Simple string result: send as a legacy 0.1 response.
        req.respond(HTTP_OK, HGTYPE, body=rsp)
        return []
    elif isinstance(rsp, wireproto.streamres):
        # Streaming result: obtain a chunk generator, either from a
        # file-like reader or from the response's own generator.
        if rsp.reader:
            gen = iter(lambda: rsp.reader.read(32768), '')
        else:
            gen = rsp.gen

        # This code for compression should not be streamres specific. It
        # is here because we only compress streamres at the moment.
        mediatype, engine, engineopts = p.responsetype(rsp.v1compressible)

        if mediatype == HGTYPE and rsp.v1compressible:
            gen = engine.compressstream(gen, engineopts)
        elif mediatype == HGTYPE2:
            gen = genversion2(gen, rsp.v1compressible, engine, engineopts)

        req.respond(HTTP_OK, mediatype)
        return gen
    elif isinstance(rsp, wireproto.pushres):
        # Push result: prepend the integer result code to any output
        # captured while the push ran (see webproto.redirect/restore).
        val = p.restore()
        rsp = '%d\n%s' % (rsp.res, val)
        req.respond(HTTP_OK, HGTYPE, body=rsp)
        return []
    elif isinstance(rsp, wireproto.pusherr):
        # drain the incoming bundle
        req.drain()
        p.restore()
        # A leading "0" result code signals failure to the client.
        rsp = '0\n%s\n' % rsp.res
        req.respond(HTTP_OK, HGTYPE, body=rsp)
        return []
    elif isinstance(rsp, wireproto.ooberror):
        # Out-of-band error: delivered with a dedicated media type so the
        # client can distinguish it from command output.
        rsp = rsp.message
        req.respond(HTTP_OK, HGERRTYPE, body=rsp)
        return []
|