2017-04-11 03:56:01 +03:00
|
|
|
# wirepack.py - wireprotocol for exchanging packs
|
|
|
|
#
|
|
|
|
# Copyright 2017 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2018-01-09 05:58:08 +03:00
|
|
|
from __future__ import absolute_import
|
2017-04-11 03:56:01 +03:00
|
|
|
|
|
|
|
import struct
|
|
|
|
from collections import defaultdict
|
2018-07-06 03:45:27 +03:00
|
|
|
from StringIO import StringIO
|
|
|
|
|
2019-01-30 03:25:33 +03:00
|
|
|
from edenscm.mercurial import progress
|
|
|
|
from edenscm.mercurial.i18n import _
|
|
|
|
from edenscm.mercurial.node import hex, nullid
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
from . import constants, shallowutil
|
|
|
|
from .shallowutil import readexactly, readpath, readunpack
|
2017-04-11 03:56:01 +03:00
|
|
|
|
|
|
|
|
flake8: enable F821 check
Summary:
This check is useful and detects real errors (ex. fbconduit). Unfortunately
`arc lint` will run it with both py2 and py3 so a lot of py2 builtins will
still be warned.
I didn't find a clean way to disable py3 check. So this diff tries to fix them.
For `xrange`, the change was done by a script:
```
import sys
import redbaron
headertypes = {'comment', 'endl', 'from_import', 'import', 'string',
'assignment', 'atomtrailers'}
xrangefix = '''try:
xrange(0)
except NameError:
xrange = range
'''
def isxrange(x):
try:
return x[0].value == 'xrange'
except Exception:
return False
def main(argv):
for i, path in enumerate(argv):
print('(%d/%d) scanning %s' % (i + 1, len(argv), path))
content = open(path).read()
try:
red = redbaron.RedBaron(content)
except Exception:
print(' warning: failed to parse')
continue
hasxrange = red.find('atomtrailersnode', value=isxrange)
hasxrangefix = 'xrange = range' in content
if hasxrangefix or not hasxrange:
print(' no need to change')
continue
# find a place to insert the compatibility statement
changed = False
for node in red:
if node.type in headertypes:
continue
# node.insert_before is an easier API, but it has bugs changing
# other "finally" and "except" positions. So do the insert
# manually.
# # node.insert_before(xrangefix)
line = node.absolute_bounding_box.top_left.line - 1
lines = content.splitlines(1)
content = ''.join(lines[:line]) + xrangefix + ''.join(lines[line:])
changed = True
break
if changed:
# "content" is faster than "red.dumps()"
open(path, 'w').write(content)
print(' updated')
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
For other py2 builtins that do not have a py3 equivalent, some `# noqa`
were added as a workaround for now.
Reviewed By: DurhamG
Differential Revision: D6934535
fbshipit-source-id: 546b62830af144bc8b46788d2e0fd00496838939
2018-02-10 04:31:44 +03:00
|
|
|
# Python 2/3 compatibility shim: Python 3 removed xrange, so probe for it
# and fall back to range (which is already lazy on Python 3).
try:
    xrange(0)
except NameError:
    xrange = range
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2017-04-11 03:56:01 +03:00
|
|
|
def sendpackpart(filename, history, data):
    """Serialize one per-file pack part, yielded as wire-ready chunks.

    A wirepack is formatted as follows:

    wirepack = <filename len: 2 byte unsigned int><filename>
               <history len: 4 byte unsigned int>[<history rev>,...]
               <data len: 4 byte unsigned int>[<data rev>,...]

    hist rev = <node: 20 byte>
               <p1node: 20 byte>
               <p2node: 20 byte>
               <linknode: 20 byte>
               <copyfromlen: 2 byte unsigned int>
               <copyfrom>

    data rev = <node: 20 byte>
               <deltabasenode: 20 byte>
               <delta len: 8 byte unsigned int>
               <delta>
    """
    rawfilenamelen = struct.pack(constants.FILENAMESTRUCT, len(filename))
    yield "%s%s" % (rawfilenamelen, filename)

    # Serialize and send history.  Collect the pieces in a list and join
    # once at the end; repeated `rawhistory += ...` is quadratic in the
    # number of history entries.
    historylen = struct.pack("!I", len(history))
    rawhistory = []
    for entry in history:
        # entry is (node, p1, p2, linknode, copyfrom); copyfrom may be None.
        copyfrom = entry[4] or ""
        copyfromlen = len(copyfrom)
        tup = entry[:-1] + (copyfromlen,)
        rawhistory.append(struct.pack("!20s20s20s20sH", *tup))
        if copyfrom:
            rawhistory.append(copyfrom)

    yield "%s%s" % (historylen, "".join(rawhistory))

    # Serialize and send data
    yield struct.pack("!I", len(data))

    # TODO: support datapack metadata
    for node, deltabase, delta in data:
        deltalen = struct.pack("!Q", len(delta))
        yield "%s%s%s%s" % (node, deltabase, deltalen, delta)
|
|
|
|
|
2017-04-11 03:56:01 +03:00
|
|
|
|
|
|
|
def closepart():
    """Return the end-of-stream marker for a wirepack.

    Ten NUL bytes: an empty filename (2-byte zero length), an empty
    history section (4-byte zero count) and an empty data section
    (4-byte zero count) -- see the wire format in sendpackpart().
    """
    return "\0\0\0\0\0\0\0\0\0\0"
|
|
|
|
|
2017-04-11 03:56:01 +03:00
|
|
|
|
2018-06-27 05:13:49 +03:00
|
|
|
def receivepack(ui, fh, dpack, hpack):
    """Read wirepack parts from fh, adding entries to dpack/hpack.

    Returns a pair of lists, ((filename, node), ...) for the data and
    history entries that were received.
    """
    receiveddata = []
    receivedhistory = []

    with progress.bar(ui, _("receiving pack")) as prog:
        while True:
            filename = readpath(fh)
            entrycount = 0

            # History entries precede the deltas for each file.
            for node, p1, p2, linknode, copyfrom in readhistory(fh):
                hpack.add(filename, node, p1, p2, linknode, copyfrom)
                receivedhistory.append((filename, node))
                entrycount += 1

            for node, deltabase, delta in readdeltas(fh):
                dpack.add(filename, node, deltabase, delta)
                receiveddata.append((filename, node))
                entrycount += 1

            # An empty filename with empty sections is the stream
            # terminator produced by closepart().
            if entrycount == 0 and filename == "":
                break
            prog.value += 1

    return receiveddata, receivedhistory
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2017-04-27 20:44:33 +03:00
|
|
|
def readhistory(fh):
    """Deserialize the history section of one wirepack part.

    Yields (node, p1node, p2node, linknode, copyfrom) tuples; copyfrom
    is the empty string when the revision is not a copy.
    """
    (count,) = readunpack(fh, "!I")
    for _index in xrange(count):
        node, p1, p2, linknode, copyfromlen = readunpack(fh, "!20s20s20s20sH")
        # A non-zero length means copyfromlen bytes of copy source follow.
        copyfrom = readexactly(fh, copyfromlen) if copyfromlen != 0 else ""
        yield (node, p1, p2, linknode, copyfrom)
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2017-04-27 20:44:33 +03:00
|
|
|
def readdeltas(fh):
    """Deserialize the data section of one wirepack part.

    Yields (node, deltabasenode, delta) tuples.
    """
    (count,) = readunpack(fh, "!I")
    for _index in xrange(count):
        node, deltabase, deltalen = readunpack(fh, "!20s20sQ")
        yield (node, deltabase, readexactly(fh, deltalen))
|
2017-08-29 23:02:22 +03:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2017-08-29 23:02:22 +03:00
|
|
|
class wirepackstore(object):
    """Read-only content/history store backed by a raw wirepack blob.

    The whole pack is parsed eagerly by the constructor; add() is
    intentionally unsupported afterwards.
    """

    def __init__(self, wirepack):
        # (filename, node) -> (delta, deltabase)
        self._data = {}
        # (filename, node) -> (p1node, p2node, linknode, copyfrom)
        self._history = {}
        fh = StringIO(wirepack)
        self._load(fh)

    def __iter__(self):
        """Iterate over the (filename, node) keys of the data entries."""
        for key in self._data:
            yield key

    def get(self, name, node):
        raise RuntimeError("must use getdeltachain with wirepackstore")

    def getdeltachain(self, name, node):
        """Return the one-link delta chain for a revision.

        Raises KeyError if (name, node) is not in the pack.
        """
        delta, deltabase = self._data[(name, node)]
        return [(name, node, name, deltabase, delta)]

    def getmeta(self, name, node):
        """Return the metadata dict for a revision.

        Wirepacks do not carry datapack metadata yet (see the TODO in
        sendpackpart), so the flag is empty and the size is the length
        of the stored delta.
        """
        try:
            # The stored value is a (delta, deltabase) pair; report the
            # delta's length.  (Previously this took len() of the pair
            # itself, which is always 2.)
            delta, deltabase = self._data[(name, node)]
        except KeyError:
            raise KeyError((name, hex(node)))
        return {constants.METAKEYFLAG: "", constants.METAKEYSIZE: len(delta)}

    def getancestors(self, name, node, known=None):
        """Return {ancestor node: (p1, p2, linknode, copyfrom)} for every
        ancestor of (name, node) reachable within this pack.

        Nodes in ``known`` are treated as boundaries and not expanded.
        Raises KeyError when the starting revision is entirely unknown.
        """
        if known is None:
            known = set()
        if node in known:
            return []

        ancestors = {}
        seen = set()
        missing = [(name, node)]
        while missing:
            curname, curnode = missing.pop()
            # Look up the entry being visited.  (Previously this looked
            # up the starting (name, node) on every iteration, so the
            # walk never advanced past the first revision.)
            info = self._history.get((curname, curnode))
            if info is None:
                continue

            p1, p2, linknode, copyfrom = info
            if p1 != nullid and p1 not in known:
                # A copy continues its history under the source name.
                key = (curname if not copyfrom else copyfrom, p1)
                if key not in seen:
                    seen.add(key)
                    missing.append(key)
            if p2 != nullid and p2 not in known:
                key = (curname, p2)
                if key not in seen:
                    seen.add(key)
                    missing.append(key)

            ancestors[curnode] = (p1, p2, linknode, copyfrom)
        if not ancestors:
            raise KeyError((name, hex(node)))
        return ancestors

    def getnodeinfo(self, name, node):
        """Return (p1, p2, linknode, copyfrom) for a revision, raising a
        KeyError with a hex node for readability when missing."""
        try:
            return self._history[(name, node)]
        except KeyError:
            raise KeyError((name, hex(node)))

    def add(self, *args):
        raise RuntimeError("cannot add to a wirepack store")

    def getmissing(self, keys):
        """Return the subset of (name, node) keys absent from this pack."""
        missing = []
        for name, node in keys:
            if (name, node) not in self._data:
                missing.append((name, node))

        return missing

    def _load(self, fh):
        """Parse the wirepack stream into the in-memory dictionaries."""
        data = self._data
        history = self._history
        while True:
            filename = readpath(fh)
            count = 0

            # Store the history for later sorting
            for value in readhistory(fh):
                node = value[0]
                history[(filename, node)] = value[1:]
                count += 1

            for node, deltabase, delta in readdeltas(fh):
                data[(filename, node)] = (delta, deltabase)
                count += 1

            # Empty filename with empty sections is the stream
            # terminator (see closepart()).
            if count == 0 and filename == "":
                break

    def markledger(self, ledger, options=None):
        # Part of the repack/ledger interface; nothing to mark for an
        # in-memory store.
        pass

    def cleanup(self, ledger):
        # Part of the repack/ledger interface; nothing to clean up.
        pass

    def debugstats(self):
        return "%d data items, %d history items" % (len(self._data), len(self._history))
|