2013-11-26 04:36:44 +04:00
|
|
|
# remotefilelogserver.py - server logic for a remotefilelog server
|
|
|
|
#
|
|
|
|
# Copyright 2013 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2017-04-27 05:52:20 +03:00
|
|
|
from __future__ import absolute_import
|
2013-11-26 04:36:44 +04:00
|
|
|
|
2015-01-15 00:14:35 +03:00
|
|
|
from mercurial import wireproto, changegroup, match, util, changelog, context
|
2016-04-26 23:00:31 +03:00
|
|
|
from mercurial import exchange, sshserver, store, error
|
2013-11-26 04:36:44 +04:00
|
|
|
from mercurial.extensions import wrapfunction
|
2015-06-30 18:04:47 +03:00
|
|
|
from mercurial.hgweb import protocol as httpprotocol
|
2013-11-26 04:36:44 +04:00
|
|
|
from mercurial.node import bin, hex, nullid, nullrev
|
|
|
|
from mercurial.i18n import _
|
2017-04-27 05:52:20 +03:00
|
|
|
from . import (
|
|
|
|
constants,
|
|
|
|
lz4wrapper,
|
|
|
|
shallowrepo,
|
|
|
|
shallowutil,
|
|
|
|
wirepack,
|
|
|
|
)
|
2017-04-11 03:56:01 +03:00
|
|
|
import errno, stat, os, time
|
2013-11-26 04:36:44 +04:00
|
|
|
|
flake8: enable F821 check
Summary:
This check is useful and detects real errors (ex. fbconduit). Unfortunately
`arc lint` will run it with both py2 and py3 so a lot of py2 builtins will
still be warned.
I didn't find a clean way to disable the py3 check, so this diff tries to fix the warnings.
For `xrange`, the change was done by a script:
```
import sys
import redbaron
headertypes = {'comment', 'endl', 'from_import', 'import', 'string',
'assignment', 'atomtrailers'}
xrangefix = '''try:
xrange(0)
except NameError:
xrange = range
'''
def isxrange(x):
try:
return x[0].value == 'xrange'
except Exception:
return False
def main(argv):
for i, path in enumerate(argv):
print('(%d/%d) scanning %s' % (i + 1, len(argv), path))
content = open(path).read()
try:
red = redbaron.RedBaron(content)
except Exception:
print(' warning: failed to parse')
continue
hasxrange = red.find('atomtrailersnode', value=isxrange)
hasxrangefix = 'xrange = range' in content
if hasxrangefix or not hasxrange:
print(' no need to change')
continue
# find a place to insert the compatibility statement
changed = False
for node in red:
if node.type in headertypes:
continue
# node.insert_before is an easier API, but it has bugs changing
# other "finally" and "except" positions. So do the insert
# manually.
# # node.insert_before(xrangefix)
line = node.absolute_bounding_box.top_left.line - 1
lines = content.splitlines(1)
content = ''.join(lines[:line]) + xrangefix + ''.join(lines[line:])
changed = True
break
if changed:
# "content" is faster than "red.dumps()"
open(path, 'w').write(content)
print(' updated')
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
For other py2 builtins that do not have a py3 equivalent, some `# noqa`
were added as a workaround for now.
Reviewed By: DurhamG
Differential Revision: D6934535
fbshipit-source-id: 546b62830af144bc8b46788d2e0fd00496838939
2018-02-10 04:31:44 +03:00
|
|
|
# Python 3 compatibility: the py2 builtin xrange was removed; alias it to
# range when it is absent so the xrange() calls below work on both versions.
try:
    xrange
except NameError:
    xrange = range
|
|
|
|
|
2015-10-14 00:17:02 +03:00
|
|
|
# Feature detection: the streamclone module (and its _walkstreamfiles
# helper, which we wrap later) only exists in newer Mercurial versions.
# Any failure — ImportError or AttributeError — means "not available".
hasstreamclone = False
try:
    from mercurial import streamclone
    streamclone._walkstreamfiles
    hasstreamclone = True
except Exception:
    pass
|
|
|
|
|
2013-11-26 04:36:44 +04:00
|
|
|
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos.

    Performs the process-wide wireproto setup (via onetimesetup) and wraps
    changegroup file generation so that file contents matched by the shallow
    pattern are not bundled for shallow clients; those clients fetch file
    data on demand instead.
    """
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source):
        caps = self._bundlecaps or []
        if shallowrepo.requirement in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            # iterate the caps list computed above (the original re-evaluated
            # self._bundlecaps or [] here for no reason)
            for cap in caps:
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            m = match.always(repo.root, '')
            if includepattern or excludepattern:
                m = match.match(repo.root, '', None,
                                includepattern, excludepattern)

            # keep only files the shallow matcher does NOT cover; matched
            # files are served lazily via the remotefilelog wire commands
            changedfiles = [f for f in changedfiles if not m(f)]
        return orig(self, changedfiles, linknodes, commonrevs, source)

    wrapfunction(changegroup.cg1packer, 'generatefiles', generatefiles)
|
2013-11-26 04:36:44 +04:00
|
|
|
|
|
|
|
# One-shot guard flag: onetimesetup() flips this so its wireproto wiring
# runs at most once per process.
onetime = False
|
|
|
|
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers.

    Registers the remotefilelog wire commands (getflogheads, getfiles,
    getfile, getpackv1, stream_out_shallow, getbundle_shallow) and wraps
    several core functions (_walkstreamfiles, capabilities/_capabilities,
    basefilectx._adjustlinkrev, hgweb iscmd) to make them shallow-aware.
    Guarded by the module-level ``onetime`` flag so the wiring happens at
    most once per process.
    """
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireproto.commands['getflogheads'] = (getflogheads, 'path')
    wireproto.commands['getfiles'] = (getfiles, '')
    wireproto.commands['getfile'] = (getfile, 'file node')
    wireproto.commands['getpackv1'] = (getpack, '*')

    # Shared mutable holder for streaming-clone state; the wrapped
    # _walkstreamfiles below reads it while stream_out_shallow is active.
    class streamstate(object):
        # matcher describing the client's shallow include/exclude patterns
        match = None
        # True only while a shallow stream clone is being served
        shallowremote = False
        # client asked to skip the flat 00manifest (treemanifest-only client)
        noflatmf = False
    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        # Wire command: stream clone for shallow clients.  Temporarily flips
        # the shared streamstate, restoring it in the finally clause.
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            state.noflatmf = other.get('noflatmanifest') == 'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            streamres = wireproto.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)
            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value
            return wireproto.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireproto.commands['stream_out_shallow'] = (stream_out_shallow, '*')

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowrepo.requirement in repo.requirements:
                # walk the store's data/ directory directly, yielding every
                # non-revlog file (remotefilelog stores blobs, not .i/.d)
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            shallowtrees = repo.ui.configbool('remotefilelog', 'shallowtrees',
                                              False)
            if 'treemanifest' in repo.requirements and not shallowtrees:
                # tree manifest revlogs live under meta/; always stream them
                # unless the client handles shallow trees itself
                for (u, e, s) in repo.store.datafiles():
                    if (u.startswith('meta/') and
                        (u.endswith('.i') or u.endswith('.d'))):
                        yield (u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            match = state.match  # NOTE: shadows the module-level match import
            if match and not match.always():
                for (u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (u, e, s)

            for x in repo.store.topfiles():
                if shallowtrees and x[0][:15] == '00manifesttree.':
                    continue
                if state.noflatmf and x[0][:11] == '00manifest.':
                    continue
                yield x

        elif shallowrepo.requirement in repo.requirements:
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(_("Cannot clone from a shallow repo "
                                "to a full repo."))
        else:
            for x in orig(repo):
                yield x

    # This function moved in Mercurial 3.5 and 3.6
    if hasstreamclone:
        wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)
    elif util.safehasattr(wireproto, '_walkstreamfiles'):
        wrapfunction(wireproto, '_walkstreamfiles', _walkstreamfiles)
    else:
        wrapfunction(exchange, '_walkstreamfiles', _walkstreamfiles)

    # We no longer use getbundle_shallow commands, but we must still
    # support it for migration purposes
    def getbundleshallow(repo, proto, others):
        # force the remotefilelog bundlecap, then defer to plain getbundle
        bundlecaps = others.get('bundlecaps', '')
        bundlecaps = set(bundlecaps.split(','))
        bundlecaps.add('remotefilelog')
        others['bundlecaps'] = ','.join(bundlecaps)

        return wireproto.commands["getbundle"][0](repo, proto, others)

    wireproto.commands["getbundle_shallow"] = (getbundleshallow, '*')

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        # NOTE: `ui` here is onetimesetup's argument, closed over.
        caps = orig(repo, proto)
        if ((shallowrepo.requirement in repo.requirements or
             ui.configbool('remotefilelog', 'server'))):
            if isinstance(proto, sshserver.sshserver):
                # legacy getfiles method which only works over ssh
                caps.append(shallowrepo.requirement)
            caps.append('getflogheads')
            caps.append('getfile')
        return caps
    if util.safehasattr(wireproto, '_capabilities'):
        wrapfunction(wireproto, '_capabilities', _capabilities)
    else:
        wrapfunction(wireproto, 'capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    wrapfunction(context.basefilectx, '_adjustlinkrev', _adjustlinkrev)

    # getfiles only works over ssh; keep hgweb from treating it as an HTTP
    # wire command.
    def _iscmd(orig, cmd):
        if cmd == 'getfiles':
            return False
        return orig(cmd)

    wrapfunction(httpprotocol, 'iscmd', _iscmd)
|
|
|
|
|
2015-06-30 22:02:07 +03:00
|
|
|
def _loadfileblob(repo, cachepath, path, node):
    """Return the lz4-compressed file blob for (path, node).

    Serves from the on-disk server cache when a non-empty entry exists;
    otherwise builds the blob with createfileblob(), compresses it, and
    best-effort writes it back to the cache (write failures are ignored so
    read-only users can still be served).
    """
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == nullid:
            # presumably the in-memory changelog is stale here — reload it
            # from the store and retry the lookup (TODO confirm)
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        text = createfileblob(filectx)
        text = lz4wrapper.lzcompresshc(text)

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except OSError as ex:
                    # a concurrent request may have created it; only real
                    # failures propagate
                    if ex.errno != errno.EEXIST:
                        raise

            f = None
            try:
                # atomictempfile avoids readers seeing a partial blob
                f = util.atomictempfile(filecachepath, "w")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with util.posixfile(filecachepath, "r") as f:
            text = f.read()
    return text
|
|
|
|
|
2016-08-02 20:40:42 +03:00
|
|
|
def getflogheads(repo, proto, path):
    """Wire command: return the filelog heads for *path*.

    The response is the newline-joined hex node ids of the filelog's heads,
    with the null node filtered out.
    """
    filelog = repo.file(path)
    hexheads = [hex(head) for head in filelog.heads() if head != nullid]
    return '\n'.join(hexheads)
|
2016-08-02 20:40:42 +03:00
|
|
|
|
2015-07-01 00:32:31 +03:00
|
|
|
def getfile(repo, proto, file, node):
    """Wire command: serve one version of one file.  Batchable.

    The response is ``<errorcode>\\0<payload>``: errorcode '0' means success
    and the payload is a compressed blob carrying revlog flag and ancestor
    information (see createfileblob); errorcode '1' means failure and the
    payload is the error message.
    """
    if shallowrepo.requirement in repo.requirements:
        return '1\0' + _('cannot fetch remote files from shallow repo')
    cachepath = (repo.ui.config("remotefilelog", "servercachepath")
                 or os.path.join(repo.path, "remotefilelogcache"))
    binnode = bin(node.strip())
    if binnode == nullid:
        # null node has no content; succeed with an empty payload
        return '0\0'
    return '0\0' + _loadfileblob(repo, cachepath, file, binnode)
|
2015-07-01 00:32:31 +03:00
|
|
|
|
2013-11-26 04:36:44 +04:00
|
|
|
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files.

    ssh-only streaming command.  Each request line is ``<40 hex node><path>``;
    the response for each is ``<len>\\n<blob>`` (or ``0\\n`` for the null
    node).  An empty request line ends the session.
    """
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, sshserver.sshserver):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto.fin

        cachepath = repo.ui.config("remotefilelog", "servercachepath")
        if not cachepath:
            cachepath = os.path.join(repo.path, "remotefilelogcache")

        while True:
            # strip the trailing newline; an empty line terminates
            request = fin.readline()[:-1]
            if not request:
                break

            node = bin(request[:40])
            if node == nullid:
                yield '0\n'
                continue

            path = request[40:]

            text = _loadfileblob(repo, cachepath, path, node)

            yield '%d\n%s' % (len(text), text)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto.fout.flush()
    return wireproto.streamres(streamer())
|
|
|
|
|
|
|
|
def createfileblob(filectx):
    """Serialize a file revision plus its ancestor metadata into a blob.

    format:
    v0:
        str(len(rawtext)) + '\0' + rawtext + ancestortext
    v1:
        'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
        metalist := metalist + '\n' + meta | meta
        meta := sizemeta | flagmeta
        sizemeta := METAKEYSIZE + str(len(rawtext))
        flagmeta := METAKEYFLAG + str(flag)

    note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
    length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.revision(frev, raw=True)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        # short-circuit linkrev computation while walking ancestors; the
        # _adjustlinkrev wrapper installed in onetimesetup honors this flag
        repo.forcelinkrev = True
        ancestors.extend(filectx.ancestors())

        # build the per-ancestor records in a list and join once, rather
        # than quadratic string concatenation
        ancestorrecords = []
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = nullid
            p2 = nullid
            if len(parents) > 0:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = ""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            ancestorrecords.append("%s%s%s%s%s\0" % (
                ancestorctx.filenode(), p1, p2, linknode,
                copyname))
        ancestortext = "".join(ancestorrecords)
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return "%s\0%s%s" % (header, text, ancestortext)
|
2013-11-26 04:36:44 +04:00
|
|
|
|
|
|
|
def gcserver(ui, repo):
    """Garbage-collect the server's remotefilelog cache.

    Keeps every cached blob reachable from the manifests of recent heads
    (``heads(tip~25000:)``) and deletes any other cache file older than
    ``remotefilelog.serverexpiration`` days (default 30).  No-op unless
    ``remotefilelog.server`` is enabled.
    """
    if not repo.ui.configbool("remotefilelog", "server"):
        return

    neededfiles = set()
    heads = repo.revs("heads(tip~25000:) - null")

    cachepath = repo.vfs.join("remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.iteritems():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint("remotefilelog", "serverexpiration", 30)
    expiration = time.time() - (days * 24 * 60 * 60)

    _removing = _("removing old server cache")
    count = 0
    ui.progress(_removing, count, unit="files")
    for root, dirs, files in os.walk(cachepath):
        # `filename` (not `file`) avoids shadowing the py2 builtin
        for filename in files:
            filepath = os.path.join(root, filename)
            count += 1
            ui.progress(_removing, count, unit="files")
            if filepath in neededfiles:
                continue

            # `filestat` (not `stat`) avoids shadowing the imported stat module
            filestat = os.stat(filepath)
            if filestat.st_mtime < expiration:
                os.remove(filepath)

    ui.progress(_removing, None)
|
2016-05-16 20:59:09 +03:00
|
|
|
|
|
|
|
def getpack(repo, proto, args):
    """A server api for requesting a pack of file information.

    ssh-only streaming command: reads a pack request from proto.fin and
    streams history and delta entries back via wirepack.
    """
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, sshserver.sshserver):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        """Request format:

        [<filerequest>,...]\0\0
        filerequest = <filename len: 2 byte><filename><count: 4 byte>
                      [<node: 20 byte>,...]

        Response format:
        [<fileresponse>,...]<10 null bytes>
        fileresponse = <filename len: 2 byte><filename><history><deltas>
        history = <count: 4 byte>[<history entry>,...]
        historyentry = <node: 20 byte><p1: 20 byte><p2: 20 byte>
                       <linknode: 20 byte><copyfrom len: 2 byte><copyfrom>
        deltas = <count: 4 byte>[<delta entry>,...]
        deltaentry = <node: 20 byte><deltabase: 20 byte>
                     <delta len: 8 byte><delta>
        """
        files = _receivepackrequest(proto.fin)

        # Sort the files by name, so we provide deterministic results
        for filename, nodes in sorted(files.iteritems()):
            fl = repo.file(filename)

            # Compute history
            history = []
            for rev in fl.ancestors(list(fl.rev(n) for n in nodes),
                                    inclusive=True):
                # first four index fields (start, length, size, base) are
                # unused here; `x` deliberately absorbs them
                x, x, x, x, linkrev, p1, p2, node = fl.index[rev]
                copyfrom = ''
                p1node = fl.node(p1)
                p2node = fl.node(p2)
                linknode = repo.changelog.node(linkrev)
                if p1node == nullid:
                    # a null p1 may actually be a rename source; report the
                    # copy source node as p1 and record where it came from
                    copydata = fl.renamed(node)
                    if copydata:
                        copyfrom, copynode = copydata
                        p1node = copynode

                history.append((node, p1node, p2node, linknode, copyfrom))

            # Scan and send deltas (-1 stophint => full texts only)
            chain = _getdeltachain(fl, nodes, -1)

            for chunk in wirepack.sendpackpart(filename, history, chain):
                yield chunk

        yield wirepack.closepart()
        proto.fout.flush()

    return wireproto.streamres(streamer())
|
|
|
|
|
|
|
|
def _receivepackrequest(stream):
    """Parse a getpackv1 request from *stream*.

    Frames are ``<filename len: 2 byte><filename><count: 4 byte>`` followed
    by ``count`` 20-byte nodes; a zero-length filename terminates the
    request.  Returns a dict mapping filename -> set of binary nodes.
    """
    requests = {}
    while True:
        (namelen,) = shallowutil.readunpack(stream, constants.FILENAMESTRUCT)
        if namelen == 0:
            # zero-length filename marks the end of the request stream
            break

        name = shallowutil.readexactly(stream, namelen)

        (count,) = shallowutil.readunpack(stream,
                                          constants.PACKREQUESTCOUNTSTRUCT)

        # read all requested nodes as one blob, then slice it into
        # fixed-size node ids
        raw = shallowutil.readexactly(stream, constants.NODESIZE * count)
        nodesize = constants.NODESIZE
        requests[name] = set(raw[offset:offset + nodesize]
                             for offset in xrange(0, len(raw), nodesize))

    return requests
|
|
|
|
|
|
|
|
def _getdeltachain(fl, nodes, stophint):
    """Produces a chain of deltas that includes each of the given nodes.

    `stophint` - The changeset rev number to stop at. If it's set to >= 0, we
    will return not only the deltas for the requested nodes, but also all
    necessary deltas in their delta chains, as long as the deltas have link revs
    >= the stophint. This allows us to return an approximately minimal delta
    chain when the user performs a pull. If `stophint` is set to -1, all nodes
    will return full texts.

    Returns a list of (node, basenode, delta) tuples, oldest first.
    """
    chain = []

    seen = set()
    for node in nodes:
        startrev = fl.rev(node)
        cur = startrev
        # walk the delta chain from `cur` toward its base
        while True:
            if cur in seen:
                break
            # NOTE: this rebinds the loop variable `node` to cur's node id
            start, length, size, base, linkrev, p1, p2, node = fl.index[cur]
            if linkrev < stophint and cur != startrev:
                break

            # Return a full text if:
            # - the caller requested it (via stophint == -1)
            # - the revlog chain has ended (via base==null or base==node)
            # - p1 is null. In some situations this can mean it's a copy, so
            #   we need to use fl.read() to remove the copymetadata.
            if (stophint == -1 or base == nullrev or base == cur
                or p1 == nullrev):
                delta = fl.read(cur)
                base = nullrev
            else:
                delta = fl._chunk(cur)

            basenode = fl.node(base)
            chain.append((node, basenode, delta))
            seen.add(cur)

            if base == nullrev:
                break
            cur = base

    # entries were appended newest-first; callers want base-first order
    chain.reverse()
    return chain
|