sapling/hgext/remotefilelog/fileserverclient.py

# fileserverclient.py - client for communicating with the cache process
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import hashlib
import io
import itertools
import os
import struct
import subprocess
import threading
import time

from mercurial import (
    encoding,
    error,
    httppeer,
    progress,
    revlog,
    sshpeer,
    util,
    wireproto,
)
from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from . import constants, shallowutil, wirepack
from .contentstore import unioncontentstore
from .lz4wrapper import lz4decompress
from .metadatastore import unionmetadatastore

# Statistics for debugging
fetchcost = 0
fetches = 0
fetched = 0
fetchmisses = 0

_lfsmod = None


def getcachekey(reponame, file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)


def getlocalkey(file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(pathhash, id)
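

# Key layout illustration (hypothetical hash): if sha1(file).hexdigest()
# were "abcdef...", getcachekey would return "<reponame>/ab/cdef.../<id>"
# and getlocalkey "abcdef.../<id>", where <id> is typically the
# 40-character hex file node.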


def peersetup(ui, peer):
    class remotefilepeer(peer.__class__):
        @wireproto.batchable
        def getfile(self, file, node):
            if not self.capable("getfile"):
                raise error.Abort(
                    "configured remotefile server does not support getfile"
                )
            f = wireproto.future()
            yield {"file": file, "node": node}, f
            code, data = f.value.split("\0", 1)
            if int(code):
                raise error.LookupError(file, node, data)
            yield data

        @wireproto.batchable
        def getflogheads(self, path):
            if not self.capable("getflogheads"):
                raise error.Abort(
                    "configured remotefile server does not support getflogheads"
                )
            f = wireproto.future()
            yield {"path": path}, f
            heads = f.value.split("\n") if f.value else []
            yield heads

        def _updatecallstreamopts(self, command, opts):
            if command != "getbundle":
                return
            if "remotefilelog" not in shallowutil.peercapabilities(self):
                return
            if not util.safehasattr(self, "_localrepo"):
                return
            if constants.REQUIREMENT not in self._localrepo.requirements:
                return

            bundlecaps = opts.get("bundlecaps")
            if bundlecaps:
                bundlecaps = [bundlecaps]
            else:
                bundlecaps = []

            # shallow, includepattern, and excludepattern are a hacky way of
            # carrying over data from the local repo to this getbundle
            # command. We need to do it this way because bundle1 getbundle
            # doesn't provide any other place we can hook in to manipulate
            # getbundle args before it goes across the wire. Once we get rid
            # of bundle1, we can use bundle2's _pullbundle2extraprepare to
            # do this more cleanly.
            bundlecaps.append("remotefilelog")
            if self._localrepo.includepattern:
                patterns = "\0".join(self._localrepo.includepattern)
                includecap = "includepattern=" + patterns
                bundlecaps.append(includecap)
            if self._localrepo.excludepattern:
                patterns = "\0".join(self._localrepo.excludepattern)
                excludecap = "excludepattern=" + patterns
                bundlecaps.append(excludecap)
            opts["bundlecaps"] = ",".join(bundlecaps)
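
        # Illustration (hypothetical patterns): with includepattern
        # ["foo/*"] and no excludepattern, the outgoing getbundle call
        # carries opts["bundlecaps"] == "remotefilelog,includepattern=foo/*";
        # multiple patterns within one cap are NUL-separated.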

        def _callstream(self, cmd, **opts):
            self._updatecallstreamopts(cmd, opts)
            return super(remotefilepeer, self)._callstream(cmd, **opts)

    peer.__class__ = remotefilepeer


class cacheconnection(object):
    """The connection for communicating with the remote cache. Performs
    gets and sets by communicating with an external process that has the
    cache-specific implementation.
    """

    def __init__(self):
        self.pipeo = self.pipei = None
        self.subprocess = None
        self.connected = False
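
    # Wire protocol with the cache process (as used by fileserverclient
    # below): requests are "get\n<count>\n<key>\n...",
    # "set\n<count>\n<key>\n...", and "exit\n" to quit. For a get, the
    # process replies with one line per missing key, optional "_hits_<n>_"
    # progress lines, and a terminating "0" line.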

    def connect(self, cachecommand):
        if self.pipeo:
            raise error.Abort(_("cache connection already open"))
        # Use subprocess.Popen() directly rather than the wrappers in
        # util in order to pipe stderr to /dev/null, thereby preventing
        # hangs in cases where the cache process fills the stderr pipe
        # buffer (since remotefilelog never reads from stderr).
        self.subprocess = subprocess.Popen(
            cachecommand,
            shell=True,
            close_fds=util.closefds,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, "wb"),
        )
        self.pipei = self.subprocess.stdin
        self.pipeo = self.subprocess.stdout
        self.connected = True
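
    # Note the naming: pipei is the child process's stdin (requests are
    # written to it) and pipeo is its stdout (replies are read from it).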

    def close(self):
        def tryclose(pipe):
            try:
                pipe.close()
            except Exception:
                pass

        if self.connected:
            try:
                self.pipei.write("exit\n")
            except Exception:
                pass
            tryclose(self.pipei)
            self.pipei = None
            tryclose(self.pipeo)
            self.pipeo = None
            try:
                # Wait for process to terminate, making sure to avoid deadlock.
                # See https://docs.python.org/2/library/subprocess.html for
                # warnings about wait() and deadlocking.
                self.subprocess.communicate()
            except Exception:
                pass
            self.subprocess = None
        self.connected = False

    def request(self, request, flush=True):
        if self.connected:
            try:
                self.pipei.write(request)
                if flush:
                    self.pipei.flush()
            except IOError:
                self.close()

    def receiveline(self):
        if not self.connected:
            return None
        # Initialize result so an IOError path below can't raise NameError.
        result = None
        try:
            result = self.pipeo.readline()[:-1]
            if not result:
                self.close()
        except IOError:
            self.close()

        return result


def _getfilesbatch(remote, receivemissing, progresstick, missed, idmap, batchsize):
    # Over http(s), iterbatch is a streamy method and we can start
    # looking at results early. This means we send one (potentially
    # large) request, but then we show nice progress as we process
    # file results, rather than showing chunks of $batchsize in
    # progress.
    #
    # Over ssh, iterbatch isn't streamy because batch() wasn't
    # explicitly designed as a streaming method. In the future we
    # should probably introduce a streambatch() method upstream and
    # use that for this.
    if (
        getattr(remote, "iterbatch", False)
        and remote.capable("httppostargs")
        and isinstance(remote, httppeer.httppeer)
    ):
        b = remote.iterbatch()
        for m in missed:
            file_ = idmap[m]
            node = m[-40:]
            b.getfile(file_, node)
        b.submit()
        for m, r in itertools.izip(missed, b.results()):
            file_ = idmap[m]
            node = m[-40:]
            progresstick(file_)
            receivemissing(io.BytesIO("%d\n%s" % (len(r), r)), file_, node)
        return
    while missed:
        chunk, missed = missed[:batchsize], missed[batchsize:]
        b = remote.iterbatch()
        for m in chunk:
            file_ = idmap[m]
            node = m[-40:]
            b.getfile(file_, node)
        b.submit()
        for m, v in zip(chunk, b.results()):
            file_ = idmap[m]
            node = m[-40:]
            progresstick(file_)
            receivemissing(io.BytesIO("%d\n%s" % (len(v), v)), file_, node)
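

# In the batch helpers above and below, every key in `idmap` ends with the
# 40-character hex node (see getcachekey), so `m[-40:]` recovers the node
# and `idmap[m]` the file path.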


def _getfiles_optimistic(remote, receivemissing, progresstick, missed, idmap, step):
    remote._callstream("getfiles")
    i = 0
    pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
    pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))
    while i < len(missed):
        # issue a batch of requests
        start = i
        end = min(len(missed), start + step)
        i = end
        for missingid in missed[start:end]:
            # issue new request
            versionid = missingid[-40:]
            file = idmap[missingid]
            sshrequest = "%s%s\n" % (versionid, file)
            pipeo.write(sshrequest)
        pipeo.flush()

        # receive batch results
        for missingid in missed[start:end]:
            versionid = missingid[-40:]
            file = idmap[missingid]
            progresstick(file)
            receivemissing(pipei, file, versionid)

    # End the command
    pipeo.write("\n")
    pipeo.flush()
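

# The "optimistic" strategy above alternates between writing a batch of up
# to `step` requests and draining the replies for that batch, cutting the
# number of blocking round trips without needing a second thread.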


def _getfiles_threaded(remote, receivemissing, progresstick, missed, idmap, step):
    remote._callstream("getfiles")
    pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
    pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))

    def writer():
        for missingid in missed:
            versionid = missingid[-40:]
            file = idmap[missingid]
            sshrequest = "%s%s\n" % (versionid, file)
            pipeo.write(sshrequest)
        pipeo.flush()

    writerthread = threading.Thread(target=writer)
    writerthread.daemon = True
    writerthread.start()

    for missingid in missed:
        versionid = missingid[-40:]
        file = idmap[missingid]
        progresstick(file)
        receivemissing(pipei, file, versionid)

    writerthread.join()
    # End the command
    pipeo.write("\n")
    pipeo.flush()
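

# The threaded strategy streams every request from a daemon writer thread
# while the main thread drains replies; draining concurrently with the
# writes is what keeps the pipe buffers from filling up and deadlocking.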


class fileserverclient(object):
    """A client for requesting files from the remote file server."""

    def __init__(self, repo):
        ui = repo.ui
        self.repo = repo
        self.ui = ui
        self.cacheprocess = ui.config("remotefilelog", "cacheprocess")
        if self.cacheprocess:
            self.cacheprocess = util.expandpath(self.cacheprocess)

        # This option causes remotefilelog to pass the full file path to the
        # cacheprocess instead of a hashed key.
        self.cacheprocesspasspath = ui.configbool(
            "remotefilelog", "cacheprocess.includepath"
        )

        self.debugoutput = ui.configbool("remotefilelog", "debug")

        self.remotecache = cacheconnection()

    def setstore(self, datastore, historystore, writedata, writehistory):
        self.datastore = datastore
        self.historystore = historystore
        self.writedata = writedata
        self.writehistory = writehistory

    def _connect(self):
        return self.repo.connectionpool.get(self.repo.fallbackpath)

    def request(self, fileids):
        """Takes a list of filename/node pairs and fetches them from the
        server. Files are stored in the local cache.

        A list of nodes that the server couldn't find is returned.
        If the connection fails, an exception is raised.
        """
        if not self.remotecache.connected:
            self.connect()
        cache = self.remotecache
        writedata = self.writedata

        if self.ui.configbool("remotefilelog", "fetchpacks"):
            self.requestpack(fileids)
            return

        repo = self.repo
        total = len(fileids)
        request = "get\n%d\n" % total

        idmap = {}
        reponame = repo.name
        getfilenamepath = lambda name: None
        if util.safehasattr(self.writedata, "_getfilenamepath"):
            getfilenamepath = self.writedata._getfilenamepath
        for file, id in fileids:
            fullid = getcachekey(reponame, file, id)
            if self.cacheprocesspasspath:
                request += file + "\0"
            request += fullid + "\n"
            idmap[fullid] = file

        cache.request(request)

        with progress.bar(self.ui, _("downloading"), total=total) as prog:
            missed = []
            count = 0
            while True:
                missingid = cache.receiveline()
                if not missingid:
                    missedset = set(missed)
                    for missingid in idmap.iterkeys():
                        if missingid not in missedset:
                            missed.append(missingid)
                    self.ui.warn(
                        _(
                            "warning: cache connection closed early - "
                            "falling back to server\n"
                        )
                    )
                    break
                if missingid == "0":
                    break
                if missingid.startswith("_hits_"):
                    # receive progress reports
                    parts = missingid.split("_")
                    count += int(parts[2])
                    prog.value = count
                    continue

                missed.append(missingid)

            # If the cacheprocess knew the filename, it should store it
            # somewhere useful (e.g. in a pack file it generates). Otherwise,
            # we must write the filename out for it.
            if not self.cacheprocesspasspath:
                missedset = set(missed)
                for fullid, file in idmap.iteritems():
                    if fullid not in missedset:
                        filenamepath = getfilenamepath(fullid)
                        if (
                            filenamepath is not None
                            and os.path.isdir(os.path.dirname(filenamepath))
                            and not os.path.exists(filenamepath)
                        ):
                            shallowutil.writefile(filenamepath, file, readonly=True)

            global fetchmisses
            fetchmisses += len(missed)

            count = [total - len(missed)]
            fromcache = count[0]
            prog.value = count[0]
            self.ui.log(
                "remotefilelog",
                "remote cache hit rate is %r of %r\n",
                count[0],
                total,
                hit=count[0],
                total=total,
            )

            oldumask = os.umask(0o002)
            try:
                # receive cache misses from master
                if missed:
                    # try to get rid of possible duplicates
                    missedset = set(missed)
                    if len(missedset) != len(missed):
                        self.ui.develwarn("Fetch request contains duplicates")
                        missed = list(missedset)

                    def progresstick(name=""):
                        count[0] += 1
                        prog.value = (count[0], name)

                    # When verbose is true, sshpeer prints 'running ssh...'
                    # to stdout, which can interfere with some command
                    # outputs
                    verbose = self.ui.verbose
                    self.ui.verbose = False
                    try:
                        with self._connect() as conn:
                            remote = conn.peer
                            # TODO: deduplicate this with the constant in
                            # shallowrepo
                            if remote.capable("remotefilelog"):
                                if not isinstance(remote, sshpeer.sshpeer):
                                    msg = "remotefilelog requires ssh servers"
                                    raise error.Abort(msg)
                                step = self.ui.configint(
                                    "remotefilelog", "getfilesstep", 10000
                                )
                                getfilestype = self.ui.config(
                                    "remotefilelog", "getfilestype", "optimistic"
                                )
                                if getfilestype == "threaded":
                                    _getfiles = _getfiles_threaded
                                else:
                                    _getfiles = _getfiles_optimistic
                                _getfiles(
                                    remote,
                                    self.receivemissing,
                                    progresstick,
                                    missed,
                                    idmap,
                                    step,
                                )
                            elif remote.capable("getfile"):
                                if remote.capable("batch"):
                                    batchdefault = 100
                                else:
                                    batchdefault = 10
                                batchsize = self.ui.configint(
                                    "remotefilelog", "batchsize", batchdefault
                                )
                                _getfilesbatch(
                                    remote,
                                    self.receivemissing,
                                    progresstick,
                                    missed,
                                    idmap,
                                    batchsize,
                                )
                            else:
                                msg = (
                                    "configured remotefilelog server"
                                    " does not support remotefilelog"
                                )
                                raise error.Abort(msg)

                        self.ui.log(
                            "remotefilefetchlog",
                            "Success\n",
                            fetched_files=count[0] - fromcache,
                            total_to_fetch=total - fromcache,
                        )
                    except Exception:
                        self.ui.log(
                            "remotefilefetchlog",
                            "Fail\n",
                            fetched_files=count[0] - fromcache,
                            total_to_fetch=total - fromcache,
                        )
                        raise
                    finally:
                        self.ui.verbose = verbose

                    # send to memcache
                    if self.ui.configbool("remotefilelog", "updatesharedcache"):
                        count[0] = len(missed)
                        request = "set\n%d\n%s\n" % (count[0], "\n".join(missed))
                        cache.request(request)

                # mark ourselves as a user of this cache
                writedata.markrepo(self.repo.path)
            finally:
                os.umask(oldumask)

    def receivemissing(self, pipe, filename, node):
        line = pipe.readline()[:-1]
        if not line:
            raise error.ResponseError(
                _("error downloading file contents:"), _("connection closed early")
            )
        size = int(line)
        data = pipe.read(size)
        if len(data) != size:
            raise error.ResponseError(
                _("error downloading file contents:"),
                _("only received %s of %s bytes") % (len(data), size),
            )
        self.writedata.addremotefilelognode(filename, bin(node), lz4decompress(data))
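
    # Each getfiles reply frame parsed above is "<decimal size>\n" followed
    # by exactly <size> bytes of lz4-compressed file content; an empty line
    # means the server closed the connection early.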

    def requestpack(self, fileids):
        """Requests the given file revisions from the server in a pack format.

        See `remotefilelogserver.getpack` for the file format.
        """
        # Initialize these before the try block so the except-path log below
        # can't raise NameError if _connect() itself fails.
        total = len(fileids)
        rcvd = 0
        try:
            with self._connect() as conn:
                remote = conn.peer
                remote._callstream("getpackv1")

                self._sendpackrequest(remote, fileids)

                packpath = shallowutil.getcachepackpath(
                    self.repo, constants.FILEPACK_CATEGORY
                )
                pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))
                receiveddata, receivedhistory = shallowutil.receivepack(
                    self.repo.ui, pipei, packpath
                )
                rcvd = len(receiveddata)

            self.ui.log(
                "remotefilefetchlog",
                "Success(pack)\n" if (rcvd == total) else "Fail(pack)\n",
                fetched_files=rcvd,
                total_to_fetch=total,
            )
        except Exception:
            self.ui.log(
                "remotefilefetchlog",
                "Fail(pack)\n",
                fetched_files=rcvd,
                total_to_fetch=total,
            )
            raise

    def _sendpackrequest(self, remote, fileids):
        """Formats and writes the given fileids to the remote as part of a
        getpackv1 call.
        """
        # Group the requests by name, so we receive responses in batches by
        # name
        grouped = {}
        for filename, node in fileids:
            grouped.setdefault(filename, set()).add(node)

        # Issue request
        pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
        for filename, nodes in grouped.iteritems():
            filenamelen = struct.pack(constants.FILENAMESTRUCT, len(filename))
            countlen = struct.pack(constants.PACKREQUESTCOUNTSTRUCT, len(nodes))
            rawnodes = "".join(bin(n) for n in nodes)

            pipeo.write("%s%s%s%s" % (filenamelen, filename, countlen, rawnodes))
            pipeo.flush()
        pipeo.write(struct.pack(constants.FILENAMESTRUCT, 0))
        pipeo.flush()
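
    # Resulting getpackv1 request stream, per file:
    #   <filename length: FILENAMESTRUCT><filename>
    #   <node count: PACKREQUESTCOUNTSTRUCT><20-byte binary nodes...>
    # terminated by a zero-length filename.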

    def connect(self):
        if self.cacheprocess:
            cmd = "%s %s" % (self.cacheprocess, self.writedata._path)
            self.remotecache.connect(cmd)
        else:
            # If no cache process is specified, we fake one that always
            # returns cache misses. This enables tests to run easily
            # and may eventually allow us to be a drop in replacement
            # for the largefiles extension.
            class simplecache(object):
                def __init__(self):
                    self.missingids = []
                    self.connected = True

                def close(self):
                    pass

                def request(self, value, flush=True):
                    lines = value.split("\n")
                    if lines[0] != "get":
                        return
                    self.missingids = lines[2:-1]
                    self.missingids.append("0")

                def receiveline(self):
                    if len(self.missingids) > 0:
                        return self.missingids.pop(0)
                    return None

            self.remotecache = simplecache()
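
    # Example (hypothetical command): with "remotefilelog.cacheprocess =
    # my-cache-client" configured, connect() spawns "my-cache-client
    # <cache path>" and speaks the get/set protocol documented in
    # cacheconnection over the child's stdin/stdout.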

    def close(self):
        # Make it "run-tests.py -i" friendly
        if "TESTTMP" in encoding.environ:
            global fetchcost
            fetchcost = 0
        if fetches:
            msg = (
                "%s files fetched over %d fetches - "
                "(%d misses, %0.2f%% hit ratio) over %0.2fs\n"
            ) % (
                fetched,
                fetches,
                fetchmisses,
                float(fetched - fetchmisses) / float(fetched) * 100.0,
                fetchcost,
            )
            if self.debugoutput:
                self.ui.warn(msg)
            self.ui.log(
                "remotefilelog.prefetch",
                msg.replace("%", "%%"),
                remotefilelogfetched=fetched,
                remotefilelogfetches=fetches,
                remotefilelogfetchmisses=fetchmisses,
                remotefilelogfetchtime=fetchcost * 1000,
            )

        if self.remotecache.connected:
            self.remotecache.close()

    def prefetch(self, fileids, force=False, fetchdata=True, fetchhistory=False):
        """downloads the given file versions to the cache"""
        repo = self.repo
        idstocheck = []
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if file == ".hgtags" or len(id) == 42 or not repo.shallowmatch(file):
                continue
            idstocheck.append((file, bin(id)))

        datastore = self.datastore
        historystore = self.historystore
        if force:
            datastore = unioncontentstore(*repo.fileslog.shareddatastores)
            historystore = unionmetadatastore(*repo.fileslog.sharedhistorystores)

        missingids = set()
        if fetchdata:
            missingids.update(datastore.getmissing(idstocheck))
        if fetchhistory:
            missingids.update(historystore.getmissing(idstocheck))

        # partition missing nodes into nullid and not-nullid so we can
        # warn about this filtering potentially shadowing bugs.
        nullids = len([None for unused, id in missingids if id == nullid])
        if nullids:
            missingids = [(f, id) for f, id in missingids if id != nullid]
            repo.ui.develwarn(
                (
                    "remotefilelog not fetching %d null revs"
                    " - this is likely hiding bugs" % nullids
                ),
                config="remotefilelog-ext",
            )

        batchlfsdownloads = self.ui.configbool(
            "remotefilelog", "_batchlfsdownloads", True
        )
        dolfsprefetch = self.ui.configbool("remotefilelog", "dolfsprefetch", True)

        if missingids:
            global fetches, fetched, fetchcost
            fetches += 1

            # We want to be able to detect excess individual file downloads,
            # so let's log that information for debugging.
            if fetches >= 15 and fetches < 18:
                if fetches == 15:
                    fetchwarning = self.ui.config("remotefilelog", "fetchwarning")
                    if fetchwarning:
                        self.ui.warn(fetchwarning + "\n")
                self.logstacktrace()
            missingids = [(file, hex(id)) for file, id in missingids]
            fetched += len(missingids)
            start = time.time()
            with self.ui.timesection("fetchingfiles"):
                missingids = self.request(missingids)
            if missingids:
                raise error.Abort(_("unable to download %d files") % len(missingids))
            fetchcost += time.time() - start
            if not batchlfsdownloads and dolfsprefetch:
                self._lfsprefetch(fileids)

        if batchlfsdownloads and dolfsprefetch:
            self._lfsprefetch(fileids)
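
    # Typical call (sketch; assumes the shallowrepo wiring that exposes
    # this object as repo.fileservice):
    #   repo.fileservice.prefetch([("foo/bar.py", hex(fnode))], fetchdata=True)
    # which downloads any missing file contents before a batch of reads.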

    def _lfsprefetch(self, fileids):
        if not _lfsmod or not util.safehasattr(self.repo.svfs, "lfslocalblobstore"):
            return
        if not _lfsmod.wrapper.candownload(self.repo):
            return
        pointers = []
        store = self.repo.svfs.lfslocalblobstore
        for file, id in fileids:
            node = bin(id)
            rlog = self.repo.file(file)
            if rlog.flags(node) & revlog.REVIDX_EXTSTORED:
                text = rlog.revision(node, raw=True)
                p = _lfsmod.pointer.deserialize(text)
                oid = p.oid()
                if not store.has(oid):
                    pointers.append(p)
        if len(pointers) > 0:
            self.repo.svfs.lfsremoteblobstore.readbatch(pointers, store)
            assert all(store.has(p.oid()) for p in pointers)

    def logstacktrace(self):
        import traceback

        self.ui.log(
            "remotefilelog",
            "excess remotefilelog fetching:\n%s\n",
            "".join(traceback.format_stack()),
        )