2013-06-21 21:14:29 +04:00
|
|
|
# fileserverclient.py - client for communicating with the cache process
|
2013-05-07 03:44:04 +04:00
|
|
|
#
|
|
|
|
# Copyright 2013 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
2017-08-22 01:10:41 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
2019-01-02 15:40:13 +03:00
|
|
|
import functools
|
2018-07-06 03:45:27 +03:00
|
|
|
import hashlib
|
|
|
|
import io
|
2016-03-22 20:06:24 +03:00
|
|
|
import itertools
|
2018-07-06 03:45:27 +03:00
|
|
|
import os
|
|
|
|
import struct
|
2018-09-26 03:39:46 +03:00
|
|
|
import subprocess
|
2017-11-23 01:15:06 +03:00
|
|
|
import threading
|
2018-07-06 03:45:27 +03:00
|
|
|
import time
|
2013-05-07 03:59:05 +04:00
|
|
|
|
2019-01-30 03:25:33 +03:00
|
|
|
from edenscm.mercurial import (
|
2018-12-15 04:10:55 +03:00
|
|
|
encoding,
|
|
|
|
error,
|
|
|
|
httppeer,
|
|
|
|
progress,
|
|
|
|
revlog,
|
|
|
|
sshpeer,
|
|
|
|
util,
|
|
|
|
wireproto,
|
|
|
|
)
|
2019-01-30 03:25:33 +03:00
|
|
|
from edenscm.mercurial.i18n import _
|
|
|
|
from edenscm.mercurial.node import bin, hex, nullid
|
2018-05-30 12:16:33 +03:00
|
|
|
|
|
|
|
from . import constants, shallowutil, wirepack
|
2017-08-07 22:33:30 +03:00
|
|
|
from .contentstore import unioncontentstore
|
2017-08-22 01:10:41 +03:00
|
|
|
from .lz4wrapper import lz4decompress
|
2018-07-06 03:45:27 +03:00
|
|
|
from .metadatastore import unionmetadatastore
|
|
|
|
|
2016-05-16 20:59:09 +03:00
|
|
|
|
2013-05-07 03:59:05 +04:00
|
|
|
# Statistics for debugging
# Module-level counters aggregated over every fetch this process performs.
# They are only declared here; they are updated and reported elsewhere in
# this module (when "remotefilelog.debug" output is enabled — see callers).
fetchcost = 0  # accumulated fetch cost (presumably wall-clock seconds — confirm at update sites)
fetches = 0  # number of fetch operations issued
fetched = 0  # number of file revisions actually received
fetchmisses = 0  # fetches that missed the cache — TODO(review): confirm against update sites

# Slot for the lfs module; stays None until something assigns it elsewhere.
_lfsmod = None
|
2013-05-07 03:44:04 +04:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2013-08-15 22:00:51 +04:00
|
|
|
def getcachekey(reponame, file, id):
    """Return the shared-cache path for revision `id` of `file` in `reponame`.

    The sha1 of the file path is split after two hex digits so that the
    cache directory fan-out stays small.
    """
    digest = hashlib.sha1(file).hexdigest()
    shard, remainder = digest[:2], digest[2:]
    return os.path.join(reponame, shard, remainder, id)
|
2013-09-07 00:28:15 +04:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2013-09-07 00:28:15 +04:00
|
|
|
def getlocalkey(file, id):
    """Return the local-store path for revision `id` of `file`.

    Unlike the shared cache key, the full sha1 of the file path is used
    as a single directory component.
    """
    digest = hashlib.sha1(file).hexdigest()
    return os.path.join(digest, id)
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2015-07-01 00:32:31 +03:00
|
|
|
def peersetup(ui, peer):
    """Dynamically subclass `peer` in place to add remotefilelog wire commands.

    The peer gains `getfile`/`getflogheads` wire commands and a
    `_callstream` hook that augments `getbundle` requests with
    shallow-repo bundlecaps taken from the associated local repo.
    """

    class remotefilepeer(peer.__class__):
        @wireproto.batchable
        def getfile(self, file, node):
            # Batchable protocol: first yield the wire arguments together
            # with a future, then (once f.value has been filled in by the
            # batch machinery) yield the decoded result.
            if not self.capable("getfile"):
                raise error.Abort(
                    "configured remotefile server does not support getfile"
                )
            f = wireproto.future()
            yield {"file": file, "node": node}, f
            # Reply format is "<code>\0<payload>"; a nonzero code means the
            # payload is an error message for the missing file/node.
            code, data = f.value.split("\0", 1)
            if int(code):
                raise error.LookupError(file, node, data)
            yield data

        @wireproto.batchable
        def getflogheads(self, path):
            if not self.capable("getflogheads"):
                raise error.Abort(
                    "configured remotefile server does not " "support getflogheads"
                )
            f = wireproto.future()
            yield {"path": path}, f
            # Reply is a newline-separated list of head nodes; an empty
            # reply means no heads.
            heads = f.value.split("\n") if f.value else []
            yield heads

        def _updatecallstreamopts(self, command, opts):
            """Inject shallow-repo bundlecaps into a getbundle request.

            Mutates `opts` only when all of the following hold: the command
            is getbundle, the server advertises remotefilelog, a local repo
            has been attached to this peer (as `_localrepo`), and that repo
            is itself shallow (has the remotefilelog requirement).
            """
            if command != "getbundle":
                return
            if "remotefilelog" not in shallowutil.peercapabilities(self):
                return
            # _localrepo is attached to the peer externally; it is absent
            # for peers created outside a shallow-repo context.
            if not util.safehasattr(self, "_localrepo"):
                return
            if constants.REQUIREMENT not in self._localrepo.requirements:
                return

            # opts["bundlecaps"] is a single comma-joined string if present;
            # normalize to a list we can append to.
            bundlecaps = opts.get("bundlecaps")
            if bundlecaps:
                bundlecaps = [bundlecaps]
            else:
                bundlecaps = []

            # shallow, includepattern, and excludepattern are a hacky way of
            # carrying over data from the local repo to this getbundle
            # command. We need to do it this way because bundle1 getbundle
            # doesn't provide any other place we can hook in to manipulate
            # getbundle args before it goes across the wire. Once we get rid
            # of bundle1, we can use bundle2's _pullbundle2extraprepare to
            # do this more cleanly.
            bundlecaps.append("remotefilelog")
            if self._localrepo.includepattern:
                patterns = "\0".join(self._localrepo.includepattern)
                includecap = "includepattern=" + patterns
                bundlecaps.append(includecap)
            if self._localrepo.excludepattern:
                patterns = "\0".join(self._localrepo.excludepattern)
                excludecap = "excludepattern=" + patterns
                bundlecaps.append(excludecap)
            opts["bundlecaps"] = ",".join(bundlecaps)

        def _callstream(self, cmd, **opts):
            # Hook every outgoing stream command so that getbundle picks up
            # the shallow-repo bundlecaps computed above.
            self._updatecallstreamopts(cmd, opts)
            return super(remotefilepeer, self)._callstream(cmd, **opts)

    peer.__class__ = remotefilepeer
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2019-02-06 21:23:04 +03:00
|
|
|
class CacheConnectionError(Exception):
    """Exception raised if the cache connection was unexpectedly closed."""

    def __init__(self):
        # Bug fix: the previous code called ``super("<message>")``, which
        # raises TypeError (super() takes a class, not a string) the moment
        # this exception is constructed, and never attached the message.
        # Properly initialize the Exception base class instead.
        super(CacheConnectionError, self).__init__(
            "Scmmemcache connection was unexpectedly closed"
        )
|
|
|
|
|
|
|
|
|
2014-01-09 23:41:12 +04:00
|
|
|
class cacheconnection(object):
    """The connection for communicating with the remote cache. Performs
    gets and sets by communicating with an external process that has the
    cache-specific implementation.
    """

    def __init__(self):
        # pipei: the child's stdin — we *write* requests to it.
        # pipeo: the child's stdout — we *read* replies from it.
        self.pipeo = self.pipei = None
        self.subprocess = None
        self.connected = False

    def connect(self, cachecommand):
        """Spawn the external cache process and wire up its stdio pipes."""
        if self.pipeo:
            raise error.Abort(_("cache connection already open"))

        # Use subprocess.Popen() directly rather than the wrappers in
        # util in order to pipe stderr to /dev/null, thereby preventing
        # hangs in cases where the cache process fills the stderr pipe
        # buffer (since remotefilelog never reads from stderr).
        self.subprocess = subprocess.Popen(
            cachecommand,
            shell=True,
            close_fds=util.closefds,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, "wb"),
        )

        self.pipei = self.subprocess.stdin
        self.pipeo = self.subprocess.stdout
        self.connected = True

    def close(self):
        """Shut the cache process down, tolerating already-broken pipes."""

        def tryclose(pipe):
            # Best-effort close; the pipe may already be broken.
            try:
                pipe.close()
            except Exception:
                pass

        if self.connected:
            try:
                self.pipei.write("exit\n")
            except Exception:
                pass
            tryclose(self.pipei)
            self.pipei = None
            tryclose(self.pipeo)
            self.pipeo = None
            try:
                # Wait for process to terminate, making sure to avoid deadlock.
                # See https://docs.python.org/2/library/subprocess.html for
                # warnings about wait() and deadlocking.
                self.subprocess.communicate()
            except Exception:
                pass
            self.subprocess = None
        self.connected = False

    def request(self, request, flush=True):
        """Write `request` to the cache process; drop the connection on IOError."""
        if self.connected:
            try:
                self.pipei.write(request)
                if flush:
                    self.pipei.flush()
            except IOError:
                self.close()

    def receive(self, prog=None):
        """Reads the cacheprocess' reply for the request sent and tracks
        the progress. Returns a tuple (hit_count, misses) where:
        * `hit_count` - the number of hits
        * `misses` - list of missed keys

        Raises CacheConnectionError if the stream ends before the "0"
        terminator line is seen.
        """
        missed = []
        hitcount = 0
        while True:
            key = self._receiveline()
            if not key:
                raise CacheConnectionError()
            if key == "0":
                # the end of the stream
                break

            if key.startswith("_hits_"):
                # hit -> receive progress reports ("_hits_<count>_")
                parts = key.split("_")
                hitcount += int(parts[2])
                if prog is not None:
                    prog.value = hitcount
            else:
                missed.append(key)
        return (hitcount, missed)

    def _receiveline(self):
        """Read one newline-terminated reply line (sans newline).

        Returns None when disconnected or when the read fails; an empty
        string/None also closes the connection.
        """
        if not self.connected:
            return None
        # Bug fix: `result` must be initialized before the try block —
        # previously, an IOError from readline() left it unbound and the
        # final `return result` raised UnboundLocalError instead of
        # reporting the closed connection.
        result = None
        try:
            result = self.pipeo.readline()[:-1]
            if not result:
                self.close()
        except IOError:
            self.close()

        return result
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
|
|
|
def _getfilesbatch(remote, receivemissing, progresstick, missed, idmap, batchsize):
    """Fetch missing file revisions from `remote` via its batch API.

    `missed` holds keys whose last 40 characters are the hex file node;
    `idmap` maps each key back to its file name.  Each reply payload is
    re-framed as "<length>\\n<payload>" and handed to `receivemissing`,
    with `progresstick(filename)` called per file.
    """
    # Over http(s), iterbatch is a streamy method and we can start
    # looking at results early. This means we send one (potentially
    # large) request, but then we show nice progress as we process
    # file results, rather than showing chunks of $batchsize in
    # progress.
    #
    # Over ssh, iterbatch isn't streamy because batch() wasn't
    # explicitly designed as a streaming method. In the future we
    # should probably introduce a streambatch() method upstream and
    # use that for this.
    if (
        getattr(remote, "iterbatch", False)
        and remote.capable("httppostargs")
        and isinstance(remote, httppeer.httppeer)
    ):
        b = remote.iterbatch()
        for m in missed:
            file_ = idmap[m]
            node = m[-40:]
            b.getfile(file_, node)
        b.submit()
        # NOTE: itertools.izip (Python 2) is deliberately lazy so results
        # are consumed as they stream in; do not replace with a
        # materializing zip() without losing the streaming behavior.
        for m, r in itertools.izip(missed, b.results()):
            file_ = idmap[m]
            node = m[-40:]
            progresstick(file_)
            receivemissing(io.BytesIO("%d\n%s" % (len(r), r)), file_, node, m)
        return
    # Non-streamy (e.g. ssh) path: issue requests in chunks of `batchsize`
    # so progress is reported at least once per chunk.
    while missed:
        chunk, missed = missed[:batchsize], missed[batchsize:]
        b = remote.iterbatch()
        for m in chunk:
            file_ = idmap[m]
            node = m[-40:]
            b.getfile(file_, node)
        b.submit()
        for m, v in zip(chunk, b.results()):
            file_ = idmap[m]
            node = m[-40:]
            progresstick(file_)
            receivemissing(io.BytesIO("%d\n%s" % (len(v), v)), file_, node, m)
|
2015-07-01 00:32:31 +03:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
|
|
|
def _getfiles_optimistic(remote, receivemissing, progresstick, missed, idmap, step):
    """Fetch missing files over an ssh-style peer by pipelining requests.

    Requests go out in batches of `step`; all replies for a batch are
    consumed before the next batch is written.  Each request is flushed
    immediately so the server can start answering while we keep writing.
    A bare newline terminates the getfiles command on the server side.
    """
    remote._callstream("getfiles")
    pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
    pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))
    total = len(missed)
    for batchstart in range(0, total, step):
        batch = missed[batchstart : min(total, batchstart + step)]

        # issue a batch of requests
        for key in batch:
            # the last 40 characters of a key are the hex node; the rest
            # maps to a file name via idmap
            pipeo.write("%s%s\n" % (key[-40:], idmap[key]))
            pipeo.flush()

        # receive the batch results
        for key in batch:
            filename = idmap[key]
            progresstick(filename)
            receivemissing(pipei, filename, key[-40:], key)

    # End the command
    pipeo.write("\n")
    pipeo.flush()
|
2017-06-20 21:08:15 +03:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
|
|
|
def _getfiles_threaded(remote, receivemissing, progresstick, missed, idmap, step):
    """Fetch missing files over an ssh-style peer, overlapping read/write I/O.

    A daemon thread streams every request down the write pipe while this
    thread consumes the replies in order from the read pipe, so neither
    side blocks the other.  (`step` is unused here; the signature mirrors
    _getfiles_optimistic.)
    """
    remote._callstream("getfiles")
    pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
    pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))

    def _sendrequests():
        # One "<40-hex-node><filename>\n" line per missing key, flushed
        # immediately so the server can begin answering right away.
        for key in missed:
            pipeo.write("%s%s\n" % (key[-40:], idmap[key]))
            pipeo.flush()

    sender = threading.Thread(target=_sendrequests)
    sender.daemon = True
    sender.start()

    # Replies arrive in request order; consume them on this thread.
    for key in missed:
        filename = idmap[key]
        progresstick(filename)
        receivemissing(pipei, filename, key[-40:], key)

    sender.join()
    # End the command
    pipeo.write("\n")
    pipeo.flush()
|
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2013-05-07 03:44:04 +04:00
|
|
|
class fileserverclient(object):
|
|
|
|
"""A client for requesting files from the remote file server.
|
|
|
|
"""
|
2018-05-30 12:16:33 +03:00
|
|
|
|
2016-04-05 02:26:12 +03:00
|
|
|
def __init__(self, repo):
    """Initialize the client from `repo` and its ui configuration."""
    self.repo = repo
    self.ui = repo.ui

    cacheprocess = repo.ui.config("remotefilelog", "cacheprocess")
    if cacheprocess:
        cacheprocess = util.expandpath(cacheprocess)
    self.cacheprocess = cacheprocess

    # When set, the full file path (not just a hashed key) is handed to
    # the external cache process.
    self.cacheprocesspasspath = repo.ui.configbool(
        "remotefilelog", "cacheprocess.includepath"
    )

    self.debugoutput = repo.ui.configbool("remotefilelog", "debug")

    self.remotecache = cacheconnection()
2015-12-11 22:18:51 +03:00
|
|
|
|
2016-05-16 20:59:09 +03:00
|
|
|
def setstore(self, datastore, historystore, writedata, writehistory):
|
|
|
|
self.datastore = datastore
|
|
|
|
self.historystore = historystore
|
|
|
|
self.writedata = writedata
|
|
|
|
self.writehistory = writehistory
|
2016-04-05 02:26:12 +03:00
|
|
|
|
2015-12-11 22:18:51 +03:00
|
|
|
def _connect(self):
|
2017-11-21 17:52:51 +03:00
|
|
|
return self.repo.connectionpool.get(self.repo.fallbackpath)
|
2013-06-08 02:13:58 +04:00
|
|
|
|
2014-02-12 02:41:56 +04:00
|
|
|
def request(self, fileids):
    """Takes a list of filename/node pairs and fetches them from the
    server. Files are stored in the local cache.

    Returns None on success; callers (see prefetch) treat a falsy return
    as "everything downloaded".

    If the connection fails, an exception is raised.
    """
    # Lazily establish the cache-process connection on first use.
    if not self.remotecache.connected:
        self.connect()
    cache = self.remotecache
    writedata = self.writedata

    # Pack-based fetching bypasses the loose-file cache protocol below.
    if self.ui.configbool("remotefilelog", "fetchpacks"):
        self.requestpack(fileids)
        return

    repo = self.repo
    total = len(fileids)
    request = "get\n%d\n" % total
    idmap = {}
    reponame = repo.name
    # Optional hook on the write store mapping a cache key to the path
    # where the plain filename should be recorded.
    getfilenamepath = lambda name: None
    if util.safehasattr(self.writedata, "_getfilenamepath"):
        getfilenamepath = self.writedata._getfilenamepath
    for file, id in fileids:
        fullid = getcachekey(reponame, file, id)
        if self.cacheprocesspasspath:
            # Real path is NUL-separated from the hashed key.
            request += file + "\0"
        request += fullid + "\n"
        idmap[fullid] = file

    cache.request(request)

    with progress.bar(self.ui, _("downloading"), total=total) as prog:
        try:
            count, missed = cache.receive(prog)
        except CacheConnectionError:
            # Treat every id the cache did not explicitly report as
            # missed, then fall back to fetching from the server.
            # NOTE(review): `missed` is not bound here if receive()
            # raised before returning its tuple, which would make
            # set(missed) raise NameError — confirm against
            # cacheconnection.receive.
            missedset = set(missed)
            for missingid in idmap.iterkeys():
                if not missingid in missedset:
                    missed.append(missingid)
            self.ui.warn(
                _(
                    "warning: cache connection closed early - "
                    + "falling back to server\n"
                )
            )

        # If the cacheprocess knew the filename, it should store it
        # somewhere useful (e.g. in a pack file it generates). Otherwise,
        # we must write the filename out for it.
        if not self.cacheprocesspasspath:
            missedset = set(missed)
            for fullid, file in idmap.iteritems():
                if fullid not in missedset:
                    filenamepath = getfilenamepath(fullid)
                    if (
                        filenamepath is not None
                        and os.path.isdir(os.path.dirname(filenamepath))
                        and not os.path.exists(filenamepath)
                    ):
                        shallowutil.writefile(filenamepath, file, readonly=True)

        global fetchmisses
        fetchmisses += len(missed)

        # count is a single-element list so the nested progresstick
        # closure below can mutate it (py2 has no `nonlocal`).
        count = [total - len(missed)]
        fromcache = count[0]
        prog.value = count[0]
        self.ui.log(
            "remotefilelog",
            "remote cache hit rate is %r of %r\n",
            count[0],
            total,
            hit=count[0],
            total=total,
        )

        # Make files written below group-writable; restored in finally.
        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missed:
                # try to get rid of possible duplicates
                missedset = set(missed)
                if len(missedset) != len(missed):
                    self.ui.develwarn("Fetch request contains duplicates")
                    missed = list(missedset)

                def progresstick(name=""):
                    # Advance the progress bar by one fetched file.
                    count[0] += 1
                    prog.value = (count[0], name)

                # When verbose is true, sshpeer prints 'running ssh...'
                # to stdout, which can interfere with some command
                # outputs
                verbose = self.ui.verbose
                self.ui.verbose = False
                # Keys whose revisions receivemissing flags as drafts;
                # kept out of the shared memcache upload below.
                draft = set()
                try:
                    with self._connect() as conn:
                        remote = conn.peer
                        # TODO: deduplicate this with the constant in
                        # shallowrepo
                        if remote.capable("remotefilelog"):
                            if not isinstance(remote, sshpeer.sshpeer):
                                msg = "remotefilelog requires ssh servers"
                                raise error.Abort(msg)
                            step = self.ui.configint(
                                "remotefilelog", "getfilesstep", 10000
                            )
                            getfilestype = self.ui.config(
                                "remotefilelog", "getfilestype", "optimistic"
                            )
                            if getfilestype == "threaded":
                                _getfiles = _getfiles_threaded
                            else:
                                _getfiles = _getfiles_optimistic
                            _getfiles(
                                remote,
                                functools.partial(self.receivemissing, draft),
                                progresstick,
                                missed,
                                idmap,
                                step,
                            )
                        elif remote.capable("getfile"):
                            # Slower per-file protocol; batched when the
                            # server supports the batch capability.
                            if remote.capable("batch"):
                                batchdefault = 100
                            else:
                                batchdefault = 10
                            batchsize = self.ui.configint(
                                "remotefilelog", "batchsize", batchdefault
                            )
                            _getfilesbatch(
                                remote,
                                functools.partial(self.receivemissing, draft),
                                progresstick,
                                missed,
                                idmap,
                                batchsize,
                            )
                        else:
                            msg = (
                                "configured remotefilelog server"
                                " does not support remotefilelog"
                            )
                            raise error.Abort(msg)

                    self.ui.log(
                        "remotefilefetchlog",
                        "Success\n",
                        fetched_files=count[0] - fromcache,
                        total_to_fetch=total - fromcache,
                    )
                except Exception:
                    self.ui.log(
                        "remotefilefetchlog",
                        "Fail\n",
                        fetched_files=count[0] - fromcache,
                        total_to_fetch=total - fromcache,
                    )
                    raise
                finally:
                    self.ui.verbose = verbose
                # send to memcache
                if self.ui.configbool("remotefilelog", "updatesharedcache"):
                    # Only non-draft revisions are uploaded to the cache.
                    upload = [n for n in missed if n not in draft]
                    if upload:
                        request = "set\n%d\n%s\n" % (len(upload), "\n".join(upload))
                        cache.request(request)

            # mark ourselves as a user of this cache
            writedata.markrepo(self.repo.path)
        finally:
            os.umask(oldumask)
|
2013-06-29 02:57:15 +04:00
|
|
|
|
2019-01-02 15:40:13 +03:00
|
|
|
def receivemissing(self, draftset, pipe, filename, node, key):
    """Read one file-revision payload from `pipe` and store it locally.

    The wire format is a decimal size line followed by that many bytes
    of lz4-compressed data.  Revisions whose ancestor map contains a
    null linknode are recorded in `draftset` so callers can keep them
    out of the shared cache.  Raises error.ResponseError on a short or
    closed stream.
    """
    header = pipe.readline()[:-1]
    if not header:
        raise error.ResponseError(
            _("error downloading file contents:"),
            _("connection closed early for filename %s and node %s")
            % (filename, node),
        )

    expected = int(header)
    payload = pipe.read(expected)
    if len(payload) != expected:
        raise error.ResponseError(
            _("error downloading file contents:"),
            _("only received %s of %s bytes") % (len(payload), expected),
        )

    raw = lz4decompress(payload)
    ancestors = shallowutil.ancestormap(raw)
    # A null linknode means the revision is not yet public (draft).
    for _p1, _p2, linknode, _copyfrom in ancestors.values():
        if linknode == nullid:
            draftset.add(key)
            break
    self.writedata.addremotefilelognode(filename, bin(node), raw)
|
2013-12-12 01:39:53 +04:00
|
|
|
|
2016-05-16 20:59:09 +03:00
|
|
|
def requestpack(self, fileids):
    """Requests the given file revisions from the server in a pack format.

    `fileids` is a list of (filename, hex node) pairs.  The received
    pack is written to the shared cache pack path.

    See `remotefilelogserver.getpack` for the file format.
    """
    total = len(fileids)
    # Initialize before the try: if self._connect() (or entering the
    # context manager) raises, the failure log in the except handler
    # references these names — previously that raised a NameError which
    # masked the original connection error.
    rcvd = 0
    try:
        with self._connect() as conn:
            remote = conn.peer
            remote._callstream("getpackv1")

            self._sendpackrequest(remote, fileids)

            packpath = shallowutil.getcachepackpath(
                self.repo, constants.FILEPACK_CATEGORY
            )
            pipei = shallowutil.trygetattr(remote, ("_pipei", "pipei"))

            receiveddata, receivedhistory = shallowutil.receivepack(
                self.repo.ui, pipei, packpath
            )
            rcvd = len(receiveddata)

        self.ui.log(
            "remotefilefetchlog",
            "Success(pack)\n" if (rcvd == total) else "Fail(pack)\n",
            fetched_files=rcvd,
            total_to_fetch=total,
        )
    except Exception:
        self.ui.log(
            "remotefilefetchlog",
            "Fail(pack)\n",
            fetched_files=rcvd,
            total_to_fetch=total,
        )
        raise
|
2016-05-16 20:59:09 +03:00
|
|
|
|
2016-05-16 20:59:09 +03:00
|
|
|
def _sendpackrequest(self, remote, fileids):
    """Serialize `fileids` onto the remote's output pipe as the request
    body of a getpackv1 call.
    """
    # Group the wanted nodes by filename so the server can answer in
    # per-file batches.
    byname = {}
    for filename, node in fileids:
        byname.setdefault(filename, set()).add(node)

    pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
    for filename, nodes in byname.iteritems():
        # Each record: <filename length><filename><node count><raw nodes>
        header = struct.pack(constants.FILENAMESTRUCT, len(filename))
        count = struct.pack(constants.PACKREQUESTCOUNTSTRUCT, len(nodes))
        body = "".join(bin(n) for n in nodes)
        pipeo.write("%s%s%s%s" % (header, filename, count, body))
        pipeo.flush()
    # A zero-length filename terminates the request stream.
    pipeo.write(struct.pack(constants.FILENAMESTRUCT, 0))
    pipeo.flush()
|
2016-05-16 20:59:09 +03:00
|
|
|
|
2013-05-07 03:44:04 +04:00
|
|
|
def connect(self):
    """Connect self.remotecache to the configured external cache
    process, or to an in-process stub that always reports misses when
    no cache process is configured."""
    if self.cacheprocess:
        # The cache process is handed the local cache path on its
        # command line.
        cmd = "%s %s" % (self.cacheprocess, self.writedata._path)
        self.remotecache.connect(cmd)
    else:
        # If no cache process is specified, we fake one that always
        # returns cache misses. This enables tests to run easily
        # and may eventually allow us to be a drop in replacement
        # for the largefiles extension.
        class simplecache(object):
            # Stub mirroring cacheconnection's interface.
            def __init__(self):
                self.missingids = []
                self.connected = True

            def close(self):
                pass

            def request(self, value, flush=True):
                # Remember every key of a "get" request so receive()
                # can report all of them as misses.  The request is
                # "get\n<count>\n<id>\n..."; split("\n") yields
                # ["get", count, id..., ""], hence the [2:-1] slice.
                lines = value.split("\n")
                if lines[0] != "get":
                    return
                self.missingids = lines[2:-1]

            def receive(self, prog=None):
                # Zero hits; everything requested is reported missed.
                result = (0, self.missingids)
                self.missingids = []
                return result

        self.remotecache = simplecache()
|
2013-05-07 03:44:04 +04:00
|
|
|
|
|
|
|
def close(self):
    """Log the accumulated module-global fetch statistics and shut down
    the cache connection."""
    # Make it "run-tests.py -i" friendly
    if "TESTTMP" in encoding.environ:
        global fetchcost
        fetchcost = 0
    if fetches:
        # NOTE(review): assumes fetched > 0 whenever fetches > 0
        # (prefetch only bumps fetches alongside a non-empty fetch);
        # otherwise the hit-ratio division below would be by zero.
        msg = (
            "%s files fetched over %d fetches - "
            + "(%d misses, %0.2f%% hit ratio) over %0.2fs\n"
        ) % (
            fetched,
            fetches,
            fetchmisses,
            float(fetched - fetchmisses) / float(fetched) * 100.0,
            fetchcost,
        )
        if self.debugoutput:
            self.ui.warn(msg)
        # Escape % so ui.log does not treat msg as a format string.
        self.ui.log(
            "remotefilelog.prefetch",
            msg.replace("%", "%%"),
            remotefilelogfetched=fetched,
            remotefilelogfetches=fetches,
            remotefilelogfetchmisses=fetchmisses,
            remotefilelogfetchtime=fetchcost * 1000,
        )

    if self.remotecache.connected:
        self.remotecache.close()
|
2013-05-07 03:49:55 +04:00
|
|
|
|
2018-05-30 12:16:33 +03:00
|
|
|
def prefetch(self, fileids, force=False, fetchdata=True, fetchhistory=False):
    """Downloads the given file versions to the cache.

    `fileids` is a list of (filename, hex node) pairs.  With `force`,
    only the shared stores are consulted when computing what is missing.
    `fetchdata`/`fetchhistory` select which store kinds to check.
    """
    repo = self.repo
    idstocheck = []
    for file, id in fileids:
        # hack
        # - we don't use .hgtags
        # - workingctx produces ids with length 42,
        #   which we skip since they aren't in any cache
        if file == ".hgtags" or len(id) == 42 or not repo.shallowmatch(file):
            continue

        idstocheck.append((file, bin(id)))

    datastore = self.datastore
    historystore = self.historystore
    if force:
        # Ignore local stores so shared-cache misses are re-fetched.
        datastore = unioncontentstore(*repo.fileslog.shareddatastores)
        historystore = unionmetadatastore(*repo.fileslog.sharedhistorystores)

    missingids = set()
    if fetchdata:
        missingids.update(datastore.getmissing(idstocheck))
    if fetchhistory:
        missingids.update(historystore.getmissing(idstocheck))

    # partition missing nodes into nullid and not-nullid so we can
    # warn about this filtering potentially shadowing bugs.
    nullids = len([None for unused, id in missingids if id == nullid])
    if nullids:
        missingids = [(f, id) for f, id in missingids if id != nullid]
        repo.ui.develwarn(
            (
                "remotefilelog not fetching %d null revs"
                " - this is likely hiding bugs" % nullids
            ),
            config="remotefilelog-ext",
        )
    batchlfsdownloads = self.ui.configbool(
        "remotefilelog", "_batchlfsdownloads", True
    )
    dolfsprefetch = self.ui.configbool("remotefilelog", "dolfsprefetch", True)
    if missingids:
        global fetches, fetched, fetchcost
        fetches += 1

        # We want to be able to detect excess individual file downloads, so
        # let's log that information for debugging.
        if fetches >= 15 and fetches < 18:
            if fetches == 15:
                fetchwarning = self.ui.config("remotefilelog", "fetchwarning")
                if fetchwarning:
                    self.ui.warn(fetchwarning + "\n")
            self.logstacktrace()
        missingids = [(file, hex(id)) for file, id in missingids]
        fetched += len(missingids)
        start = time.time()
        with self.ui.timesection("fetchingfiles"):
            # request() returns falsy on success (see its docstring).
            missingids = self.request(missingids)
        if missingids:
            raise error.Abort(_("unable to download %d files") % len(missingids))
        fetchcost += time.time() - start
        # Non-batched mode: prefetch LFS blobs only for this fetch.
        if not batchlfsdownloads and dolfsprefetch:
            self._lfsprefetch(fileids)
    # Batched mode: one LFS prefetch pass over all requested fileids,
    # even when no file contents were missing.
    # NOTE(review): this nesting (outside `if missingids:`) is
    # reconstructed from blame history — confirm against upstream.
    if batchlfsdownloads and dolfsprefetch:
        self._lfsprefetch(fileids)
|
2017-09-21 00:02:58 +03:00
|
|
|
|
|
|
|
def _lfsprefetch(self, fileids):
    """Download any LFS blobs referenced by `fileids` that are not yet
    in the local blob store.

    No-op when the lfs extension is absent, the repo has no local blob
    store, or downloads are currently disallowed.
    """
    if not _lfsmod or not util.safehasattr(self.repo.svfs, "lfslocalblobstore"):
        return
    if not _lfsmod.wrapper.candownload(self.repo):
        return

    localstore = self.repo.svfs.lfslocalblobstore
    wanted = []
    names = {}
    for filename, hexnode in fileids:
        node = bin(hexnode)
        flog = self.repo.file(filename)
        # Only revisions flagged as externally stored carry LFS pointers.
        if not (flog.flags(node) & revlog.REVIDX_EXTSTORED):
            continue
        pointer = _lfsmod.pointer.deserialize(flog.revision(node, raw=True))
        oid = pointer.oid()
        if not localstore.has(oid):
            wanted.append(pointer)
            names[oid] = filename
    if len(wanted) > 0:
        self.repo.svfs.lfsremoteblobstore.readbatch(
            wanted, localstore, objectnames=names
        )
    # Every requested blob must now be present locally.
    assert all(localstore.has(p.oid()) for p in wanted)
|
2014-02-12 04:25:55 +04:00
|
|
|
|
2015-09-29 08:16:12 +03:00
|
|
|
def logstacktrace(self):
|
|
|
|
import traceback
|
2018-05-30 12:16:33 +03:00
|
|
|
|
|
|
|
self.ui.log(
|
|
|
|
"remotefilelog",
|
|
|
|
"excess remotefilelog fetching:\n%s\n",
|
|
|
|
"".join(traceback.format_stack()),
|
|
|
|
)
|