# fileserverclient.py - client for communicating with the cache process
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.i18n import _
from mercurial import util, sshpeer, hg, error, wireproto, node
import os, socket, lz4, time, grp, io
import errno

# Statistics for debugging
fetchcost = 0
fetches = 0
fetched = 0
fetchmisses = 0

_downloading = _('downloading')

def makedirs(root, path, owner):
    try:
        os.makedirs(path)
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            raise

    while path != root:
        stat = os.stat(path)
        if stat.st_uid == owner:
            os.chmod(path, 0o2775)
        path = os.path.dirname(path)

def getcachekey(reponame, file, id):
    pathhash = util.sha1(file).hexdigest()
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)

def getlocalkey(file, id):
    pathhash = util.sha1(file).hexdigest()
    return os.path.join(pathhash, id)

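# Example key layout (a sketch; hashes abbreviated): for reponame 'repo',
# file 'x/y.py' and id 'f1d2...', getcachekey returns something like
#     repo/ab/cdef0123.../f1d2...
# where 'abcdef0123...' is the sha1 of the file path, split after two hex
# digits into a fanout directory. getlocalkey skips the repo name and the
# fanout: 'abcdef0123.../f1d2...'.
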
def peersetup(ui, peer):
    class remotefilepeer(peer.__class__):
        @wireproto.batchable
        def getfile(self, file, node):
            if not self.capable('getfile'):
                raise util.Abort(
                    'configured remotefile server does not support getfile')
            f = wireproto.future()
            yield {'file': file, 'node': node}, f
            code, data = f.value.split('\0', 1)
            if int(code):
                raise error.LookupError(file, node, data)
            yield data
    peer.__class__ = remotefilepeer

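# A getfile response is '<errcode>\0<payload>': a zero code means the payload
# is the file data, while a nonzero code means the payload is an error
# message, surfaced above as a LookupError.
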
class cacheconnection(object):
    """The connection for communicating with the remote cache. Performs
    gets and sets by communicating with an external process that has the
    cache-specific implementation.
    """
    def __init__(self):
        self.pipeo = self.pipei = self.pipee = None
        self.subprocess = None
        self.connected = False

    def connect(self, cachecommand):
        if self.pipeo:
            raise util.Abort(_("cache connection already open"))
        self.pipei, self.pipeo, self.pipee, self.subprocess = \
            util.popen4(cachecommand)
        self.connected = True

    def close(self):
        def tryclose(pipe):
            try:
                pipe.close()
            except:
                pass
        if self.connected:
            try:
                self.pipei.write("exit\n")
            except:
                pass
        tryclose(self.pipei)
        self.pipei = None
        tryclose(self.pipeo)
        self.pipeo = None
        tryclose(self.pipee)
        self.pipee = None
        try:
            # Wait for process to terminate, making sure to avoid deadlock.
            # See https://docs.python.org/2/library/subprocess.html for
            # warnings about wait() and deadlocking.
            self.subprocess.communicate()
        except:
            pass
        self.subprocess = None
        self.connected = False

    def request(self, request, flush=True):
        if self.connected:
            try:
                self.pipei.write(request)
                if flush:
                    self.pipei.flush()
            except IOError:
                self.close()

    def receiveline(self):
        if not self.connected:
            return None
        result = None
        try:
            result = self.pipeo.readline()[:-1]
            if not result:
                self.close()
        except IOError:
            self.close()

        return result

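# A sketch of the cache process wire protocol as spoken above and in
# fileserverclient.request below. Requests are newline-delimited:
#
#   get\n<count>\n<key>\n...       ask the cache for <count> keys
#   set\n<count>\n<key>\n...\n     announce keys just fetched from the server
#   exit\n                         ask the process to shut down
#
# The response to a 'get' is one line per *missing* key, optionally
# interleaved with '_hits_<n>_' progress lines, terminated by a line
# containing just '0'.
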
def _getfilesbatch(
        remote, receivemissing, progresstick, missed, idmap, batchsize):
    while missed:
        chunk, missed = missed[:batchsize], missed[batchsize:]
        b = remote.batch()
        futures = {}
        for m in chunk:
            file_ = idmap[m]
            node = m[-40:]
            futures[m] = b.getfile(file_, node)
        b.submit()
        for m in chunk:
            v = futures[m].value
            receivemissing(io.BytesIO('%d\n%s' % (len(v), v)), m)
            progresstick()

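# Two fallback paths serve cache misses: _getfilesbatch above drives the
# wireproto 'getfile' command (batched when the server supports 'batch'),
# while _getfiles below streams requests over the raw ssh pipes once a
# persistent 'getfiles' command has been issued on the peer.
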
def _getfiles(
    remote, receivemissing, progresstick, missed, idmap):
    i = 0
    while i < len(missed):
        # issue a batch of requests
        start = i
        end = min(len(missed), start + 10000)
        i = end
        for missingid in missed[start:end]:
            # issue new request
            versionid = missingid[-40:]
            file = idmap[missingid]
            sshrequest = "%s%s\n" % (versionid, file)
            remote.pipeo.write(sshrequest)
        remote.pipeo.flush()

        # receive batch results
        for j in range(start, end):
            receivemissing(remote.pipei, missed[j])
            progresstick()

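# Typical flow (a sketch): a fileserverclient is constructed per repo;
# prefetch() drops ids already present locally, request() asks the cache
# process first and falls back to the server for misses, and close()
# reports the fetch statistics accumulated in the module-level counters.
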
class fileserverclient(object):
    """A client for requesting files from the remote file server.
    """
    def __init__(self, repo):
        ui = repo.ui
        self.repo = repo
        self.ui = ui
        self.cacheprocess = ui.config("remotefilelog", "cacheprocess")
        if self.cacheprocess:
            self.cacheprocess = util.expandpath(self.cacheprocess)

        # This option causes remotefilelog to pass the full file path to the
        # cacheprocess instead of a hashed key.
        self.cacheprocesspasspath = ui.configbool(
            "remotefilelog", "cacheprocess.includepath")

        self.debugoutput = ui.configbool("remotefilelog", "debug")

        self.localcache = localcache(repo)
        self.remotecache = cacheconnection()
        self.remoteserver = None

    def _connect(self):
        fallbackpath = self.repo.fallbackpath
        if not self.remoteserver:
            if not fallbackpath:
                raise util.Abort("no remotefilelog server "
                                 "configured - is your .hg/hgrc trusted?")
            self.remoteserver = hg.peer(self.ui, {}, fallbackpath)
        elif (isinstance(self.remoteserver, sshpeer.sshpeer) and
              self.remoteserver.subprocess.poll() is not None):
            # The ssh connection died, so recreate it.
            self.remoteserver = hg.peer(self.ui, {}, fallbackpath)

        return self.remoteserver

    def request(self, fileids):
        """Takes a list of filename/node pairs and fetches them from the
        server. Files are stored in the local cache.

        A list of nodes that the server couldn't find is returned.
        If the connection fails, an exception is raised.
        """
        if not self.remotecache.connected:
            self.connect()
        cache = self.remotecache
        localcache = self.localcache

        repo = self.repo
        count = len(fileids)
        request = "get\n%d\n" % count
        idmap = {}
        reponame = repo.name
        for file, id in fileids:
            fullid = getcachekey(reponame, file, id)
            if self.cacheprocesspasspath:
                request += file + '\0'
            request += fullid + "\n"
            idmap[fullid] = file

        cache.request(request)

        missing = []
        total = count
        self.ui.progress(_downloading, 0, total=count)

        missed = []
        count = 0
        while True:
            missingid = cache.receiveline()
            if not missingid:
                missedset = set(missed)
                for missingid in idmap.iterkeys():
                    if missingid not in missedset:
                        missed.append(missingid)
                self.ui.warn(_("warning: cache connection closed early - " +
                    "falling back to server\n"))
                break
            if missingid == "0":
                break
            if missingid.startswith("_hits_"):
                # receive progress reports
                parts = missingid.split("_")
                count += int(parts[2])
                self.ui.progress(_downloading, count, total=total)
                continue

            missed.append(missingid)

        global fetchmisses
        fetchmisses += len(missed)

        count = [total - len(missed)]
        self.ui.progress(_downloading, count[0], total=total)

        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missed:
                def progresstick():
                    count[0] += 1
                    self.ui.progress(_downloading, count[0], total=total)
                # When verbose is true, sshpeer prints 'running ssh...'
                # to stdout, which can interfere with some command
                # outputs
                verbose = self.ui.verbose
                self.ui.verbose = False
                try:
                    oldremote = self.remoteserver
                    remote = self._connect()

                    # TODO: deduplicate this with the constant in shallowrepo
                    if remote.capable("remotefilelog"):
                        if not isinstance(remote, sshpeer.sshpeer):
                            raise util.Abort('remotefilelog requires ssh servers')
                        # If it's a new connection, issue the getfiles command
                        if oldremote != remote:
                            remote._callstream("getfiles")
                        _getfiles(remote, self.receivemissing, progresstick,
                                  missed, idmap)
                    elif remote.capable("getfile"):
                        batchdefault = 100 if remote.capable('batch') else 10
                        batchsize = self.ui.configint(
                            'remotefilelog', 'batchsize', batchdefault)
                        _getfilesbatch(
                            remote, self.receivemissing, progresstick, missed,
                            idmap, batchsize)
                    else:
                        raise util.Abort("configured remotefilelog server"
                                         " does not support remotefilelog")
                finally:
                    self.ui.verbose = verbose

                # send to memcache
                count[0] = len(missed)
                request = "set\n%d\n%s\n" % (count[0], "\n".join(missed))
                cache.request(request)

            self.ui.progress(_downloading, None)

            # mark ourselves as a user of this cache
            localcache.markrepo()
        finally:
            os.umask(oldumask)

        return missing

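    # Each file arrives on the pipe framed as '<size>\n<payload>', where the
    # payload is <size> bytes of lz4-compressed data; receivemissing reads
    # one such record and writes the decompressed result to the local cache.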
    def receivemissing(self, pipe, missingid):
        line = pipe.readline()[:-1]
        if not line:
            raise error.ResponseError(_("error downloading file " +
                "contents: connection closed early\n"), '')
        size = int(line)
        data = pipe.read(size)

        self.localcache.write(missingid, lz4.decompress(data))

    def connect(self):
        if self.cacheprocess:
            cmd = "%s %s" % (self.cacheprocess, self.localcache.cachepath)
            self.remotecache.connect(cmd)
        else:
            # If no cache process is specified, we fake one that always
            # returns cache misses. This enables tests to run easily
            # and may eventually allow us to be a drop in replacement
            # for the largefiles extension.
            class simplecache(object):
                def __init__(self):
                    self.missingids = []
                    self.connected = True

                def close(self):
                    pass

                def request(self, value, flush=True):
                    lines = value.split("\n")
                    if lines[0] != "get":
                        return
                    self.missingids = lines[2:-1]
                    self.missingids.append('0')

                def receiveline(self):
                    if len(self.missingids) > 0:
                        return self.missingids.pop(0)
                    return None

            self.remotecache = simplecache()

    def close(self):
        if fetches and self.debugoutput:
            self.ui.warn(("%s files fetched over %d fetches - " +
                "(%d misses, %0.2f%% hit ratio) over %0.2fs\n") % (
                    fetched,
                    fetches,
                    fetchmisses,
                    float(fetched - fetchmisses) / float(fetched) * 100.0,
                    fetchcost))

        if self.remotecache.connected:
            self.remotecache.close()

        if self.remoteserver and util.safehasattr(self.remoteserver, 'cleanup'):
            self.remoteserver.cleanup()
            self.remoteserver = None

    def prefetch(self, fileids, force=False):
        """downloads the given file versions to the cache
        """
        repo = self.repo
        localcache = self.localcache
        storepath = repo.svfs.vfs.base
        reponame = repo.name
        missingids = []
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if file == '.hgtags' or len(id) == 42 or not repo.shallowmatch(file):
                continue

            cachekey = getcachekey(reponame, file, id)
            localkey = getlocalkey(file, id)
            idlocalpath = os.path.join(storepath, 'data', localkey)
            if cachekey in localcache:
                continue
            if not force and os.path.exists(idlocalpath):
                continue

            missingids.append((file, id))

        if missingids:
            global fetches, fetched, fetchcost
            fetches += 1

            # We want to be able to detect excess individual file downloads, so
            # let's log that information for debugging.
            if fetches >= 15 and fetches < 18:
                if fetches == 15:
                    fetchwarning = self.ui.config('remotefilelog',
                                                  'fetchwarning')
                    if fetchwarning:
                        self.ui.warn(fetchwarning + '\n')
                self.logstacktrace()
            fetched += len(missingids)
            start = time.time()
            missingids = self.request(missingids)
            if missingids:
                raise util.Abort(_("unable to download %d files") % len(missingids))
            fetchcost += time.time() - start

    def logstacktrace(self):
        import traceback
        self.ui.log('remotefilelog', 'excess remotefilelog fetching:\n%s',
                    ''.join(traceback.format_stack()))

class localcache(object):
    def __init__(self, repo):
        self.ui = repo.ui
        self.repo = repo
        self.cachepath = self.ui.config("remotefilelog", "cachepath")
        if not self.cachepath:
            raise util.Abort(_("could not find config option "
                               "remotefilelog.cachepath"))
        self._validatecachelog = self.ui.config("remotefilelog",
                                                "validatecachelog")
        self._validatecache = self.ui.config("remotefilelog", "validatecache",
                                             'on')
        if self._validatecache not in ('on', 'strict', 'off'):
            self._validatecache = 'on'
        if self._validatecache == 'off':
            self._validatecache = False

        if self.cachepath:
            self.cachepath = util.expandpath(self.cachepath)
        self.uid = os.getuid()

        if not os.path.exists(self.cachepath):
            oldumask = os.umask(0o002)
            try:
                os.makedirs(self.cachepath)

                groupname = self.ui.config("remotefilelog", "cachegroup")
                if groupname:
                    gid = grp.getgrnam(groupname).gr_gid
                    if gid:
                        os.chown(self.cachepath, os.getuid(), gid)
                        os.chmod(self.cachepath, 0o2775)
            finally:
                os.umask(oldumask)

    def __contains__(self, key):
        path = os.path.join(self.cachepath, key)
        exists = os.path.exists(path)

        # only validate during contains if strict mode is enabled
        # to avoid doubling iops in the hot path
        if exists and self._validatecache == 'strict' and not \
                self._validatekey(path, 'contains'):
            return False

        return exists

    def write(self, key, data):
        path = os.path.join(self.cachepath, key)
        dirpath = os.path.dirname(path)
        if not os.path.exists(dirpath):
            makedirs(self.cachepath, dirpath, self.uid)

        f = util.atomictempfile(path, 'w')
        f.write(data)
        # after a successful write, close will rename us in to place
        f.close()

        if self._validatecache:
            if not self._validatekey(path, 'write'):
                raise util.Abort(_("local cache write was corrupted %s") % path)

        stat = os.stat(path)
        if stat.st_uid == self.uid:
            os.chmod(path, 0o0664)

    def read(self, key):
        try:
            path = os.path.join(self.cachepath, key)
            with open(path, "r") as f:
                result = f.read()

            # we should never have empty files
            if not result:
                os.remove(path)
                raise KeyError("empty local cache file %s" % path)

            if self._validatecache and not self._validatedata(result, path):
                if self._validatecachelog:
                    with open(self._validatecachelog, 'a+') as f:
                        f.write("corrupt %s during read\n" % path)

                os.rename(path, path + ".corrupt")
                raise KeyError("corrupt local cache file %s" % path)

            return result
        except IOError:
            raise KeyError("key not in local cache")

    def _validatekey(self, path, action):
        with open(path, 'r') as f:
            data = f.read()

        if self._validatedata(data, path):
            return True

        if self._validatecachelog:
            with open(self._validatecachelog, 'a+') as f:
                f.write("corrupt %s during %s\n" % (path, action))

        os.rename(path, path + ".corrupt")
        return False

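    # A sketch of the cached blob layout as validated below: '<textlen>\0'
    # followed by the file text, then 20 bytes of binary file node (plus any
    # further metadata). The hex of that node must match the file name the
    # blob is stored under, since cache keys end in the node id.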
    def _validatedata(self, data, path):
        try:
            if len(data) > 0:
                size, remainder = data.split('\0', 1)
                size = int(size)
                if len(data) <= size:
                    # it is truncated
                    return False

                # extract the node from the metadata
                datanode = remainder[size:size + 20]

                # and compare against the path
                if os.path.basename(path) == node.hex(datanode):
                    # Content matches the intended path
                    return True
                return False
        except ValueError:
            pass

        return False

    def markrepo(self):
        repospath = os.path.join(self.cachepath, "repos")
        with open(repospath, 'a') as reposfile:
            reposfile.write(os.path.dirname(self.repo.path) + "\n")

        stat = os.stat(repospath)
        if stat.st_uid == self.uid:
            os.chmod(repospath, 0o0664)

    def gc(self, keepkeys):
        ui = self.ui
        cachepath = self.cachepath
        _removing = _("removing unnecessary files")
        _truncating = _("enforcing cache limit")

        # prune cache
        import Queue
        queue = Queue.PriorityQueue()
        originalsize = 0
        size = 0
        count = 0
        removed = 0

        # keep files newer than a day even if they aren't needed
        limit = time.time() - (60 * 60 * 24)

        ui.progress(_removing, count, unit="files")
        for root, dirs, files in os.walk(cachepath):
            for file in files:
                if file == 'repos':
                    continue

                ui.progress(_removing, count, unit="files")
                path = os.path.join(root, file)
                key = os.path.relpath(path, cachepath)
                count += 1
                try:
                    stat = os.stat(path)
                except OSError as e:
                    # errno.ENOENT = no such file or directory
                    if e.errno != errno.ENOENT:
                        raise
                    msg = _("warning: file %s was removed by another process\n")
                    ui.warn(msg % path)
                    continue

                originalsize += stat.st_size

                if key in keepkeys or stat.st_atime > limit:
                    queue.put((stat.st_atime, path, stat))
                    size += stat.st_size
                else:
                    try:
                        os.remove(path)
                    except OSError as e:
                        # errno.ENOENT = no such file or directory
                        if e.errno != errno.ENOENT:
                            raise
                        msg = _("warning: file %s was removed by another process\n")
                        ui.warn(msg % path)
                        continue
                    removed += 1
        ui.progress(_removing, None)

        # remove oldest files until under limit
        limit = ui.configbytes("remotefilelog", "cachelimit", "1000 GB")
        if size > limit:
            excess = size - limit
            removedexcess = 0
            while not queue.empty() and size > limit and size > 0:
                ui.progress(_truncating, removedexcess, unit="bytes",
                            total=excess)
                atime, oldpath, stat = queue.get()
                try:
                    os.remove(oldpath)
                except OSError as e:
                    # errno.ENOENT = no such file or directory
                    if e.errno != errno.ENOENT:
                        raise
                    msg = _("warning: file %s was removed by another process\n")
                    ui.warn(msg % oldpath)
                size -= stat.st_size
                removed += 1
                removedexcess += stat.st_size
        ui.progress(_truncating, None)

        ui.status("finished: removed %s of %s files (%0.2f GB to %0.2f GB)\n" %
                  (removed, count,
                   float(originalsize) / 1024.0 / 1024.0 / 1024.0,
                   float(size) / 1024.0 / 1024.0 / 1024.0))