remotefilelog: remove format.noloosefile

Summary: The loosefile code paths gated by `format.noloosefile` are now dead code; remove them along with the config.

Reviewed By: quark-zju

Differential Revision: D17923872

fbshipit-source-id: 9a65d13d43f0caa7b25516a7de3bcd7d558dda92
Xavier Deguillard 2019-10-16 14:19:36 -07:00 committed by Facebook Github Bot
parent 5132ddaa2c
commit 5840de16ef
8 changed files with 4 additions and 927 deletions

@@ -109,8 +109,6 @@ Configs:
``format.userustmutablestore`` switches to using the rust mutable stores.
``format.noloosefile`` disables loosefiles.
``treemanifest.blocksendflat`` causes an exception to be thrown if the
current repository attempts to add flat manifests to a changegroup.

@@ -1,547 +0,0 @@
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import hashlib
import itertools
import os
import random
import shutil
import stat
import time
from edenscm.mercurial import error, phases, progress, pycompat, revlog, util
from edenscm.mercurial.i18n import _
from edenscm.mercurial.node import bin, hex
from edenscm.mercurial.pycompat import range
from . import constants, datapack, historypack, shallowutil
# Cache of filename sha to filename, to prevent repeated search for the same
# filename shas.
filenamehashcache = {}
class basestore(object):
def __init__(self, repo, path, reponame, shared=False):
"""Creates a remotefilelog store object for the given repo name.
`path` - The file path where this store keeps its data
`reponame` - The name of the repo. This is used to partition data from
many repos.
`shared` - True if this store is a shared cache of data from the central
server, for many repos on this machine. False means this store is for
the local data for one repo.
"""
self.repo = repo
self.ui = repo.ui
self._path = path
self._reponame = reponame
self._shared = shared
self._uid = os.getuid() if not pycompat.iswindows else None
self._validatecachelog = self.ui.config("remotefilelog", "validatecachelog")
self._validatecache = self.ui.config("remotefilelog", "validatecache", "on")
self._validatehashes = self.ui.configbool(
"remotefilelog", "validatecachehashes", True
)
self._incrementalloosefilesrepack = self.ui.configbool(
"remotefilelog", "incrementalloosefilerepack", True
)
if self._validatecache not in ("on", "strict", "off"):
self._validatecache = "on"
if self._validatecache == "off":
self._validatecache = False
self._mutablepacks = None
self._repackdir = None
if shared:
shallowutil.mkstickygroupdir(self.ui, path)
def getmissing(self, keys):
missing = []
with progress.bar(
self.repo.ui, _("discovering"), _("files"), len(keys)
) as prog:
for name, node in keys:
prog.value += 1
filepath = self._getfilepath(name, node)
try:
size = os.path.getsize(filepath)
# An empty file is considered corrupt and we pretend it
# doesn't exist.
exists = size > 0
except os.error:
exists = False
if (
exists
and self._validatecache == "strict"
and not self._validatekey(filepath, "contains")
):
exists = False
if not exists:
missing.append((name, node))
return missing
# BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
def markledger(self, ledger, options=None):
if options and options.get(constants.OPTION_PACKSONLY):
return
incremental = False
if options and options.get("incremental") and self._incrementalloosefilesrepack:
incremental = True
with ledger.location(self._path):
for filename, nodes in self._getfiles(incremental):
for node in nodes:
ledger.markdataentry(self, filename, node)
ledger.markhistoryentry(self, filename, node)
def cleanup(self, ledger):
entries = ledger.sources.get(self, [])
with progress.bar(self.ui, _("cleaning up"), _("files"), len(entries)) as prog:
for entry in entries:
if entry.gced or (entry.datarepacked and entry.historyrepacked):
path = self._getfilepath(entry.filename, entry.node)
util.tryunlink(path)
prog.value += 1
if self._repackdir is not None:
# Clean up the repo cache directory.
self._cleanupdirectory(self._repackdir)
def markforrefresh(self):
# This only applies to stores that keep a snapshot of what's on disk.
pass
# BELOW THIS ARE NON-STANDARD APIS
def _cleanupdirectory(self, rootdir):
"""Removes the empty directories and unnecessary files within the root
directory recursively. Note that this method does not remove the root
directory itself. """
oldfiles = set()
otherfiles = set()
havefilename = False
# util.listdir returns stat information which saves some rmdir/listdir
# syscalls.
for name, mode in util.listdir(rootdir):
if stat.S_ISDIR(mode):
dirpath = os.path.join(rootdir, name)
self._cleanupdirectory(dirpath)
# Now that the directory specified by dirpath is potentially
# empty, try and remove it.
try:
os.rmdir(dirpath)
except OSError:
pass
elif stat.S_ISREG(mode):
if name == "filename":
havefilename = True
elif name.endswith("_old"):
oldfiles.add(name[:-4])
else:
otherfiles.add(name)
# Remove the files which end with suffix '_old' and have no
# corresponding file without the suffix '_old'. See addremotefilelognode
# method for the generation/purpose of files with '_old' suffix.
for filename in oldfiles - otherfiles:
filepath = os.path.join(rootdir, filename + "_old")
util.tryunlink(filepath)
# If we've deleted all the files and have a "filename" left over, delete
# the filename, too.
if havefilename and not otherfiles:
filepath = os.path.join(rootdir, "filename")
util.tryunlink(filepath)
def _getfiles(self, incrementalrepack):
"""Return a list of (filename, [node,...]) for all the revisions that
exist in the store.
This is useful for obtaining a list of all the contents of the store
when performing a repack to another store, since the store API requires
name+node keys and not namehash+node keys.
"""
existing = {}
for filenamehash, node in self._listkeys(incrementalrepack):
existing.setdefault(filenamehash, []).append(node)
filenamemap = self._resolvefilenames(existing.keys())
for filename, sha in filenamemap.iteritems():
yield (filename, existing[sha])
def _resolvefilenames(self, hashes):
"""Given a list of filename hashes that are present in the
remotefilelog store, return a mapping from filename->hash.
This is useful when converting remotefilelog blobs into other storage
formats.
"""
if not hashes:
return {}
filenames = {}
missingfilename = set(hashes)
if self._shared:
getfilenamepath = lambda sha: os.path.join(
self._path, self._reponame, sha[:2], sha[2:], "filename"
)
else:
getfilenamepath = lambda sha: os.path.join(self._path, sha, "filename")
# Search the local cache and filename files in case we look for files
# we've already found
for sha in hashes:
if sha in filenamehashcache:
if filenamehashcache[sha] is not None:
filenames[filenamehashcache[sha]] = sha
missingfilename.discard(sha)
filenamepath = getfilenamepath(hex(sha))
if os.path.exists(filenamepath):
try:
filename = shallowutil.readfile(filenamepath)
except Exception:
pass
else:
checksha = hashlib.sha1(filename).digest()
if checksha == sha:
filenames[filename] = sha
filenamehashcache[sha] = filename
missingfilename.discard(sha)
else:
# The filename file is invalid - delete it.
util.tryunlink(filenamepath)
if not missingfilename:
return filenames
# Scan all draft commits and the last 250000 commits in the changelog
# looking for the files. If they're not there, we don't bother looking
# further.
# developer config: remotefilelog.resolvechangeloglimit
unfi = self.repo.unfiltered()
cl = unfi.changelog
revs = list(unfi.revs("not public()"))
scanlen = min(
len(cl), self.ui.configint("remotefilelog", "resolvechangeloglimit", 250000)
)
remainingstr = "%d remaining" % len(missingfilename)
with progress.bar(
self.ui, "resolving filenames", total=len(revs) + scanlen
) as prog:
for i, rev in enumerate(
itertools.chain(revs, range(len(cl) - 1, len(cl) - scanlen, -1))
):
files = cl.readfiles(cl.node(rev))
prog.value = i, remainingstr
for filename in files:
sha = hashlib.sha1(filename).digest()
if sha in missingfilename:
filenames[filename] = sha
filenamehashcache[sha] = filename
missingfilename.discard(sha)
remainingstr = "%d remaining" % len(missingfilename)
if not missingfilename:
break
# Record anything we didn't find in the cache so that we don't look
# for it again.
filenamehashcache.update((h, None) for h in missingfilename)
return filenames
def _getrepocachepath(self):
return os.path.join(self._path, self._reponame) if self._shared else self._path
def _getincrementalrootdir(self):
rootdir = self._getrepocachepath()
entries = os.listdir(rootdir)
entries = [os.path.join(rootdir, p) for p in entries]
entries = [folder for folder in entries if os.path.isdir(folder)]
if len(entries) == 0:
return None
# Since the distribution of loosefiles should be uniform across all of
# the loosefile directories, let's randomly pick one to repack.
for tries in range(10):
entry = entries[random.randrange(len(entries))]
for root, dirs, files in os.walk(entry):
for filename in files:
if len(filename) != 40:
continue
try:
int(filename, 16)
except ValueError:
continue
parent, d = os.path.split(root)
if self._shared:
d += os.path.basename(parent)
if len(d) != 40:
continue
try:
int(d, 16)
except ValueError:
continue
if self._shared:
return parent
else:
return root
return None
def _listkeys(self, incrementalrepack):
"""List all the remotefilelog keys that exist in the store.
Returns an iterator of (filename hash, filecontent hash) tuples.
"""
if self._repackdir is not None:
rootdir = self._repackdir
else:
if not incrementalrepack:
rootdir = self._getrepocachepath()
else:
rootdir = self._getincrementalrootdir()
self._repackdir = rootdir
if rootdir is not None:
for root, dirs, files in os.walk(rootdir):
for filename in files:
if len(filename) != 40:
continue
node = filename
if self._shared:
# .../1a/85ffda..be21
filenamehash = root[-41:-39] + root[-38:]
else:
filenamehash = root[-40:]
self._reportmetrics(root, filename)
yield (bin(filenamehash), bin(node))
def _reportmetrics(self, root, filename):
"""Log total remotefilelog blob size and count.
The method is overridden in the remotefilelogcontentstore class, because we can
only count metrics for the datastore. History is kept in the same files
so we don't need to log metrics twice.
"""
pass
def _getfilepath(self, name, node):
"""
The path of the file used to store the content of the named file
with a particular node hash.
"""
node = hex(node)
if self._shared:
key = shallowutil.getcachekey(self._reponame, name, node)
else:
key = shallowutil.getlocalkey(name, node)
return os.path.join(self._path, key)
def _getfilenamepath(self, name):
"""
The path of the file used to store the name of the named file. This
allows reverse lookup from the hashed name back to the original name.
This is a file named ``filename`` inside the directory where the file
content is stored.
"""
if self._shared:
key = shallowutil.getcachekey(self._reponame, name, "filename")
else:
key = shallowutil.getlocalkey(name, "filename")
return os.path.join(self._path, key)
def _getdata(self, name, node):
filepath = self._getfilepath(name, node)
filenamepath = self._getfilenamepath(name)
try:
data = shallowutil.readfile(filepath)
if not os.path.exists(filenamepath):
try:
shallowutil.writefile(filenamepath, name, readonly=True)
except Exception:
pass
if self._validatecache:
validationresult = self._validatedata(data, filepath)
if validationresult == shallowutil.ValidationResult.Invalid:
if self._validatecachelog:
with util.posixfile(self._validatecachelog, "a+") as f:
f.write("corrupt %s during read\n" % filepath)
os.rename(filepath, filepath + ".corrupt")
raise KeyError("corrupt local cache file %s" % filepath)
else:
# only check if the content is censored
offset, size, flags = shallowutil.parsesizeflags(data)
text = data[offset : offset + size]
validationresult = (
shallowutil.ValidationResult.Redacted
if shallowutil.verifyredacteddata(text)
else shallowutil.ValidationResult.Valid
)
if validationresult == shallowutil.ValidationResult.Redacted:
data = self.createcensoredfileblob(data)
except IOError:
raise KeyError(
"no file found at %s for %s:%s" % (filepath, name, hex(node))
)
return data
def createcensoredfileblob(self, raw):
"""Creates a fileblob that contains a default message when
the file is blacklisted and the actual content cannot be accessed.
"""
offset, size, flags = shallowutil.parsesizeflags(raw)
ancestortext = raw[offset + size :]
text = constants.REDACTED_MESSAGE
revlogflags = revlog.REVIDX_DEFAULT_FLAGS
header = shallowutil.buildfileblobheader(len(text), revlogflags)
return "%s\0%s%s" % (header, text, ancestortext)
def addremotefilelognode(self, name, node, data):
filepath = self._getfilepath(name, node)
filenamepath = self._getfilenamepath(name)
oldumask = os.umask(0o002)
try:
# if this node already exists, save the old version for
# recovery/debugging purposes.
if os.path.exists(filepath):
newfilename = filepath + "_old"
# newfilename can be read-only and shutil.copy will fail.
# Delete newfilename first to avoid this.
if os.path.exists(newfilename):
shallowutil.unlinkfile(newfilename)
shutil.copy(filepath, newfilename)
shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
shallowutil.writefile(filepath, data, readonly=True)
if not os.path.exists(filenamepath):
shallowutil.writefile(filenamepath, name, readonly=True)
if self._validatecache:
if not self._validatekey(filepath, "write"):
raise error.Abort(
_("local cache write was corrupted %s") % filepath
)
finally:
os.umask(oldumask)
def markrepo(self, path):
"""Call this to add the given repo path to the store's list of
repositories that are using it. This is useful later when doing garbage
collection, since it allows us to inspect the repos to see what nodes
they want to be kept alive in the store.
"""
repospath = os.path.join(self._path, "repos")
line = os.path.dirname(path) + "\n"
# Skip writing to the repos file if the line is already written.
try:
if line in util.iterfile(open(repospath, "rb")):
return
except IOError:
pass
with util.posixfile(repospath, "a") as reposfile:
reposfile.write(line)
repospathstat = os.stat(repospath)
if repospathstat.st_uid == self._uid:
os.chmod(repospath, 0o0664)
def _validatekey(self, path, action):
with util.posixfile(path, "rb") as f:
data = f.read()
validationresult = self._validatedata(data, path)
if validationresult != shallowutil.ValidationResult.Invalid:
return True
if self._validatecachelog:
with util.posixfile(self._validatecachelog, "a+") as f:
f.write("corrupt %s during %s\n" % (path, action))
os.rename(path, path + ".corrupt")
return False
def _validatedata(self, data, path):
try:
if len(data) > 0:
# see remotefilelogserver.createfileblob for the format
offset, size, flags = shallowutil.parsesizeflags(data)
if len(data) <= size:
# it is truncated
return shallowutil.ValidationResult.Invalid
# extract the node from the metadata
offset += size
datanode = data[offset : offset + 20]
hexdatanode = hex(datanode)
validationresult = shallowutil.verifyfilenode(
self.ui, data, hexdatanode, self._validatehashes
)
if validationresult == shallowutil.ValidationResult.Invalid:
return validationresult
# and compare against the path
if os.path.basename(path) == hexdatanode:
# Content matches the intended path
return validationresult
return shallowutil.ValidationResult.Invalid
except (ValueError, RuntimeError):
pass
return shallowutil.ValidationResult.Invalid
def handlecorruption(self, name, node):
filepath = self._getfilepath(name, node)
if self._shared:
self.ui.warn(_("detected corruption in '%s', moving it aside\n") % filepath)
os.rename(filepath, filepath + ".corrupt")
# Throw a KeyError so UnionStore can catch it and proceed to the
# next store.
raise KeyError(
"corruption in file '%s' for %s:%s" % (filepath, name, hex(node))
)
else:
# Throw a ValueError so UnionStore does not attempt to read further
# stores, since local data corruption is not recoverable.
raise ValueError(
"corruption in file '%s' for %s:%s" % (filepath, name, hex(node))
)

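For context on the layout the deleted basestore managed: loose files are addressed by the SHA-1 of the file path, with shared-cache keys of the form <reponame>/<hash[:2]>/<hash[2:]>/<node> and local keys of the form <hash>/<node> (see the getcachekey/getlocalkey helpers removed further down in this diff). A minimal standalone sketch of that key scheme follows; the helper name loosefilekey and the sample arguments are illustrative only, not part of the extension.

import hashlib
import os


def loosefilekey(reponame, filename, node, shared=True):
    # Mirrors the deleted getcachekey/getlocalkey helpers: loose files are
    # stored under a directory derived from sha1(filename), with the content
    # node (or the literal name "filename") as the last path component.
    pathhash = hashlib.sha1(filename.encode()).hexdigest()
    if shared:
        # shared cache layout: <reponame>/<hash[:2]>/<hash[2:]>/<node>
        return os.path.join(reponame, pathhash[:2], pathhash[2:], node)
    # local store layout: <hash>/<node>
    return os.path.join(pathhash, node)


print(loosefilekey("myrepo", "dir/file.txt", "ab" * 20))
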
@@ -12,7 +12,7 @@ from edenscm.mercurial import manifest, mdiff, revlog, util
from edenscm.mercurial.node import hex, nullid
from edenscm.mercurial.pycompat import range
from . import basestore, constants, shallowutil
from . import constants, shallowutil
class ChainIndicies(object):
@@ -160,88 +160,6 @@ class unioncontentstore(object):
break
class remotefilelogcontentstore(basestore.basestore):
def __init__(self, *args, **kwargs):
super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
self._threaddata = threading.local()
storetype = "shared" if self._shared else "local"
self._metricsprefix = "filestore_%s_blob" % storetype
def get(self, name, node):
# return raw revision text
data = self._getdata(name, node)
offset, size, flags = shallowutil.parsesizeflags(data)
content = data[offset : offset + size]
try:
ancestormap = shallowutil.ancestormap(data)
except ValueError:
self.handlecorruption(name, node)
p1, p2, linknode, copyfrom = ancestormap[node]
copyrev = None
if copyfrom:
copyrev = hex(p1)
self._updatemetacache(node, size, flags)
# lfs tracks renames in its own metadata, remove hg copy metadata,
# because copy metadata will be re-added by lfs flag processor.
if flags & revlog.REVIDX_EXTSTORED:
copyrev = copyfrom = None
revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
return revision
def getdelta(self, name, node):
# Since remotefilelog content stores only contain full texts, just
# return that.
revision = self.get(name, node)
return revision, name, nullid, self.getmeta(name, node)
def getdeltachain(self, name, node):
# Since remotefilelog content stores just contain full texts, we return
# a fake delta chain that just consists of a single full text revision.
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
return [(name, node, None, nullid, revision)]
def getmeta(self, name, node):
self._sanitizemetacache()
if node != self._threaddata.metacache[0]:
data = self._getdata(name, node)
offset, size, flags = shallowutil.parsesizeflags(data)
self._updatemetacache(node, size, flags)
return self._threaddata.metacache[1]
def add(self, name, node, data):
raise RuntimeError("cannot add content only to remotefilelog " "contentstore")
def _sanitizemetacache(self):
metacache = getattr(self._threaddata, "metacache", None)
if metacache is None:
self._threaddata.metacache = (None, None) # (node, meta)
def _updatemetacache(self, node, size, flags):
self._sanitizemetacache()
if node == self._threaddata.metacache[0]:
return
meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
self._threaddata.metacache = (node, meta)
def markforrefresh(self):
pass
def _reportmetrics(self, root, filename):
filepath = os.path.join(root, filename)
stats = os.stat(filepath)
self.ui.metrics.gauge(self._metricsprefix + "size", stats.st_size)
self.ui.metrics.gauge(self._metricsprefix + "num", 1)
class remotecontentstore(object):
def __init__(self, ui, fileservice, shared):
self._fileservice = fileservice

@@ -8,7 +8,7 @@ from __future__ import absolute_import
from edenscm.mercurial import util
from edenscm.mercurial.node import hex, nullid
from . import basestore, shallowutil
from . import shallowutil
class unionmetadatastore(object):
@@ -126,32 +126,6 @@ class unionmetadatastore(object):
self.stores.remove(store)
class remotefilelogmetadatastore(basestore.basestore):
def getancestors(self, name, node, known=None):
"""Returns as many ancestors as we're aware of.
return value: {
node: (p1, p2, linknode, copyfrom),
...
}
"""
data = self._getdata(name, node)
try:
ancestors = shallowutil.ancestormap(data)
except ValueError:
self.handlecorruption(name, node)
return ancestors
def getnodeinfo(self, name, node):
return self.getancestors(name, node)[node]
def add(self, name, node, parents, linknode):
raise RuntimeError("cannot add metadata only to remotefilelog " "metadatastore")
def markforrefresh(self):
pass
class remotemetadatastore(object):
def __init__(self, ui, fileservice, shared):
self._fileservice = fileservice

@@ -16,18 +16,10 @@ from edenscm.mercurial.i18n import _
from edenscm.mercurial.node import bin, nullid
from . import constants, fileserverclient, mutablestores, shallowutil
from .contentstore import (
remotecontentstore,
remotefilelogcontentstore,
unioncontentstore,
)
from .contentstore import remotecontentstore, unioncontentstore
from .datapack import makedatapackstore
from .historypack import makehistorypackstore
from .metadatastore import (
remotefilelogmetadatastore,
remotemetadatastore,
unionmetadatastore,
)
from .metadatastore import remotemetadatastore, unionmetadatastore
# corresponds to uncompressed length of revlog's indexformatng (2 gigs, 4-byte
@@ -480,18 +472,6 @@ class remotefileslog(filelog.fileslog):
localcontent += [lpackcontent]
localmetadata += [lpackmetadata]
if not repo.ui.configbool("format", "noloosefile", True):
loosecachecontent, loosecachemetadata = self.makecachestores()
cachecontent += [loosecachecontent]
cachemetadata += [loosecachemetadata]
looselocalcontent, looselocalmetadata = self.makelocalstores()
localcontent += [looselocalcontent]
localmetadata += [looselocalmetadata]
else:
loosecachecontent = None
loosecachemetadata = None
mutablelocalstore = mutablestores.mutabledatahistorystore(
lambda: self._mutablelocalpacks
)
@@ -594,44 +574,6 @@ class remotefileslog(filelog.fileslog):
return (spackcontent, spackmetadata, lpackcontent, lpackmetadata)
def makecachestores(self):
"""Typically machine-wide, cache of remote data; can be discarded."""
repo = self.repo
# Instantiate shared cache stores
cachepath = shallowutil.getcachepath(repo.ui)
cachecontent = remotefilelogcontentstore(
repo, cachepath, repo.name, shared=True
)
cachemetadata = remotefilelogmetadatastore(
repo, cachepath, repo.name, shared=True
)
self.sharedstore = cachecontent
self.shareddatastores.append(cachecontent)
self.sharedhistorystores.append(cachemetadata)
return cachecontent, cachemetadata
def makelocalstores(self):
"""In-repo stores, like .hg/store/data; can not be discarded."""
repo = self.repo
localpath = os.path.join(repo.svfs.vfs.base, "data")
if not os.path.exists(localpath):
os.makedirs(localpath)
# Instantiate local data stores
localcontent = remotefilelogcontentstore(
repo, localpath, repo.name, shared=False
)
localmetadata = remotefilelogmetadatastore(
repo, localpath, repo.name, shared=False
)
self.localdatastores.append(localcontent)
self.localhistorystores.append(localmetadata)
return localcontent, localmetadata
def makeremotestores(self, cachecontent, cachemetadata):
"""These stores fetch data from a remote server."""
repo = self.repo

@@ -14,16 +14,6 @@ from edenscm.mercurial.node import hex, nullid, nullrev
from ..extutil import runshellcommand
from . import constants, fileserverclient, remotefilectx, remotefilelog, shallowutil
from .contentstore import (
remotecontentstore,
remotefilelogcontentstore,
unioncontentstore,
)
from .metadatastore import (
remotefilelogmetadatastore,
remotemetadatastore,
unionmetadatastore,
)
from .repack import domaintenancerepack

@@ -65,16 +65,6 @@ def interposeclass(container, classname):
return wrap
def getcachekey(reponame, file, id):
pathhash = hashlib.sha1(file).hexdigest()
return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
def getlocalkey(file, id):
pathhash = hashlib.sha1(file).hexdigest()
return os.path.join(pathhash, id)
def getcachepath(ui, allowempty=False):
cachepath = ui.config("remotefilelog", "cachepath")
if not cachepath:
@@ -277,37 +267,6 @@ def bin2int(buf):
return x
def parsesizeflags(raw):
"""given a remotefilelog blob, return (headersize, rawtextsize, flags)
see remotefilelogserver.createfileblob for the format.
raise RuntimeError if the content is ill-formed.
"""
flags = revlog.REVIDX_DEFAULT_FLAGS
size = None
try:
index = raw.index("\0")
header = raw[:index]
if header.startswith("v"):
# v1 and above, header starts with 'v'
if header.startswith("v1\n"):
for s in header.split("\n"):
if s.startswith(constants.METAKEYSIZE):
size = int(s[len(constants.METAKEYSIZE) :])
elif s.startswith(constants.METAKEYFLAG):
flags = int(s[len(constants.METAKEYFLAG) :])
else:
raise RuntimeError("unsupported remotefilelog header: %s" % header)
else:
# v0, str(int(size)) is the header
size = int(header)
except ValueError:
raise RuntimeError("unexpected remotefilelog header: illegal format")
if size is None:
raise RuntimeError("unexpected remotefilelog header: no size found")
return index + 1, size, flags
def buildfileblobheader(size, flags, version=1):
"""return the header of a remotefilelog blob.
@@ -330,147 +289,6 @@ def buildfileblobheader(size, flags, version=1):
return header
def verifyfilenode(ui, raw, hexexpectedfilenode, validatehashes):
offset, size, flags = parsesizeflags(raw)
text = raw[offset : offset + size]
if verifyredacteddata(text):
return ValidationResult.Redacted
# Do not check lfs data since hash verification would fail
if validatehashes and flags == 0:
ancestors = ancestormap(raw)
p1, p2, _, copyfrom = ancestors[bin(hexexpectedfilenode)]
if copyfrom:
# Mercurial has a complicated copy/renames logic.
# In vanilla hg, in case of rename p1 is always "null",
# and the copy information is embedded in file revision.
# In remotefilelog, p1 is used to store "copyfrom file node".
# In both cases, p2 is always "null" for a non-merge commit.
# It could only be not-null for merges.
# The code below converts between two representations.
filelogmeta = {"copy": copyfrom, "copyrev": hex(p1)}
text = filelog.packmeta(filelogmeta, text)
p1 = nullid
elif text.startswith("\1\n"):
text = filelog.packmeta({}, text)
actualhash = hex(revlog.hash(text, p1, p2))
if hexexpectedfilenode != actualhash:
ui.log(
"remotefilelog",
"remotefilelog hash verification failed \n",
actual_hash=actualhash,
expected_hash=hexexpectedfilenode,
)
return ValidationResult.Invalid
return ValidationResult.Valid
def verifyredacteddata(data):
# Check if text is the same as the magic string used
# in blacklisted files. When a file is blacklisted,
its content is replaced by a default string.
if data == constants.REDACTED_CONTENT:
return True
return False
def ancestormap(raw):
offset, size, flags = parsesizeflags(raw)
start = offset + size
mapping = {}
while start < len(raw):
divider = raw.index("\0", start + 80)
currentnode = raw[start : (start + 20)]
p1 = raw[(start + 20) : (start + 40)]
p2 = raw[(start + 40) : (start + 60)]
linknode = raw[(start + 60) : (start + 80)]
copyfrom = raw[(start + 80) : divider]
mapping[currentnode] = (p1, p2, linknode, copyfrom)
start = divider + 1
return mapping
def readfile(path):
f = util.posixfile(path, "rb")
try:
result = f.read()
# we should never have empty files
if not result:
os.remove(path)
raise IOError("empty file: %s" % path)
return result
finally:
f.close()
def unlinkfile(filepath):
if pycompat.iswindows:
# On Windows, os.unlink cannot delete readonly files
os.chmod(filepath, stat.S_IWUSR)
util.unlink(filepath)
def renamefile(source, destination):
if pycompat.iswindows:
# On Windows, os.rename cannot rename readonly files
# and cannot overwrite destination if it exists
os.chmod(source, stat.S_IWUSR)
if os.path.isfile(destination):
os.chmod(destination, stat.S_IWUSR)
os.unlink(destination)
os.rename(source, destination)
def writefile(path, content, readonly=False):
dirname, filename = os.path.split(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
fd, temp = tempfile.mkstemp(prefix=".%s-" % filename, dir=dirname)
os.close(fd)
try:
f = util.posixfile(temp, "wb")
f.write(content)
f.close()
if readonly:
mode = 0o444
else:
# tempfiles are created with 0o600, so we need to manually set the
# mode.
oldumask = os.umask(0)
# there's no way to get the umask without modifying it, so set it
# back
os.umask(oldumask)
mode = ~oldumask
renamefile(temp, path)
os.chmod(path, mode)
except Exception:
try:
unlinkfile(temp)
except OSError:
pass
raise
def sortnodes(nodes, parentfunc):
"""Topologically sorts the nodes, using the parentfunc to find
the parents of nodes."""
@@ -529,20 +347,6 @@ def readpath(stream):
return readexactly(stream, pathlen)
def readnodelist(stream):
rawlen = readexactly(stream, constants.NODECOUNTSIZE)
nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
for i in range(nodecount):
yield readexactly(stream, constants.NODESIZE)
def readpathlist(stream):
rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
for i in range(pathcount):
yield readpath(stream)
def getgid(groupname):
try:
gid = grp.getgrnam(groupname).gr_gid

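For reference, the helpers deleted above spell out the remotefilelog fileblob layout: a header (str(size) for v0, or a "v1\n"-prefixed key/value block), a "\0" separator, the raw text, and then fixed-width ancestor records (node, p1, p2, linknode, plus a copyfrom path). Below is a minimal standalone sketch of a header parser following the deleted parsesizeflags docstring; the single-character meta keys "s" and "f" are assumed stand-ins for constants.METAKEYSIZE and constants.METAKEYFLAG, and the function name is illustrative.

def parse_fileblob_header(raw):
    # Sketch of the format the deleted parsesizeflags handled: v0 headers are
    # just str(size); v1 headers start with b"v1\n" and carry "s<size>" and
    # "f<flags>" lines (assumed meta keys). Flags default to 0, which matches
    # REVIDX_DEFAULT_FLAGS in the original code.
    index = raw.index(b"\0")
    header = raw[:index]
    size = None
    flags = 0
    if header.startswith(b"v1\n"):
        for line in header.split(b"\n")[1:]:
            if line.startswith(b"s"):
                size = int(line[1:])
            elif line.startswith(b"f"):
                flags = int(line[1:])
    else:
        size = int(header)  # v0: the whole header is the text size
    if size is None:
        raise RuntimeError("unexpected remotefilelog header: no size found")
    return index + 1, size, flags


# Example: a v1 blob whose raw text is b"hello" with no flags set.
blob = b"v1\ns5\nf0\x00hello"
print(parse_fileblob_header(blob))  # (9, 5, 0)
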
@@ -129,8 +129,6 @@ New errors are not allowed. Warnings are strongly discouraged.
undocumented: remotefilelog.server (bool)
undocumented: remotefilelog.servercachepath (str)
undocumented: remotefilelog.shallowtrees (bool)
undocumented: remotefilelog.validatecache (str) ["on"]
undocumented: remotefilelog.validatecachelog (str)
undocumented: remotenames.alias.default (bool)
undocumented: remotenames.allownonfastforward (bool)
undocumented: remotenames.calculatedistance (bool)