# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import

import bz2
import calendar
import codecs
import collections
import datetime
import errno
import gc
import hashlib
import imp
import os
import platform as pyplatform
import re as remod
import shutil
import signal
import socket
import stat
import string
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
import warnings
import zlib

from . import (
    encoding,
    error,
    i18n,
    policy,
    pycompat,
)

base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
httpserver = pycompat.httpserver
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
stringio = pycompat.stringio
urlerr = pycompat.urlerr
urlreq = pycompat.urlreq
xmlrpclib = pycompat.xmlrpclib

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

def isatty(fp):
    try:
        return fp.isatty()
    except AttributeError:
        return False

# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)

if pycompat.osname == 'nt':
    from . import windows as platform
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform

_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass

# Python compatibility

_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits
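
# For illustration (not part of the original module): bitsfrom simply ORs
# together every value in the container, e.g. to combine flag bits.
#
#   bitsfrom([1, 2, 8])   # -> 11
#   bitsfrom([])          # -> 0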

# Python 2.6 still has deprecation warnings enabled by default. We do not want
# to display anything to standard users, so detect if we are running tests and
# only use Python deprecation warnings in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')

def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(msg, DeprecationWarning, stacklevel + 1)
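
# For illustration (not part of the original module): with HGEMITWARNINGS set
# in the environment, a caller deprecating an API could emit a warning that
# points at its own caller. The message and version below are hypothetical.
#
#   nouideprecwarn('frobnicate() is deprecated, use frob() instead', '4.3',
#                  stacklevel=2)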

DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (k, v, self._digester[k]))
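
# For illustration (not part of the original module): wrapping a file handle
# whose expected size and sha1 are known ahead of time; 'fh', 'size' and
# 'hexdigest' are hypothetical names.
#
#   wrapped = digestchecker(fh, size, {'sha1': hexdigest})
#   while wrapped.read(4096):
#       pass
#   wrapped.validate()   # raises error.Abort on a size or digest mismatch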

try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return sliceable[offset:offset + length]
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return memoryview(sliceable)[offset:offset + length]
            return memoryview(sliceable)[offset:]
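
# For illustration (not part of the original module): on Python 2 'buffer' is
# the builtin; the fallbacks above emulate it with plain slicing (Python 2
# without the builtin) or memoryview (Python 3).
#
#   data = b'deadbeef'
#   buffer(data, 4)      # view/copy equivalent to b'beef'
#   buffer(data, 0, 4)   # view/copy equivalent to b'dead'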

closefds = pycompat.osname == 'posix'

_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
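
# For illustration (not part of the original module): a rough sketch of how
# the class above cooperates with polling; 'proc' is a hypothetical subprocess
# whose stdout is read line by line without blocking on already-buffered data.
#
#   pipe = bufferedinputpipe(proc.stdout)
#   while not pipe.closed:
#       if not pipe.hasbuffer:
#           poll([pipe.fileno()])      # only poll when nothing is buffered
#       line = pipe.readline()
#       if not line:
#           break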

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
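
# For illustration (not part of the original module): the popen* helpers all
# run a shell command and hand back pipe objects; popen4 also returns the
# Popen object so callers can wait on it.
#
#   stdin, stdout, stderr, p = popen4('sort', newlines=True)
#   stdin.write('b\na\n')
#   stdin.close()
#   output = stdout.read()   # 'a\nb\n'
#   p.wait()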

def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    parts = remod.split('[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    # without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    # without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
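
# For illustration (not part of the original module): parsedate, defined
# further down in this module, tries these formats in turn, so the following
# spellings of the same moment are all accepted:
#
#   '2006-12-06 13:18:29', '2006-12-06T13:18:29', '2006-12-06 1:18:29PM'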

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
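
# For illustration (not part of the original module): cachefunc memoizes on
# positional arguments only, so repeated calls reuse the first result;
# 'computestatus' is a hypothetical expensive function.
#
#   cached = cachefunc(computestatus)
#   cached(42)   # calls computestatus(42)
#   cached(42)   # returns the cached value without calling again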

class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([('a', 0), ('b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([('a', 2)])
    >>> d2.keys() # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset

class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()
util: reimplement lrucachedict
2015-12-07 06:04:10 +03:00
|
|
|
# Temporarily mark as newest item before re-adjusting head to make
|
|
|
|
# this node the oldest item.
|
|
|
|
self._movetohead(node)
|
|
|
|
self._head = node.next
|
|
|
|
|
|
|
|
# Additional dict methods.
|
|
|
|
|
|
|
|
def get(self, k, default=None):
|
|
|
|
try:
|
2016-08-23 06:30:37 +03:00
|
|
|
return self._cache[k].value
|
util: reimplement lrucachedict
2015-12-07 06:04:10 +03:00
|
|
|
except KeyError:
|
|
|
|
return default
|
2013-02-09 19:41:46 +04:00
|
|
|
|
2013-09-07 00:16:21 +04:00
|
|
|
def clear(self):
|
util: reimplement lrucachedict
2015-12-07 06:04:10 +03:00
|
|
|
n = self._head
|
|
|
|
while n.key is not _notset:
|
|
|
|
n.markempty()
|
|
|
|
n = n.next
|
|
|
|
|
2013-09-07 00:16:21 +04:00
|
|
|
self._cache.clear()
|
util: reimplement lrucachedict
2015-12-07 06:04:10 +03:00
|
|
|
|
2015-12-31 00:10:53 +03:00
|
|
|
def copy(self):
|
|
|
|
result = lrucachedict(self._capacity)
|
|
|
|
n = self._head.prev
|
|
|
|
# Iterate in oldest-to-newest order, so the copy has the right ordering
|
|
|
|
for i in range(len(self._cache)):
|
|
|
|
result[n.key] = n.value
|
|
|
|
n = n.prev
|
|
|
|
return result
|
|
|
|
|
util: reimplement lrucachedict
2015-12-07 06:04:10 +03:00
|
|
|
def _movetohead(self, node):
|
|
|
|
"""Mark a node as the newest, making it the new head.
|
|
|
|
|
|
|
|
When a node is accessed, it becomes the freshest entry in the LRU
|
|
|
|
list, which is denoted by self._head.
|
|
|
|
|
|
|
|
Visually, let's make ``N`` the new head node (* denotes head):
|
|
|
|
|
|
|
|
previous/oldest <-> head <-> next/next newest
|
|
|
|
|
|
|
|
----<->--- A* ---<->-----
|
|
|
|
| |
|
|
|
|
E <-> D <-> N <-> C <-> B
|
|
|
|
|
|
|
|
To:
|
|
|
|
|
|
|
|
----<->--- N* ---<->-----
|
|
|
|
| |
|
|
|
|
E <-> D <-> C <-> B <-> A
|
|
|
|
|
|
|
|
This requires the following moves:
|
|
|
|
|
|
|
|
C.next = D (node.prev.next = node.next)
|
|
|
|
D.prev = C (node.next.prev = node.prev)
|
|
|
|
E.next = N (head.prev.next = node)
|
|
|
|
N.prev = E (node.prev = head.prev)
|
|
|
|
N.next = A (node.next = head)
|
|
|
|
A.prev = N (head.prev = node)
|
|
|
|
"""
|
|
|
|
head = self._head
|
|
|
|
# C.next = D
|
|
|
|
node.prev.next = node.next
|
|
|
|
# D.prev = C
|
|
|
|
node.next.prev = node.prev
|
|
|
|
# N.prev = E
|
|
|
|
node.prev = head.prev
|
|
|
|
# N.next = A
|
|
|
|
# It is tempting to do just "head" here, however if node is
|
|
|
|
# adjacent to head, this will do bad things.
|
|
|
|
node.next = head.prev.next
|
|
|
|
# E.next = N
|
|
|
|
node.next.prev = node
|
|
|
|
# A.prev = N
|
|
|
|
node.prev.next = node
|
|
|
|
|
|
|
|
self._head = node
|
|
|
|
|
|
|
|
def _addcapacity(self):
|
|
|
|
"""Add a node to the circular linked list.
|
|
|
|
|
|
|
|
The new node is inserted before the head node.
|
|
|
|
"""
|
|
|
|
head = self._head
|
|
|
|
node = _lrucachenode()
|
|
|
|
head.prev.next = node
|
|
|
|
node.prev = head.prev
|
|
|
|
node.next = head
|
|
|
|
head.prev = node
|
|
|
|
self._size += 1
|
|
|
|
return node
|
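The commit message above describes the dict-plus-doubly-linked-list design; the sketch below shows how the resulting class is meant to be driven. It is a minimal, hypothetical usage example, assuming this module is importable as mercurial.util, and it only exercises methods visible above (__setitem__, get, copy).

# Hypothetical usage sketch (not part of the module).
from mercurial import util

d = util.lrucachedict(2)        # capacity of two entries
d['a'] = 1
d['b'] = 2
d['c'] = 3                      # at capacity: the oldest entry ('a') is evicted
assert d.get('a', 'gone') == 'gone'
assert d.get('c') == 3
d2 = d.copy()                   # copy() preserves capacity and LRU order
assert d2.get('b') == 2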
2013-09-07 00:16:21 +04:00
|
|
|
|
2009-07-10 02:10:07 +04:00
|
|
|
def lrucachefunc(func):
|
|
|
|
'''cache most recent results of function calls'''
|
|
|
|
cache = {}
|
2015-05-16 21:28:04 +03:00
|
|
|
order = collections.deque()
|
2016-03-29 20:43:23 +03:00
|
|
|
if func.__code__.co_argcount == 1:
|
2009-07-10 02:10:07 +04:00
|
|
|
def f(arg):
|
|
|
|
if arg not in cache:
|
|
|
|
if len(cache) > 20:
|
2012-05-15 21:46:23 +04:00
|
|
|
del cache[order.popleft()]
|
2009-07-10 02:10:07 +04:00
|
|
|
cache[arg] = func(arg)
|
|
|
|
else:
|
|
|
|
order.remove(arg)
|
|
|
|
order.append(arg)
|
|
|
|
return cache[arg]
|
|
|
|
else:
|
|
|
|
def f(*args):
|
|
|
|
if args not in cache:
|
|
|
|
if len(cache) > 20:
|
2012-05-15 21:46:23 +04:00
|
|
|
del cache[order.popleft()]
|
2009-07-10 02:10:07 +04:00
|
|
|
cache[args] = func(*args)
|
|
|
|
else:
|
|
|
|
order.remove(args)
|
|
|
|
order.append(args)
|
|
|
|
return cache[args]
|
|
|
|
|
|
|
|
return f
|
|
|
|
|
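lrucachefunc above keeps roughly the 20 most recently used results, keyed on the single argument (or on the full argument tuple). A minimal sketch of the intended use, assuming the module is importable as mercurial.util; the function name is made up:

# Hypothetical usage sketch: memoize an expensive one-argument function.
from mercurial import util

calls = []

def slowsquare(x):
    calls.append(x)
    return x * x

fastsquare = util.lrucachefunc(slowsquare)

assert fastsquare(3) == 9
assert fastsquare(3) == 9       # served from the cache
assert calls == [3]             # the wrapped function ran only once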
2009-04-27 01:50:44 +04:00
|
|
|
class propertycache(object):
|
|
|
|
def __init__(self, func):
|
|
|
|
self.func = func
|
|
|
|
self.name = func.__name__
|
|
|
|
def __get__(self, obj, type=None):
|
|
|
|
result = self.func(obj)
|
2012-10-08 22:02:20 +04:00
|
|
|
self.cachevalue(obj, result)
|
2009-04-27 01:50:44 +04:00
|
|
|
return result
|
|
|
|
|
2012-10-08 22:02:20 +04:00
|
|
|
def cachevalue(self, obj, value):
|
2013-10-23 21:49:56 +04:00
|
|
|
# __dict__ assignment required to bypass __setattr__ (e.g. repoview)
|
2013-09-30 16:36:11 +04:00
|
|
|
obj.__dict__[self.name] = value
|
2012-10-08 22:02:20 +04:00
|
|
|
|
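propertycache is a non-data descriptor: the first attribute access runs the wrapped function and cachevalue() stores the result in the instance __dict__, so later accesses never reach the descriptor again. A small hypothetical sketch, assuming the module is importable as mercurial.util:

# Hypothetical usage sketch: compute an attribute once per instance.
from mercurial import util

class repoinfo(object):
    ncomputed = 0

    @util.propertycache
    def expensive(self):
        repoinfo.ncomputed += 1
        return 42

r = repoinfo()
assert r.expensive == 42
assert r.expensive == 42            # second access hits r.__dict__ directly
assert repoinfo.ncomputed == 1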
2005-09-21 22:44:08 +04:00
|
|
|
def pipefilter(s, cmd):
|
|
|
|
'''filter string S through command CMD, returning its output'''
|
2009-05-07 03:33:44 +04:00
|
|
|
p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
|
|
|
|
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
|
|
|
pout, perr = p.communicate(s)
|
|
|
|
return pout
|
2005-06-22 07:31:13 +04:00
|
|
|
|
2005-09-21 22:44:08 +04:00
|
|
|
def tempfilter(s, cmd):
|
|
|
|
'''filter string S through a pair of temporary files with CMD.
|
|
|
|
CMD is used as a template to create the real command to be run,
|
|
|
|
with the strings INFILE and OUTFILE replaced by the real names of
|
|
|
|
the temporary files generated.'''
|
|
|
|
inname, outname = None, None
|
|
|
|
try:
|
2006-04-30 23:11:22 +04:00
|
|
|
infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
|
2017-02-13 19:45:28 +03:00
|
|
|
fp = os.fdopen(infd, pycompat.sysstr('wb'))
|
2005-09-21 22:44:08 +04:00
|
|
|
fp.write(s)
|
|
|
|
fp.close()
|
2006-04-30 23:11:22 +04:00
|
|
|
outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
|
2005-09-21 22:44:08 +04:00
|
|
|
os.close(outfd)
|
|
|
|
cmd = cmd.replace('INFILE', inname)
|
|
|
|
cmd = cmd.replace('OUTFILE', outname)
|
|
|
|
code = os.system(cmd)
|
2016-12-18 23:56:41 +03:00
|
|
|
if pycompat.sysplatform == 'OpenVMS' and code & 1:
|
2007-06-08 18:24:43 +04:00
|
|
|
code = 0
|
2010-01-25 09:05:27 +03:00
|
|
|
if code:
|
|
|
|
raise Abort(_("command '%s' failed: %s") %
|
2011-05-06 17:31:09 +04:00
|
|
|
(cmd, explainexit(code)))
|
2016-01-13 03:16:19 +03:00
|
|
|
return readfile(outname)
|
2005-09-21 22:44:08 +04:00
|
|
|
finally:
|
|
|
|
try:
|
2010-01-25 09:05:27 +03:00
|
|
|
if inname:
|
|
|
|
os.unlink(inname)
|
2011-04-23 01:51:25 +04:00
|
|
|
except OSError:
|
2010-01-25 09:05:27 +03:00
|
|
|
pass
|
2005-09-21 22:44:08 +04:00
|
|
|
try:
|
2010-01-25 09:05:27 +03:00
|
|
|
if outname:
|
|
|
|
os.unlink(outname)
|
2011-04-23 01:51:25 +04:00
|
|
|
except OSError:
|
2010-01-25 09:05:27 +03:00
|
|
|
pass
|
2005-09-21 22:44:08 +04:00
|
|
|
|
|
|
|
filtertable = {
|
|
|
|
'tempfile:': tempfilter,
|
|
|
|
'pipe:': pipefilter,
|
|
|
|
}
|
|
|
|
|
|
|
|
def filter(s, cmd):
|
|
|
|
"filter a string through a command that transforms its input to its output"
|
|
|
|
for name, fn in filtertable.iteritems():
|
|
|
|
if cmd.startswith(name):
|
|
|
|
return fn(s, cmd[len(name):].lstrip())
|
|
|
|
return pipefilter(s, cmd)
|
|
|
|
|
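filter() dispatches on the 'tempfile:' and 'pipe:' prefixes from filtertable and falls back to pipefilter. A hypothetical sketch of what a call looks like, assuming a POSIX shell with tr available and the module importable as mercurial.util (on Windows the command itself would need adapting):

# Hypothetical usage sketch: push a string through an external command.
from mercurial import util

out = util.filter('mercurial\n', 'pipe:tr a-z A-Z')
assert out == 'MERCURIAL\n'

# The 'tempfile:' prefix goes through tempfilter instead, substituting
# INFILE/OUTFILE in the command template:
out = util.filter('mercurial\n', 'tempfile:tr a-z A-Z < INFILE > OUTFILE')
assert out == 'MERCURIAL\n'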
2005-08-24 06:58:46 +04:00
|
|
|
def binary(s):
|
2008-04-08 15:19:36 +04:00
|
|
|
"""return true if a string is binary data"""
|
2009-04-22 19:14:58 +04:00
|
|
|
return bool(s and '\0' in s)
|
2005-08-24 06:58:46 +04:00
|
|
|
|
2008-11-22 02:51:40 +03:00
|
|
|
def increasingchunks(source, min=1024, max=65536):
|
|
|
|
'''return no less than min bytes per chunk while data remains,
|
|
|
|
doubling min after each chunk until it reaches max'''
|
|
|
|
def log2(x):
|
|
|
|
if not x:
|
|
|
|
return 0
|
|
|
|
i = 0
|
|
|
|
while x:
|
|
|
|
x >>= 1
|
|
|
|
i += 1
|
|
|
|
return i - 1
|
|
|
|
|
|
|
|
buf = []
|
|
|
|
blen = 0
|
|
|
|
for chunk in source:
|
|
|
|
buf.append(chunk)
|
|
|
|
blen += len(chunk)
|
|
|
|
if blen >= min:
|
|
|
|
if min < max:
|
|
|
|
min = min << 1
|
|
|
|
nmin = 1 << log2(blen)
|
|
|
|
if nmin > min:
|
|
|
|
min = nmin
|
|
|
|
if min > max:
|
|
|
|
min = max
|
|
|
|
yield ''.join(buf)
|
|
|
|
blen = 0
|
|
|
|
buf = []
|
|
|
|
if buf:
|
|
|
|
yield ''.join(buf)
|
|
|
|
|
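A hypothetical sketch of the regrouping behaviour of increasingchunks(), assuming the module is importable as mercurial.util: many small chunks go in, progressively larger chunks come out, and only the final chunk may be smaller than the current minimum.

# Hypothetical usage sketch: regroup 200 ten-byte chunks.
from mercurial import util

source = ['x' * 10] * 200
sizes = [len(c) for c in util.increasingchunks(source, min=16, max=256)]

assert sum(sizes) == 2000                   # nothing is lost or duplicated
assert sizes[:-1] == sorted(sizes[:-1])     # chunk sizes grow until max is hit
assert all(s >= 16 for s in sizes[:-1])     # every full chunk has >= min bytes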
2009-04-03 22:20:52 +04:00
|
|
|
Abort = error.Abort
|
2005-06-28 14:38:33 +04:00
|
|
|
|
2010-01-25 09:05:27 +03:00
|
|
|
def always(fn):
|
|
|
|
return True
|
|
|
|
|
|
|
|
def never(fn):
|
|
|
|
return False
|
2005-07-18 18:54:21 +04:00
|
|
|
|
2014-12-04 16:43:40 +03:00
|
|
|
def nogc(func):
|
|
|
|
"""disable garbage collector
|
|
|
|
|
|
|
|
Python's garbage collector triggers a GC each time a certain number of
|
|
|
|
container objects (the number being defined by gc.get_threshold()) are
|
|
|
|
allocated even when marked not to be tracked by the collector. Tracking has
|
|
|
|
no effect on when GCs are triggered, only on what objects the GC looks
|
2014-04-18 00:47:38 +04:00
|
|
|
into. As a workaround, disable GC while building complex (huge)
|
2014-12-04 16:43:40 +03:00
|
|
|
containers.
|
|
|
|
|
|
|
|
This garbage collector issue has been fixed in Python 2.7.
|
|
|
|
"""
|
2016-10-07 15:01:16 +03:00
|
|
|
if sys.version_info >= (2, 7):
|
2016-07-28 15:18:01 +03:00
|
|
|
return func
|
2014-12-04 16:43:40 +03:00
|
|
|
def wrapper(*args, **kwargs):
|
|
|
|
gcenabled = gc.isenabled()
|
|
|
|
gc.disable()
|
|
|
|
try:
|
|
|
|
return func(*args, **kwargs)
|
|
|
|
finally:
|
|
|
|
if gcenabled:
|
|
|
|
gc.enable()
|
|
|
|
return wrapper
|
|
|
|
|
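nogc is a decorator: on interpreters older than 2.7 it disables the cyclic collector for the duration of the call and restores it afterwards, and on 2.7+ it simply returns the function untouched. A minimal hypothetical sketch, assuming the module is importable as mercurial.util:

# Hypothetical usage sketch: build a large container without GC churn.
from mercurial import util

@util.nogc
def buildindex(n):
    # allocating many container objects is what used to trigger repeated
    # collector passes on old interpreters
    return dict((i, str(i)) for i in range(n))

assert len(buildindex(100000)) == 100000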
2007-03-16 06:22:57 +03:00
|
|
|
def pathto(root, n1, n2):
|
2005-08-13 03:06:52 +04:00
|
|
|
'''return the relative path from one place to another.
|
2007-03-16 06:22:57 +03:00
|
|
|
root should use os.sep to separate directories
|
2006-11-15 23:56:47 +03:00
|
|
|
n1 should use os.sep to separate directories
|
|
|
|
n2 should use "/" to separate directories
|
|
|
|
returns an os.sep-separated path.
|
2007-03-16 06:22:57 +03:00
|
|
|
|
|
|
|
If n1 is a relative path, it's assumed it's
|
|
|
|
relative to root.
|
|
|
|
n2 should always be relative to root.
|
2006-11-15 23:56:47 +03:00
|
|
|
'''
|
2010-01-25 09:05:27 +03:00
|
|
|
if not n1:
|
|
|
|
return localpath(n2)
|
2007-03-16 06:22:58 +03:00
|
|
|
if os.path.isabs(n1):
|
|
|
|
if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
|
|
|
|
return os.path.join(root, localpath(n2))
|
|
|
|
n2 = '/'.join((pconvert(root), n2))
|
2008-01-09 15:30:36 +03:00
|
|
|
a, b = splitpath(n1), n2.split('/')
|
2005-11-14 04:59:35 +03:00
|
|
|
a.reverse()
|
|
|
|
b.reverse()
|
2005-08-12 23:16:58 +04:00
|
|
|
while a and b and a[-1] == b[-1]:
|
2005-11-14 04:59:35 +03:00
|
|
|
a.pop()
|
|
|
|
b.pop()
|
2005-08-12 23:16:58 +04:00
|
|
|
b.reverse()
|
2016-12-17 17:26:30 +03:00
|
|
|
return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
|
2005-08-12 23:16:58 +04:00
|
|
|
|
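A hypothetical sketch of pathto() on a POSIX system (where os.sep is '/'); on Windows, root and n1 would use backslashes. Assumes the module is importable as mercurial.util and that the paths are purely illustrative.

# Hypothetical usage sketch: relative path from one repo location to another.
from mercurial import util

# from the working-directory subdirectory 'a/b' to the file 'a/c/f.txt'
assert util.pathto('/repo', 'a/b', 'a/c/f.txt') == '../c/f.txt'
# with an empty starting directory, the repo-relative path comes back localized
assert util.pathto('/repo', '', 'a/c/f.txt') == 'a/c/f.txt'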
2011-05-06 17:10:29 +04:00
|
|
|
def mainfrozen():
|
2007-12-20 23:02:51 +03:00
|
|
|
"""return True if we are a frozen executable.
|
|
|
|
|
|
|
|
The code supports py2exe (most common, Windows only) and tools/freeze
|
|
|
|
(portable, not much used).
|
|
|
|
"""
|
2011-07-26 01:04:40 +04:00
|
|
|
return (safehasattr(sys, "frozen") or # new py2exe
|
|
|
|
safehasattr(sys, "importers") or # old py2exe
|
2016-10-02 02:59:17 +03:00
|
|
|
imp.is_frozen(u"__main__")) # tools/freeze
|
2007-12-20 23:02:51 +03:00
|
|
|
|
2014-09-28 18:57:06 +04:00
|
|
|
# the location of data files matching the source code
|
util: adjust 'datapath' to be correct in a frozen OS X package
Apparently unlike py2exe, py2app copies the Mercurial source tree as-is to a
Contents/Resources subdirectory of an app bundle, and places its binary stub in
Contents/MacOS. (The Windows install has the 'hgext' and 'mercurial' modules in
'lib/library.zip', while the help and templates subdirectories have been moved
out of the mercurial directory to the root of the installation. I assume that
the python code living in a zip file is why "py2exe doesn't support __file__".)
Therefore, prior to this change, Mercurial in a frozen app bundle on OS X would
go looking for help *.txt, templates and locale info in Contents/MacOS, where
they don't exist.
There are only a handful of places that test for frozen, and not all of them are
wrong for OS X, so it seems wiser to handle them on a case-by-case basis, rather
than try to change mainfrozen(). The remaining cases are:
1) util.hgexecutable() wrongly points to the bundled python executable, and
affects $HG in util.system() launched processes (e.g. external hooks)
2) util.hgcmd() wrongly points to the bundled python executable, but it seems
to only affect 'hg serve -d'
3) hook._pythonhook() may be OK, since I didn't see anything outrageous when
printing sys.path from an internal hook. I'm not sure if this special
case is needed on OS X though.
4) sslutil._plainapplepython() is OK, because sys.executable is not
/usr/bin/python, nor is it in /System/Library/Frameworks
2016-01-11 01:49:01 +03:00
|
|
|
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
|
2014-09-28 18:57:06 +04:00
|
|
|
# executable version (py2exe) doesn't support __file__
|
2016-12-19 21:50:07 +03:00
|
|
|
datapath = os.path.dirname(pycompat.sysexecutable)
|
2014-09-28 18:57:06 +04:00
|
|
|
else:
|
2017-02-20 16:10:42 +03:00
|
|
|
datapath = os.path.dirname(pycompat.fsencode(__file__))
|
2016-11-06 06:18:23 +03:00
|
|
|
|
2014-09-28 18:57:47 +04:00
|
|
|
i18n.setdatapath(datapath)
|
|
|
|
|
2014-09-28 18:57:06 +04:00
|
|
|
_hgexecutable = None
|
|
|
|
|
2007-08-05 00:25:12 +04:00
|
|
|
def hgexecutable():
|
|
|
|
"""return location of the 'hg' executable.
|
2007-06-23 22:21:10 +04:00
|
|
|
|
2007-08-05 00:25:12 +04:00
|
|
|
Defaults to $HG or 'hg' in the search path.
|
2007-06-23 22:21:10 +04:00
|
|
|
"""
|
2007-08-05 00:25:12 +04:00
|
|
|
if _hgexecutable is None:
|
2016-12-17 23:36:00 +03:00
|
|
|
hg = encoding.environ.get('HG')
|
2017-03-19 08:19:27 +03:00
|
|
|
mainmod = sys.modules[pycompat.sysstr('__main__')]
|
2008-04-10 02:27:57 +04:00
|
|
|
if hg:
|
2011-05-06 17:13:46 +04:00
|
|
|
_sethgexecutable(hg)
|
2011-05-06 17:10:29 +04:00
|
|
|
elif mainfrozen():
|
2016-01-11 01:56:08 +03:00
|
|
|
if getattr(sys, 'frozen', None) == 'macosx_app':
|
|
|
|
# Env variable set by py2app
|
2016-12-17 23:36:00 +03:00
|
|
|
_sethgexecutable(encoding.environ['EXECUTABLEPATH'])
|
2016-01-11 01:56:08 +03:00
|
|
|
else:
|
2016-12-19 21:50:07 +03:00
|
|
|
_sethgexecutable(pycompat.sysexecutable)
|
2017-02-20 16:10:42 +03:00
|
|
|
elif (os.path.basename(
|
|
|
|
pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
|
|
|
|
_sethgexecutable(pycompat.fsencode(mainmod.__file__))
|
2007-12-20 23:02:51 +03:00
|
|
|
else:
|
2011-05-08 22:35:46 +04:00
|
|
|
exe = findexe('hg') or os.path.basename(sys.argv[0])
|
2011-05-06 17:13:46 +04:00
|
|
|
_sethgexecutable(exe)
|
2007-08-05 00:25:12 +04:00
|
|
|
return _hgexecutable
|
|
|
|
|
2011-05-06 17:13:46 +04:00
|
|
|
def _sethgexecutable(path):
|
2007-08-05 00:25:12 +04:00
|
|
|
"""set location of the 'hg' executable"""
|
2007-06-23 22:21:10 +04:00
|
|
|
global _hgexecutable
|
2007-08-05 00:25:12 +04:00
|
|
|
_hgexecutable = path
|
2007-06-23 22:21:10 +04:00
|
|
|
|
2015-10-03 08:57:24 +03:00
|
|
|
def _isstdout(f):
|
|
|
|
fileno = getattr(f, 'fileno', None)
|
|
|
|
return fileno and fileno() == sys.__stdout__.fileno()
|
|
|
|
|
2017-01-10 01:58:02 +03:00
|
|
|
def shellenviron(environ=None):
|
|
|
|
"""return environ with optional override, useful for shelling out"""
|
|
|
|
def py2shell(val):
|
|
|
|
'convert python object into string that is useful to shell'
|
|
|
|
if val is None or val is False:
|
|
|
|
return '0'
|
|
|
|
if val is True:
|
|
|
|
return '1'
|
|
|
|
return str(val)
|
|
|
|
env = dict(encoding.environ)
|
|
|
|
if environ:
|
|
|
|
env.update((k, py2shell(v)) for k, v in environ.iteritems())
|
|
|
|
env['HG'] = hgexecutable()
|
|
|
|
return env
|
|
|
|
|
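shellenviron() above flattens Python values into strings a child shell can consume: None and False become '0', True becomes '1', everything else goes through str(). A small hypothetical sketch, assuming the module is importable as mercurial.util; the variable names are made up:

# Hypothetical usage sketch of the value flattening.
from mercurial import util

env = util.shellenviron({'HGPLAIN': True, 'HGEDITOR': None, 'COLUMNS': 80})
assert env['HGPLAIN'] == '1'
assert env['HGEDITOR'] == '0'
assert env['COLUMNS'] == '80'
assert 'HG' in env              # always points at the hg executable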
2017-02-18 19:16:45 +03:00
|
|
|
def system(cmd, environ=None, cwd=None, out=None):
|
2006-03-11 09:24:19 +03:00
|
|
|
'''enhanced shell command execution.
|
2006-03-11 09:42:59 +03:00
|
|
|
run with environment maybe modified, maybe in different dir.
|
|
|
|
|
2010-07-01 03:15:23 +04:00
|
|
|
if out is specified, it is assumed to be a file-like object that has a
|
|
|
|
write() method. stdout and stderr will be redirected to out.'''
|
2011-02-18 05:35:01 +03:00
|
|
|
try:
|
2016-10-20 17:53:36 +03:00
|
|
|
stdout.flush()
|
2011-02-18 05:35:01 +03:00
|
|
|
except Exception:
|
|
|
|
pass
|
2010-12-22 22:25:00 +03:00
|
|
|
cmd = quotecommand(cmd)
|
2017-06-17 01:42:03 +03:00
|
|
|
env = shellenviron(environ)
|
|
|
|
if out is None or _isstdout(out):
|
|
|
|
rc = subprocess.call(cmd, shell=True, close_fds=closefds,
|
|
|
|
env=env, cwd=cwd)
|
2010-07-01 03:15:23 +04:00
|
|
|
else:
|
2017-06-17 01:42:03 +03:00
|
|
|
proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
|
|
|
|
env=env, cwd=cwd, stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.STDOUT)
|
|
|
|
for line in iter(proc.stdout.readline, ''):
|
|
|
|
out.write(line)
|
|
|
|
proc.wait()
|
|
|
|
rc = proc.returncode
|
|
|
|
if pycompat.sysplatform == 'OpenVMS' and rc & 1:
|
|
|
|
rc = 0
|
2009-09-21 00:19:18 +04:00
|
|
|
return rc
|
2005-06-28 14:38:33 +04:00
|
|
|
|
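system() above runs a shell command, optionally with extra environment entries and with stdout/stderr redirected to any object that has a write() method. A hypothetical sketch assuming a POSIX shell and the Python 2 str semantics this module targets; the echo command is only illustrative:

# Hypothetical usage sketch: capture command output into a write()-able sink.
from mercurial import util

class sink(object):
    def __init__(self):
        self.chunks = []
    def write(self, data):
        self.chunks.append(data)

buf = sink()
rc = util.system('echo "$GREETING"', environ={'GREETING': 'hello'}, out=buf)
assert rc == 0
assert ''.join(buf.chunks).strip() == 'hello'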
2008-11-19 01:02:14 +03:00
|
|
|
def checksignature(func):
|
|
|
|
'''wrap a function with code to check for calling errors'''
|
|
|
|
def check(*args, **kwargs):
|
|
|
|
try:
|
|
|
|
return func(*args, **kwargs)
|
|
|
|
except TypeError:
|
|
|
|
if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
|
2009-01-12 22:51:43 +03:00
|
|
|
raise error.SignatureError
|
2008-11-19 01:02:14 +03:00
|
|
|
raise
|
|
|
|
|
|
|
|
return check
|
|
|
|
|
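checksignature() only converts a TypeError into error.SignatureError when the traceback has a single frame, i.e. the failure happened at the call itself (wrong arity) rather than somewhere inside the function body. A minimal hypothetical sketch, assuming the module is importable as mercurial.util:

# Hypothetical usage sketch: distinguish bad call signatures from other TypeErrors.
from mercurial import error, util

def greet(name):
    return 'hello ' + name

checked = util.checksignature(greet)

assert checked('world') == 'hello world'
try:
    checked('too', 'many')      # wrong arity: surfaces as SignatureError
except error.SignatureError:
    pass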
2017-03-12 11:23:07 +03:00
|
|
|
# a whitelist of known filesystems where hardlinks work reliably
|
2017-02-11 03:56:29 +03:00
|
|
|
_hardlinkfswhitelist = {
|
2017-03-12 11:23:07 +03:00
|
|
|
'btrfs',
|
|
|
|
'ext2',
|
|
|
|
'ext3',
|
|
|
|
'ext4',
|
2017-03-24 08:31:50 +03:00
|
|
|
'hfs',
|
2017-03-12 11:23:07 +03:00
|
|
|
'jfs',
|
|
|
|
'reiserfs',
|
|
|
|
'tmpfs',
|
2017-03-24 08:31:50 +03:00
|
|
|
'ufs',
|
2017-03-12 11:23:07 +03:00
|
|
|
'xfs',
|
2017-03-24 08:31:50 +03:00
|
|
|
'zfs',
|
2017-02-11 03:56:29 +03:00
|
|
|
}
|
2017-03-12 11:23:07 +03:00
|
|
|
|
2016-05-18 18:20:38 +03:00
|
|
|
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
|
2015-12-12 22:00:04 +03:00
|
|
|
'''copy a file, preserving mode and optionally other stat info like
|
2016-06-12 23:11:56 +03:00
|
|
|
atime/mtime
|
|
|
|
|
|
|
|
checkambig argument is used with filestat, and is useful only if
|
|
|
|
the destination file is guarded by any lock (e.g. repo.lock or
|
|
|
|
repo.wlock).
|
|
|
|
|
|
|
|
copystat and checkambig should be exclusive.
|
|
|
|
'''
|
2016-05-18 18:20:38 +03:00
|
|
|
assert not (copystat and checkambig)
|
|
|
|
oldstat = None
|
2013-01-10 03:44:23 +04:00
|
|
|
if os.path.lexists(dest):
|
2016-05-18 18:20:38 +03:00
|
|
|
if checkambig:
|
2017-06-11 00:09:54 +03:00
|
|
|
oldstat = checkambig and filestat.frompath(dest)
|
2013-01-10 03:44:23 +04:00
|
|
|
unlink(dest)
|
2017-03-12 11:23:07 +03:00
|
|
|
if hardlink:
|
|
|
|
# Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
|
|
|
|
# unless we are confident that dest is on a whitelisted filesystem.
|
2017-03-25 11:25:23 +03:00
|
|
|
try:
|
|
|
|
fstype = getfstype(os.path.dirname(dest))
|
|
|
|
except OSError:
|
|
|
|
fstype = None
|
2017-03-12 11:23:07 +03:00
|
|
|
if fstype not in _hardlinkfswhitelist:
|
|
|
|
hardlink = False
|
2017-03-12 12:03:23 +03:00
|
|
|
if hardlink:
|
2015-01-05 23:39:09 +03:00
|
|
|
try:
|
|
|
|
oslink(src, dest)
|
|
|
|
return
|
|
|
|
except (IOError, OSError):
|
|
|
|
pass # fall back to normal copy
|
2007-03-22 06:20:56 +03:00
|
|
|
if os.path.islink(src):
|
|
|
|
os.symlink(os.readlink(src), dest)
|
2015-12-12 22:00:04 +03:00
|
|
|
# copytime is ignored for symlinks, but in general copytime isn't needed
|
|
|
|
# for them anyway
|
2007-03-22 06:20:56 +03:00
|
|
|
else:
|
|
|
|
try:
|
|
|
|
shutil.copyfile(src, dest)
|
2015-12-12 22:00:04 +03:00
|
|
|
if copystat:
|
|
|
|
# copystat also copies mode
|
|
|
|
shutil.copystat(src, dest)
|
|
|
|
else:
|
|
|
|
shutil.copymode(src, dest)
|
2016-05-18 18:20:38 +03:00
|
|
|
if oldstat and oldstat.stat:
|
2017-06-11 00:09:54 +03:00
|
|
|
newstat = filestat.frompath(dest)
|
2016-05-18 18:20:38 +03:00
|
|
|
if newstat.isambig(oldstat):
|
|
|
|
# stat of copied file is ambiguous to original one
|
|
|
|
advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
|
|
|
|
os.utime(dest, (advanced, advanced))
|
2015-06-24 08:20:08 +03:00
|
|
|
except shutil.Error as inst:
|
2007-03-22 06:20:56 +03:00
|
|
|
raise Abort(str(inst))
|
2006-11-13 22:26:57 +03:00
|
|
|
|
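A hypothetical sketch of copyfile() with the whitelist above in play: hardlink=True is only honoured when the destination filesystem is known to handle hardlinks well, otherwise the call silently falls back to a plain copy. Assumes the module is importable as mercurial.util; the paths are throwaway temporaries.

# Hypothetical usage sketch: request a hardlink, accept a copy as fallback.
import os
import tempfile
from mercurial import util

tmp = tempfile.mkdtemp()
src = os.path.join(tmp, 'src')
dst = os.path.join(tmp, 'dst')
with open(src, 'wb') as fp:
    fp.write(b'data')

util.copyfile(src, dst, hardlink=True)
with open(dst, 'rb') as fp:
    assert fp.read() == b'data'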
2015-03-19 17:24:22 +03:00
|
|
|
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
|
|
|
|
"""Copy a directory tree using hardlinks if possible."""
|
|
|
|
num = 0
|
Add support for cloning with hardlinks on windows.
In order to use hardlinks, the win32file module is needed, and this is
present in ActivePython. If it isn't present, or hardlinks are not supported
on the underlying filesystem, a regular copy is used.
When using hardlinks the biggest benefit is probably the saving in space,
but cloning can be much quicker. For example, cloning the Xen tree
(non-trivial) without an update goes from about 95s to 15s.
Unix-like platforms should be unaffected, although they should be more tolerant of
filesystems that don't support hard links.
(tweaked by mpm to deal with new copyfiles function)
--- hg.orig/mercurial/commands.py 2005-09-13 19:32:53.000000000 -0500
+++ hg/mercurial/commands.py 2005-09-14 12:11:34.000000000 -0500
@@ -620,10 +620,6 @@ def clone(ui, source, dest=None, **opts)
if other.dev() != -1:
abspath = os.path.abspath(source)
- copyfile = (os.stat(dest).st_dev == other.dev()
- and getattr(os, 'link', None) or shutil.copy2)
- if copyfile is not shutil.copy2:
- ui.note("cloning by hardlink\n")
# we use a lock here because if we race with commit, we can
# end up with extra data in the cloned revlogs that's not
@@ -638,7 +634,7 @@ def clone(ui, source, dest=None, **opts)
for f in files.split():
src = os.path.join(source, ".hg", f)
dst = os.path.join(dest, ".hg", f)
- util.copyfiles(src, dst, copyfile)
+ util.copyfiles(src, dst)
repo = hg.repository(ui, dest)
Index: hg/mercurial/util.py
===================================================================
--- hg.orig/mercurial/util.py 2005-09-08 00:15:25.000000000 -0500
+++ hg/mercurial/util.py 2005-09-14 12:16:49.000000000 -0500
@@ -12,7 +12,7 @@ platform-specific details from the core.
import os, errno
from demandload import *
-demandload(globals(), "re cStringIO")
+demandload(globals(), "re cStringIO shutil")
def binary(s):
"""return true if a string is binary data using diff's heuristic"""
@@ -217,17 +217,28 @@ def rename(src, dst):
os.unlink(dst)
os.rename(src, dst)
-def copyfiles(src, dst, copyfile):
- """Copy a directory tree, files are copied using 'copyfile'."""
+def copyfiles(src, dst, hardlink=None):
+ """Copy a directory tree using hardlinks if possible"""
+
+ if hardlink is None:
+ hardlink = (os.stat(src).st_dev ==
+ os.stat(os.path.dirname(dst)).st_dev)
if os.path.isdir(src):
os.mkdir(dst)
for name in os.listdir(src):
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
- copyfiles(srcname, dstname, copyfile)
+ copyfiles(srcname, dstname, hardlink)
else:
- copyfile(src, dst)
+ if hardlink:
+ try:
+ os_link(src, dst)
+ except:
+ hardlink = False
+ shutil.copy2(src, dst)
+ else:
+ shutil.copy2(src, dst)
def opener(base):
"""
@@ -244,13 +255,13 @@ def opener(base):
if mode[0] != "r":
try:
- s = os.stat(f)
+ nlink = nlinks(f)
except OSError:
d = os.path.dirname(f)
if not os.path.isdir(d):
os.makedirs(d)
else:
- if s.st_nlink > 1:
+ if nlink > 1:
file(f + ".tmp", "wb").write(file(f, "rb").read())
rename(f+".tmp", f)
@@ -266,10 +277,41 @@ def _makelock_file(info, pathname):
def _readlock_file(pathname):
return file(pathname).read()
+def nlinks(pathname):
+ """Return number of hardlinks for the given file."""
+ return os.stat(pathname).st_nlink
+
+if hasattr(os, 'link'):
+ os_link = os.link
+else:
+ def os_link(src, dst):
+ raise OSError(0, "Hardlinks not supported")
+
# Platform specific variants
if os.name == 'nt':
nulldev = 'NUL:'
+ try: # ActivePython can create hard links using win32file module
+ import win32file
+
+ def os_link(src, dst): # NB will only succeed on NTFS
+ win32file.CreateHardLink(dst, src)
+
+ def nlinks(pathname):
+ """Return number of hardlinks for the given file."""
+ try:
+ fh = win32file.CreateFile(pathname,
+ win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
+ None, win32file.OPEN_EXISTING, 0, None)
+ res = win32file.GetFileInformationByHandle(fh)
+ fh.Close()
+ return res[7]
+ except:
+ return os.stat(pathname).st_nlink
+
+ except ImportError:
+ pass
+
def is_exec(f, last):
return last
2005-09-14 21:22:20 +04:00
|
|
|
|
2017-03-29 22:21:15 +03:00
|
|
|
gettopic = lambda: hardlink and _('linking') or _('copying')
|
2005-09-08 06:21:38 +04:00
|
|
|
|
|
|
|
if os.path.isdir(src):
|
2017-03-29 22:26:46 +03:00
|
|
|
if hardlink is None:
|
|
|
|
hardlink = (os.stat(src).st_dev ==
|
|
|
|
os.stat(os.path.dirname(dst)).st_dev)
|
|
|
|
topic = gettopic()
|
2005-09-08 06:21:38 +04:00
|
|
|
os.mkdir(dst)
|
2017-04-26 16:26:28 +03:00
|
|
|
for name, kind in listdir(src):
|
2005-09-08 06:21:38 +04:00
|
|
|
srcname = os.path.join(src, name)
|
|
|
|
dstname = os.path.join(dst, name)
|
2015-03-19 17:24:22 +03:00
|
|
|
def nprog(t, pos):
|
|
|
|
if pos is not None:
|
|
|
|
return progress(t, pos + num)
|
|
|
|
hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
|
2010-05-31 15:47:51 +04:00
|
|
|
num += n
|
2005-09-08 06:21:38 +04:00
|
|
|
else:
|
2017-03-29 22:26:46 +03:00
|
|
|
if hardlink is None:
|
2017-03-29 22:37:03 +03:00
|
|
|
hardlink = (os.stat(os.path.dirname(src)).st_dev ==
|
2017-03-29 22:26:46 +03:00
|
|
|
os.stat(os.path.dirname(dst)).st_dev)
|
|
|
|
topic = gettopic()
|
|
|
|
|
Add support for cloning with hardlinks on windows.
2005-09-14 21:22:20 +04:00
|
|
|
if hardlink:
|
|
|
|
try:
|
2011-05-06 17:34:34 +04:00
|
|
|
oslink(src, dst)
|
2006-04-06 04:17:07 +04:00
|
|
|
except (IOError, OSError):
|
Add support for cloning with hardlinks on windows.
In order to use hardlinks, the win32file module is needed, and this is
present in ActivePython. If it isn't present, or hardlinks are not supported
on the underlying filesystem, a regular copy is used.
When using hardlinks the biggest benefit is probably the saving in space,
but cloning can be much quicker. For example cloning the Xen tree
(non trivial) without an update goes from about 95s to 15s.
Unix-like platforms should be unaffected, although should be more tolerant on
filesystems that don't support hard links.
(tweaked by mpm to deal with new copyfiles function)
--- hg.orig/mercurial/commands.py 2005-09-13 19:32:53.000000000 -0500
+++ hg/mercurial/commands.py 2005-09-14 12:11:34.000000000 -0500
@@ -620,10 +620,6 @@ def clone(ui, source, dest=None, **opts)
if other.dev() != -1:
abspath = os.path.abspath(source)
- copyfile = (os.stat(dest).st_dev == other.dev()
- and getattr(os, 'link', None) or shutil.copy2)
- if copyfile is not shutil.copy2:
- ui.note("cloning by hardlink\n")
# we use a lock here because if we race with commit, we can
# end up with extra data in the cloned revlogs that's not
@@ -638,7 +634,7 @@ def clone(ui, source, dest=None, **opts)
for f in files.split():
src = os.path.join(source, ".hg", f)
dst = os.path.join(dest, ".hg", f)
- util.copyfiles(src, dst, copyfile)
+ util.copyfiles(src, dst)
repo = hg.repository(ui, dest)
Index: hg/mercurial/util.py
===================================================================
--- hg.orig/mercurial/util.py 2005-09-08 00:15:25.000000000 -0500
+++ hg/mercurial/util.py 2005-09-14 12:16:49.000000000 -0500
@@ -12,7 +12,7 @@ platform-specific details from the core.
import os, errno
from demandload import *
-demandload(globals(), "re cStringIO")
+demandload(globals(), "re cStringIO shutil")
def binary(s):
"""return true if a string is binary data using diff's heuristic"""
@@ -217,17 +217,28 @@ def rename(src, dst):
os.unlink(dst)
os.rename(src, dst)
-def copyfiles(src, dst, copyfile):
- """Copy a directory tree, files are copied using 'copyfile'."""
+def copyfiles(src, dst, hardlink=None):
+ """Copy a directory tree using hardlinks if possible"""
+
+ if hardlink is None:
+ hardlink = (os.stat(src).st_dev ==
+ os.stat(os.path.dirname(dst)).st_dev)
if os.path.isdir(src):
os.mkdir(dst)
for name in os.listdir(src):
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
- copyfiles(srcname, dstname, copyfile)
+ copyfiles(srcname, dstname, hardlink)
else:
- copyfile(src, dst)
+ if hardlink:
+ try:
+ os_link(src, dst)
+ except:
+ hardlink = False
+ shutil.copy2(src, dst)
+ else:
+ shutil.copy2(src, dst)
def opener(base):
"""
@@ -244,13 +255,13 @@ def opener(base):
if mode[0] != "r":
try:
- s = os.stat(f)
+ nlink = nlinks(f)
except OSError:
d = os.path.dirname(f)
if not os.path.isdir(d):
os.makedirs(d)
else:
- if s.st_nlink > 1:
+ if nlink > 1:
file(f + ".tmp", "wb").write(file(f, "rb").read())
rename(f+".tmp", f)
@@ -266,10 +277,41 @@ def _makelock_file(info, pathname):
def _readlock_file(pathname):
return file(pathname).read()
+def nlinks(pathname):
+ """Return number of hardlinks for the given file."""
+ return os.stat(pathname).st_nlink
+
+if hasattr(os, 'link'):
+ os_link = os.link
+else:
+ def os_link(src, dst):
+ raise OSError(0, "Hardlinks not supported")
+
# Platform specific variants
if os.name == 'nt':
nulldev = 'NUL:'
+ try: # ActivePython can create hard links using win32file module
+ import win32file
+
+ def os_link(src, dst): # NB will only succeed on NTFS
+ win32file.CreateHardLink(dst, src)
+
+ def nlinks(pathname):
+ """Return number of hardlinks for the given file."""
+ try:
+ fh = win32file.CreateFile(pathname,
+ win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
+ None, win32file.OPEN_EXISTING, 0, None)
+ res = win32file.GetFileInformationByHandle(fh)
+ fh.Close()
+ return res[7]
+ except:
+ return os.stat(pathname).st_nlink
+
+ except ImportError:
+ pass
+
def is_exec(f, last):
return last
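The patch quoted above falls back to a plain copy whenever hardlinking fails. A minimal standalone sketch of that pattern (illustrative only, not the Mercurial implementation; link_or_copy is a hypothetical name):

import os
import shutil

def link_or_copy(src, dst):
    """Try to hardlink src to dst; fall back to a full copy."""
    try:
        os.link(src, dst)        # raises OSError on filesystems without hardlinks
        return True              # dst shares storage with src
    except OSError:
        shutil.copy2(src, dst)   # copy contents, mode and timestamps
        return False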
2005-09-14 21:22:20 +04:00
|
|
|
hardlink = False
|
2005-12-16 10:32:44 +03:00
|
|
|
shutil.copy(src, dst)
|
2005-09-14 21:22:20 +04:00
|
|
|
else:
|
2005-12-16 10:32:44 +03:00
|
|
|
shutil.copy(src, dst)
|
2010-05-31 15:47:51 +04:00
|
|
|
num += 1
|
2015-03-19 17:24:22 +03:00
|
|
|
progress(topic, num)
|
|
|
|
progress(topic, None)
|
2005-07-14 18:30:49 +04:00
|
|
|
|
2010-05-31 15:47:51 +04:00
|
|
|
return hardlink, num
|
2010-05-28 19:28:34 +04:00
|
|
|
|
2017-06-24 17:27:50 +03:00
|
|
|
_winreservednames = b'''con prn aux nul
|
2011-04-06 20:09:43 +04:00
|
|
|
com1 com2 com3 com4 com5 com6 com7 com8 com9
|
|
|
|
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
|
2011-05-08 00:25:20 +04:00
|
|
|
_winreservedchars = ':*?"<>|'
|
2011-04-06 20:09:43 +04:00
|
|
|
def checkwinfilename(path):
|
2013-11-08 15:35:50 +04:00
|
|
|
r'''Check that the base-relative path is a valid filename on Windows.
|
2011-04-06 20:09:43 +04:00
|
|
|
Returns None if the path is ok, or a UI string describing the problem.
|
|
|
|
|
|
|
|
>>> checkwinfilename("just/a/normal/path")
|
|
|
|
>>> checkwinfilename("foo/bar/con.xml")
|
|
|
|
"filename contains 'con', which is reserved on Windows"
|
|
|
|
>>> checkwinfilename("foo/con.xml/bar")
|
|
|
|
"filename contains 'con', which is reserved on Windows"
|
|
|
|
>>> checkwinfilename("foo/bar/xml.con")
|
|
|
|
>>> checkwinfilename("foo/bar/AUX/bla.txt")
|
|
|
|
"filename contains 'AUX', which is reserved on Windows"
|
|
|
|
>>> checkwinfilename("foo/bar/bla:.txt")
|
|
|
|
"filename contains ':', which is reserved on Windows"
|
|
|
|
>>> checkwinfilename("foo/bar/b\07la.txt")
|
2013-11-08 15:35:50 +04:00
|
|
|
"filename contains '\\x07', which is invalid on Windows"
|
2011-04-06 20:09:43 +04:00
|
|
|
>>> checkwinfilename("foo/bar/bla ")
|
|
|
|
"filename ends with ' ', which is not allowed on Windows"
|
2011-10-25 01:57:14 +04:00
|
|
|
>>> checkwinfilename("../bar")
|
2013-11-08 15:35:50 +04:00
|
|
|
>>> checkwinfilename("foo\\")
|
|
|
|
"filename ends with '\\', which is invalid on Windows"
|
|
|
|
>>> checkwinfilename("foo\\/bar")
|
|
|
|
"directory name ends with '\\', which is invalid on Windows"
|
2011-04-06 20:09:43 +04:00
|
|
|
'''
|
2013-11-08 15:35:50 +04:00
|
|
|
if path.endswith('\\'):
|
|
|
|
return _("filename ends with '\\', which is invalid on Windows")
|
|
|
|
if '\\/' in path:
|
|
|
|
return _("directory name ends with '\\', which is invalid on Windows")
|
2011-04-06 20:09:43 +04:00
|
|
|
for n in path.replace('\\', '/').split('/'):
|
|
|
|
if not n:
|
|
|
|
continue
|
win32mbcs: avoid unintentional failure at colorization
Since 1d07d9da84a0, pycompat.bytestr() wrapped by win32mbcs returns
unicode object, if an argument is not byte-str object. And this causes
unexpected failure at colorization.
pycompat.bytestr() is used to convert from color effect "int" value to
byte-str object in color module. Wrapped pycompat.bytestr() returns
unicode object for such "int" value, because it isn't byte-str.
If this returned unicode object is used to colorize non-ASCII byte-str
in cases below, UnicodeDecodeError is raised at an operation between
them.
- colorization uses "ansi" color mode, or
Even though this isn't default on Windows, user might use this
color mode for third party pager.
- ui.write() is buffered with labeled=True
Buffering causes "ansi" color mode internally, regardless of
actual color mode. With "win32" color mode, extra escape sequences
are omitted at writing data out.
For example, with "win32" color mode, "hg status" doesn't fail for
non-ASCII filenames, but "hg log" does for non-ASCII text, because
the latter implies buffered formatter.
There are many "color effect" value lines in color.py, and making them
byte-str objects isn't suitable for fixing on stable. In addition to
it, pycompat.bytestr will be used to get byte-str object from any
types other than int, too.
To resolve this issue, this patch does:
- replace pycompat.bytestr in checkwinfilename() with newly added
hook point util._filenamebytestr, and
- make win32mbcs reverse-wrap util._filenamebytestr
(this is a replacement of 1d07d9da84a0)
This patch does two things above at same time, because separately
applying the former change adds broken revision (from point of view of
win32mbcs) to stable branch.
"_" prefix is added to "filenamebytestr", because it is win32mbcs
specific hook point.
2017-05-31 17:44:33 +03:00
|
|
|
for c in _filenamebytestr(n):
|
2011-05-08 00:25:20 +04:00
|
|
|
if c in _winreservedchars:
|
2011-04-06 20:09:43 +04:00
|
|
|
return _("filename contains '%s', which is reserved "
|
|
|
|
"on Windows") % c
|
|
|
|
if ord(c) <= 31:
|
2011-04-16 22:08:43 +04:00
|
|
|
return _("filename contains %r, which is invalid "
|
2011-04-06 20:09:43 +04:00
|
|
|
"on Windows") % c
|
|
|
|
base = n.split('.')[0]
|
2011-05-08 00:25:20 +04:00
|
|
|
if base and base.lower() in _winreservednames:
|
2011-04-06 20:09:43 +04:00
|
|
|
return _("filename contains '%s', which is reserved "
|
|
|
|
"on Windows") % base
|
|
|
|
t = n[-1]
|
2011-10-25 01:57:14 +04:00
|
|
|
if t in '. ' and n not in '..':
|
2011-04-06 20:09:43 +04:00
|
|
|
return _("filename ends with '%s', which is not allowed "
|
|
|
|
"on Windows") % t
|
|
|
|
|
2016-12-18 21:46:52 +03:00
|
|
|
if pycompat.osname == 'nt':
|
2011-04-06 20:09:43 +04:00
|
|
|
checkosfilename = checkwinfilename
|
2017-02-15 22:53:59 +03:00
|
|
|
timer = time.clock
|
2011-07-23 14:29:52 +04:00
|
|
|
else:
|
|
|
|
checkosfilename = platform.checkosfilename
|
2017-02-15 22:53:59 +03:00
|
|
|
timer = time.time
|
|
|
|
|
|
|
|
if safehasattr(time, "perf_counter"):
|
|
|
|
timer = time.perf_counter
|
2009-03-26 21:54:44 +03:00
|
|
|
|
|
|
|
def makelock(info, pathname):
|
|
|
|
try:
|
|
|
|
return os.symlink(info, pathname)
|
2015-06-24 08:20:08 +03:00
|
|
|
except OSError as why:
|
2009-03-26 21:54:44 +03:00
|
|
|
if why.errno == errno.EEXIST:
|
|
|
|
raise
|
|
|
|
except AttributeError: # no symlink in os
|
|
|
|
pass
|
|
|
|
|
2005-07-15 01:51:47 +04:00
|
|
|
ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
|
|
|
|
os.write(ld, info)
|
|
|
|
os.close(ld)
|
|
|
|
|
2009-03-26 21:54:44 +03:00
|
|
|
def readlock(pathname):
|
|
|
|
try:
|
|
|
|
return os.readlink(pathname)
|
2015-06-24 08:20:08 +03:00
|
|
|
except OSError as why:
|
2009-03-26 21:54:44 +03:00
|
|
|
if why.errno not in (errno.EINVAL, errno.ENOSYS):
|
|
|
|
raise
|
|
|
|
except AttributeError: # no symlink in os
|
|
|
|
pass
|
2010-12-24 17:23:01 +03:00
|
|
|
fp = posixfile(pathname)
|
|
|
|
r = fp.read()
|
|
|
|
fp.close()
|
|
|
|
return r
|
2005-07-15 01:51:47 +04:00
|
|
|
|
2006-05-03 01:30:00 +04:00
|
|
|
def fstat(fp):
|
|
|
|
'''stat file object that may not have fileno method.'''
|
|
|
|
try:
|
|
|
|
return os.fstat(fp.fileno())
|
|
|
|
except AttributeError:
|
|
|
|
return os.stat(fp.name)
|
|
|
|
|
2006-12-05 02:10:29 +03:00
|
|
|
# File system features
|
|
|
|
|
2016-08-30 19:22:53 +03:00
|
|
|
def fscasesensitive(path):
|
2006-12-05 02:10:29 +03:00
|
|
|
"""
|
2013-02-11 03:43:12 +04:00
|
|
|
Return true if the given path is on a case-sensitive filesystem
|
2006-12-05 02:10:29 +03:00
|
|
|
|
|
|
|
Requires a path (like /foo/.hg) ending with a foldable final
|
|
|
|
directory component.
|
|
|
|
"""
|
2015-05-03 22:49:15 +03:00
|
|
|
s1 = os.lstat(path)
|
2006-12-05 02:10:29 +03:00
|
|
|
d, b = os.path.split(path)
|
2011-12-16 16:09:40 +04:00
|
|
|
b2 = b.upper()
|
|
|
|
if b == b2:
|
|
|
|
b2 = b.lower()
|
|
|
|
if b == b2:
|
|
|
|
return True # no evidence against case sensitivity
|
|
|
|
p2 = os.path.join(d, b2)
|
2006-12-05 02:10:29 +03:00
|
|
|
try:
|
2015-05-03 22:49:15 +03:00
|
|
|
s2 = os.lstat(p2)
|
2006-12-05 02:10:29 +03:00
|
|
|
if s2 == s1:
|
|
|
|
return False
|
|
|
|
return True
|
2011-04-23 01:51:25 +04:00
|
|
|
except OSError:
|
2006-12-05 02:10:29 +03:00
|
|
|
return True
|
|
|
|
|
2012-06-02 02:26:20 +04:00
|
|
|
try:
|
|
|
|
import re2
|
|
|
|
_re2 = None
|
|
|
|
except ImportError:
|
|
|
|
_re2 = False
|
|
|
|
|
2014-07-16 01:40:43 +04:00
|
|
|
class _re(object):
|
2014-07-16 02:01:52 +04:00
|
|
|
def _checkre2(self):
|
|
|
|
global _re2
|
|
|
|
try:
|
|
|
|
# check if match works, see issue3964
|
|
|
|
_re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
|
|
|
|
except ImportError:
|
|
|
|
_re2 = False
|
|
|
|
|
2014-07-16 01:40:43 +04:00
|
|
|
def compile(self, pat, flags=0):
|
|
|
|
'''Compile a regular expression, using re2 if possible
|
|
|
|
|
|
|
|
For best performance, use only re2-compatible regexp features. The
|
|
|
|
only flags from the re module that are re2-compatible are
|
|
|
|
IGNORECASE and MULTILINE.'''
|
|
|
|
if _re2 is None:
|
2014-07-16 02:01:52 +04:00
|
|
|
self._checkre2()
|
2014-07-16 01:40:43 +04:00
|
|
|
if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
|
|
|
|
if flags & remod.IGNORECASE:
|
|
|
|
pat = '(?i)' + pat
|
|
|
|
if flags & remod.MULTILINE:
|
|
|
|
pat = '(?m)' + pat
|
|
|
|
try:
|
|
|
|
return re2.compile(pat)
|
|
|
|
except re2.error:
|
|
|
|
pass
|
|
|
|
return remod.compile(pat, flags)
|
|
|
|
|
2014-07-16 02:14:45 +04:00
|
|
|
@propertycache
|
|
|
|
def escape(self):
|
|
|
|
'''Return the version of escape corresponding to self.compile.
|
|
|
|
|
|
|
|
This is imperfect because whether re2 or re is used for a particular
|
|
|
|
function depends on the flags, etc, but it's the best we can do.
|
|
|
|
'''
|
|
|
|
global _re2
|
|
|
|
if _re2 is None:
|
|
|
|
self._checkre2()
|
|
|
|
if _re2:
|
|
|
|
return re2.escape
|
|
|
|
else:
|
|
|
|
return remod.escape
|
|
|
|
|
2014-07-16 01:40:43 +04:00
|
|
|
re = _re()
|
2012-06-02 02:26:20 +04:00
|
|
|
|
2008-06-06 22:23:23 +04:00
|
|
|
_fspathcache = {}
|
|
|
|
def fspath(name, root):
|
|
|
|
'''Get name in the case stored in the filesystem
|
|
|
|
|
2011-12-16 16:09:40 +04:00
|
|
|
The name should be relative to root, and be normcase-ed for efficiency.
|
|
|
|
|
|
|
|
Note that this function is unnecessary, and should not be
|
2008-06-06 22:23:23 +04:00
|
|
|
called, for case-sensitive filesystems (simply because it's expensive).
|
2011-12-16 16:09:40 +04:00
|
|
|
|
2011-12-16 16:09:40 +04:00
|
|
|
The root should be normcase-ed, too.
|
2008-06-06 22:23:23 +04:00
|
|
|
'''
|
2014-10-24 22:39:39 +04:00
|
|
|
def _makefspathcacheentry(dir):
|
|
|
|
return dict((normcase(n), n) for n in os.listdir(dir))
|
2011-12-16 16:09:40 +04:00
|
|
|
|
2016-12-17 17:26:30 +03:00
|
|
|
seps = pycompat.ossep
|
2016-12-17 22:47:12 +03:00
|
|
|
if pycompat.osaltsep:
|
|
|
|
seps = seps + pycompat.osaltsep
|
2008-06-06 22:23:23 +04:00
|
|
|
# Protect backslashes. This gets silly very quickly.
|
|
|
|
seps = seps.replace('\\', '\\\\')
|
2017-03-19 07:16:39 +03:00
|
|
|
pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
|
2011-12-16 16:09:40 +04:00
|
|
|
dir = os.path.normpath(root)
|
2008-06-06 22:23:23 +04:00
|
|
|
result = []
|
|
|
|
for part, sep in pattern.findall(name):
|
|
|
|
if sep:
|
|
|
|
result.append(sep)
|
|
|
|
continue
|
|
|
|
|
2011-12-23 19:51:14 +04:00
|
|
|
if dir not in _fspathcache:
|
2014-10-24 22:39:39 +04:00
|
|
|
_fspathcache[dir] = _makefspathcacheentry(dir)
|
2011-12-23 19:51:14 +04:00
|
|
|
contents = _fspathcache[dir]
|
2008-06-06 22:23:23 +04:00
|
|
|
|
2014-10-24 22:39:39 +04:00
|
|
|
found = contents.get(part)
|
2011-12-16 16:09:40 +04:00
|
|
|
if not found:
|
2011-12-23 19:52:06 +04:00
|
|
|
# retry "once per directory" per "dirstate.walk" which
|
|
|
|
# may take place for each patch of "hg qpush", for example
|
2014-10-24 22:39:39 +04:00
|
|
|
_fspathcache[dir] = contents = _makefspathcacheentry(dir)
|
|
|
|
found = contents.get(part)
|
2011-12-16 16:09:40 +04:00
|
|
|
|
|
|
|
result.append(found or part)
|
2011-12-16 16:09:40 +04:00
|
|
|
dir = os.path.join(dir, part)
|
2008-06-06 22:23:23 +04:00
|
|
|
|
|
|
|
return ''.join(result)
|
|
|
|
|
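A hypothetical usage sketch for fspath() (the paths are made up and assume a case-insensitive filesystem):

# both the relative name and the root must already be normcase-ed
ondisk = fspath(b'readme.txt', b'/repo')
# -> b'README.txt' if that is how the file is stored on disk; directory
#    listings are cached in _fspathcache, so repeated lookups stay cheap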
2017-03-23 21:58:45 +03:00
|
|
|
def getfstype(dirpath):
|
|
|
|
'''Get the filesystem type name from a directory (best-effort)
|
|
|
|
|
2017-03-25 11:25:23 +03:00
|
|
|
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
|
2017-03-23 21:58:45 +03:00
|
|
|
'''
|
|
|
|
return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
|
|
|
|
|
2010-11-07 20:21:29 +03:00
|
|
|
def checknlink(testfile):
|
|
|
|
'''check whether hardlink count reporting works properly'''
|
|
|
|
|
2010-12-14 00:38:06 +03:00
|
|
|
# testfile may be open, so we need a separate file for checking to
|
|
|
|
# work around issue2543 (or testfile may get lost on Samba shares)
|
|
|
|
f1 = testfile + ".hgtmp1"
|
|
|
|
if os.path.lexists(f1):
|
|
|
|
return False
|
2010-11-07 20:21:29 +03:00
|
|
|
try:
|
2010-12-14 00:38:06 +03:00
|
|
|
posixfile(f1, 'w').close()
|
|
|
|
except IOError:
|
2016-08-19 23:30:40 +03:00
|
|
|
try:
|
|
|
|
os.unlink(f1)
|
|
|
|
except OSError:
|
|
|
|
pass
|
2010-11-07 20:21:29 +03:00
|
|
|
return False
|
|
|
|
|
2010-12-14 00:38:06 +03:00
|
|
|
f2 = testfile + ".hgtmp2"
|
|
|
|
fd = None
|
2010-11-07 20:21:29 +03:00
|
|
|
try:
|
2015-05-15 17:58:21 +03:00
|
|
|
oslink(f1, f2)
|
2010-11-07 20:21:29 +03:00
|
|
|
# nlinks() may behave differently for files on Windows shares if
|
|
|
|
# the file is open.
|
2011-02-02 15:51:22 +03:00
|
|
|
fd = posixfile(f2)
|
2010-12-14 00:38:06 +03:00
|
|
|
return nlinks(f2) > 1
|
2015-05-15 17:58:21 +03:00
|
|
|
except OSError:
|
|
|
|
return False
|
2010-11-07 20:21:29 +03:00
|
|
|
finally:
|
2010-12-14 00:38:06 +03:00
|
|
|
if fd is not None:
|
|
|
|
fd.close()
|
|
|
|
for f in (f1, f2):
|
|
|
|
try:
|
|
|
|
os.unlink(f)
|
|
|
|
except OSError:
|
|
|
|
pass
|
2010-11-07 20:21:29 +03:00
|
|
|
|
2008-01-09 15:30:35 +03:00
|
|
|
def endswithsep(path):
|
|
|
|
'''Check path ends with os.sep or os.altsep.'''
|
2016-12-17 17:26:30 +03:00
|
|
|
return (path.endswith(pycompat.ossep)
|
2016-12-17 22:47:12 +03:00
|
|
|
or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
|
2008-01-09 15:30:35 +03:00
|
|
|
|
2008-01-09 15:30:36 +03:00
|
|
|
def splitpath(path):
|
|
|
|
'''Split path by os.sep.
|
|
|
|
Note that this function does not use os.altsep because this is
|
|
|
|
an alternative to a simple "xxx.split(os.sep)".
|
|
|
|
It is recommended to use os.path.normpath() before using this
|
|
|
|
function if needed.'''
|
2016-12-17 17:26:30 +03:00
|
|
|
return path.split(pycompat.ossep)
|
2008-01-09 15:30:36 +03:00
|
|
|
|
2008-02-04 04:29:05 +03:00
|
|
|
def gui():
|
|
|
|
'''Are we running in a GUI?'''
|
2016-12-18 23:56:41 +03:00
|
|
|
if pycompat.sysplatform == 'darwin':
|
2016-12-17 23:36:00 +03:00
|
|
|
if 'SSH_CONNECTION' in encoding.environ:
|
2011-03-23 11:43:34 +03:00
|
|
|
# handle SSH access to a box where the user is logged in
|
|
|
|
return False
|
|
|
|
elif getattr(osutil, 'isgui', None):
|
|
|
|
# check if a CoreGraphics session is available
|
|
|
|
return osutil.isgui()
|
|
|
|
else:
|
|
|
|
# pure build; use a safe default
|
|
|
|
return True
|
|
|
|
else:
|
2016-12-18 21:46:52 +03:00
|
|
|
return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
|
2008-02-04 04:29:05 +03:00
|
|
|
|
2008-02-09 23:38:54 +03:00
|
|
|
def mktempcopy(name, emptyok=False, createmode=None):
|
2007-07-12 00:40:41 +04:00
|
|
|
"""Create a temporary file with the same contents from name
|
2006-05-03 01:30:00 +04:00
|
|
|
|
2007-07-12 00:40:41 +04:00
|
|
|
The permission bits are copied from the original file.
|
|
|
|
|
|
|
|
If the temporary file is going to be truncated immediately, you
|
|
|
|
can use emptyok=True as an optimization.
|
|
|
|
|
|
|
|
Returns the name of the temporary file.
|
2006-05-03 01:30:00 +04:00
|
|
|
"""
|
2007-07-12 00:40:41 +04:00
|
|
|
d, fn = os.path.split(name)
|
|
|
|
fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
|
|
|
|
os.close(fd)
|
|
|
|
# Temporary files are created with mode 0600, which is usually not
|
|
|
|
# what we want. If the original file already exists, just copy
|
|
|
|
# its mode. Otherwise, manually obey umask.
|
2011-08-02 14:29:48 +04:00
|
|
|
copymode(name, temp, createmode)
|
2007-07-12 00:40:41 +04:00
|
|
|
if emptyok:
|
|
|
|
return temp
|
|
|
|
try:
|
2006-05-03 01:30:00 +04:00
|
|
|
try:
|
2007-07-12 00:40:41 +04:00
|
|
|
ifp = posixfile(name, "rb")
|
2015-06-24 08:20:08 +03:00
|
|
|
except IOError as inst:
|
2007-07-12 00:40:41 +04:00
|
|
|
if inst.errno == errno.ENOENT:
|
|
|
|
return temp
|
|
|
|
if not getattr(inst, 'filename', None):
|
|
|
|
inst.filename = name
|
|
|
|
raise
|
|
|
|
ofp = posixfile(temp, "wb")
|
|
|
|
for chunk in filechunkiter(ifp):
|
|
|
|
ofp.write(chunk)
|
|
|
|
ifp.close()
|
|
|
|
ofp.close()
|
2012-05-13 15:18:06 +04:00
|
|
|
except: # re-raises
|
2007-07-12 00:40:41 +04:00
|
|
|
try: os.unlink(temp)
|
2012-05-13 15:17:31 +04:00
|
|
|
except OSError: pass
|
2007-07-12 00:40:41 +04:00
|
|
|
raise
|
|
|
|
return temp
|
|
|
|
|
2016-05-18 18:20:37 +03:00
|
|
|
class filestat(object):
|
|
|
|
"""help to exactly detect change of a file
|
|
|
|
|
|
|
|
'stat' attribute is result of 'os.stat()' if specified 'path'
|
|
|
|
exists. Otherwise, it is None. This can avoid preparative
|
|
|
|
'exists()' examination on client side of this class.
|
|
|
|
"""
|
2017-06-11 00:09:54 +03:00
|
|
|
def __init__(self, stat):
|
|
|
|
self.stat = stat
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def frompath(cls, path):
|
2016-05-18 18:20:37 +03:00
|
|
|
try:
|
2017-06-11 00:09:54 +03:00
|
|
|
stat = os.stat(path)
|
2016-05-18 18:20:37 +03:00
|
|
|
except OSError as err:
|
|
|
|
if err.errno != errno.ENOENT:
|
|
|
|
raise
|
2017-06-11 00:09:54 +03:00
|
|
|
stat = None
|
|
|
|
return cls(stat)
|
2016-05-18 18:20:37 +03:00
|
|
|
|
2017-06-13 01:34:31 +03:00
|
|
|
@classmethod
|
|
|
|
def fromfp(cls, fp):
|
|
|
|
stat = os.fstat(fp.fileno())
|
|
|
|
return cls(stat)
|
|
|
|
|
2016-05-18 18:20:37 +03:00
|
|
|
__hash__ = object.__hash__
|
|
|
|
|
|
|
|
def __eq__(self, old):
|
|
|
|
try:
|
|
|
|
# if ambiguity between stat of new and old file is
|
2016-10-18 00:16:55 +03:00
|
|
|
# avoided, comparison of size, ctime and mtime is enough
|
2016-05-18 18:20:37 +03:00
|
|
|
# to exactly detect change of a file regardless of platform
|
|
|
|
return (self.stat.st_size == old.stat.st_size and
|
|
|
|
self.stat.st_ctime == old.stat.st_ctime and
|
|
|
|
self.stat.st_mtime == old.stat.st_mtime)
|
2017-06-09 07:07:48 +03:00
|
|
|
except AttributeError:
|
|
|
|
pass
|
|
|
|
try:
|
|
|
|
return self.stat is None and old.stat is None
|
2016-05-18 18:20:37 +03:00
|
|
|
except AttributeError:
|
|
|
|
return False
|
|
|
|
|
|
|
|
def isambig(self, old):
|
|
|
|
"""Examine whether new (= self) stat is ambiguous against old one
|
|
|
|
|
|
|
|
"S[N]" below means stat of a file at N-th change:
|
|
|
|
|
|
|
|
- S[n-1].ctime < S[n].ctime: can detect change of a file
|
|
|
|
- S[n-1].ctime == S[n].ctime
|
|
|
|
- S[n-1].ctime < S[n].mtime: means natural advancing (*1)
|
|
|
|
- S[n-1].ctime == S[n].mtime: is ambiguous (*2)
|
|
|
|
- S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
|
|
|
|
- S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
|
|
|
|
|
|
|
|
Case (*2) above means that a file was changed twice or more at
|
|
|
|
same time in sec (= S[n-1].ctime), and comparison of timestamp
|
|
|
|
is ambiguous.
|
|
|
|
|
|
|
|
Base idea to avoid such ambiguity is "advance mtime 1 sec, if
|
|
|
|
timestamp is ambiguous".
|
|
|
|
|
|
|
|
But advancing mtime only in case (*2) doesn't work as
|
|
|
|
expected, because naturally advanced S[n].mtime in case (*1)
|
|
|
|
might be equal to manually advanced S[n-1 or earlier].mtime.
|
|
|
|
|
|
|
|
Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
|
|
|
|
treated as ambiguous regardless of mtime, to avoid overlooking
|
|
|
|
changes hidden by conflicts between such mtimes.
|
|
|
|
|
|
|
|
Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
|
|
|
|
S[n].mtime", even if size of a file isn't changed.
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
return (self.stat.st_ctime == old.stat.st_ctime)
|
|
|
|
except AttributeError:
|
|
|
|
return False
|
|
|
|
|
2016-11-13 00:06:23 +03:00
|
|
|
def avoidambig(self, path, old):
|
|
|
|
"""Change file stat of specified path to avoid ambiguity
|
|
|
|
|
|
|
|
'old' should be previous filestat of 'path'.
|
|
|
|
|
|
|
|
This skips avoiding ambiguity, if a process doesn't have
|
2017-06-09 06:58:17 +03:00
|
|
|
appropriate privileges for 'path'. This returns False in this
|
|
|
|
case.
|
|
|
|
|
|
|
|
Otherwise, this returns True, as "ambiguity is avoided".
|
2016-11-13 00:06:23 +03:00
|
|
|
"""
|
|
|
|
advanced = (old.stat.st_mtime + 1) & 0x7fffffff
|
|
|
|
try:
|
|
|
|
os.utime(path, (advanced, advanced))
|
|
|
|
except OSError as inst:
|
|
|
|
if inst.errno == errno.EPERM:
|
|
|
|
# utime() on the file created by another user causes EPERM,
|
|
|
|
# if a process doesn't have appropriate privileges
|
2017-06-09 06:58:17 +03:00
|
|
|
return False
|
2016-11-13 00:06:23 +03:00
|
|
|
raise
|
2017-06-09 06:58:17 +03:00
|
|
|
return True
|
2016-11-13 00:06:23 +03:00
|
|
|
|
2016-06-02 18:44:20 +03:00
|
|
|
def __ne__(self, other):
|
|
|
|
return not self == other
|
|
|
|
|
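A sketch of the calling pattern the docstrings above describe ('path' is a placeholder; this mirrors the intended use rather than quoting a real caller):

oldstat = filestat.frompath(path)
# ... rewrite 'path' in place ...
newstat = filestat.frompath(path)
if newstat.isambig(oldstat):
    # ctime did not move, so readers could not tell old contents from new
    # by stat() alone; bump mtime by one second to disambiguate
    newstat.avoidambig(path, oldstat)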
2009-06-10 17:10:21 +04:00
|
|
|
class atomictempfile(object):
|
2012-08-16 00:38:42 +04:00
|
|
|
'''writable file object that atomically updates a file
|
2007-07-12 00:40:41 +04:00
|
|
|
|
2011-04-25 01:30:50 +04:00
|
|
|
All writes will go to a temporary copy of the original file. Call
|
2011-08-26 04:21:04 +04:00
|
|
|
close() when you are done writing, and atomictempfile will rename
|
|
|
|
the temporary copy to the original name, making the changes
|
|
|
|
visible. If the object is destroyed without being closed, all your
|
|
|
|
writes are discarded.
|
2016-06-12 23:11:56 +03:00
|
|
|
|
|
|
|
checkambig argument of constructor is used with filestat, and is
|
|
|
|
useful only if target file is guarded by any lock (e.g. repo.lock
|
|
|
|
or repo.wlock).
|
2011-04-25 01:30:50 +04:00
|
|
|
'''
|
2016-05-18 18:20:38 +03:00
|
|
|
def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
|
2011-04-25 03:25:10 +04:00
|
|
|
self.__name = name # permanent name
|
|
|
|
self._tempname = mktempcopy(name, emptyok=('w' in mode),
|
|
|
|
createmode=createmode)
|
|
|
|
self._fp = posixfile(self._tempname, mode)
|
2016-05-18 18:20:38 +03:00
|
|
|
self._checkambig = checkambig
|
2009-03-26 23:12:11 +03:00
|
|
|
|
2011-04-25 03:25:10 +04:00
|
|
|
# delegated methods
|
2016-06-23 20:20:58 +03:00
|
|
|
self.read = self._fp.read
|
2011-04-25 03:25:10 +04:00
|
|
|
self.write = self._fp.write
|
2012-07-24 02:38:43 +04:00
|
|
|
self.seek = self._fp.seek
|
|
|
|
self.tell = self._fp.tell
|
2011-04-25 03:25:10 +04:00
|
|
|
self.fileno = self._fp.fileno
|
2007-07-12 00:40:41 +04:00
|
|
|
|
2011-08-26 04:21:04 +04:00
|
|
|
def close(self):
|
2009-06-13 15:14:02 +04:00
|
|
|
if not self._fp.closed:
|
2009-03-26 23:12:11 +03:00
|
|
|
self._fp.close()
|
2016-05-18 18:20:38 +03:00
|
|
|
filename = localpath(self.__name)
|
2017-06-11 00:09:54 +03:00
|
|
|
oldstat = self._checkambig and filestat.frompath(filename)
|
2016-05-18 18:20:38 +03:00
|
|
|
if oldstat and oldstat.stat:
|
|
|
|
rename(self._tempname, filename)
|
2017-06-11 00:09:54 +03:00
|
|
|
newstat = filestat.frompath(filename)
|
2016-05-18 18:20:38 +03:00
|
|
|
if newstat.isambig(oldstat):
|
|
|
|
# stat of changed file is ambiguous to original one
|
|
|
|
advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
|
|
|
|
os.utime(filename, (advanced, advanced))
|
|
|
|
else:
|
|
|
|
rename(self._tempname, filename)
|
2007-07-12 00:40:41 +04:00
|
|
|
|
2011-08-26 04:21:04 +04:00
|
|
|
def discard(self):
|
2009-06-13 15:14:02 +04:00
|
|
|
if not self._fp.closed:
|
2006-05-08 19:20:56 +04:00
|
|
|
try:
|
2011-04-25 03:25:10 +04:00
|
|
|
os.unlink(self._tempname)
|
|
|
|
except OSError:
|
|
|
|
pass
|
2009-06-13 15:14:02 +04:00
|
|
|
self._fp.close()
|
2006-05-03 01:30:00 +04:00
|
|
|
|
2010-12-07 18:03:42 +03:00
|
|
|
def __del__(self):
|
2011-07-26 01:04:40 +04:00
|
|
|
if safehasattr(self, '_fp'): # constructor actually did something
|
2011-08-26 04:21:04 +04:00
|
|
|
self.discard()
|
2010-12-07 18:03:42 +03:00
|
|
|
|
2016-06-23 20:21:25 +03:00
|
|
|
def __enter__(self):
|
|
|
|
return self
|
|
|
|
|
|
|
|
def __exit__(self, exctype, excvalue, traceback):
|
|
|
|
if exctype is not None:
|
|
|
|
self.discard()
|
|
|
|
else:
|
|
|
|
self.close()
|
|
|
|
|
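A short usage sketch of the context-manager form defined above ('somefile' is a placeholder):

with atomictempfile('somefile', 'wb') as fp:
    fp.write(b'new contents')
# on normal exit the temporary copy is renamed over 'somefile';
# if the with-block raises, discard() removes the temporary copy and
# 'somefile' is left untouched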
2017-03-21 16:50:28 +03:00
|
|
|
def unlinkpath(f, ignoremissing=False):
|
|
|
|
"""unlink and remove the directory if it is empty"""
|
2017-03-21 16:50:28 +03:00
|
|
|
if ignoremissing:
|
|
|
|
tryunlink(f)
|
|
|
|
else:
|
2017-03-21 16:50:28 +03:00
|
|
|
unlink(f)
|
|
|
|
# try removing directories that might now be empty
|
|
|
|
try:
|
|
|
|
removedirs(os.path.dirname(f))
|
|
|
|
except OSError:
|
|
|
|
pass
|
|
|
|
|
2017-03-21 16:50:28 +03:00
|
|
|
def tryunlink(f):
|
|
|
|
"""Attempt to remove a file, ignoring ENOENT errors."""
|
|
|
|
try:
|
|
|
|
unlink(f)
|
|
|
|
except OSError as e:
|
|
|
|
if e.errno != errno.ENOENT:
|
|
|
|
raise
|
|
|
|
|
2013-02-16 14:44:13 +04:00
|
|
|
def makedirs(name, mode=None, notindexed=False):
|
2016-04-27 01:32:59 +03:00
|
|
|
"""recursive directory creation with parent mode inheritance
|
|
|
|
|
|
|
|
Newly created directories are marked as "not to be indexed by
|
|
|
|
the content indexing service", if ``notindexed`` is specified
|
|
|
|
for "write" mode access.
|
|
|
|
"""
|
2008-02-09 23:38:54 +03:00
|
|
|
try:
|
2013-02-16 14:44:13 +04:00
|
|
|
makedir(name, notindexed)
|
2015-06-24 08:20:08 +03:00
|
|
|
except OSError as err:
|
2008-02-09 23:38:54 +03:00
|
|
|
if err.errno == errno.EEXIST:
|
|
|
|
return
|
2011-08-25 13:03:16 +04:00
|
|
|
if err.errno != errno.ENOENT or not name:
|
|
|
|
raise
|
|
|
|
parent = os.path.dirname(os.path.abspath(name))
|
|
|
|
if parent == name:
|
2008-02-09 23:38:54 +03:00
|
|
|
raise
|
2013-02-16 14:44:13 +04:00
|
|
|
makedirs(parent, mode, notindexed)
|
2016-04-27 01:32:59 +03:00
|
|
|
try:
|
|
|
|
makedir(name, notindexed)
|
|
|
|
except OSError as err:
|
|
|
|
# Catch EEXIST to handle races
|
|
|
|
if err.errno == errno.EEXIST:
|
|
|
|
return
|
|
|
|
raise
|
2013-02-14 00:20:10 +04:00
|
|
|
if mode is not None:
|
|
|
|
os.chmod(name, mode)
|
2013-02-12 04:15:12 +04:00
|
|
|
|
2011-05-01 13:46:49 +04:00
|
|
|
def readfile(path):
|
2016-01-13 01:49:35 +03:00
|
|
|
with open(path, 'rb') as fp:
|
2011-05-01 13:56:24 +04:00
|
|
|
return fp.read()
|
2011-05-01 13:46:49 +04:00
|
|
|
|
2011-05-02 12:11:05 +04:00
|
|
|
def writefile(path, text):
|
2016-01-13 01:49:35 +03:00
|
|
|
with open(path, 'wb') as fp:
|
2011-05-02 12:11:05 +04:00
|
|
|
fp.write(text)
|
|
|
|
|
|
|
|
def appendfile(path, text):
|
2016-01-13 01:49:35 +03:00
|
|
|
with open(path, 'ab') as fp:
|
2011-05-01 13:46:49 +04:00
|
|
|
fp.write(text)
|
|
|
|
|
2005-09-05 01:11:51 +04:00
|
|
|
class chunkbuffer(object):
|
|
|
|
"""Allow arbitrary sized chunks of data to be efficiently read from an
|
|
|
|
iterator over chunks of arbitrary size."""
|
2005-09-05 01:21:53 +04:00
|
|
|
|
2007-10-11 09:46:48 +04:00
|
|
|
def __init__(self, in_iter):
|
2017-05-02 20:20:44 +03:00
|
|
|
"""in_iter is the iterator that's iterating over the input chunks."""
|
2010-07-25 15:10:57 +04:00
|
|
|
def splitbig(chunks):
|
|
|
|
for chunk in chunks:
|
|
|
|
if len(chunk) > 2**20:
|
|
|
|
pos = 0
|
|
|
|
while pos < len(chunk):
|
|
|
|
end = pos + 2 ** 18
|
|
|
|
yield chunk[pos:end]
|
|
|
|
pos = end
|
|
|
|
else:
|
|
|
|
yield chunk
|
|
|
|
self.iter = splitbig(in_iter)
|
2015-05-16 21:28:04 +03:00
|
|
|
self._queue = collections.deque()
|
2015-10-06 03:36:32 +03:00
|
|
|
self._chunkoffset = 0
|
2005-09-05 01:11:51 +04:00
|
|
|
|
2014-04-11 09:10:26 +04:00
|
|
|
def read(self, l=None):
|
2005-09-05 01:21:53 +04:00
|
|
|
"""Read L bytes of data from the iterator of chunks of data.
|
2014-04-11 09:10:26 +04:00
|
|
|
Returns less than L bytes if the iterator runs dry.
|
|
|
|
|
2014-04-18 00:47:38 +04:00
|
|
|
If size parameter is omitted, read everything"""
|
2015-10-06 02:28:12 +03:00
|
|
|
if l is None:
|
|
|
|
return ''.join(self.iter)
|
|
|
|
|
2010-08-06 21:18:33 +04:00
|
|
|
left = l
|
2012-11-27 01:42:52 +04:00
|
|
|
buf = []
|
2012-06-06 03:52:20 +04:00
|
|
|
queue = self._queue
|
2015-10-06 02:28:12 +03:00
|
|
|
while left > 0:
|
2010-08-06 21:18:33 +04:00
|
|
|
# refill the queue
|
|
|
|
if not queue:
|
|
|
|
target = 2**18
|
|
|
|
for chunk in self.iter:
|
|
|
|
queue.append(chunk)
|
|
|
|
target -= len(chunk)
|
|
|
|
if target <= 0:
|
|
|
|
break
|
|
|
|
if not queue:
|
2005-09-05 01:11:51 +04:00
|
|
|
break
|
2010-08-06 21:18:33 +04:00
|
|
|
|
2015-10-06 03:36:32 +03:00
|
|
|
# The easy way to do this would be to queue.popleft(), modify the
|
|
|
|
# chunk (if necessary), then queue.appendleft(). However, for cases
|
|
|
|
# where we read partial chunk content, this incurs 2 dequeue
|
|
|
|
# mutations and creates a new str for the remaining chunk in the
|
|
|
|
# queue. Our code below avoids this overhead.
|
|
|
|
|
2015-10-06 02:34:47 +03:00
|
|
|
chunk = queue[0]
|
|
|
|
chunkl = len(chunk)
|
2015-10-06 03:36:32 +03:00
|
|
|
offset = self._chunkoffset
|
2015-10-06 02:34:47 +03:00
|
|
|
|
|
|
|
# Use full chunk.
|
2015-10-06 03:36:32 +03:00
|
|
|
if offset == 0 and left >= chunkl:
|
2015-10-06 02:34:47 +03:00
|
|
|
left -= chunkl
|
|
|
|
queue.popleft()
|
|
|
|
buf.append(chunk)
|
2015-10-06 03:36:32 +03:00
|
|
|
# self._chunkoffset remains at 0.
|
|
|
|
continue
|
|
|
|
|
|
|
|
chunkremaining = chunkl - offset
|
|
|
|
|
|
|
|
# Use all of unconsumed part of chunk.
|
|
|
|
if left >= chunkremaining:
|
|
|
|
left -= chunkremaining
|
|
|
|
queue.popleft()
|
|
|
|
# offset == 0 is enabled by block above, so this won't merely
|
|
|
|
# copy via ``chunk[0:]``.
|
|
|
|
buf.append(chunk[offset:])
|
|
|
|
self._chunkoffset = 0
|
|
|
|
|
2015-10-06 02:34:47 +03:00
|
|
|
# Partial chunk needed.
|
|
|
|
else:
|
2015-10-06 03:36:32 +03:00
|
|
|
buf.append(chunk[offset:offset + left])
|
|
|
|
self._chunkoffset += left
|
|
|
|
left -= chunkremaining
|
2005-09-05 01:11:51 +04:00
|
|
|
|
2012-11-27 01:42:52 +04:00
|
|
|
return ''.join(buf)
|
2010-08-29 06:49:53 +04:00
|
|
|
|
2016-10-14 02:53:15 +03:00
|
|
|
def filechunkiter(f, size=131072, limit=None):
|
2006-06-21 02:13:17 +04:00
|
|
|
"""Create a generator that produces the data in the file size
|
2016-10-14 02:53:15 +03:00
|
|
|
(default 131072) bytes at a time, up to optional limit (default is
|
2006-06-21 02:13:17 +04:00
|
|
|
to read all data). Chunks may be less than size bytes if the
|
|
|
|
chunk is the last chunk in the file, or the file is a socket or
|
|
|
|
some other type of file that sometimes reads less data than is
|
|
|
|
requested."""
|
|
|
|
assert size >= 0
|
|
|
|
assert limit is None or limit >= 0
|
|
|
|
while True:
|
2010-01-25 09:05:27 +03:00
|
|
|
if limit is None:
|
|
|
|
nbytes = size
|
|
|
|
else:
|
|
|
|
nbytes = min(limit, size)
|
2006-06-21 02:13:17 +04:00
|
|
|
s = nbytes and f.read(nbytes)
|
2010-01-25 09:05:27 +03:00
|
|
|
if not s:
|
|
|
|
break
|
|
|
|
if limit:
|
|
|
|
limit -= len(s)
|
2005-09-05 01:11:51 +04:00
|
|
|
yield s
|
2005-09-23 09:46:50 +04:00
|
|
|
|
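A sketch combining the two helpers above to re-chunk a stream into fixed-size reads ('somefile' is a placeholder path):

fp = open('somefile', 'rb')
buf = chunkbuffer(filechunkiter(fp, size=65536))
total = 0
while True:
    piece = buf.read(4096)   # 4096 bytes per call; shorter only at the end
    if not piece:
        break
    total += len(piece)
fp.close()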
2013-06-04 04:20:45 +04:00
|
|
|
def makedate(timestamp=None):
|
|
|
|
'''Return a unix timestamp (or the current time) as a (unixtime,
|
|
|
|
offset) tuple based off the local timezone.'''
|
|
|
|
if timestamp is None:
|
|
|
|
timestamp = time.time()
|
2013-06-04 04:20:44 +04:00
|
|
|
if timestamp < 0:
|
2010-11-24 21:31:43 +03:00
|
|
|
hint = _("check your clock")
|
2013-06-04 04:20:44 +04:00
|
|
|
raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
|
|
|
|
delta = (datetime.datetime.utcfromtimestamp(timestamp) -
|
|
|
|
datetime.datetime.fromtimestamp(timestamp))
|
2011-11-13 04:29:26 +04:00
|
|
|
tz = delta.days * 86400 + delta.seconds
|
2013-06-04 04:20:44 +04:00
|
|
|
return timestamp, tz
|
2005-09-23 21:28:55 +04:00
|
|
|
|
2008-03-12 01:42:41 +03:00
|
|
|
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
|
2005-09-23 10:19:47 +04:00
|
|
|
"""represent a (unixtime, offset) tuple as a localized time.
|
|
|
|
unixtime is seconds since the epoch, and offset is the time zone's
|
2016-04-11 20:46:50 +03:00
|
|
|
number of seconds away from UTC.
|
|
|
|
|
|
|
|
>>> datestr((0, 0))
|
|
|
|
'Thu Jan 01 00:00:00 1970 +0000'
|
|
|
|
>>> datestr((42, 0))
|
|
|
|
'Thu Jan 01 00:00:42 1970 +0000'
|
|
|
|
>>> datestr((-42, 0))
|
|
|
|
'Wed Dec 31 23:59:18 1969 +0000'
|
|
|
|
>>> datestr((0x7fffffff, 0))
|
|
|
|
'Tue Jan 19 03:14:07 2038 +0000'
|
|
|
|
>>> datestr((-0x80000000, 0))
|
|
|
|
'Fri Dec 13 20:45:52 1901 +0000'
|
|
|
|
"""
|
2005-09-23 10:19:47 +04:00
|
|
|
t, tz = date or makedate()
|
2013-11-08 01:24:23 +04:00
|
|
|
if "%1" in format or "%2" in format or "%z" in format:
|
2008-03-12 01:42:41 +03:00
|
|
|
sign = (tz > 0) and "-" or "+"
|
2009-07-05 13:00:44 +04:00
|
|
|
minutes = abs(tz) // 60
|
2015-11-15 04:30:10 +03:00
|
|
|
q, r = divmod(minutes, 60)
|
2013-11-08 01:24:23 +04:00
|
|
|
format = format.replace("%z", "%1%2")
|
2015-11-15 04:30:10 +03:00
|
|
|
format = format.replace("%1", "%c%02d" % (sign, q))
|
|
|
|
format = format.replace("%2", "%02d" % r)
|
2016-04-08 15:11:03 +03:00
|
|
|
d = t - tz
|
|
|
|
if d > 0x7fffffff:
|
|
|
|
d = 0x7fffffff
|
2016-04-12 01:30:28 +03:00
|
|
|
elif d < -0x80000000:
|
|
|
|
d = -0x80000000
|
2016-04-08 15:11:03 +03:00
|
|
|
# Never use time.gmtime() and datetime.datetime.fromtimestamp()
|
|
|
|
# because they use the gmtime() system call which is buggy on Windows
|
|
|
|
# for negative values.
|
|
|
|
t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
|
2017-03-13 19:19:07 +03:00
|
|
|
s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
|
2006-03-22 10:29:21 +03:00
|
|
|
return s
|
2006-02-27 22:32:10 +03:00
|
|
|
|
2008-02-16 15:33:38 +03:00
|
|
|
def shortdate(date=None):
|
|
|
|
"""turn (timestamp, tzoff) tuple into iso 8631 date."""
|
2008-03-12 01:42:41 +03:00
|
|
|
return datestr(date, format='%Y-%m-%d')
|
2008-02-16 15:33:38 +03:00
|
|
|
|
2016-07-27 23:14:19 +03:00
|
|
|
def parsetimezone(s):
|
|
|
|
"""find a trailing timezone, if any, in string, and return a
|
|
|
|
(offset, remainder) pair"""
|
|
|
|
|
|
|
|
if s.endswith("GMT") or s.endswith("UTC"):
|
|
|
|
return 0, s[:-3].rstrip()
|
|
|
|
|
|
|
|
# Unix-style timezones [+-]hhmm
|
|
|
|
if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
|
|
|
|
sign = (s[-5] == "+") and 1 or -1
|
|
|
|
hours = int(s[-4:-2])
|
|
|
|
minutes = int(s[-2:])
|
|
|
|
return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
|
|
|
|
|
2016-07-27 23:20:34 +03:00
|
|
|
# ISO8601 trailing Z
|
|
|
|
if s.endswith("Z") and s[-2:-1].isdigit():
|
|
|
|
return 0, s[:-1]
|
|
|
|
|
|
|
|
# ISO8601-style [+-]hh:mm
|
|
|
|
if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
|
|
|
|
s[-5:-3].isdigit() and s[-2:].isdigit()):
|
|
|
|
sign = (s[-6] == "+") and 1 or -1
|
|
|
|
hours = int(s[-5:-3])
|
|
|
|
minutes = int(s[-2:])
|
|
|
|
return -sign * (hours * 60 + minutes) * 60, s[:-6]
|
|
|
|
|
2016-07-27 23:14:19 +03:00
|
|
|
return None, s
|
2015-09-01 13:43:14 +03:00
|
|
|
|
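Illustrative results for parsetimezone(), following the branches above (offsets are positive west of UTC because unixtime = localunixtime + offset); a sketch:

parsetimezone("2006-06-30 12:00 +0200")   # -> (-7200, "2006-06-30 12:00")
parsetimezone("Fri Jun 30 12:00:00 UTC")  # -> (0, "Fri Jun 30 12:00:00")
parsetimezone("no timezone here")         # -> (None, "no timezone here")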
2017-03-13 07:54:32 +03:00
|
|
|
def strdate(string, format, defaults=None):
|
2006-06-30 20:47:35 +04:00
|
|
|
"""parse a localized time string and return a (unixtime, offset) tuple.
|
|
|
|
if the string cannot be parsed, ValueError is raised."""
|
2017-03-16 01:07:14 +03:00
|
|
|
if defaults is None:
|
|
|
|
defaults = {}
|
2017-03-13 07:54:32 +03:00
|
|
|
|
2006-10-03 14:33:14 +04:00
|
|
|
# NOTE: unixtime = localunixtime + offset
|
2016-07-27 23:14:19 +03:00
|
|
|
offset, date = parsetimezone(string)
|
2006-12-06 22:13:31 +03:00
|
|
|
|
2006-12-07 00:11:44 +03:00
|
|
|
# add missing elements from defaults
|
2010-12-29 23:04:47 +03:00
|
|
|
usenow = False # default to using biased defaults
|
|
|
|
for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
|
2017-05-04 22:56:49 +03:00
|
|
|
part = pycompat.bytestr(part)
|
2006-12-07 00:11:44 +03:00
|
|
|
found = [True for p in part if ("%"+p) in format]
|
|
|
|
if not found:
|
2010-12-29 23:04:47 +03:00
|
|
|
date += "@" + defaults[part][usenow]
|
2006-12-07 00:11:44 +03:00
|
|
|
format += "@%" + part[0]
|
2010-12-29 23:04:47 +03:00
|
|
|
else:
|
|
|
|
# We've found a specific time element, less specific time
|
|
|
|
# elements are relative to today
|
|
|
|
usenow = True
|
2006-12-06 22:13:31 +03:00
|
|
|
|
2017-05-06 02:21:25 +03:00
|
|
|
timetuple = time.strptime(encoding.strfromlocal(date),
|
|
|
|
encoding.strfromlocal(format))
|
2006-10-03 14:33:18 +04:00
|
|
|
localunixtime = int(calendar.timegm(timetuple))
|
|
|
|
if offset is None:
|
|
|
|
# local timezone
|
|
|
|
unixtime = int(time.mktime(timetuple))
|
|
|
|
offset = unixtime - localunixtime
|
|
|
|
else:
|
|
|
|
unixtime = localunixtime + offset
|
2006-10-03 14:33:14 +04:00
|
|
|
return unixtime, offset
|
2006-06-30 20:47:35 +04:00
|
|
|
|
2015-09-23 02:55:18 +03:00
|
|
|
def parsedate(date, formats=None, bias=None):
|
2010-12-29 23:04:47 +03:00
|
|
|
"""parse a localized date/time and return a (unixtime, offset) tuple.
|
2008-02-17 23:34:28 +03:00
|
|
|
|
2006-06-30 20:47:35 +04:00
|
|
|
The date may be a "unixtime offset" string or in one of the specified
|
2008-02-17 23:34:28 +03:00
|
|
|
formats. If the date already is a (unixtime, offset) tuple, it is returned.
|
2013-01-23 21:51:45 +04:00
|
|
|
|
|
|
|
>>> parsedate(' today ') == parsedate(\
|
|
|
|
datetime.date.today().strftime('%b %d'))
|
|
|
|
True
|
|
|
|
>>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
|
|
|
|
datetime.timedelta(days=1)\
|
|
|
|
).strftime('%b %d'))
|
|
|
|
True
|
2013-02-10 01:39:22 +04:00
|
|
|
>>> now, tz = makedate()
|
|
|
|
>>> strnow, strtz = parsedate('now')
|
|
|
|
>>> (strnow - now) < 1
|
|
|
|
True
|
|
|
|
>>> tz == strtz
|
|
|
|
True
|
2008-02-17 23:34:28 +03:00
|
|
|
"""
|
2015-09-23 02:55:18 +03:00
|
|
|
if bias is None:
|
|
|
|
bias = {}
|
2008-02-17 23:34:28 +03:00
|
|
|
if not date:
|
2006-12-06 22:13:27 +03:00
|
|
|
return 0, 0
|
2008-03-12 01:42:51 +03:00
|
|
|
if isinstance(date, tuple) and len(date) == 2:
|
2008-02-17 23:34:28 +03:00
|
|
|
return date
|
2006-07-13 20:40:01 +04:00
|
|
|
if not formats:
|
|
|
|
formats = defaultdateformats
|
2008-02-17 23:34:28 +03:00
|
|
|
date = date.strip()
|
2013-01-23 21:51:45 +04:00
|
|
|
|
2015-02-24 16:12:13 +03:00
|
|
|
if date == 'now' or date == _('now'):
|
2013-02-10 01:39:22 +04:00
|
|
|
return makedate()
|
2015-02-24 16:12:13 +03:00
|
|
|
if date == 'today' or date == _('today'):
|
2013-01-23 21:51:45 +04:00
|
|
|
date = datetime.date.today().strftime('%b %d')
|
2015-02-24 16:12:13 +03:00
|
|
|
elif date == 'yesterday' or date == _('yesterday'):
|
2013-01-23 21:51:45 +04:00
|
|
|
date = (datetime.date.today() -
|
|
|
|
datetime.timedelta(days=1)).strftime('%b %d')
|
|
|
|
|
2006-06-30 20:47:35 +04:00
|
|
|
try:
|
2008-02-17 23:34:28 +03:00
|
|
|
when, offset = map(int, date.split(' '))
|
2006-06-30 20:48:06 +04:00
|
|
|
except ValueError:
|
2006-12-07 00:11:44 +03:00
|
|
|
# fill out defaults
|
|
|
|
now = makedate()
|
2010-12-29 23:04:47 +03:00
|
|
|
defaults = {}
|
2010-12-02 05:43:06 +03:00
|
|
|
for part in ("d", "mb", "yY", "HI", "M", "S"):
|
2010-12-29 23:04:47 +03:00
|
|
|
# this piece is for rounding the specific end of unknowns
|
|
|
|
b = bias.get(part)
|
|
|
|
if b is None:
|
2017-05-04 22:56:13 +03:00
|
|
|
if part[0:1] in "HMS":
|
2010-12-29 23:04:47 +03:00
|
|
|
b = "00"
|
2006-12-07 00:11:44 +03:00
|
|
|
else:
|
2010-12-29 23:04:47 +03:00
|
|
|
b = "0"
|
|
|
|
|
|
|
|
# this piece is for matching the generic end to today's date
|
2017-05-04 22:56:13 +03:00
|
|
|
n = datestr(now, "%" + part[0:1])
|
2010-12-29 23:04:47 +03:00
|
|
|
|
|
|
|
defaults[part] = (b, n)
|
2006-12-07 00:11:44 +03:00
|
|
|
|
2006-06-30 20:48:06 +04:00
|
|
|
for format in formats:
|
|
|
|
try:
|
2008-02-17 23:34:28 +03:00
|
|
|
when, offset = strdate(date, format, defaults)
|
2008-02-13 18:46:43 +03:00
|
|
|
except (ValueError, OverflowError):
|
2006-06-30 20:48:06 +04:00
|
|
|
pass
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
else:
|
2017-05-24 18:50:17 +03:00
|
|
|
raise error.ParseError(_('invalid date: %r') % date)
|
2006-06-30 20:48:06 +04:00
|
|
|
# validate explicit (probably user-specified) date and
|
|
|
|
# time zone offset. values must fit in signed 32 bits for
|
|
|
|
# current 32-bit linux runtimes. timezones go from UTC-12
|
|
|
|
# to UTC+14
|
2016-04-12 01:30:28 +03:00
|
|
|
if when < -0x80000000 or when > 0x7fffffff:
|
2017-05-24 18:50:17 +03:00
|
|
|
raise error.ParseError(_('date exceeds 32 bits: %d') % when)
|
2006-06-30 20:48:06 +04:00
|
|
|
if offset < -50400 or offset > 43200:
|
2017-05-24 18:50:17 +03:00
|
|
|
raise error.ParseError(_('impossible time zone offset: %d') % offset)
|
2006-06-30 20:48:06 +04:00
|
|
|
return when, offset
|
2006-06-30 20:47:35 +04:00
|
|
|
|
2006-12-07 00:11:44 +03:00
|
|
|
def matchdate(date):
|
|
|
|
"""Return a function that matches a given date match specifier
|
|
|
|
|
|
|
|
Formats include:
|
|
|
|
|
|
|
|
'{date}' match a given date to the accuracy provided
|
|
|
|
|
|
|
|
'<{date}' on or before a given date
|
|
|
|
|
|
|
|
'>{date}' on or after a given date
|
|
|
|
|
2010-12-29 23:04:47 +03:00
|
|
|
>>> p1 = parsedate("10:29:59")
|
|
|
|
>>> p2 = parsedate("10:30:00")
|
|
|
|
>>> p3 = parsedate("10:30:59")
|
|
|
|
>>> p4 = parsedate("10:31:00")
|
|
|
|
>>> p5 = parsedate("Sep 15 10:30:00 1999")
|
|
|
|
>>> f = matchdate("10:30")
|
|
|
|
>>> f(p1[0])
|
|
|
|
False
|
|
|
|
>>> f(p2[0])
|
|
|
|
True
|
|
|
|
>>> f(p3[0])
|
|
|
|
True
|
|
|
|
>>> f(p4[0])
|
|
|
|
False
|
|
|
|
>>> f(p5[0])
|
|
|
|
False
|
2006-12-07 00:11:44 +03:00
|
|
|
"""
|
|
|
|
|
|
|
|
def lower(date):
|
2014-03-12 21:19:20 +04:00
|
|
|
d = {'mb': "1", 'd': "1"}
|
2008-03-12 01:42:51 +03:00
|
|
|
return parsedate(date, extendeddateformats, d)[0]
|
2006-12-07 00:11:44 +03:00
|
|
|
|
|
|
|
def upper(date):
|
2014-03-12 21:19:20 +04:00
|
|
|
d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
|
2010-12-02 05:43:06 +03:00
|
|
|
for days in ("31", "30", "29"):
|
2006-12-07 00:11:44 +03:00
|
|
|
try:
|
|
|
|
d["d"] = days
|
|
|
|
return parsedate(date, extendeddateformats, d)[0]
|
2012-05-12 18:02:45 +04:00
|
|
|
except Abort:
|
2006-12-07 00:11:44 +03:00
|
|
|
pass
|
|
|
|
d["d"] = "28"
|
|
|
|
return parsedate(date, extendeddateformats, d)[0]
|
|
|
|
|
2009-04-01 20:11:00 +04:00
|
|
|
date = date.strip()
|
2011-03-28 01:48:58 +04:00
|
|
|
|
|
|
|
if not date:
|
|
|
|
raise Abort(_("dates cannot consist entirely of whitespace"))
|
|
|
|
elif date[0] == "<":
|
2011-04-04 18:39:22 +04:00
|
|
|
if not date[1:]:
|
2011-04-05 12:01:39 +04:00
|
|
|
raise Abort(_("invalid day spec, use '<DATE'"))
|
2006-12-07 00:11:44 +03:00
|
|
|
when = upper(date[1:])
|
|
|
|
return lambda x: x <= when
|
|
|
|
elif date[0] == ">":
|
2011-04-04 18:39:22 +04:00
|
|
|
if not date[1:]:
|
2011-04-05 12:01:39 +04:00
|
|
|
raise Abort(_("invalid day spec, use '>DATE'"))
|
2006-12-07 00:11:44 +03:00
|
|
|
when = lower(date[1:])
|
|
|
|
return lambda x: x >= when
|
|
|
|
elif date[0] == "-":
|
|
|
|
try:
|
|
|
|
days = int(date[1:])
|
|
|
|
except ValueError:
|
|
|
|
raise Abort(_("invalid day spec: %s") % date[1:])
|
2011-04-05 12:55:47 +04:00
|
|
|
if days < 0:
|
2016-09-21 02:48:30 +03:00
|
|
|
raise Abort(_("%s must be nonnegative (see 'hg help dates')")
|
2011-04-05 12:55:47 +04:00
|
|
|
% date[1:])
|
2006-12-07 00:11:44 +03:00
|
|
|
when = makedate()[0] - days * 3600 * 24
|
2006-12-07 00:29:17 +03:00
|
|
|
return lambda x: x >= when
|
2006-12-07 00:11:44 +03:00
|
|
|
elif " to " in date:
|
|
|
|
a, b = date.split(" to ")
|
|
|
|
start, stop = lower(a), upper(b)
|
|
|
|
return lambda x: x >= start and x <= stop
|
|
|
|
else:
|
|
|
|
start, stop = lower(date), upper(date)
|
|
|
|
return lambda x: x >= start and x <= stop
|
|
|
|
|
2017-01-12 05:47:19 +03:00
|
|
|
def stringmatcher(pattern, casesensitive=True):
|
2015-08-23 05:52:18 +03:00
|
|
|
"""
|
|
|
|
accepts a string, possibly starting with 're:' or 'literal:' prefix.
|
|
|
|
returns the matcher name, pattern, and matcher function.
|
|
|
|
missing or unknown prefixes are treated as literal matches.
|
|
|
|
|
|
|
|
helper for tests:
|
|
|
|
>>> def test(pattern, *tests):
|
|
|
|
... kind, pattern, matcher = stringmatcher(pattern)
|
|
|
|
... return (kind, pattern, [bool(matcher(t)) for t in tests])
|
2017-01-12 05:47:19 +03:00
|
|
|
>>> def itest(pattern, *tests):
|
|
|
|
... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
|
|
|
|
... return (kind, pattern, [bool(matcher(t)) for t in tests])
|
2015-08-23 05:52:18 +03:00
|
|
|
|
|
|
|
exact matching (no prefix):
|
|
|
|
>>> test('abcdefg', 'abc', 'def', 'abcdefg')
|
|
|
|
('literal', 'abcdefg', [False, False, True])
|
|
|
|
|
|
|
|
regex matching ('re:' prefix)
|
|
|
|
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
|
|
|
|
('re', 'a.+b', [False, False, True])
|
|
|
|
|
|
|
|
force exact matches ('literal:' prefix)
|
|
|
|
>>> test('literal:re:foobar', 'foobar', 're:foobar')
|
|
|
|
('literal', 're:foobar', [False, True])
|
|
|
|
|
|
|
|
unknown prefixes are ignored and treated as literals
|
|
|
|
>>> test('foo:bar', 'foo', 'bar', 'foo:bar')
|
|
|
|
('literal', 'foo:bar', [False, False, True])
|
2017-01-12 05:47:19 +03:00
|
|
|
|
|
|
|
case insensitive regex matches
|
|
|
|
>>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
|
|
|
|
('re', 'A.+b', [False, False, True])
|
|
|
|
|
|
|
|
case insensitive literal matches
|
|
|
|
>>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
|
|
|
|
('literal', 'ABCDEFG', [False, False, True])
|
2015-08-23 05:52:18 +03:00
|
|
|
"""
|
|
|
|
if pattern.startswith('re:'):
|
|
|
|
pattern = pattern[3:]
|
|
|
|
try:
|
2017-01-12 05:47:19 +03:00
|
|
|
flags = 0
|
|
|
|
if not casesensitive:
|
|
|
|
flags = remod.I
|
|
|
|
regex = remod.compile(pattern, flags)
|
2015-08-23 05:52:18 +03:00
|
|
|
except remod.error as e:
|
|
|
|
raise error.ParseError(_('invalid regular expression: %s')
|
|
|
|
% e)
|
|
|
|
return 're', pattern, regex.search
|
|
|
|
elif pattern.startswith('literal:'):
|
|
|
|
pattern = pattern[8:]
|
2017-01-12 05:47:19 +03:00
|
|
|
|
|
|
|
match = pattern.__eq__
|
|
|
|
|
|
|
|
if not casesensitive:
|
|
|
|
ipat = encoding.lower(pattern)
|
|
|
|
match = lambda s: ipat == encoding.lower(s)
|
|
|
|
return 'literal', pattern, match
|
2015-08-23 05:52:18 +03:00
|
|
|
|
2006-02-27 22:32:10 +03:00
|
|
|
def shortuser(user):
|
|
|
|
"""Return a short representation of a user name or email address."""
|
|
|
|
f = user.find('@')
|
|
|
|
if f >= 0:
|
|
|
|
user = user[:f]
|
|
|
|
f = user.find('<')
|
|
|
|
if f >= 0:
|
2010-01-25 09:05:27 +03:00
|
|
|
user = user[f + 1:]
|
2006-09-27 22:34:52 +04:00
|
|
|
f = user.find(' ')
|
|
|
|
if f >= 0:
|
|
|
|
user = user[:f]
|
2006-10-27 20:30:20 +04:00
|
|
|
f = user.find('.')
|
|
|
|
if f >= 0:
|
|
|
|
user = user[:f]
|
2006-02-27 22:32:10 +03:00
|
|
|
return user
|
2006-03-13 03:21:59 +03:00
|
|
|
|
2012-03-28 18:06:20 +04:00
|
|
|
def emailuser(user):
|
|
|
|
"""Return the user portion of an email address."""
|
|
|
|
f = user.find('@')
|
|
|
|
if f >= 0:
|
|
|
|
user = user[:f]
|
|
|
|
f = user.find('<')
|
|
|
|
if f >= 0:
|
|
|
|
user = user[f + 1:]
|
|
|
|
return user
|
|
|
|
|
2008-01-31 23:44:19 +03:00
|
|
|
def email(author):
|
|
|
|
'''get email of author.'''
|
|
|
|
r = author.find('>')
|
2010-01-25 09:05:27 +03:00
|
|
|
if r == -1:
|
|
|
|
r = None
|
|
|
|
return author[author.find('<') + 1:r]
|
2008-01-31 23:44:19 +03:00
|
|
|
|
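Worked results for the three helpers above on a typical author string (a sketch):

author = 'John Doe <john.doe@example.com>'
shortuser(author)   # -> 'john'
emailuser(author)   # -> 'john.doe'
email(author)       # -> 'john.doe@example.com'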
2010-12-25 15:59:00 +03:00
|
|
|
def ellipsis(text, maxlength=400):
|
util: replace 'ellipsis' implementation by 'encoding.trim'
Before this patch, 'util.ellipsis' tried to avoid splitting at
intermediate multi-byte sequence, but its implementation was incorrect.
Internal function '_ellipsis' trims specified unicode sequence not at
most maxlength 'columns in display', but at most maxlength number of
'unicode characters'.
def _ellipsis(text, maxlength):
if len(text) <= maxlength:
return text, False
else:
return "%s..." % (text[:maxlength - 3]), True
In many encodings, number of unicode characters can be different from
columns in display.
This patch replaces 'ellipsis' implementation by 'encoding.trim',
which can trim string at most maxlength columns in display correctly,
even though specified string contains multi-byte characters.
'_ellipsis' is removed in this patch, because it is referred only from
'ellipsis'.
2014-07-05 21:56:41 +04:00
|
|
|
"""Trim string to at most maxlength (default: 400) columns in display."""
|
|
|
|
return encoding.trim(text, maxlength, ellipsis='...')
|
2006-12-03 00:35:17 +03:00
|
|
|
|
2013-03-01 00:51:18 +04:00
|
|
|
def unitcountfn(*unittable):
|
|
|
|
'''return a function that renders a readable count of some quantity'''
|
|
|
|
|
|
|
|
def go(count):
|
|
|
|
for multiplier, divisor, format in unittable:
|
2017-04-10 19:16:30 +03:00
|
|
|
if abs(count) >= divisor * multiplier:
|
2013-03-01 00:51:18 +04:00
|
|
|
return format % (count / float(divisor))
|
|
|
|
return unittable[-1][2] % count
|
|
|
|
|
|
|
|
return go
|
|
|
|
|
2017-02-24 20:39:08 +03:00
|
|
|
def processlinerange(fromline, toline):
|
|
|
|
"""Check that linerange <fromline>:<toline> makes sense and return a
|
|
|
|
0-based range.
|
|
|
|
|
|
|
|
>>> processlinerange(10, 20)
|
|
|
|
(9, 20)
|
|
|
|
>>> processlinerange(2, 1)
|
|
|
|
Traceback (most recent call last):
|
|
|
|
...
|
|
|
|
ParseError: line range must be positive
|
|
|
|
>>> processlinerange(0, 5)
|
|
|
|
Traceback (most recent call last):
|
|
|
|
...
|
|
|
|
ParseError: fromline must be strictly positive
|
|
|
|
"""
|
|
|
|
if toline - fromline < 0:
|
|
|
|
raise error.ParseError(_("line range must be positive"))
|
|
|
|
if fromline < 1:
|
|
|
|
raise error.ParseError(_("fromline must be strictly positive"))
|
|
|
|
return fromline - 1, toline
|
|
|
|
|
2013-03-01 00:51:18 +04:00
|
|
|
bytecount = unitcountfn(
|
2012-04-13 05:22:18 +04:00
|
|
|
(100, 1 << 30, _('%.0f GB')),
|
|
|
|
(10, 1 << 30, _('%.1f GB')),
|
|
|
|
(1, 1 << 30, _('%.2f GB')),
|
|
|
|
(100, 1 << 20, _('%.0f MB')),
|
|
|
|
(10, 1 << 20, _('%.1f MB')),
|
|
|
|
(1, 1 << 20, _('%.2f MB')),
|
|
|
|
(100, 1 << 10, _('%.0f KB')),
|
|
|
|
(10, 1 << 10, _('%.1f KB')),
|
|
|
|
(1, 1 << 10, _('%.2f KB')),
|
|
|
|
(1, 1, _('%.0f bytes')),
|
|
|
|
)
|
|
|
|
|
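Example renderings from the unit table above (computed by hand; a sketch):

bytecount(0)                 # -> '0 bytes'
bytecount(12345)             # -> '12.1 KB'
bytecount(150 * (1 << 20))   # -> '150 MB'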
2017-03-29 15:28:54 +03:00
|
|
|
# Matches a single EOL which can either be a CRLF where repeated CR
|
|
|
|
# are removed or a LF. We do not care about old Macintosh files, so a
|
|
|
|
# stray CR is an error.
|
|
|
|
_eolre = remod.compile(br'\r*\n')
|
|
|
|
|
|
|
|
def tolf(s):
|
|
|
|
return _eolre.sub('\n', s)
|
|
|
|
|
|
|
|
def tocrlf(s):
|
|
|
|
return _eolre.sub('\r\n', s)
|
|
|
|
|
2017-03-29 15:40:15 +03:00
|
|
|
if pycompat.oslinesep == '\r\n':
|
|
|
|
tonativeeol = tocrlf
|
|
|
|
fromnativeeol = tolf
|
|
|
|
else:
|
|
|
|
tonativeeol = pycompat.identity
|
|
|
|
fromnativeeol = pycompat.identity
|
|
|
|
|
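Quick illustration of the EOL helpers above (a sketch):

tolf(b'one\r\ntwo\r\r\nthree\n')   # -> b'one\ntwo\nthree\n'
tocrlf(b'one\ntwo\n')              # -> b'one\r\ntwo\r\n'
tonativeeol(b'x\n')                # tocrlf on Windows, identity elsewhere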
2017-03-15 17:06:50 +03:00
|
|
|
def escapestr(s):
|
2017-03-15 17:28:39 +03:00
|
|
|
# call underlying function of s.encode('string_escape') directly for
|
|
|
|
# Python 3 compatibility
|
|
|
|
return codecs.escape_encode(s)[0]
|
2017-03-15 17:06:50 +03:00
|
|
|
|
2017-03-17 17:42:46 +03:00
|
|
|
def unescapestr(s):
|
2017-03-17 17:48:22 +03:00
|
|
|
return codecs.escape_decode(s)[0]
|
2017-03-17 17:42:46 +03:00
|
|
|
|
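Round-trip sketch for the two codec wrappers above:

escapestr(b'line\nbreak')       # -> b'line\\nbreak'
unescapestr(b'line\\nbreak')    # -> b'line\nbreak'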
2007-09-11 01:36:01 +04:00
|
|
|
def uirepr(s):
|
|
|
|
# Avoid double backslash in Windows path repr()
|
|
|
|
return repr(s).replace('\\\\', '\\')
|
2008-12-25 11:48:24 +03:00
|
|
|
|
2011-01-29 02:02:29 +03:00
|
|
|
# delay import of textwrap
|
|
|
|
def MBTextWrapper(**kwargs):
|
|
|
|
class tw(textwrap.TextWrapper):
|
|
|
|
"""
|
2011-08-26 23:56:12 +04:00
|
|
|
Extend TextWrapper for width-awareness.
|
2011-01-29 02:02:29 +03:00
|
|
|
|
2011-08-26 23:56:12 +04:00
|
|
|
Neither the number of 'bytes' in any encoding nor the number of 'characters' is
|
|
|
|
appropriate for calculating terminal columns for a given string.
|
2011-01-29 02:02:29 +03:00
|
|
|
|
2011-08-26 23:56:12 +04:00
|
|
|
Original TextWrapper implementation uses built-in 'len()' directly,
|
|
|
|
so overriding is needed to use the width information of each character.
|
|
|
|
|
|
|
|
In addition, characters classified into 'ambiguous' width are
|
2012-08-16 00:38:42 +04:00
|
|
|
treated as wide in East Asian locales, but as narrow elsewhere.
|
2011-08-26 23:56:12 +04:00
|
|
|
|
|
|
|
This requires a user decision to determine the width of such characters.
|
2011-01-29 02:02:29 +03:00
|
|
|
"""
|
2011-08-07 01:52:20 +04:00
|
|
|
def _cutdown(self, ucstr, space_left):
|
2011-01-29 02:02:29 +03:00
|
|
|
l = 0
|
2011-08-26 23:56:12 +04:00
|
|
|
colwidth = encoding.ucolwidth
|
2011-01-29 02:02:29 +03:00
|
|
|
for i in xrange(len(ucstr)):
|
2011-08-26 23:56:12 +04:00
|
|
|
l += colwidth(ucstr[i])
|
2011-01-29 02:02:29 +03:00
|
|
|
if space_left < l:
|
2011-08-07 01:52:20 +04:00
|
|
|
return (ucstr[:i], ucstr[i:])
|
|
|
|
return ucstr, ''
|
2011-01-29 02:02:29 +03:00
|
|
|
|
|
|
|
# overriding of base class
|
|
|
|
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
|
|
|
|
space_left = max(width - cur_len, 1)
|
|
|
|
|
|
|
|
if self.break_long_words:
|
|
|
|
cut, res = self._cutdown(reversed_chunks[-1], space_left)
|
|
|
|
cur_line.append(cut)
|
|
|
|
reversed_chunks[-1] = res
|
|
|
|
elif not cur_line:
|
|
|
|
cur_line.append(reversed_chunks.pop())
|
|
|
|
|
2015-09-08 22:32:20 +03:00
|
|
|
# this overriding code is imported from TextWrapper of Python 2.6
|
2011-08-26 23:56:12 +04:00
|
|
|
# to calculate columns of string by 'encoding.ucolwidth()'
|
|
|
|
def _wrap_chunks(self, chunks):
|
|
|
|
colwidth = encoding.ucolwidth
|
|
|
|
|
|
|
|
lines = []
|
|
|
|
if self.width <= 0:
|
|
|
|
raise ValueError("invalid width %r (must be > 0)" % self.width)
|
|
|
|
|
|
|
|
# Arrange in reverse order so items can be efficiently popped
|
|
|
|
# from a stack of chunks.
|
|
|
|
chunks.reverse()
|
|
|
|
|
|
|
|
while chunks:
|
|
|
|
|
|
|
|
# Start the list of chunks that will make up the current line.
|
|
|
|
# cur_len is just the length of all the chunks in cur_line.
|
|
|
|
cur_line = []
|
|
|
|
cur_len = 0
|
|
|
|
|
|
|
|
# Figure out which static string will prefix this line.
|
|
|
|
if lines:
|
|
|
|
indent = self.subsequent_indent
|
|
|
|
else:
|
|
|
|
indent = self.initial_indent
|
|
|
|
|
|
|
|
# Maximum width for this line.
|
|
|
|
width = self.width - len(indent)
|
|
|
|
|
|
|
|
# First chunk on line is whitespace -- drop it, unless this
|
2012-08-16 00:38:42 +04:00
|
|
|
# is the very beginning of the text (i.e. no lines started yet).
|
2017-05-28 23:17:43 +03:00
|
|
|
if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
|
2011-08-26 23:56:12 +04:00
|
|
|
del chunks[-1]
|
|
|
|
|
|
|
|
while chunks:
|
|
|
|
l = colwidth(chunks[-1])
|
|
|
|
|
|
|
|
# Can at least squeeze this chunk onto the current line.
|
|
|
|
if cur_len + l <= width:
|
|
|
|
cur_line.append(chunks.pop())
|
|
|
|
cur_len += l
|
|
|
|
|
|
|
|
# Nope, this line is full.
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
|
|
|
|
# The current line is full, and the next chunk is too big to
|
|
|
|
# fit on *any* line (not just this one).
|
|
|
|
if chunks and colwidth(chunks[-1]) > width:
|
|
|
|
self._handle_long_word(chunks, cur_line, cur_len, width)
|
|
|
|
|
|
|
|
# If the last chunk on this line is all whitespace, drop it.
|
|
|
|
if (self.drop_whitespace and
|
2017-05-28 23:17:43 +03:00
|
|
|
cur_line and cur_line[-1].strip() == r''):
|
2011-08-26 23:56:12 +04:00
|
|
|
del cur_line[-1]
|
|
|
|
|
|
|
|
# Convert current line back to a string and store it in list
|
|
|
|
# of all lines (return value).
|
|
|
|
if cur_line:
|
2017-05-28 20:42:16 +03:00
|
|
|
lines.append(indent + r''.join(cur_line))
|
2011-08-26 23:56:12 +04:00
|
|
|
|
|
|
|
return lines
|
|
|
|
|
2011-01-29 02:02:29 +03:00
|
|
|
global MBTextWrapper
|
|
|
|
MBTextWrapper = tw
|
|
|
|
return tw(**kwargs)
|
replace Python standard textwrap by an MBCS-sensitive one for i18n text
Mercurial has problems with text wrapping/filling in MBCS encoding
environments, because Python's standard 'textwrap' module cannot handle
them correctly: it splits the byte sequence for one character across two
lines.
According to the Unicode specification, "east asian width" classifies
characters into:
W(ide), N(arrow), F(ull-width), H(alf-width), A(mbiguous)
W/N/F/H are always recognized as 2/1/2/1 bytes in a byte sequence,
but 'A' is not. The size of 'A' depends on the language in which it is used.
The Unicode specification says:
If the context (= language) cannot be established reliably they
should be treated as narrow characters by default
but many class 'A' characters are full-width, at least in a Japanese
environment.
So, this patch always treats class 'A' characters as full-width for
safe wrapping.
This patch focuses only on MBCS safety, not on strict writing/printing
rules for wrapping in each language.
The MBCS-sensitive textwrap class was originally implemented
by ITO Nobuaki <daydream.trippers@gmail.com>.
2010-06-06 12:20:10 +04:00
|
|
|
|
2010-10-11 03:02:52 +04:00
|
|
|
def wrap(line, width, initindent='', hangindent=''):
|
2010-06-06 12:20:10 +04:00
|
|
|
maxindent = max(len(hangindent), len(initindent))
|
|
|
|
if width <= maxindent:
|
2009-09-03 23:07:06 +04:00
|
|
|
# adjust for weird terminal size
|
2010-06-06 12:20:10 +04:00
|
|
|
width = max(78, maxindent + 1)
|
2017-03-12 05:05:13 +03:00
|
|
|
line = line.decode(pycompat.sysstr(encoding.encoding),
|
|
|
|
pycompat.sysstr(encoding.encodingmode))
|
|
|
|
initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
|
|
|
|
pycompat.sysstr(encoding.encodingmode))
|
|
|
|
hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
|
|
|
|
pycompat.sysstr(encoding.encodingmode))
|
2010-06-06 12:20:10 +04:00
|
|
|
wrapper = MBTextWrapper(width=width,
|
|
|
|
initial_indent=initindent,
|
|
|
|
subsequent_indent=hangindent)
|
2017-03-12 05:05:13 +03:00
|
|
|
return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
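A minimal illustration (not part of the module) of wrap() with a hanging
indent; widths are counted in display columns, so multi-byte text wraps at
the same visual width as ASCII:
>>> wrap('a b c d e f', 5, hangindent=' ')
'a b c\n d e\n f'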
|
2009-06-24 21:15:58 +04:00
|
|
|
|
2016-11-15 23:25:51 +03:00
|
|
|
if (pyplatform.python_implementation() == 'CPython' and
|
|
|
|
sys.version_info < (3, 0)):
|
|
|
|
# There is an issue in CPython that some IO methods do not handle EINTR
|
|
|
|
# correctly. The following table shows what CPython version (and functions)
|
|
|
|
# are affected (buggy: has the EINTR bug, okay: otherwise):
|
|
|
|
#
|
|
|
|
# | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
|
|
|
|
# --------------------------------------------------
|
|
|
|
# fp.__iter__ | buggy | buggy | okay
|
|
|
|
# fp.read* | buggy | okay [1] | okay
|
|
|
|
#
|
|
|
|
# [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
|
|
|
|
#
|
|
|
|
# Here we workaround the EINTR issue for fileobj.__iter__. Other methods
|
|
|
|
# like "read*" are ignored for now, as Python < 2.7.4 is a minority.
|
|
|
|
#
|
|
|
|
# Although we can workaround the EINTR issue for fp.__iter__, it is slower:
|
|
|
|
# "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
|
|
|
|
# CPython 2, because CPython 2 maintains an internal readahead buffer for
|
|
|
|
# fp.__iter__ but not other fp.read* methods.
|
|
|
|
#
|
|
|
|
# On modern systems like Linux, the "read" syscall cannot be interrupted
|
|
|
|
# when reading "fast" files like on-disk files. So the EINTR issue only
|
|
|
|
# affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
|
|
|
|
# files approximately as "fast" files and use the fast (unsafe) code path,
|
|
|
|
# to minimize the performance impact.
|
|
|
|
if sys.version_info >= (2, 7, 4):
|
|
|
|
# fp.readline deals with EINTR correctly, use it as a workaround.
|
|
|
|
def _safeiterfile(fp):
|
|
|
|
return iter(fp.readline, '')
|
|
|
|
else:
|
|
|
|
# fp.read* are broken too, manually deal with EINTR in a stupid way.
|
|
|
|
# note: this may block longer than necessary because of bufsize.
|
|
|
|
def _safeiterfile(fp, bufsize=4096):
|
|
|
|
fd = fp.fileno()
|
|
|
|
line = ''
|
|
|
|
while True:
|
|
|
|
try:
|
|
|
|
buf = os.read(fd, bufsize)
|
|
|
|
except OSError as ex:
|
|
|
|
# os.read only raises EINTR before any data is read
|
|
|
|
if ex.errno == errno.EINTR:
|
|
|
|
continue
|
|
|
|
else:
|
|
|
|
raise
|
|
|
|
line += buf
|
|
|
|
if '\n' in buf:
|
|
|
|
splitted = line.splitlines(True)
|
|
|
|
line = ''
|
|
|
|
for l in splitted:
|
|
|
|
if l[-1] == '\n':
|
|
|
|
yield l
|
|
|
|
else:
|
|
|
|
line = l
|
|
|
|
if not buf:
|
|
|
|
break
|
|
|
|
if line:
|
|
|
|
yield line
|
|
|
|
|
|
|
|
def iterfile(fp):
|
|
|
|
fastpath = True
|
|
|
|
if type(fp) is file:
|
|
|
|
fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
|
|
|
|
if fastpath:
|
|
|
|
return fp
|
|
|
|
else:
|
|
|
|
return _safeiterfile(fp)
|
|
|
|
else:
|
|
|
|
# PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
|
|
|
|
def iterfile(fp):
|
|
|
|
return fp
|
util: add iterfile to workaround a fileobj.__iter__ issue with EINTR
The fileobj.__iter__ implementation in Python 2.7.12 (hg changeset
45d4cea97b04) is buggy: it cannot handle EINTR correctly.
In Objects/fileobject.c:
size_t Py_UniversalNewlineFread(....) {
....
if (!f->f_univ_newline)
return fread(buf, 1, n, stream);
....
}
According to the "fread" man page:
If an error occurs, or the end of the file is reached, the return value
is a short item count (or zero).
Therefore it's possible for "fread" (and "Py_UniversalNewlineFread") to
return a positive value while errno is set to EINTR and ferror(stream)
changes from zero to non-zero.
There are multiple "Py_UniversalNewlineFread": "file_read", "file_readinto",
"file_readlines", "readahead". While the first 3 have code to handle the
EINTR case, the last one "readahead" doesn't:
static int readahead(PyFileObject *f, Py_ssize_t bufsize) {
....
chunksize = Py_UniversalNewlineFread(
f->f_buf, bufsize, f->f_fp, (PyObject *)f);
....
if (chunksize == 0) {
if (ferror(f->f_fp)) {
PyErr_SetFromErrno(PyExc_IOError);
....
}
}
....
}
It means "readahead" could ignore EINTR, if "Py_UniversalNewlineFread"
returns a non-zero value. The next time "readahead" is executed, if
"Py_UniversalNewlineFread" returns 0, "readahead" would raise a Python error
with an incorrect errno - it could be 0 - thus "IOError: [Errno 0] Error".
The only user of "readahead" is "readahead_get_line_skip".
The only user of "readahead_get_line_skip" is "file_iternext", aka.
"fileobj.__iter__", which should be avoided.
There are multiple places where the pattern "for x in fp" is used. This
patch adds a "iterfile" method in "util.py" so we can migrate our code from
"for x in fp" to "fox x in util.iterfile(fp)".
2016-11-15 02:32:54 +03:00
|
|
|
|
2009-03-23 12:41:42 +03:00
|
|
|
def iterlines(iterator):
|
|
|
|
for chunk in iterator:
|
|
|
|
for line in chunk.splitlines():
|
|
|
|
yield line
|
2009-10-19 23:19:28 +04:00
|
|
|
|
|
|
|
def expandpath(path):
|
|
|
|
return os.path.expanduser(os.path.expandvars(path))
|
2010-01-06 23:11:58 +03:00
|
|
|
|
|
|
|
def hgcmd():
|
|
|
|
"""Return the command used to execute current hg
|
|
|
|
|
|
|
|
This is different from hgexecutable() because on Windows we want
|
|
|
|
to avoid things opening new shell windows like batch files, so we
|
|
|
|
get either the python call or current executable.
|
|
|
|
"""
|
2011-05-06 17:10:29 +04:00
|
|
|
if mainfrozen():
|
2016-01-11 02:15:39 +03:00
|
|
|
if getattr(sys, 'frozen', None) == 'macosx_app':
|
|
|
|
# Env variable set by py2app
|
2016-12-17 23:36:00 +03:00
|
|
|
return [encoding.environ['EXECUTABLEPATH']]
|
2016-01-11 02:15:39 +03:00
|
|
|
else:
|
2016-12-19 21:50:07 +03:00
|
|
|
return [pycompat.sysexecutable]
|
2010-01-06 23:11:58 +03:00
|
|
|
return gethgcmd()
|
2010-02-06 18:50:00 +03:00
|
|
|
|
|
|
|
def rundetached(args, condfn):
|
|
|
|
"""Execute the argument list in a detached process.
|
2010-02-08 17:18:49 +03:00
|
|
|
|
2010-02-06 18:50:00 +03:00
|
|
|
condfn is a callable which is called repeatedly and should return
|
|
|
|
True once the child process is known to have started successfully.
|
|
|
|
At this point, the child process PID is returned. If the child
|
|
|
|
process fails to start or finishes before condfn() evaluates to
|
|
|
|
True, return -1.
|
|
|
|
"""
|
|
|
|
# Windows case is easier because the child process is either
|
|
|
|
# successfully starting and validating the condition or exiting
|
|
|
|
# on failure. We just poll on its PID. On Unix, if the child
|
|
|
|
# process fails to start, it will be left in a zombie state until
|
|
|
|
# the parent wait on it, which we cannot do since we expect a long
|
|
|
|
# running process on success. Instead we listen for SIGCHLD telling
|
|
|
|
# us our child process terminated.
|
|
|
|
terminated = set()
|
|
|
|
def handler(signum, frame):
|
|
|
|
terminated.add(os.wait())
|
|
|
|
prevhandler = None
|
2011-07-26 01:04:40 +04:00
|
|
|
SIGCHLD = getattr(signal, 'SIGCHLD', None)
|
|
|
|
if SIGCHLD is not None:
|
|
|
|
prevhandler = signal.signal(SIGCHLD, handler)
|
2010-02-06 18:50:00 +03:00
|
|
|
try:
|
|
|
|
pid = spawndetached(args)
|
|
|
|
while not condfn():
|
|
|
|
if ((pid in terminated or not testpid(pid))
|
|
|
|
and not condfn()):
|
|
|
|
return -1
|
|
|
|
time.sleep(0.1)
|
|
|
|
return pid
|
|
|
|
finally:
|
|
|
|
if prevhandler is not None:
|
|
|
|
signal.signal(signal.SIGCHLD, prevhandler)
|
2010-02-13 03:59:09 +03:00
|
|
|
|
2011-02-11 03:32:40 +03:00
|
|
|
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
|
util: add an interpolate() function for replacing multiple values
util.interpolate can be used to replace multiple items in a string all at once
(and optionally apply a function to the replacement), without worrying about
recursing:
>>> import util
>>> s = '$foo, $spam'
>>> util.interpolate(r'\$', { 'foo': 'bar', 'spam': 'eggs' }, s)
'bar, eggs'
>>> util.interpolate(r'\$', { 'foo': 'spam', 'spam': 'foo' }, s)
'spam, foo'
>>> util.interpolate(r'\$', { 'foo': 'spam', 'spam': 'foo' }, s, lambda s: s.upper())
'SPAM, FOO'
The patch also changes filemerge.py to use this new function.
2010-08-19 02:18:26 +04:00
|
|
|
"""Return the result of interpolating items in the mapping into string s.
|
|
|
|
|
|
|
|
prefix is a single character string, or a two character string with
|
|
|
|
a backslash as the first character if the prefix needs to be escaped in
|
|
|
|
a regular expression.
|
|
|
|
|
|
|
|
fn is an optional function that will be applied to the replacement text
|
|
|
|
just before replacement.
|
2011-02-11 03:32:40 +03:00
|
|
|
|
|
|
|
escape_prefix is an optional flag that allows using doubled prefix for
|
|
|
|
its escaping.
|
2010-08-19 02:18:26 +04:00
|
|
|
"""
|
|
|
|
fn = fn or (lambda s: s)
|
2011-02-11 03:32:40 +03:00
|
|
|
patterns = '|'.join(mapping.keys())
|
|
|
|
if escape_prefix:
|
|
|
|
patterns += '|' + prefix
|
|
|
|
if len(prefix) > 1:
|
|
|
|
prefix_char = prefix[1:]
|
|
|
|
else:
|
|
|
|
prefix_char = prefix
|
|
|
|
mapping[prefix_char] = prefix_char
|
2014-07-16 01:35:19 +04:00
|
|
|
r = remod.compile(r'%s(%s)' % (prefix, patterns))
|
2010-08-19 02:18:26 +04:00
|
|
|
return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
|
|
|
|
|
2010-08-28 20:31:07 +04:00
|
|
|
def getport(port):
|
|
|
|
"""Return the port for a given network service.
|
|
|
|
|
|
|
|
If port is an integer, it's returned as is. If it's a string, it's
|
|
|
|
looked up using socket.getservbyname(). If there's no matching
|
2015-10-08 22:55:45 +03:00
|
|
|
service, error.Abort is raised.
|
2010-08-28 20:31:07 +04:00
|
|
|
"""
|
|
|
|
try:
|
|
|
|
return int(port)
|
|
|
|
except ValueError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
try:
|
|
|
|
return socket.getservbyname(port)
|
|
|
|
except socket.error:
|
|
|
|
raise Abort(_("no port number associated with service '%s'") % port)
|
2010-08-29 06:50:35 +04:00
|
|
|
|
2010-08-30 19:28:25 +04:00
|
|
|
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
|
|
|
|
'0': False, 'no': False, 'false': False, 'off': False,
|
|
|
|
'never': False}
|
2010-08-29 06:50:35 +04:00
|
|
|
|
|
|
|
def parsebool(s):
|
|
|
|
"""Parse s into a boolean.
|
|
|
|
|
|
|
|
If s is not a valid boolean, returns None.
|
|
|
|
"""
|
|
|
|
return _booleans.get(s.lower(), None)
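Illustrative calls (not part of the module):
>>> parsebool('on')
True
>>> parsebool('Never')
False
>>> parsebool('maybe') is None
True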
|
2011-04-30 20:43:20 +04:00
|
|
|
|
2011-04-30 20:43:23 +04:00
|
|
|
_hextochr = dict((a + b, chr(int(a + b, 16)))
|
2016-10-07 15:58:23 +03:00
|
|
|
for a in string.hexdigits for b in string.hexdigits)
|
2011-04-30 20:43:23 +04:00
|
|
|
|
2011-04-30 20:43:20 +04:00
|
|
|
class url(object):
|
2011-05-01 17:49:13 +04:00
|
|
|
r"""Reliable URL parser.
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
This parses URLs and provides attributes for the following
|
|
|
|
components:
|
|
|
|
|
|
|
|
<scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
|
|
|
|
|
|
|
|
Missing components are set to None. The only exception is
|
|
|
|
fragment, which is set to '' if present but empty.
|
|
|
|
|
|
|
|
If parsefragment is False, fragment is included in query. If
|
|
|
|
parsequery is False, query is included in path. If both are
|
|
|
|
False, both fragment and query are included in path.
|
|
|
|
|
|
|
|
See http://www.ietf.org/rfc/rfc2396.txt for more information.
|
|
|
|
|
|
|
|
Note that for backward compatibility reasons, bundle URLs do not
|
|
|
|
take host names. That means 'bundle://../' has a path of '../'.
|
|
|
|
|
|
|
|
Examples:
|
|
|
|
|
|
|
|
>>> url('http://www.ietf.org/rfc/rfc2396.txt')
|
|
|
|
<url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
|
|
|
|
>>> url('ssh://[::1]:2200//home/joe/repo')
|
|
|
|
<url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
|
|
|
|
>>> url('file:///home/joe/repo')
|
|
|
|
<url scheme: 'file', path: '/home/joe/repo'>
|
2011-07-23 02:11:35 +04:00
|
|
|
>>> url('file:///c:/temp/foo/')
|
|
|
|
<url scheme: 'file', path: 'c:/temp/foo/'>
|
2011-04-30 20:43:20 +04:00
|
|
|
>>> url('bundle:foo')
|
|
|
|
<url scheme: 'bundle', path: 'foo'>
|
|
|
|
>>> url('bundle://../foo')
|
|
|
|
<url scheme: 'bundle', path: '../foo'>
|
2011-05-01 17:49:13 +04:00
|
|
|
>>> url(r'c:\foo\bar')
|
|
|
|
<url path: 'c:\\foo\\bar'>
|
2011-06-21 01:45:33 +04:00
|
|
|
>>> url(r'\\blah\blah\blah')
|
|
|
|
<url path: '\\\\blah\\blah\\blah'>
|
2011-09-11 02:49:19 +04:00
|
|
|
>>> url(r'\\blah\blah\blah#baz')
|
|
|
|
<url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
|
2013-11-21 01:03:15 +04:00
|
|
|
>>> url(r'file:///C:\users\me')
|
|
|
|
<url scheme: 'file', path: 'C:\\users\\me'>
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
Authentication credentials:
|
|
|
|
|
|
|
|
>>> url('ssh://joe:xyz@x/repo')
|
|
|
|
<url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
|
|
|
|
>>> url('ssh://joe@x/repo')
|
|
|
|
<url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
|
|
|
|
|
|
|
|
Query strings and fragments:
|
|
|
|
|
|
|
|
>>> url('http://host/a?b#c')
|
|
|
|
<url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
|
|
|
|
>>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
|
|
|
|
<url scheme: 'http', host: 'host', path: 'a?b#c'>
|
2016-09-30 15:38:47 +03:00
|
|
|
|
|
|
|
Empty path:
|
|
|
|
|
|
|
|
>>> url('')
|
|
|
|
<url path: ''>
|
|
|
|
>>> url('#a')
|
|
|
|
<url path: '', fragment: 'a'>
|
|
|
|
>>> url('http://host/')
|
|
|
|
<url scheme: 'http', host: 'host', path: ''>
|
|
|
|
>>> url('http://host/#a')
|
|
|
|
<url scheme: 'http', host: 'host', path: '', fragment: 'a'>
|
|
|
|
|
|
|
|
Only scheme:
|
|
|
|
|
|
|
|
>>> url('http:')
|
|
|
|
<url scheme: 'http'>
|
2011-04-30 20:43:20 +04:00
|
|
|
"""
|
|
|
|
|
|
|
|
_safechars = "!~*'()+"
|
2013-11-21 01:03:15 +04:00
|
|
|
_safepchars = "/!~*'()+:\\"
|
2016-10-09 16:00:47 +03:00
|
|
|
_matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
def __init__(self, path, parsequery=True, parsefragment=True):
|
|
|
|
# We slowly chomp away at path until we have only the path left
|
|
|
|
self.scheme = self.user = self.passwd = self.host = None
|
|
|
|
self.port = self.path = self.query = self.fragment = None
|
|
|
|
self._localpath = True
|
|
|
|
self._hostport = ''
|
|
|
|
self._origpath = path
|
|
|
|
|
2011-09-11 02:49:19 +04:00
|
|
|
if parsefragment and '#' in path:
|
|
|
|
path, self.fragment = path.split('#', 1)
|
|
|
|
|
2011-06-21 01:45:33 +04:00
|
|
|
# special case for Windows drive letters and UNC paths
|
2016-10-09 16:00:47 +03:00
|
|
|
if hasdriveletter(path) or path.startswith('\\\\'):
|
2011-04-30 20:43:20 +04:00
|
|
|
self.path = path
|
|
|
|
return
|
|
|
|
|
|
|
|
# For compatibility reasons, we can't handle bundle paths as
|
|
|
|
# normal URLS
|
|
|
|
if path.startswith('bundle:'):
|
|
|
|
self.scheme = 'bundle'
|
|
|
|
path = path[7:]
|
|
|
|
if path.startswith('//'):
|
|
|
|
path = path[2:]
|
|
|
|
self.path = path
|
|
|
|
return
|
|
|
|
|
|
|
|
if self._matchscheme(path):
|
|
|
|
parts = path.split(':', 1)
|
|
|
|
if parts[0]:
|
|
|
|
self.scheme, path = parts
|
|
|
|
self._localpath = False
|
|
|
|
|
|
|
|
if not path:
|
|
|
|
path = None
|
|
|
|
if self._localpath:
|
|
|
|
self.path = ''
|
|
|
|
return
|
|
|
|
else:
|
|
|
|
if self._localpath:
|
|
|
|
self.path = path
|
|
|
|
return
|
|
|
|
|
|
|
|
if parsequery and '?' in path:
|
|
|
|
path, self.query = path.split('?', 1)
|
|
|
|
if not path:
|
|
|
|
path = None
|
|
|
|
if not self.query:
|
|
|
|
self.query = None
|
|
|
|
|
|
|
|
# // is required to specify a host/authority
|
|
|
|
if path and path.startswith('//'):
|
|
|
|
parts = path[2:].split('/', 1)
|
|
|
|
if len(parts) > 1:
|
|
|
|
self.host, path = parts
|
|
|
|
else:
|
|
|
|
self.host = parts[0]
|
|
|
|
path = None
|
|
|
|
if not self.host:
|
|
|
|
self.host = None
|
2011-08-04 04:51:29 +04:00
|
|
|
# path of file:///d is /d
|
|
|
|
# path of file:///d:/ is d:/, not /d:/
|
2011-07-23 02:11:35 +04:00
|
|
|
if path and not hasdriveletter(path):
|
2011-04-30 20:43:20 +04:00
|
|
|
path = '/' + path
|
|
|
|
|
|
|
|
if self.host and '@' in self.host:
|
|
|
|
self.user, self.host = self.host.rsplit('@', 1)
|
|
|
|
if ':' in self.user:
|
|
|
|
self.user, self.passwd = self.user.split(':', 1)
|
|
|
|
if not self.host:
|
|
|
|
self.host = None
|
|
|
|
|
|
|
|
# Don't split on colons in IPv6 addresses without ports
|
|
|
|
if (self.host and ':' in self.host and
|
|
|
|
not (self.host.startswith('[') and self.host.endswith(']'))):
|
|
|
|
self._hostport = self.host
|
|
|
|
self.host, self.port = self.host.rsplit(':', 1)
|
|
|
|
if not self.host:
|
|
|
|
self.host = None
|
|
|
|
|
|
|
|
if (self.host and self.scheme == 'file' and
|
|
|
|
self.host not in ('localhost', '127.0.0.1', '[::1]')):
|
|
|
|
raise Abort(_('file:// URLs can only refer to localhost'))
|
|
|
|
|
|
|
|
self.path = path
|
|
|
|
|
2011-07-31 23:00:44 +04:00
|
|
|
# leave the query string escaped
|
2011-04-30 20:43:20 +04:00
|
|
|
for a in ('user', 'passwd', 'host', 'port',
|
2011-07-31 23:00:44 +04:00
|
|
|
'path', 'fragment'):
|
2011-04-30 20:43:20 +04:00
|
|
|
v = getattr(self, a)
|
|
|
|
if v is not None:
|
2017-03-22 08:23:11 +03:00
|
|
|
setattr(self, a, urlreq.unquote(v))
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
def __repr__(self):
|
|
|
|
attrs = []
|
|
|
|
for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
|
|
|
|
'query', 'fragment'):
|
|
|
|
v = getattr(self, a)
|
|
|
|
if v is not None:
|
|
|
|
attrs.append('%s: %r' % (a, v))
|
|
|
|
return '<url %s>' % ', '.join(attrs)
|
|
|
|
|
2017-06-24 07:48:04 +03:00
|
|
|
def __bytes__(self):
|
2011-05-01 17:49:13 +04:00
|
|
|
r"""Join the URL's components back into a URL string.
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
Examples:
|
|
|
|
|
2011-11-07 06:25:10 +04:00
|
|
|
>>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
|
|
|
|
'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
|
2011-07-31 23:00:44 +04:00
|
|
|
>>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
|
|
|
|
'http://user:pw@host:80/?foo=bar&baz=42'
|
|
|
|
>>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
|
|
|
|
'http://user:pw@host:80/?foo=bar%3dbaz'
|
2011-04-30 20:43:20 +04:00
|
|
|
>>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
|
|
|
|
'ssh://user:pw@[::1]:2200//home/joe#'
|
|
|
|
>>> str(url('http://localhost:80//'))
|
|
|
|
'http://localhost:80//'
|
|
|
|
>>> str(url('http://localhost:80/'))
|
|
|
|
'http://localhost:80/'
|
|
|
|
>>> str(url('http://localhost:80'))
|
|
|
|
'http://localhost:80/'
|
|
|
|
>>> str(url('bundle:foo'))
|
|
|
|
'bundle:foo'
|
|
|
|
>>> str(url('bundle://../foo'))
|
|
|
|
'bundle:../foo'
|
|
|
|
>>> str(url('path'))
|
|
|
|
'path'
|
2011-05-12 18:41:56 +04:00
|
|
|
>>> str(url('file:///tmp/foo/bar'))
|
|
|
|
'file:///tmp/foo/bar'
|
2011-12-04 21:22:25 +04:00
|
|
|
>>> str(url('file:///c:/tmp/foo/bar'))
|
2011-12-06 03:48:40 +04:00
|
|
|
'file:///c:/tmp/foo/bar'
|
2011-05-01 17:49:13 +04:00
|
|
|
>>> print url(r'bundle:foo\bar')
|
|
|
|
bundle:foo\bar
|
2013-11-21 01:03:15 +04:00
|
|
|
>>> print url(r'file:///D:\data\hg')
|
|
|
|
file:///D:\data\hg
|
2011-04-30 20:43:20 +04:00
|
|
|
"""
|
|
|
|
if self._localpath:
|
|
|
|
s = self.path
|
|
|
|
if self.scheme == 'bundle':
|
|
|
|
s = 'bundle:' + s
|
|
|
|
if self.fragment:
|
|
|
|
s += '#' + self.fragment
|
|
|
|
return s
|
|
|
|
|
|
|
|
s = self.scheme + ':'
|
2011-05-12 18:41:56 +04:00
|
|
|
if self.user or self.passwd or self.host:
|
|
|
|
s += '//'
|
2011-12-04 21:22:25 +04:00
|
|
|
elif self.scheme and (not self.path or self.path.startswith('/')
|
|
|
|
or hasdriveletter(self.path)):
|
2011-04-30 20:43:20 +04:00
|
|
|
s += '//'
|
2011-12-04 21:22:25 +04:00
|
|
|
if hasdriveletter(self.path):
|
|
|
|
s += '/'
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.user:
|
2016-04-07 02:22:12 +03:00
|
|
|
s += urlreq.quote(self.user, safe=self._safechars)
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.passwd:
|
2016-04-07 02:22:12 +03:00
|
|
|
s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.user or self.passwd:
|
|
|
|
s += '@'
|
|
|
|
if self.host:
|
|
|
|
if not (self.host.startswith('[') and self.host.endswith(']')):
|
2016-04-07 02:22:12 +03:00
|
|
|
s += urlreq.quote(self.host)
|
2011-04-30 20:43:20 +04:00
|
|
|
else:
|
|
|
|
s += self.host
|
|
|
|
if self.port:
|
2016-04-07 02:22:12 +03:00
|
|
|
s += ':' + urlreq.quote(self.port)
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.host:
|
|
|
|
s += '/'
|
|
|
|
if self.path:
|
2011-07-31 23:00:44 +04:00
|
|
|
# TODO: similar to the query string, we should not unescape the
|
|
|
|
# path when we store it, the path might contain '%2f' = '/',
|
|
|
|
# which we should *not* escape.
|
2016-04-07 02:22:12 +03:00
|
|
|
s += urlreq.quote(self.path, safe=self._safepchars)
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.query:
|
2011-07-31 23:00:44 +04:00
|
|
|
# we store the query in escaped form.
|
|
|
|
s += '?' + self.query
|
2011-04-30 20:43:20 +04:00
|
|
|
if self.fragment is not None:
|
2016-04-07 02:22:12 +03:00
|
|
|
s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
|
2011-04-30 20:43:20 +04:00
|
|
|
return s
|
|
|
|
|
2017-06-24 07:48:04 +03:00
|
|
|
__str__ = encoding.strmethod(__bytes__)
|
|
|
|
|
2011-04-30 20:43:20 +04:00
|
|
|
def authinfo(self):
|
|
|
|
user, passwd = self.user, self.passwd
|
|
|
|
try:
|
|
|
|
self.user, self.passwd = None, None
|
2017-04-07 11:16:35 +03:00
|
|
|
s = bytes(self)
|
2011-04-30 20:43:20 +04:00
|
|
|
finally:
|
|
|
|
self.user, self.passwd = user, passwd
|
|
|
|
if not self.user:
|
|
|
|
return (s, None)
|
2011-08-06 16:10:59 +04:00
|
|
|
# authinfo[1] is passed to urllib2 password manager, and its
|
|
|
|
# URIs must not contain credentials. The host is passed in the
|
|
|
|
# URIs list because Python < 2.4.3 uses only that to search for
|
|
|
|
# a password.
|
2011-08-05 23:05:40 +04:00
|
|
|
return (s, (None, (s, self.host),
|
2011-04-30 20:43:20 +04:00
|
|
|
self.user, self.passwd or ''))
|
|
|
|
|
2011-06-30 01:01:06 +04:00
|
|
|
def isabs(self):
|
|
|
|
if self.scheme and self.scheme != 'file':
|
|
|
|
return True # remote URL
|
|
|
|
if hasdriveletter(self.path):
|
|
|
|
return True # absolute for our purposes - can't be joined()
|
2017-06-25 00:41:55 +03:00
|
|
|
if self.path.startswith(br'\\'):
|
2011-06-30 01:01:06 +04:00
|
|
|
return True # Windows UNC path
|
|
|
|
if self.path.startswith('/'):
|
|
|
|
return True # POSIX-style
|
|
|
|
return False
|
|
|
|
|
2011-04-30 20:43:20 +04:00
|
|
|
def localpath(self):
|
|
|
|
if self.scheme == 'file' or self.scheme == 'bundle':
|
|
|
|
path = self.path or '/'
|
|
|
|
# For Windows, we need to promote hosts containing drive
|
|
|
|
# letters to paths with drive letters.
|
|
|
|
if hasdriveletter(self._hostport):
|
|
|
|
path = self._hostport + '/' + self.path
|
2011-11-16 03:10:56 +04:00
|
|
|
elif (self.host is not None and self.path
|
|
|
|
and not hasdriveletter(path)):
|
2011-04-30 20:43:20 +04:00
|
|
|
path = '/' + path
|
|
|
|
return path
|
|
|
|
return self._origpath
|
|
|
|
|
2014-02-04 02:47:41 +04:00
|
|
|
def islocal(self):
|
|
|
|
'''whether localpath will return something that posixfile can open'''
|
|
|
|
return (not self.scheme or self.scheme == 'file'
|
|
|
|
or self.scheme == 'bundle')
|
|
|
|
|
2011-04-30 20:43:20 +04:00
|
|
|
def hasscheme(path):
|
|
|
|
return bool(url(path).scheme)
|
|
|
|
|
|
|
|
def hasdriveletter(path):
|
2011-12-04 21:22:25 +04:00
|
|
|
return path and path[1:2] == ':' and path[0:1].isalpha()
|
2011-04-30 20:43:20 +04:00
|
|
|
|
2011-07-01 19:37:09 +04:00
|
|
|
def urllocalpath(path):
|
2011-04-30 20:43:20 +04:00
|
|
|
return url(path, parsequery=False, parsefragment=False).localpath()
|
|
|
|
|
|
|
|
def hidepassword(u):
|
|
|
|
'''hide user credential in a url string'''
|
|
|
|
u = url(u)
|
|
|
|
if u.passwd:
|
|
|
|
u.passwd = '***'
|
2017-04-07 11:16:35 +03:00
|
|
|
return bytes(u)
|
2011-04-30 20:43:20 +04:00
|
|
|
|
|
|
|
def removeauth(u):
|
|
|
|
'''remove all authentication information from a url string'''
|
|
|
|
u = url(u)
|
|
|
|
u.user = u.passwd = None
|
|
|
|
return str(u)
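An illustrative pair of calls (not part of the module), using a made-up URL:
>>> hidepassword('http://joe:secret@example.com/repo')
'http://joe:***@example.com/repo'
>>> removeauth('http://joe:secret@example.com/repo')
'http://example.com/repo'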
|
2011-06-02 01:43:34 +04:00
|
|
|
|
2013-03-01 01:11:42 +04:00
|
|
|
timecount = unitcountfn(
|
|
|
|
(1, 1e3, _('%.0f s')),
|
|
|
|
(100, 1, _('%.1f s')),
|
|
|
|
(10, 1, _('%.2f s')),
|
|
|
|
(1, 1, _('%.3f s')),
|
|
|
|
(100, 0.001, _('%.1f ms')),
|
|
|
|
(10, 0.001, _('%.2f ms')),
|
|
|
|
(1, 0.001, _('%.3f ms')),
|
|
|
|
(100, 0.000001, _('%.1f us')),
|
|
|
|
(10, 0.000001, _('%.2f us')),
|
|
|
|
(1, 0.000001, _('%.3f us')),
|
|
|
|
(100, 0.000000001, _('%.1f ns')),
|
|
|
|
(10, 0.000000001, _('%.2f ns')),
|
|
|
|
(1, 0.000000001, _('%.3f ns')),
|
|
|
|
)
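An illustrative call (not part of the module); the scan works the same way
as bytecount above, just with time units:
>>> timecount(0.042)
'42.00 ms'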
|
|
|
|
|
|
|
|
_timenesting = [0]
|
|
|
|
|
|
|
|
def timed(func):
|
|
|
|
'''Report the execution time of a function call to stderr.
|
|
|
|
|
|
|
|
During development, use as a decorator when you need to measure
|
|
|
|
the cost of a function, e.g. as follows:
|
|
|
|
|
|
|
|
@util.timed
|
|
|
|
def foo(a, b, c):
|
|
|
|
pass
|
|
|
|
'''
|
|
|
|
|
|
|
|
def wrapper(*args, **kwargs):
|
2017-02-16 00:17:39 +03:00
|
|
|
start = timer()
|
2013-03-01 01:11:42 +04:00
|
|
|
indent = 2
|
|
|
|
_timenesting[0] += indent
|
|
|
|
try:
|
|
|
|
return func(*args, **kwargs)
|
|
|
|
finally:
|
2017-02-16 00:17:39 +03:00
|
|
|
elapsed = timer() - start
|
2013-03-01 01:11:42 +04:00
|
|
|
_timenesting[0] -= indent
|
2016-10-20 17:53:36 +03:00
|
|
|
stderr.write('%s%s: %s\n' %
|
|
|
|
(' ' * _timenesting[0], func.__name__,
|
|
|
|
timecount(elapsed)))
|
2013-03-01 01:11:42 +04:00
|
|
|
return wrapper
|
2013-05-15 02:16:43 +04:00
|
|
|
|
|
|
|
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
|
|
|
|
('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
|
|
|
|
|
|
|
|
def sizetoint(s):
|
|
|
|
'''Convert a space specifier to a byte count.
|
|
|
|
|
|
|
|
>>> sizetoint('30')
|
|
|
|
30
|
|
|
|
>>> sizetoint('2.2kb')
|
|
|
|
2252
|
|
|
|
>>> sizetoint('6M')
|
|
|
|
6291456
|
|
|
|
'''
|
|
|
|
t = s.strip().lower()
|
|
|
|
try:
|
|
|
|
for k, u in _sizeunits:
|
|
|
|
if t.endswith(k):
|
|
|
|
return int(float(t[:-len(k)]) * u)
|
|
|
|
return int(t)
|
|
|
|
except ValueError:
|
|
|
|
raise error.ParseError(_("couldn't parse size: %s") % s)
|
2013-05-14 22:23:15 +04:00
|
|
|
|
|
|
|
class hooks(object):
|
|
|
|
'''A collection of hook functions that can be used to extend a
|
2015-08-28 17:53:55 +03:00
|
|
|
function's behavior. Hooks are called in lexicographic order,
|
2013-05-14 22:23:15 +04:00
|
|
|
based on the names of their sources.'''
|
|
|
|
|
|
|
|
def __init__(self):
|
|
|
|
self._hooks = []
|
|
|
|
|
|
|
|
def add(self, source, hook):
|
|
|
|
self._hooks.append((source, hook))
|
|
|
|
|
|
|
|
def __call__(self, *args):
|
|
|
|
self._hooks.sort(key=lambda x: x[0])
|
2014-04-15 19:37:24 +04:00
|
|
|
results = []
|
2013-05-14 22:23:15 +04:00
|
|
|
for source, hook in self._hooks:
|
2014-04-15 19:37:24 +04:00
|
|
|
results.append(hook(*args))
|
|
|
|
return results
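A hypothetical sketch (source names made up) of how a hooks collection is
populated and invoked; hooks run sorted by source name:
>>> h = hooks()
>>> h.add('zzz-ext', lambda x: x + 1)
>>> h.add('aaa-ext', lambda x: x * 2)
>>> h(3)
[6, 4]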
|
2014-01-13 02:28:21 +04:00
|
|
|
|
2015-01-14 03:15:26 +03:00
|
|
|
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
|
2016-03-11 20:22:04 +03:00
|
|
|
'''Yields lines for a nicely formatted stacktrace.
|
2015-01-14 03:15:26 +03:00
|
|
|
Skips the last 'skip' entries, then returns the last 'depth' entries.
|
2016-03-11 20:22:04 +03:00
|
|
|
Each file+linenumber is formatted according to fileline.
|
|
|
|
Each line is formatted according to line.
|
|
|
|
If line is None, it yields:
|
|
|
|
length of longest filepath+line number,
|
|
|
|
filepath+linenumber,
|
|
|
|
function
|
|
|
|
|
|
|
|
Not to be used in production code, but very convenient while developing.
|
|
|
|
'''
|
|
|
|
entries = [(fileline % (fn, ln), func)
|
2015-01-14 03:15:26 +03:00
|
|
|
for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
|
|
|
|
][-depth:]
|
2016-03-11 20:22:04 +03:00
|
|
|
if entries:
|
|
|
|
fnmax = max(len(entry[0]) for entry in entries)
|
|
|
|
for fnln, func in entries:
|
|
|
|
if line is None:
|
|
|
|
yield (fnmax, fnln, func)
|
|
|
|
else:
|
|
|
|
yield line % (fnmax, fnln, func)
|
|
|
|
|
2015-01-14 03:15:26 +03:00
|
|
|
def debugstacktrace(msg='stacktrace', skip=0,
|
|
|
|
f=stderr, otherf=stdout, depth=0):
|
2014-01-13 02:28:21 +04:00
|
|
|
'''Writes a message to f (stderr) with a nicely formatted stacktrace.
|
2015-01-14 03:15:26 +03:00
|
|
|
Skips the 'skip' entries closest to the call, then shows 'depth' entries.
|
|
|
|
By default it will flush stdout first.
|
2016-03-11 19:50:14 +03:00
|
|
|
It can be used everywhere and intentionally does not require an ui object.
|
2014-01-13 02:28:21 +04:00
|
|
|
Not to be used in production code, but very convenient while developing.
|
|
|
|
'''
|
2014-02-20 05:38:36 +04:00
|
|
|
if otherf:
|
|
|
|
otherf.flush()
|
2015-01-16 06:26:40 +03:00
|
|
|
f.write('%s at:\n' % msg.rstrip())
|
2015-01-14 03:15:26 +03:00
|
|
|
for line in getstackframes(skip + 1, depth=depth):
|
2016-03-11 20:22:04 +03:00
|
|
|
f.write(line)
|
2014-02-20 05:38:36 +04:00
|
|
|
f.flush()
|
2014-01-13 02:28:21 +04:00
|
|
|
|
2015-04-07 00:36:08 +03:00
|
|
|
class dirs(object):
|
|
|
|
'''a multiset of directory names from a dirstate or manifest'''
|
|
|
|
|
|
|
|
def __init__(self, map, skip=None):
|
|
|
|
self._dirs = {}
|
|
|
|
addpath = self.addpath
|
|
|
|
if safehasattr(map, 'iteritems') and skip is not None:
|
|
|
|
for f, s in map.iteritems():
|
|
|
|
if s[0] != skip:
|
|
|
|
addpath(f)
|
|
|
|
else:
|
|
|
|
for f in map:
|
|
|
|
addpath(f)
|
|
|
|
|
|
|
|
def addpath(self, path):
|
|
|
|
dirs = self._dirs
|
|
|
|
for base in finddirs(path):
|
|
|
|
if base in dirs:
|
|
|
|
dirs[base] += 1
|
|
|
|
return
|
|
|
|
dirs[base] = 1
|
|
|
|
|
|
|
|
def delpath(self, path):
|
|
|
|
dirs = self._dirs
|
|
|
|
for base in finddirs(path):
|
|
|
|
if dirs[base] > 1:
|
|
|
|
dirs[base] -= 1
|
|
|
|
return
|
|
|
|
del dirs[base]
|
|
|
|
|
|
|
|
def __iter__(self):
|
2017-03-16 02:23:23 +03:00
|
|
|
return iter(self._dirs)
|
2015-04-07 00:36:08 +03:00
|
|
|
|
|
|
|
def __contains__(self, d):
|
|
|
|
return d in self._dirs
|
|
|
|
|
|
|
|
if safehasattr(parsers, 'dirs'):
|
|
|
|
dirs = parsers.dirs
|
|
|
|
|
|
|
|
def finddirs(path):
|
|
|
|
pos = path.rfind('/')
|
|
|
|
while pos != -1:
|
|
|
|
yield path[:pos]
|
|
|
|
pos = path.rfind('/', 0, pos)
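Illustrative calls (not part of the module) against the pure Python
fallback; the C implementation in parsers.dirs should behave the same way
for these membership tests:
>>> list(finddirs('a/b/c'))
['a/b', 'a']
>>> d = dirs(['a/b/c', 'a/d'])
>>> 'a' in d, 'a/b' in d, 'a/b/c' in d
(True, True, False)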
|
|
|
|
|
2016-01-12 02:25:43 +03:00
|
|
|
class ctxmanager(object):
|
|
|
|
'''A context manager for use in 'with' blocks to allow multiple
|
|
|
|
contexts to be entered at once. This is both safer and more
|
|
|
|
flexible than contextlib.nested.
|
|
|
|
|
|
|
|
Once Mercurial supports Python 2.7+, this will become mostly
|
|
|
|
unnecessary.
|
|
|
|
'''
|
|
|
|
|
|
|
|
def __init__(self, *args):
|
|
|
|
'''Accepts a list of no-argument functions that return context
|
|
|
|
managers. These will be invoked when enter() is called.'''
|
|
|
|
self._pending = args
|
|
|
|
self._atexit = []
|
|
|
|
|
|
|
|
def __enter__(self):
|
|
|
|
return self
|
|
|
|
|
2016-01-14 20:31:01 +03:00
|
|
|
def enter(self):
|
2016-01-12 02:25:43 +03:00
|
|
|
'''Create and enter context managers in the order in which they were
|
|
|
|
passed to the constructor.'''
|
|
|
|
values = []
|
|
|
|
for func in self._pending:
|
|
|
|
obj = func()
|
|
|
|
values.append(obj.__enter__())
|
|
|
|
self._atexit.append(obj.__exit__)
|
|
|
|
del self._pending
|
|
|
|
return values
|
|
|
|
|
|
|
|
def atexit(self, func, *args, **kwargs):
|
|
|
|
'''Add a function to call when this context manager exits. The
|
|
|
|
ordering of multiple atexit calls is unspecified, save that
|
|
|
|
they will happen before any __exit__ functions.'''
|
|
|
|
def wrapper(exc_type, exc_val, exc_tb):
|
|
|
|
func(*args, **kwargs)
|
|
|
|
self._atexit.append(wrapper)
|
|
|
|
return func
|
|
|
|
|
|
|
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
|
|
'''Context managers are exited in the reverse order from which
|
|
|
|
they were created.'''
|
|
|
|
received = exc_type is not None
|
|
|
|
suppressed = False
|
|
|
|
pending = None
|
|
|
|
self._atexit.reverse()
|
|
|
|
for exitfunc in self._atexit:
|
|
|
|
try:
|
|
|
|
if exitfunc(exc_type, exc_val, exc_tb):
|
|
|
|
suppressed = True
|
|
|
|
exc_type = None
|
|
|
|
exc_val = None
|
|
|
|
exc_tb = None
|
2016-01-13 22:41:10 +03:00
|
|
|
except BaseException:
|
2016-01-12 02:25:43 +03:00
|
|
|
pending = sys.exc_info()
|
|
|
|
exc_type, exc_val, exc_tb = pending = sys.exc_info()
|
|
|
|
del self._atexit
|
|
|
|
if pending:
|
|
|
|
raise exc_val
|
|
|
|
return received and suppressed
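A hypothetical usage sketch (file names made up); the constructor takes
no-argument callables, enter() runs them in order, and exit happens in
reverse order:
>>> with ctxmanager(lambda: open('a'), lambda: open('b')) as c:  # doctest: +SKIP
...     fa, fb = c.enter()
...     # use fa and fb; both are closed again when the block exits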
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
# compression code
|
|
|
|
|
2016-12-24 23:51:12 +03:00
|
|
|
SERVERROLE = 'server'
|
|
|
|
CLIENTROLE = 'client'
|
|
|
|
|
|
|
|
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
|
|
|
|
(u'name', u'serverpriority',
|
|
|
|
u'clientpriority'))
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
class compressormanager(object):
|
|
|
|
"""Holds registrations of various compression engines.
|
|
|
|
|
|
|
|
This class essentially abstracts the differences between compression
|
|
|
|
engines to allow new compression formats to be added easily, possibly from
|
|
|
|
extensions.
|
|
|
|
|
|
|
|
Compressors are registered against the global instance by calling its
|
|
|
|
``register()`` method.
|
|
|
|
"""
|
|
|
|
def __init__(self):
|
|
|
|
self._engines = {}
|
|
|
|
# Bundle spec human name to engine name.
|
|
|
|
self._bundlenames = {}
|
|
|
|
# Internal bundle identifier to engine name.
|
|
|
|
self._bundletypes = {}
|
2017-01-03 00:27:20 +03:00
|
|
|
# Revlog header to engine name.
|
|
|
|
self._revlogheaders = {}
|
2016-12-24 23:51:12 +03:00
|
|
|
# Wire proto identifier to engine name.
|
|
|
|
self._wiretypes = {}
|
2016-11-08 05:31:39 +03:00
|
|
|
|
|
|
|
def __getitem__(self, key):
|
|
|
|
return self._engines[key]
|
|
|
|
|
|
|
|
def __contains__(self, key):
|
|
|
|
return key in self._engines
|
|
|
|
|
|
|
|
def __iter__(self):
|
|
|
|
return iter(self._engines.keys())
|
|
|
|
|
|
|
|
def register(self, engine):
|
|
|
|
"""Register a compression engine with the manager.
|
|
|
|
|
|
|
|
The argument must be a ``compressionengine`` instance.
|
|
|
|
"""
|
|
|
|
if not isinstance(engine, compressionengine):
|
|
|
|
raise ValueError(_('argument must be a compressionengine'))
|
|
|
|
|
|
|
|
name = engine.name()
|
|
|
|
|
|
|
|
if name in self._engines:
|
|
|
|
raise error.Abort(_('compression engine %s already registered') %
|
|
|
|
name)
|
|
|
|
|
|
|
|
bundleinfo = engine.bundletype()
|
|
|
|
if bundleinfo:
|
|
|
|
bundlename, bundletype = bundleinfo
|
|
|
|
|
|
|
|
if bundlename in self._bundlenames:
|
|
|
|
raise error.Abort(_('bundle name %s already registered') %
|
|
|
|
bundlename)
|
|
|
|
if bundletype in self._bundletypes:
|
|
|
|
raise error.Abort(_('bundle type %s already registered by %s') %
|
|
|
|
(bundletype, self._bundletypes[bundletype]))
|
|
|
|
|
|
|
|
# No external facing name declared.
|
|
|
|
if bundlename:
|
|
|
|
self._bundlenames[bundlename] = name
|
|
|
|
|
|
|
|
self._bundletypes[bundletype] = name
|
|
|
|
|
2016-12-24 23:51:12 +03:00
|
|
|
wiresupport = engine.wireprotosupport()
|
|
|
|
if wiresupport:
|
|
|
|
wiretype = wiresupport.name
|
|
|
|
if wiretype in self._wiretypes:
|
|
|
|
raise error.Abort(_('wire protocol compression %s already '
|
|
|
|
'registered by %s') %
|
|
|
|
(wiretype, self._wiretypes[wiretype]))
|
|
|
|
|
|
|
|
self._wiretypes[wiretype] = name
|
|
|
|
|
2017-01-03 00:27:20 +03:00
|
|
|
revlogheader = engine.revlogheader()
|
|
|
|
if revlogheader and revlogheader in self._revlogheaders:
|
|
|
|
raise error.Abort(_('revlog header %s already registered by %s') %
|
|
|
|
(revlogheader, self._revlogheaders[revlogheader]))
|
|
|
|
|
|
|
|
if revlogheader:
|
|
|
|
self._revlogheaders[revlogheader] = name
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
self._engines[name] = engine
|
|
|
|
|
|
|
|
@property
|
|
|
|
def supportedbundlenames(self):
|
|
|
|
return set(self._bundlenames.keys())
|
|
|
|
|
|
|
|
@property
|
|
|
|
def supportedbundletypes(self):
|
|
|
|
return set(self._bundletypes.keys())
|
|
|
|
|
|
|
|
def forbundlename(self, bundlename):
|
|
|
|
"""Obtain a compression engine registered to a bundle name.
|
|
|
|
|
|
|
|
Will raise KeyError if the bundle type isn't registered.
|
2016-11-11 10:15:02 +03:00
|
|
|
|
|
|
|
Will abort if the engine is known but not available.
|
2016-11-08 05:31:39 +03:00
|
|
|
"""
|
2016-11-11 10:15:02 +03:00
|
|
|
engine = self._engines[self._bundlenames[bundlename]]
|
|
|
|
if not engine.available():
|
|
|
|
raise error.Abort(_('compression engine %s could not be loaded') %
|
|
|
|
engine.name())
|
|
|
|
return engine
|
2016-11-08 05:31:39 +03:00
|
|
|
|
|
|
|
def forbundletype(self, bundletype):
|
|
|
|
"""Obtain a compression engine registered to a bundle type.
|
|
|
|
|
|
|
|
Will raise KeyError if the bundle type isn't registered.
|
2016-11-11 10:15:02 +03:00
|
|
|
|
|
|
|
Will abort if the engine is known but not available.
|
2016-11-08 05:31:39 +03:00
|
|
|
"""
|
2016-11-11 10:15:02 +03:00
|
|
|
engine = self._engines[self._bundletypes[bundletype]]
|
|
|
|
if not engine.available():
|
|
|
|
raise error.Abort(_('compression engine %s could not be loaded') %
|
|
|
|
engine.name())
|
|
|
|
return engine
|
2016-11-08 05:31:39 +03:00
|
|
|
|
2016-12-24 23:51:12 +03:00
|
|
|
def supportedwireengines(self, role, onlyavailable=True):
|
|
|
|
"""Obtain compression engines that support the wire protocol.
|
|
|
|
|
|
|
|
Returns a list of engines in prioritized order, most desired first.
|
|
|
|
|
|
|
|
If ``onlyavailable`` is set, filter out engines that can't be
|
|
|
|
loaded.
|
|
|
|
"""
|
|
|
|
assert role in (SERVERROLE, CLIENTROLE)
|
|
|
|
|
|
|
|
attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
|
|
|
|
|
|
|
|
engines = [self._engines[e] for e in self._wiretypes.values()]
|
|
|
|
if onlyavailable:
|
|
|
|
engines = [e for e in engines if e.available()]
|
|
|
|
|
|
|
|
def getkey(e):
|
|
|
|
# Sort first by priority, highest first. In case of tie, sort
|
|
|
|
# alphabetically. This is arbitrary, but ensures output is
|
|
|
|
# stable.
|
|
|
|
w = e.wireprotosupport()
|
|
|
|
return -1 * getattr(w, attr), w.name
|
|
|
|
|
|
|
|
return list(sorted(engines, key=getkey))
|
|
|
|
|
|
|
|
def forwiretype(self, wiretype):
|
|
|
|
engine = self._engines[self._wiretypes[wiretype]]
|
|
|
|
if not engine.available():
|
|
|
|
raise error.Abort(_('compression engine %s could not be loaded') %
|
|
|
|
engine.name())
|
|
|
|
return engine
|
|
|
|
|
2017-01-03 00:27:20 +03:00
|
|
|
def forrevlogheader(self, header):
|
|
|
|
"""Obtain a compression engine registered to a revlog header.
|
|
|
|
|
|
|
|
Will raise KeyError if the revlog header value isn't registered.
|
|
|
|
"""
|
|
|
|
return self._engines[self._revlogheaders[header]]
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
compengines = compressormanager()
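A hedged usage sketch (assuming the built-in engines registered further down
in this module): the global manager can be indexed by engine name, and bundle
identifiers map back to engines:
>>> zl = compengines['zlib']
>>> compressed = b''.join(zl.compressstream([b'payload']))
>>> compengines.forbundletype('GZ') is zl
True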
|
|
|
|
|
|
|
|
class compressionengine(object):
|
|
|
|
"""Base class for compression engines.
|
|
|
|
|
|
|
|
Compression engines must implement the interface defined by this class.
|
|
|
|
"""
|
|
|
|
def name(self):
|
|
|
|
"""Returns the name of the compression engine.
|
|
|
|
|
|
|
|
This is the key the engine is registered under.
|
|
|
|
|
|
|
|
This method must be implemented.
|
|
|
|
"""
|
|
|
|
raise NotImplementedError()
|
|
|
|
|
2016-11-11 10:03:48 +03:00
|
|
|
def available(self):
|
|
|
|
"""Whether the compression engine is available.
|
|
|
|
|
|
|
|
The intent of this method is to allow optional compression engines
|
|
|
|
that may not be available in all installations (such as engines relying
|
|
|
|
on C extensions that may not be present).
|
|
|
|
"""
|
|
|
|
return True
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
def bundletype(self):
|
|
|
|
"""Describes bundle identifiers for this engine.
|
|
|
|
|
|
|
|
If this compression engine isn't supported for bundles, returns None.
|
|
|
|
|
|
|
|
If this engine can be used for bundles, returns a 2-tuple of strings of
|
|
|
|
the user-facing "bundle spec" compression name and an internal
|
|
|
|
identifier used to denote the compression format within bundles. To
|
|
|
|
exclude the name from external usage, set the first element to ``None``.
|
|
|
|
|
|
|
|
If bundle compression is supported, the class must also implement
|
2016-11-08 05:57:54 +03:00
|
|
|
``compressstream`` and ``decompressorreader``.
|
2017-04-01 23:29:01 +03:00
|
|
|
|
|
|
|
The docstring of this method is used in the help system to tell users
|
|
|
|
about this engine.
|
2016-11-08 05:31:39 +03:00
|
|
|
"""
|
|
|
|
return None
|
|
|
|
|
2016-12-24 23:51:12 +03:00
|
|
|
def wireprotosupport(self):
|
|
|
|
"""Declare support for this compression format on the wire protocol.
|
|
|
|
|
|
|
|
If this compression engine isn't supported for compressing wire
|
|
|
|
protocol payloads, returns None.
|
|
|
|
|
|
|
|
Otherwise, returns ``compenginewireprotosupport`` with the following
|
|
|
|
fields:
|
|
|
|
|
|
|
|
* String format identifier
|
|
|
|
* Integer priority for the server
|
|
|
|
* Integer priority for the client
|
|
|
|
|
|
|
|
The integer priorities are used to order the advertisement of format
|
|
|
|
support by server and client. The highest integer is advertised
|
|
|
|
first. Integers with non-positive values aren't advertised.
|
|
|
|
|
|
|
|
The priority values are somewhat arbitrary and only used for default
|
|
|
|
ordering. The relative order can be changed via config options.
|
|
|
|
|
|
|
|
If wire protocol compression is supported, the class must also implement
|
|
|
|
``compressstream`` and ``decompressorreader``.
|
|
|
|
"""
|
|
|
|
return None
|
|
|
|
|
2017-01-03 00:27:20 +03:00
|
|
|
def revlogheader(self):
|
|
|
|
"""Header added to revlog chunks that identifies this engine.
|
|
|
|
|
|
|
|
If this engine can be used to compress revlogs, this method should
|
|
|
|
return the bytes used to identify chunks compressed with this engine.
|
|
|
|
Else, the method should return ``None`` to indicate it does not
|
|
|
|
participate in revlog compression.
|
|
|
|
"""
|
|
|
|
return None
|
|
|
|
|
2016-11-08 05:57:07 +03:00
|
|
|
def compressstream(self, it, opts=None):
|
|
|
|
"""Compress an iterator of chunks.
|
|
|
|
|
|
|
|
The method receives an iterator (ideally a generator) of chunks of
|
|
|
|
bytes to be compressed. It returns an iterator (ideally a generator)
|
|
|
|
of bytes of chunks representing the compressed output.
|
|
|
|
|
|
|
|
Optionally accepts an argument defining how to perform compression.
|
|
|
|
Each engine treats this argument differently.
|
|
|
|
"""
|
|
|
|
raise NotImplementedError()
|
|
|
|
|
2016-11-08 05:31:39 +03:00
|
|
|
def decompressorreader(self, fh):
|
|
|
|
"""Perform decompression on a file object.
|
|
|
|
|
|
|
|
Argument is an object with a ``read(size)`` method that returns
|
|
|
|
compressed data. Return value is an object with a ``read(size)`` that
|
|
|
|
returns uncompressed data.
|
|
|
|
"""
|
|
|
|
raise NotImplementedError()
|
|
|
|
|
util: compression APIs to support revlog compression
As part of "zstd all of the things," we need to teach revlogs to
use non-zlib compression formats. Because we're routing all compression
via the "compression manager" and "compression engine" APIs, we need to
introduce functionality there for performing revlog operations.
Ideally, revlog compression and decompression operations would be
implemented in terms of simple "compress" and "decompress" primitives.
However, there are a few considerations that make us want to have a
specialized primitive for handling revlogs:
1) Performance. Revlogs tend to do compression and especially
decompression operations in batches. Any overhead for e.g.
instantiating a "context" for performing an operation can be
noticed. For this reason, our "revlog compressor" primitive is
reusable. For zstd, we reuse the same compression "context" for
multiple operations. I've measured this to have a performance
impact versus constructing new contexts for each operation.
2) Specialization. By having a primitive dedicated to revlog use,
we can make revlog-specific choices and leave the door open for
more functionality in the future. For example, the zstd revlog
compressor may one day make use of dictionary compression.
A future patch will introduce a decompress() on the compressor
object.
The code for the zlib compressor is basically copied from
revlog.compress(). Although it doesn't handle the empty input
case, the null first byte case, and the 'u' prefix case. These
cases will continue to be handled in revlog.py once that code is
ported to use this API.
2017-01-02 23:39:03 +03:00
|
|
|
def revlogcompressor(self, opts=None):
|
|
|
|
"""Obtain an object that can be used to compress revlog entries.
|
|
|
|
|
|
|
|
The object has a ``compress(data)`` method that compresses binary
|
|
|
|
data. This method returns compressed binary data or ``None`` if
|
|
|
|
the data could not be compressed (too small, not compressible, etc).
|
|
|
|
The returned data should have a header uniquely identifying this
|
|
|
|
compression format so decompression can be routed to this engine.
|
2017-01-03 00:27:20 +03:00
|
|
|
This header should be identified by the ``revlogheader()`` return
|
|
|
|
value.
|
|
|
|
|
|
|
|
The object has a ``decompress(data)`` method that decompresses
|
|
|
|
data. The method will only be called if ``data`` begins with
|
|
|
|
``revlogheader()``. The method should return the raw, uncompressed
|
|
|
|
data or raise a ``RevlogError``.
|
2017-01-02 23:39:03 +03:00

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
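
# Example usage (an illustrative sketch, not exercised anywhere in this
# module): a consumer obtains a registered engine from the ``compengines``
# manager and drives it through the ``compressionengine`` API documented
# above. The data values below are made up.
#
#   engine = compengines['zlib']
#   compressed = ''.join(engine.compressstream(iter(['some data'])))
#   reader = engine.decompressorreader(stringio(compressed))
#   assert reader.read(9) == 'some data'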

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
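
# Example (illustrative sketch): revlog compressors returned by
# ``revlogcompressor()`` are reusable across many compress()/decompress()
# operations, so callers typically hold on to one instance. The data below
# is made up.
#
#   zc = compengines['zlib'].revlogcompressor()
#   blob = zc.compress('x' * 1000)    # None when compression isn't worthwhile
#   if blob is not None:
#       assert zc.decompress(blob) == 'x' * 1000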

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
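
# Example (illustrative sketch): the ``none`` engine simply passes data
# through, and its bundle type name can be used in a bundlespec, e.g. with
# something like ``hg bundle -t none-v2``.
#
#   noop = compengines['none']
#   assert list(noop.compressstream(iter(['abc']))) == ['abc']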

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
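
# Example (illustrative sketch): unlike the engines above, zstd may be
# missing from a given install, so consumers should check ``available()``
# before relying on it. The data below is made up.
#
#   if compengines['zstd'].available():
#       zc = compengines['zstd'].revlogcompressor()
#       frame = zc.compress('revlog chunk ' * 100)
#       assert frame is None or zc.decompress(frame) == 'revlog chunk ' * 100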

def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc

        items[bt[0]] = value

    return items
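
# Example (illustrative sketch of how the dict built above might be
# consumed by the help code; ``gzip`` is shown as a representative key
# contributed by the zlib engine's bundletype()):
#
#   topics = bundlecompressiontopics()
#   assert 'gzip' in topics
#   # topics['gzip'].__doc__ starts with '``gzip``' followed by the
#   # engine's bundletype docstring.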

# convenient shortcut
dst = debugstacktrace