# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)

from . import (
    encoding,
    error,
    revlog,
    util,
)
_defaultextra = {'branch': 'default'}

def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == util.unescapestr(res)
    True
    """
    # subset of the string_escape codec
    text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
    return text.replace('\0', '\\0')

def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            k, v = util.unescapestr(l).split(':', 1)
            extra[k] = v
    return extra

def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
    return "\0".join(items)
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
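    # How the pieces fit (descriptive summary, not new behaviour): writes made
    # through this object are appended to ``buf`` instead of the real file,
    # while reads transparently span the on-disk bytes followed by the
    # buffered bytes, so callers see one contiguous virtual file.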
    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end
    def tell(self):
        return self.offset
    def flush(self):
        pass
    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)

        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)
def _divertopener(opener, target):
    """build an opener that writes in 'target.a' instead of 'target'"""
    def _divert(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return opener(name + ".a", mode)
    return _divert

def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""
    def _delay(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return appender(opener, name, mode, buf)
    return _delay
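# Note (descriptive, based on changelog.delayupdate() below): _divertopener is
# used when the changelog starts out empty, so writes go to a separate
# '<target>.a' file that is renamed into place at finalization; _delayopener
# is used otherwise, buffering writes in memory until they can be appended
# safely.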
@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default='')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    description = attr.ib(default='')
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed data.
    """

    __slots__ = (
        u'_offsets',
        u'_text',
    )
    def __new__(cls, text):
        if not text:
            return _changelogrevision(extra=_defaultextra)
        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra
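        #
        # A raw entry therefore looks roughly like this (hypothetical values,
        # escapes shown literally):
        #   "<manifest hex>\nJane Doe <jane@example.com>\n"
        #   "1500000000 0 branch:default\n"
        #   "dir/file1\ndir/file2\n\ncommit message"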
        nl1 = text.index('\n')
        nl2 = text.index('\n', nl1 + 1)
        nl3 = text.index('\n', nl2 + 1)
        # The list of files may be empty, in which case nl3 is the first of
        # the double newline that precedes the description.
        if text[nl3 + 1:nl3 + 2] == '\n':
            doublenl = nl3
        else:
            doublenl = text.index('\n\n', nl3 + 1)
        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        return self

    @property
    def manifest(self):
        return bin(self._text[0:self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1:off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        return dateextra.split(' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        fields = dateextra.split(' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []
        return self._text[off[2] + 1:off[3]].split('\n')

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2:])
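# Usage sketch (the ``text`` value is hypothetical): given the raw bytes of a
# changelog entry, ``changelogrevision(text)`` only records field offsets up
# front; each property (manifest, user, date, files, description, extra)
# slices and decodes ``text`` lazily when it is accessed.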
class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hook processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists('00changelog.i.a'):
            indexfile = '00changelog.i.a'
        else:
            indexfile = '00changelog.i'

        datafile = '00changelog.d'
        revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
                               checkambig=True, mmaplargeindex=True)
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False
        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self.storedeltachains = False
        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        for i in xrange(len(self) - 1, -2, -1):
            if i not in self.filteredrevs:
                return self.node(i)

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        return (0 <= rev < len(self)
                and rev not in self.filteredrevs)
    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        self.rev(self.node(0))
        return self._nodecache

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def headrevs(self):
        if self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        return super(changelog, self).headrevs()
    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"
        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        tr.addfinalize('cl-%i' % id(self), self._finalize)
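    # Lifecycle note (descriptive summary of the methods above and below):
    # delayupdate() redirects index writes either into a diverted '.a' file
    # (empty changelog) or an in-memory buffer (_delaybuf); _writepending()
    # materializes the buffered data as the pending '<indexfile>.a' file so
    # pretxn hooks can read it; _finalize() moves everything back into the
    # real index file.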
    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a', checkambig=True)
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self.checkinlinesize(tr)
    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)
        if self._divert:
            return True

        return False

    def checkinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)
    def read(self, node):
"""Obtain data from a parsed changelog revision.
Returns a 6-tuple of:
- manifest node in binary
- author/user as a localstr
- date as a 2-tuple of (time, timezone)
- list of files
- commit message as a localstr
- dict of extra metadata
Unless you need to access all fields, consider calling
``changelogrevision`` instead, as it is faster for partial object
access.
"""
c = changelogrevision(self.revision(node))
return (
c.manifest,
c.user,
c.date,
c.files,
c.description,
c.extra
)
def changelogrevision(self, nodeorrev):
"""Obtain a ``changelogrevision`` for a node or revision."""
return changelogrevision(self.revision(nodeorrev))
def readfiles(self, node):
"""
short version of read that only returns the files modified by the cset
"""
text = self.revision(node)
if not text:
return []
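        # The first three header lines are the manifest node, the user and
        # the date; the remaining lines before the blank separator list the
        # files touched by the changeset.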
last = text.index("\n\n")
l = text[:last].split('\n')
return l[3:]
def add(self, manifest, files, desc, transaction, p1, p2,
user, date=None, extra=None):
# Convert to UTF-8 encoded bytestrings as the very first
# thing: calling any method on a localstr object will turn it
# into a str object and the cached UTF-8 string is thus lost.
user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
user = user.strip()
# An empty username or a username with a "\n" will make the
# revision text contain two "\n\n" sequences -> corrupt
# repository since read cannot unpack the revision.
if not user:
raise error.RevlogError(_("empty username"))
if "\n" in user:
raise error.RevlogError(_("username %s contains a newline")
% repr(user))
desc = stripdesc(desc)
if date:
parseddate = "%d %d" % util.parsedate(date)
else:
parseddate = "%d %d" % util.makedate()
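        # The default branch is never stored explicitly (decodeextra adds it
        # back on read), and branch names that collide with revision
        # identifiers are rejected.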
if extra:
branch = extra.get("branch")
if branch in ("default", ""):
del extra["branch"]
elif branch in (".", "null", "tip"):
raise error.RevlogError(_('the name \'%s\' is reserved')
% branch)
if extra:
extra = encodeextra(extra)
parseddate = "%s %s" % (parseddate, extra)
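        # A changelog entry is laid out as:
        #   <manifest hex>\n<user>\n<time> <timezone> [<extra>]\n<file>...\n\n<description>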
l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
text = "\n".join(l)
return self.addrevision(text, transaction, len(self), p1, p2)
def branchinfo(self, rev):
"""return the branch name and open/close state of a revision
This function exists because creating a changectx object
just to access this is costly."""
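        # a branch-closing changeset records a "close" key in its extra dict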
extra = self.read(rev)[5]
return encoding.tolocal(extra.get("branch")), 'close' in extra
def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
# overlay over the standard revlog._addrevision to track the new
# revision on the transaction.
rev = len(self)
node = super(changelog, self)._addrevision(node, rawtext, transaction,
*args, **kwargs)
revs = transaction.changes.get('revs')
if revs is not None:
revs.add(rev)
return node