2009-01-24 02:12:18 +03:00
|
|
|
# parsers.py - Python implementation of parsers.c
|
|
|
|
#
|
|
|
|
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2009-01-24 02:12:18 +03:00
|
|
|
|
2015-12-12 21:39:29 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import struct
|
|
|
|
import zlib
|
|
|
|
|
2016-08-13 06:23:56 +03:00
|
|
|
from ..node import nullid
|
|
|
|
from .. import pycompat
|
2016-04-10 23:55:37 +03:00
|
|
|
# in-memory string buffer class; pycompat presumably selects the right
# implementation for the running Python version — confirm against pycompat
stringio = pycompat.stringio
|
2009-01-24 02:12:18 +03:00
|
|
|
|
2017-03-07 22:15:19 +03:00
|
|
|
|
2009-01-24 02:12:18 +03:00
|
|
|
# local aliases so hot loops below get fast local-name lookups instead of
# repeated attribute lookups on the struct/zlib modules
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
|
|
|
|
|
parsers: inline fields of dirstate values in C version
Previously, while unpacking the dirstate we'd create 3-4 new CPython objects
for most dirstate values:
- the state is a single character string, which is pooled by CPython
- the mode is a new object if it isn't 0 due to being in the lookup set
- the size is a new object if it is greater than 255
- the mtime is a new object if it isn't -1 due to being in the lookup set
- the tuple to contain them all
In some cases such as regular hg status, we actually look at all the objects.
In other cases like hg add, hg status for a subdirectory, or hg status with the
third-party hgwatchman enabled, we look at almost none of the objects.
This patch eliminates most object creation in these cases by defining a custom
C struct that is exposed to Python with an interface similar to a tuple. Only
when tuple elements are actually requested are the respective objects created.
The gains, where they're expected, are significant. The following tests are run
against a working copy with over 270,000 files.
parse_dirstate becomes significantly faster:
$ hg perfdirstate
before: wall 0.186437 comb 0.180000 user 0.160000 sys 0.020000 (best of 35)
after: wall 0.093158 comb 0.100000 user 0.090000 sys 0.010000 (best of 95)
and as a result, several commands benefit:
$ time hg status # with hgwatchman enabled
before: 0.42s user 0.14s system 99% cpu 0.563 total
after: 0.34s user 0.12s system 99% cpu 0.471 total
$ time hg add new-file
before: 0.85s user 0.18s system 99% cpu 1.033 total
after: 0.76s user 0.17s system 99% cpu 0.931 total
There is a slight regression in regular status performance, but this is fixed
in an upcoming patch.
2014-05-28 01:27:41 +04:00
|
|
|
# Some code below makes tuples directly because it's more convenient. However,
|
|
|
|
# code outside this module should always use dirstatetuple.
|
|
|
|
def dirstatetuple(*x):
    """Return the dirstate entry fields packed as a plain tuple.

    The varargs mechanism already collects the fields into a tuple,
    so it is returned unchanged.  Code outside this module should go
    through this constructor rather than building tuples directly.
    """
    return x
|
|
|
|
|
2016-04-24 14:21:38 +03:00
|
|
|
# on-disk layout of one revlogng index entry: a big-endian 64-bit field
# combining offset and type (see offset_type/gettype), six 32-bit ints,
# a 20-byte node hash, and 12 bytes of padding
indexformatng = ">Qiiiiii20s12x"
# size of the leading 64-bit field, i.e. the offset of the first int field
indexfirst = struct.calcsize('Q')
# size of one 32-bit int field
sizeint = struct.calcsize('i')
# total size of one index entry
indexsize = struct.calcsize(indexformatng)
|
|
|
|
|
|
|
|
def gettype(q):
    """Extract the type from a combined offset/type index field.

    The type occupies the low 16 bits; the offset sits above it
    (see offset_type for the inverse operation).
    """
    return int(q & 0xFFFF)
|
|
|
|
|
|
|
|
def offset_type(offset, type):
    """Pack *offset* and *type* into one 64-bit index field.

    The offset is shifted into the high bits, leaving the low 16 bits
    for the type (see gettype for the inverse operation).
    """
    return int(int(offset) << 16 | type)
|
2016-04-24 14:21:38 +03:00
|
|
|
|
|
|
|
class BaseIndexObject(object):
    """Common behavior for the pure-Python revlog index objects.

    Entries live in three regions, in order: ``self._lgt`` entries
    decoded on demand from ``self._data`` (located via the subclass's
    ``_calculate_index``), then the tuples appended to ``self._extra``,
    and finally one synthetic null-revision entry at the very end.
    """

    def __len__(self):
        # on-disk entries + appended entries + the synthetic null entry
        return 1 + self._lgt + len(self._extra)

    def insert(self, i, tup):
        # only appending (insert position -1) is supported
        assert i == -1
        self._extra.append(tup)

    def _fix_index(self, i):
        """Normalize *i* to a non-negative index, validating type/range."""
        if not isinstance(i, int):
            raise TypeError("expecting int indexes")
        idx = i if i >= 0 else len(self) + i
        if not 0 <= idx < len(self):
            raise IndexError
        return idx

    def __getitem__(self, i):
        i = self._fix_index(i)
        # the last slot is always the synthetic null revision
        if i == len(self) - 1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        # entries past the on-disk region come from the append list
        if i >= self._lgt:
            return self._extra[i - self._lgt]
        start = self._calculate_index(i)
        entry = struct.unpack(indexformatng,
                              self._data[start:start + indexsize])
        if i != 0:
            return entry
        # entry 0 stores no real offset: rebuild its first field with
        # offset forced to zero while keeping the type bits
        fields = list(entry)
        fields[0] = offset_type(0, gettype(fields[0]))
        return tuple(fields)
|
|
|
|
|
|
|
|
class IndexObject(BaseIndexObject):
    """Index over a non-inline revlog index blob.

    Every entry is a fixed ``indexsize`` bytes, so entry offsets are a
    simple multiplication.
    """

    def __init__(self, data):
        # the blob must be an exact whole number of entries
        assert len(data) % indexsize == 0
        self._data = data
        self._lgt = len(data) // indexsize
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: offset is just index * entry size
        return i * indexsize

    def __delitem__(self, i):
        # only truncation via a slice of the form "a:-1" is supported
        valid = isinstance(i, slice) and i.stop == -1 and i.step is None
        if not valid:
            raise ValueError("deleting slices only supports a:-1 with step 1")
        start = self._fix_index(i.start)
        if start >= self._lgt:
            # truncation happens entirely within the appended entries
            self._extra = self._extra[:start - self._lgt]
        else:
            # truncate the on-disk region and drop all appended entries
            self._data = self._data[:start * indexsize]
            self._lgt = start
            self._extra = []
|
|
|
|
|
|
|
|
class InlinedIndexObject(BaseIndexObject):
    """Index over an inline revlog, where each index entry is followed
    by its revision data chunk in the same file.

    Entry offsets are irregular, so construction scans the data twice:
    once to count entries, then again to record each entry's offset.
    """

    def __init__(self, data, inline=0):
        self._data = data
        # pass 1: count entries; pass 2: fill self._offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the inline data and return the number of entries.

        When *lgt* is None only count; otherwise also record each
        entry's byte offset in ``self._offsets`` (pre-sized to *lgt*).
        Raises ValueError if the data does not end exactly on an entry
        boundary.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - indexsize:
            # length of the data chunk stored after this entry, read
            # from the first int field (just past the 64-bit field)
            s, = struct.unpack(
                '>i', self._data[off + indexfirst:off + sizeint + indexfirst])
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip the entry itself plus its trailing inline data
            off += indexsize + s
        if off != len(self._data):
            raise ValueError("corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation via a slice of the form "a:-1" is supported
        valid = isinstance(i, slice) and i.stop == -1 and i.step is None
        if not valid:
            raise ValueError("deleting slices only supports a:-1 with step 1")
        start = self._fix_index(i.start)
        if start >= self._lgt:
            # truncation happens entirely within the appended entries
            self._extra = self._extra[:start - self._lgt]
        else:
            # truncate the recorded offsets and drop all appended entries
            self._offsets = self._offsets[:start]
            self._lgt = start
            self._extra = []

    def _calculate_index(self, i):
        # offsets were precomputed by _inline_scan
        return self._offsets[i]
|
|
|
|
|
2011-01-12 22:54:39 +03:00
|
|
|
def parse_index2(data, inline):
    """Build an index object for *data*.

    Returns a (index, cache) pair: for inline revlogs the cache is
    ``(0, data)`` so callers can reuse the raw blob, otherwise None.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    return IndexObject(data), None
|
2009-01-24 02:12:18 +03:00
|
|
|
|
|
|
|
def parse_dirstate(dmap, copymap, st):
    """Decode a serialized dirstate *st* in place into *dmap*/*copymap*.

    Fills dmap with filename -> (state, mode, size, mtime) entries and
    copymap with destination -> source for copied files, then returns
    the two 20-byte parent nodes from the header.
    """
    parents = [st[:20], st[20: 40]]
    # hoist these out of the loop so lookups stay local
    entry_fmt = ">cllll"
    entry_size = struct.calcsize(entry_fmt)
    pos = 40
    end = len(st)

    # the inner loop: fixed-size entry header, then the filename
    while pos < end:
        fstart = pos + entry_size
        e = _unpack(">cllll", st[pos:fstart]) # a literal here is faster
        # e[4] is the length of the filename that follows the header
        pos = fstart + e[4]
        f = st[fstart:pos]
        if '\0' in f:
            # copies are encoded as "dest\0source" in the filename field
            f, c = f.split('\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents
|
2013-01-18 11:46:08 +04:00
|
|
|
|
|
|
|
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize the dirstate.

    *dmap* maps filenames to (state, mode, size, mtime) entries,
    *copymap* maps copy destination to source, *pl* holds the two
    parent nodes, and *now* is the current time truncated to whole
    seconds.  Returns the serialized dirstate as a string.  As a side
    effect, entries whose mtime equals *now* are rewritten in dmap
    with mtime -1 (see the comment in the loop).
    """
    now = int(now)
    buf = stringio()
    out = buf.write  # bind once; the loop calls it twice per entry
    out("".join(pl))
    for fname, ent in dmap.iteritems():
        if ent[0] == 'n' and ent[3] == now:
            # The file was last modified "simultaneously" with this
            # dirstate write (same second, for filesystems with 1s
            # granularity) — common for a few files on 'update'.  The
            # user could still change the file within this second
            # without its size changing, so invalidate the recorded
            # mtime: future 'status' calls will then compare file
            # contents when the size matches, instead of mistakenly
            # treating such files as clean.
            ent = dirstatetuple(ent[0], ent[1], ent[2], -1)
            dmap[fname] = ent

        if fname in copymap:
            # copies are encoded as "dest\0source" in the filename field
            fname = "%s\0%s" % (fname, copymap[fname])
        out(_pack(">cllll", ent[0], ent[1], ent[2], ent[3], len(fname)))
        out(fname)
    return buf.getvalue()
|