mirror of
https://github.com/facebook/sapling.git
synced 2024-10-11 09:17:30 +03:00
9dc21f8d0b
Summary: D13853115 adds `edenscm/` to `sys.path` and code still uses `import mercurial`. That has nasty problems if both `import mercurial` and `import edenscm.mercurial` are used, because Python would think `mercurial.foo` and `edenscm.mercurial.foo` are different modules so code like `try: ... except mercurial.error.Foo: ...`, or `isinstance(x, mercurial.foo.Bar)` would fail to handle the `edenscm.mercurial` version. There are also some module-level states (ex. `extensions._extensions`) that would cause trouble if they have multiple versions in a single process. Change imports to use the `edenscm` so ideally the `mercurial` is no longer imported at all. Add checks in extensions.py to catch unexpected extensions importing modules from the old (wrong) locations when running tests. Reviewed By: phillco Differential Revision: D13868981 fbshipit-source-id: f4e2513766957fd81d85407994f7521a08e4de48
120 lines
3.9 KiB
Python
120 lines
3.9 KiB
Python
# __init__.py - asv benchmark suite
#
# Copyright 2016 Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# "historical portability" policy of contrib/benchmarks:
#
# We have to make this code work correctly with current mercurial stable branch
# and if possible with reasonable cost with early Mercurial versions.

"""ASV (https://asv.readthedocs.io) benchmark suite

Benchmarks are parameterized against reference repositories found in the
directory pointed by the REPOS_DIR environment variable.

Invocation example:

    $ export REPOS_DIR=~/hgperf/repos
    # run suite on given revision
    $ asv --config contrib/asv.conf.json run REV
    # run suite on new changesets found in stable and default branch
    $ asv --config contrib/asv.conf.json run NEW
    # display a comparative result table of benchmark results between two given
    # revisions
    $ asv --config contrib/asv.conf.json compare REV1 REV2
    # compute regression detection and generate ASV static website
    $ asv --config contrib/asv.conf.json publish
    # serve the static website
    $ asv --config contrib/asv.conf.json preview
"""
|
|
|
|
from __future__ import absolute_import
|
|
|
|
import functools
|
|
import os
|
|
import re
|
|
|
|
from edenscm.mercurial import extensions, hg, ui as uimod, util
|
|
|
|
|
|
# Repository root: two directory levels up from this benchmark package
# (contrib/benchmarks/ -> repo root).
basedir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
# Directory holding the reference repositories the benchmarks run against.
# A missing REPOS_DIR raises KeyError on purpose: the suite is unusable
# without it.
reposdir = os.environ["REPOS_DIR"]
# Every immediate subdirectory that looks like a Mercurial repo (has .hg/).
reposnames = [
    name
    for name in os.listdir(reposdir)
    if os.path.isdir(os.path.join(reposdir, name, ".hg"))
]
if not reposnames:
    # BUG FIX: the message previously said $REPO_DIR, but the variable
    # actually consulted above is $REPOS_DIR.
    raise ValueError("No repositories found in $REPOS_DIR")
# Parses contrib/perf.py output lines such as:
#   ! wall 0.000233 comb 0.000000 user 0.000000 sys 0.000000 (best of 5)
# BUG FIX: the decimal points are now escaped (\.) — previously the bare
# "." matched any character, making the pattern looser than intended.
outputre = re.compile(
    r"! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys " r"\d+\.\d+ \(best of \d+\)"
)
|
|
|
|
|
|
def runperfcommand(reponame, command, *args, **kwargs):
    """Run one contrib/perf.py command against *reponame*.

    Loads contrib/perf.py as an extension, invokes the named perf command
    with the given arguments, parses its "! wall ..." output line and
    returns the measured wall time in seconds as a float.

    Raises ValueError when the command output cannot be parsed.
    """
    # Benchmarks must not pick up the user's config; honour ASVHGRCPATH
    # if set, otherwise run with an empty HGRCPATH.
    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
    # "historical portability": ui.load() has been available since d83ca85;
    # fall back to direct instantiation on older versions.
    uicls = uimod.ui
    ui = uicls.load() if util.safehasattr(uicls, "load") else uicls()
    repo = hg.repository(ui, os.path.join(reposdir, reponame))
    perfpath = os.path.join(basedir, "contrib", "perf.py")
    perfext = extensions.load(ui, "perfext", perfpath)
    cmd = getattr(perfext, command)
    # Capture the perf command's output instead of printing it.
    ui.pushbuffer()
    cmd(ui, repo, *args, **kwargs)
    output = ui.popbuffer()
    match = outputre.search(output)
    if match is None:
        raise ValueError("Invalid output {0}".format(output))
    return float(match.group(1))
|
|
|
|
|
|
def perfbench(repos=reposnames, name=None, params=None):
    """Decorator declaring an ASV benchmark backed by contrib/perf.py.

    ASV collects python functions carrying these attributes:

    __name__: must start with track_, time_ or mem_ to be picked up by ASV
    params and param_names: the parameter matrix used to render multiple
        graphs on the same page.
    pretty_name: when set, shown in the web UI instead of __name__
        (useful for revsets). The module name is prepended to the benchmark
        name and shown as the "category" in the web UI.

    Benchmarks are automatically parameterized with the repositories found
    in the REPOS_DIR environment variable.

    `params` is the parameter matrix, a list of tuples
    (param_name, [value0, value1]).

    For example [(x, [a, b]), (y, [c, d])] declares benchmarks for
    (a, c), (a, d), (b, c) and (b, d).
    """
    # The repository axis always comes first in the matrix.
    matrix = [("repo", repos)] + list(params or [])

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            # Hand the benchmark a callable bound to the current repo so it
            # only has to name the perf command and its arguments.
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)

            return func(perf, *args)

        # Expose the matrix in the attribute form ASV expects.
        names, values = zip(*matrix)
        wrapped.param_names = list(names)
        wrapped.params = list(values)
        wrapped.pretty_name = name
        return wrapped

    return decorator
|