diff --git a/tests/integration/integration_runner.py b/tests/integration/integration_runner.py
index 4d531d35ed..8771d3064a 100644
--- a/tests/integration/integration_runner.py
+++ b/tests/integration/integration_runner.py
@@ -12,6 +12,7 @@ import multiprocessing
 import os
 import shutil
 import sys
+import subprocess
 import tempfile
 import xml.etree.ElementTree as ET

@@ -29,6 +30,7 @@ MONONOKE_BONSAI_VERIFY_TARGET = '//scm/mononoke:bonsai_verify'
 MONONOKE_APISERVER_TARGET = '//scm/mononoke/apiserver:apiserver'
 DUMMYSSH_TARGET = '//scm/mononoke/tests/integration:dummyssh'
 BINARY_HG_TARGET = '//scm/hg:hg'
+BINARY_HGPYTHON_TARGET = '//scm/hg:hgpython'
 MONONOKE_HGCLI_TARGET = '//scm/mononoke/hgcli:hgcli'
 MONONOKE_SERVER_TARGET = '//scm/mononoke:mononoke'
 FACEBOOK_HOOKS_TARGET = '//scm/mononoke/facebook/hooks:hooks'
@@ -74,17 +76,16 @@ def run(
     ctx, tests, dry_run, interactive, output, verbose, debug,
     simple_test_selector, keep_tmpdir
 ):
-    runner = hg_run_tests.TestRunner()
-
     testdir = parutil.get_dir_path(TESTDIR_PATH)
-    # Also add to the system path because the Mercurial run-tests.py does an
-    # absolute import of killdaemons etc.
-    sys.path.insert(0, os.path.join(testdir, 'third_party'))
-
-    # Use hg.real to avoid going through the wrapper and incurring slowdown
-    # from subprocesses.
-    # XXX is this the right thing to do?
-    args = ['--with-hg', get_hg_binary()]
+    run_tests_dir = os.path.join(
+        os.path.join(testdir, 'third_party'), 'hg_run_tests.py'
+    )
+    args = [
+        get_hg_python_binary(),
+        run_tests_dir,
+        '--maxdifflines=1000',
+        '--with-hg', get_hg_binary()
+    ]
     if dry_run:
         args.append('--list-tests')
     if interactive:
@@ -139,7 +140,15 @@ run(
     # particular, add_to_environ depends on getcwd always being inside
     # fbcode
     os.chdir(testdir)
-    ret = runner.run(args)
+    # Also add to the system path because the Mercurial run-tests.py does an
+    # absolute import of killdaemons etc.
+    env = os.environ.copy()
+    env["HGPYTHONPATH"] = os.path.join(testdir, 'third_party')
+    p = subprocess.Popen(
+        args, env=env, stderr=sys.stderr, stdout=sys.stdout
+    )
+    p.communicate("")
+    ret = p.returncode

     if dry_run:
         # The output must go to stdout.
Set simple_test_selector to make @@ -173,5 +182,10 @@ def get_hg_binary(): ) +def get_hg_python_binary(): + return pathutils.get_build_rule_output_path( + BINARY_HGPYTHON_TARGET, pathutils.BuildRuleTypes.PYTHON_BINARY + ) + if __name__ == '__main__': run() diff --git a/tests/integration/test-apiserver.t b/tests/integration/test-apiserver.t index ecd5f5ec47..2c7a98ec35 100644 --- a/tests/integration/test-apiserver.t +++ b/tests/integration/test-apiserver.t @@ -9,8 +9,9 @@ setup testing repo for mononoke $ hg init repo-hg $ cd repo-hg $ setup_hg_server - $ TEST_CONTENT=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 1000 | head -n 1) - $ echo $TEST_CONTENT >> test + >>> import os, textwrap, base64 + >>> open('test', 'w').write(textwrap.fill(base64.b64encode(os.urandom(10000))) + "\n") + $ TEST_CONTENT=$(cat test) $ SHA=$(sha256sum test | awk '{print $1;}') $ ln -s test link $ mkdir -p folder/subfolder @@ -51,8 +52,8 @@ starts api server $ APISERVER_PORT=$(get_free_socket) $ apiserver -H "[::1]" -p $APISERVER_PORT $ wait_for_apiserver - $ alias sslcurl="sslcurl --silent" - $ alias s_client="openssl s_client -connect $APIHOST -cert \"$TESTDIR/testcert.crt\" -key \"$TESTDIR/testcert.key\" -ign_eof" + $ function sslcurl() { curl --silent --cert "$TESTDIR/testcert.crt" --cacert "$TESTDIR/testcert.crt" --key "$TESTDIR/testcert.key" "$@"; } + $ function s_client() { openssl s_client -connect $APIHOST -cert "$TESTDIR/testcert.crt" -key "$TESTDIR/testcert.key" -ign_eof "$@"; } ping test $ sslcurl -i $APISERVER/health_check | grep -iv "date" @@ -177,24 +178,24 @@ test reachability on url encoded bookmarks false (no-eol) test folder list - $ sslcurl $APISERVER/repo/list/$COMMIT2/folder | tee output | python -mjson.tool + $ sslcurl $APISERVER/repo/list/$COMMIT2/folder | tee output | jq . [ - { - "name": "subfolder", - "type": "tree", - "hash": "732eacf2be3265bd6bc4d2c205434b280f446cbf" - } + { + "name": "subfolder", + "type": "tree", + "hash": "732eacf2be3265bd6bc4d2c205434b280f446cbf" + } ] $ TREEHASH=$(cat output | jq -r ".[0].hash") - $ sslcurl $APISERVER/repo/list/$COMMIT2/folder/subfolder | python -mjson.tool + $ sslcurl $APISERVER/repo/list/$COMMIT2/folder/subfolder | jq . [ - { - "name": ".keep", - "type": "file", - "hash": "2c186c8c5bc0df5af5b951afe407d803f9e6b8c9" - } + { + "name": ".keep", + "type": "file", + "hash": "2c186c8c5bc0df5af5b951afe407d803f9e6b8c9" + } ] test nonexist fold @@ -224,15 +225,15 @@ test get blob by hash 404 test get tree - $ sslcurl $APISERVER/repo/tree/$TREEHASH | python -mjson.tool + $ sslcurl $APISERVER/repo/tree/$TREEHASH | jq . 
[ - { - "name": ".keep", - "type": "file", - "hash": "2c186c8c5bc0df5af5b951afe407d803f9e6b8c9", - "size": 6, - "content_sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" - } + { + "name": ".keep", + "type": "file", + "hash": "2c186c8c5bc0df5af5b951afe407d803f9e6b8c9", + "size": 6, + "content_sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" + } ] $ sslcurl -w "\n%{http_code}" $APISERVER/repo/tree/$BLOBHASH | extract_json_error > output diff --git a/tests/integration/test-blobimport-inline.t b/tests/integration/test-blobimport-inline.t index d34989a07f..af20f06618 100644 --- a/tests/integration/test-blobimport-inline.t +++ b/tests/integration/test-blobimport-inline.t @@ -23,7 +23,7 @@ $ for (( i=0; i < $lines_cnt; i++ )) > do > LINE_LENGTH=$(random_int $max_line_length) - > echo $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $LINE_LENGTH | head -n 1) >> file + > echo $(head -c 10000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $LINE_LENGTH 2>/dev/null | head -n 1) >> file > done $ hg ci -Aqm "commit"$c @@ -35,7 +35,7 @@ > do > LINE_LENGTH=$(random_int $max_line_length) > LINE_NUMBER=$(random_int $lines_cnt) - > CONTENT=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $LINE_LENGTH | head -n 1) + > CONTENT=$(head -c 10000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $LINE_LENGTH 2>/dev/null | head -n 1) > sed -i "$LINE_NUMBER""s/.*/$CONTENT/" file > done > hg ci -Aqm "commit"$c diff --git a/tests/integration/test-push-protocol.t b/tests/integration/test-push-protocol.t index 3eed94e08d..e53dca9be6 100644 --- a/tests/integration/test-push-protocol.t +++ b/tests/integration/test-push-protocol.t @@ -28,7 +28,7 @@ verify content user: test date: Thu Jan 01 00:00:00 1970 +0000 summary: a - (re) + $ cd $TESTTMP $ blobimport repo-hg/.hg repo @@ -342,7 +342,7 @@ command, but the output of this command is long # Node ID bb0985934a0f8a493887892173b68940ceb40b4f # Parent 0e7ec5675652a04069cbf976a42e45b740f3243c b - (re) + diff -r 0e7ec5675652 -r bb0985934a0f a --- a/a Thu Jan 01 00:00:00 1970 +0000 +++ b/a Thu Jan 01 00:00:00 1970 +0000 @@ -354,8 +354,8 @@ command, but the output of this command is long +++ b/b_dir/b Thu Jan 01 00:00:00 1970 +0000 @@ -0,0 +1,1 @@ +b file content - (re) - (re) + + fbd6b221382efa5d5bc53130cdaccf06e04c97d3 comparison SUCCESS # HG changeset patch # User test @@ -364,7 +364,7 @@ command, but the output of this command is long # Node ID fbd6b221382efa5d5bc53130cdaccf06e04c97d3 # Parent bb0985934a0f8a493887892173b68940ceb40b4f d - (re) + diff -r bb0985934a0f -r fbd6b221382e b_dir/b --- a/b_dir/b Thu Jan 01 00:00:00 1970 +0000 +++ b/b_dir/b Thu Jan 01 00:00:00 1970 +0000 @@ -376,8 +376,8 @@ command, but the output of this command is long +++ b/d_dir/d Thu Jan 01 00:00:00 1970 +0000 @@ -0,0 +1,1 @@ +d file content - (re) - (re) + + 30da5bf63484d2d6572edafb3ea211c17cd8c005 comparison SUCCESS # HG changeset patch # User test @@ -386,7 +386,7 @@ command, but the output of this command is long # Node ID 30da5bf63484d2d6572edafb3ea211c17cd8c005 # Parent fbd6b221382efa5d5bc53130cdaccf06e04c97d3 e - (re) + diff -r fbd6b221382e -r 30da5bf63484 a --- a/a Thu Jan 01 00:00:00 1970 +0000 +++ b/a Thu Jan 01 00:00:00 1970 +0000 @@ -399,8 +399,8 @@ command, but the output of this command is long @@ -1,1 +1,1 @@ -updated b file content +b file content - (re) - (re) + + 8315ea53ef41d34f56232c88669cc80225b6e66d comparison SUCCESS # HG changeset patch # User test @@ -409,7 +409,7 @@ command, but the output of this command is long # Node ID 8315ea53ef41d34f56232c88669cc80225b6e66d # Parent 
30da5bf63484d2d6572edafb3ea211c17cd8c005 f - (re) + diff -r 30da5bf63484 -r 8315ea53ef41 a --- a/a Thu Jan 01 00:00:00 1970 +0000 +++ b/a Thu Jan 01 00:00:00 1970 +0000 @@ -433,8 +433,8 @@ command, but the output of this command is long @@ -1,1 +1,1 @@ -d file content +b file content - (re) - (re) + + 634de738bb0ff135e32d48567718fb9d7dedf575 comparison SUCCESS # HG changeset patch # User test @@ -443,7 +443,7 @@ command, but the output of this command is long # Node ID 634de738bb0ff135e32d48567718fb9d7dedf575 # Parent 8315ea53ef41d34f56232c88669cc80225b6e66d g - (re) + diff -r 8315ea53ef41 -r 634de738bb0f a --- a/a Thu Jan 01 00:00:00 1970 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -480,8 +480,8 @@ command, but the output of this command is long +++ b/e_dir/e Thu Jan 01 00:00:00 1970 +0000 @@ -0,0 +1,1 @@ +a file content - (re) - (re) + + f40c09205504d8410f8c8679bf7a85fef25f9337 comparison SUCCESS # HG changeset patch # User test @@ -490,7 +490,7 @@ command, but the output of this command is long # Node ID f40c09205504d8410f8c8679bf7a85fef25f9337 # Parent bb0985934a0f8a493887892173b68940ceb40b4f c - (re) + diff -r bb0985934a0f -r f40c09205504 b_dir/b --- a/b_dir/b Thu Jan 01 00:00:00 1970 +0000 +++ b/b_dir/b Thu Jan 01 00:00:00 1970 +0000 @@ -502,5 +502,5 @@ command, but the output of this command is long +++ b/c_dir/c Thu Jan 01 00:00:00 1970 +0000 @@ -0,0 +1,1 @@ +c file content - (re) - (re) + + diff --git a/tests/integration/test-server.t b/tests/integration/test-server.t index 162d44caac..7e252e7ee6 100644 --- a/tests/integration/test-server.t +++ b/tests/integration/test-server.t @@ -19,7 +19,7 @@ setup data start mononoke $ mononoke $ wait_for_mononoke $TESTTMP/repo - $ alias s_client="openssl s_client -connect localhost:$MONONOKE_SOCKET -cert \"$TESTDIR/testcert.crt\" -key \"$TESTDIR/testcert.key\" -ign_eof" + $ function s_client () { openssl s_client -connect localhost:$MONONOKE_SOCKET -cert "$TESTDIR/testcert.crt" -key "$TESTDIR/testcert.key" -ign_eof "$@"; } test TLS Session/Ticket resumption when using client certs $ TMPFILE=$(mktemp) diff --git a/tests/integration/third_party/heredoctest.py b/tests/integration/third_party/heredoctest.py new file mode 100644 index 0000000000..e9e0e0af80 --- /dev/null +++ b/tests/integration/third_party/heredoctest.py @@ -0,0 +1,21 @@ +from __future__ import absolute_import, print_function + +import sys + + +globalvars = {} +lines = sys.stdin.readlines() +while lines: + l = lines.pop(0) + if l.startswith("SALT"): + print(l[:-1]) + elif l.startswith(">>> "): + snippet = l[4:] + while lines and lines[0].startswith("... "): + l = lines.pop(0) + snippet += l[4:] + c = compile(snippet, "", "single") + try: + exec(c, globalvars) + except Exception as inst: + print(repr(inst)) diff --git a/tests/integration/third_party/hg_run_tests.py b/tests/integration/third_party/hg_run_tests.py index 5b44216d28..67b5b74b3d 100755 --- a/tests/integration/third_party/hg_run_tests.py +++ b/tests/integration/third_party/hg_run_tests.py @@ -8,9 +8,9 @@ # GNU General Public License version 2 or any later version. 
# Modifying this script is tricky because it has many modes: -# - serial (default) vs parallel (-jN, N > 1) +# - serial vs parallel (default) (-jN, N > 1) # - no coverage (default) vs coverage (-c, -C, -s) -# - temp install (default) vs specific hg script (--with-hg, --local) +# - temp install vs specific hg script (--with-hg, --local (default)) # - tests are a mix of shell scripts and Python scripts # # If you change this script, it is recommended that you ensure you @@ -18,19 +18,19 @@ # sample of test scripts. For example: # # 1) serial, no coverage, temp install: -# ./run-tests.py test-s* +# ./run-tests.py -j1 --build test-s* # 2) serial, no coverage, local hg: -# ./run-tests.py --local test-s* +# ./run-tests.py -j1 --local test-s* # 3) serial, coverage, temp install: -# ./run-tests.py -c test-s* +# ./run-tests.py -j1 -b -c test-s* # 4) serial, coverage, local hg: -# ./run-tests.py -c --local test-s* # unsupported +# ./run-tests.py -j1 -c --local test-s* # unsupported # 5) parallel, no coverage, temp install: -# ./run-tests.py -j2 test-s* +# ./run-tests.py -j2 -b test-s* # 6) parallel, no coverage, local hg: # ./run-tests.py -j2 --local test-s* # 7) parallel, coverage, temp install: -# ./run-tests.py -j2 -c test-s* # currently broken +# ./run-tests.py -j2 -c -b test-s* # currently broken # 8) parallel, coverage, local install: # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken) # 9) parallel, custom tmp dir: @@ -45,11 +45,14 @@ from __future__ import absolute_import, print_function +import argparse +import collections import difflib import distutils.version as version import errno +import hashlib import json -import optparse +import multiprocessing import os import random import re @@ -63,36 +66,113 @@ import tempfile import threading import time import unittest +import uuid import xml.dom.minidom as minidom + try: import Queue as queue except ImportError: import queue -if os.environ.get('RTUNICODEPEDANTRY', False): +try: + import shlex + + shellquote = shlex.quote +except (ImportError, AttributeError): + import pipes + + shellquote = pipes.quote + +try: + from mercurial.rust.threading import Condition as RLock +except ImportError: + RLock = threading.RLock + +if os.environ.get("RTUNICODEPEDANTRY", False): try: reload(sys) sys.setdefaultencoding("undefined") except NameError: pass -osenvironb = getattr(os, 'environb', os.environ) +origenviron = os.environ.copy() +osenvironb = getattr(os, "environb", os.environ) processlock = threading.Lock() +pygmentspresent = False +# ANSI color is unsupported prior to Windows 10 +if os.name != "nt": + try: # is pygments installed + import pygments + import pygments.lexers as lexers + import pygments.lexer as lexer + import pygments.formatters as formatters + import pygments.token as token + import pygments.style as style + + pygmentspresent = True + difflexer = lexers.DiffLexer() + terminal256formatter = formatters.Terminal256Formatter() + except ImportError: + pass + +if pygmentspresent: + + class TestRunnerStyle(style.Style): + default_style = "" + skipped = token.string_to_tokentype("Token.Generic.Skipped") + failed = token.string_to_tokentype("Token.Generic.Failed") + skippedname = token.string_to_tokentype("Token.Generic.SName") + failedname = token.string_to_tokentype("Token.Generic.FName") + styles = { + skipped: "#e5e5e5", + skippedname: "#00ffff", + failed: "#7f0000", + failedname: "#ff0000", + } + + class TestRunnerLexer(lexer.RegexLexer): + tokens = { + "root": [ + (r"^Skipped", token.Generic.Skipped, "skipped"), + 
(r"^Failed ", token.Generic.Failed, "failed"), + (r"^ERROR: ", token.Generic.Failed, "failed"), + ], + "skipped": [ + (r"[\w-]+\.(t|py)", token.Generic.SName), + (r":.*", token.Generic.Skipped), + ], + "failed": [ + (r"[\w-]+\.(t|py)", token.Generic.FName), + (r"(:| ).*", token.Generic.Failed), + ], + } + + runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle) + runnerlexer = TestRunnerLexer() + if sys.version_info > (3, 5, 0): PYTHON3 = True - xrange = range # we use xrange in one place, and we'd rather not use range + xrange = range # we use xrange in one place, and we'd rather not use range + def _bytespath(p): - return p.encode('utf-8') + if p is None: + return p + return p.encode("utf-8") def _strpath(p): - return p.decode('utf-8') + if p is None: + return p + return p.decode("utf-8") + elif sys.version_info >= (3, 0, 0): - print('%s is only supported on Python 3.5+ and 2.7, not %s' % - (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))) - sys.exit(70) # EX_SOFTWARE from `man 3 sysexit` + print( + "%s is only supported on Python 3.5+ and 2.7, not %s" + % (sys.argv[0], ".".join(str(v) for v in sys.version_info[:3])) + ) + sys.exit(70) # EX_SOFTWARE from `man 3 sysexit` else: PYTHON3 = False @@ -120,7 +200,7 @@ def checksocketfamily(name, port=20058): return False try: s = socket.socket(family, socket.SOCK_STREAM) - s.bind(('localhost', port)) + s.bind(("localhost", port)) s.close() return True except socket.error as exc: @@ -133,9 +213,11 @@ def checksocketfamily(name, port=20058): else: return False + # useipv6 will be set by parseargs useipv6 = None + def checkportisavailable(port): """return true if a port seems free to bind on localhost""" if useipv6: @@ -144,22 +226,41 @@ def checkportisavailable(port): family = socket.AF_INET try: s = socket.socket(family, socket.SOCK_STREAM) - s.bind(('localhost', port)) + s.bind(("localhost", port)) s.close() return True except socket.error as exc: - if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL, - errno.EPROTONOSUPPORT): + if exc.errno not in ( + errno.EADDRINUSE, + errno.EADDRNOTAVAIL, + errno.EPROTONOSUPPORT, + ): raise return False -closefds = os.name == 'posix' + +closefds = os.name == "posix" + +if os.name == "nt": + preexec = None +else: + preexec = lambda: os.setpgid(0, 0) + + def Popen4(cmd, wd, timeout, env=None): processlock.acquire() - p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env, - close_fds=closefds, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + cmd, + shell=True, + bufsize=-1, + cwd=wd, + env=env, + close_fds=closefds, + preexec_fn=preexec, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) processlock.release() p.fromchild = p.stdout @@ -168,33 +269,40 @@ def Popen4(cmd, wd, timeout, env=None): p.timeout = False if timeout: + track(p) + def t(): start = time.time() while time.time() - start < timeout and p.returncode is None: - time.sleep(.1) + time.sleep(0.1) p.timeout = True if p.returncode is None: terminate(p) + threading.Thread(target=t).start() return p -PYTHON = _bytespath(sys.executable.replace('\\', '/')) -IMPL_PATH = b'PYTHONPATH' -if 'java' in sys.platform: - IMPL_PATH = b'JYTHONPATH' + +PYTHON = _bytespath(sys.executable.replace("\\", "/")) +IMPL_PATH = b"PYTHONPATH" +if "java" in sys.platform: + IMPL_PATH = b"JYTHONPATH" defaults = { - 'jobs': ('HGTEST_JOBS', 1), - 'timeout': ('HGTEST_TIMEOUT', 180), - 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500), - 'port': 
('HGTEST_PORT', 20059), - 'shell': ('HGTEST_SHELL', 'sh'), + "jobs": ("HGTEST_JOBS", multiprocessing.cpu_count()), + "timeout": ("HGTEST_TIMEOUT", 360), + "slowtimeout": ("HGTEST_SLOWTIMEOUT", 1000), + "port": ("HGTEST_PORT", 20059), + "shell": ("HGTEST_SHELL", "bash"), + "maxdifflines": ("HGTEST_MAXDIFFLINES", 30), } + def canonpath(path): return os.path.realpath(os.path.expanduser(path)) + def parselistfiles(files, listtype, warn=True): entries = dict() for filename in files: @@ -209,13 +317,14 @@ def parselistfiles(files, listtype, warn=True): continue for line in f.readlines(): - line = line.split(b'#', 1)[0].strip() + line = line.split(b"#", 1)[0].strip() if line: entries[line] = filename f.close() return entries + def parsettestcases(path): """read a .t test file, return a set of test case names @@ -223,124 +332,270 @@ def parsettestcases(path): """ cases = set() try: - with open(path, 'rb') as f: + with open(path, "rb") as f: for l in f: - if l.startswith(b'#testcases '): + if l.startswith(b"#testcases "): cases.update(l[11:].split()) except IOError as ex: if ex.errno != errno.ENOENT: raise return cases + def getparser(): """Obtain the OptionParser used by the CLI.""" - parser = optparse.OptionParser("%prog [options] [tests]") + parser = argparse.ArgumentParser(usage="%(prog)s [options] [tests]") - # keep these sorted - parser.add_option("--blacklist", action="append", - help="skip tests listed in the specified blacklist file") - parser.add_option("--whitelist", action="append", - help="always run tests listed in the specified whitelist file") - parser.add_option("--changed", type="string", - help="run tests that are changed in parent rev or working directory") - parser.add_option("-C", "--annotate", action="store_true", - help="output files annotated with coverage") - parser.add_option("-c", "--cover", action="store_true", - help="print a test coverage report") - parser.add_option("-d", "--debug", action="store_true", + selection = parser.add_argument_group("Test Selection") + selection.add_argument( + "--allow-slow-tests", action="store_true", help="allow extremely slow tests" + ) + selection.add_argument( + "--blacklist", + action="append", + help="skip tests listed in the specified blacklist file", + ) + selection.add_argument( + "--changed", + help="run tests that are changed in parent rev or working directory", + ) + selection.add_argument("-k", "--keywords", help="run tests matching keywords") + selection.add_argument( + "-r", "--retest", action="store_true", help="retest failed tests" + ) + selection.add_argument( + "--test-list", action="append", help="read tests to run from the specified file" + ) + selection.add_argument( + "--whitelist", + action="append", + help="always run tests listed in the specified whitelist file", + ) + selection.add_argument("tests", metavar="TESTS", nargs="*", help="Tests to run") + + harness = parser.add_argument_group("Test Harness Behavior") + harness.add_argument( + "--bisect-repo", + metavar="bisect_repo", + help=("Path of a repo to bisect. 
Use together with --known-good-rev"), + ) + harness.add_argument( + "-d", + "--debug", + action="store_true", help="debug mode: write output of test scripts to console" - " rather than capturing and diffing it (disables timeout)") - parser.add_option("-f", "--first", action="store_true", - help="exit on the first test failure") - parser.add_option("-H", "--htmlcov", action="store_true", - help="create an HTML report of the coverage of the files") - parser.add_option("-i", "--interactive", action="store_true", - help="prompt to accept changed output") - parser.add_option("-j", "--jobs", type="int", + " rather than capturing and diffing it (disables timeout)", + ) + harness.add_argument( + "-f", "--first", action="store_true", help="exit on the first test failure" + ) + harness.add_argument( + "-i", + "--interactive", + action="store_true", + help="prompt to accept changed output", + ) + harness.add_argument( + "-j", + "--jobs", + type=int, help="number of jobs to run in parallel" - " (default: $%s or %d)" % defaults['jobs']) - parser.add_option("--keep-tmpdir", action="store_true", - help="keep temporary directory after running tests") - parser.add_option("-k", "--keywords", - help="run tests matching keywords") - parser.add_option("--list-tests", action="store_true", - help="list tests instead of running them") - parser.add_option("-l", "--local", action="store_true", - help="shortcut for --with-hg=/../hg, " - "and --with-chg=/../contrib/chg/chg if --chg is set") - parser.add_option("--loop", action="store_true", - help="loop tests repeatedly") - parser.add_option("--runs-per-test", type="int", dest="runs_per_test", - help="run each test N times (default=1)", default=1) - parser.add_option("-n", "--nodiff", action="store_true", - help="skip showing test changes") - parser.add_option("--outputdir", type="string", - help="directory to write error logs to (default=test directory)") - parser.add_option("-p", "--port", type="int", + " (default: $%s or %d)" % defaults["jobs"], + ) + harness.add_argument( + "--keep-tmpdir", + action="store_true", + help="keep temporary directory after running tests", + ) + harness.add_argument( + "--known-good-rev", + metavar="known_good_rev", + help=( + "Automatically bisect any failures using this " + "revision as a known-good revision." 
+ ), + ) + harness.add_argument( + "--list-tests", action="store_true", help="list tests instead of running them" + ) + harness.add_argument("--loop", action="store_true", help="loop tests repeatedly") + harness.add_argument( + "--random", action="store_true", help="run tests in random order" + ) + harness.add_argument( + "-p", + "--port", + type=int, help="port on which servers should listen" - " (default: $%s or %d)" % defaults['port']) - parser.add_option("--compiler", type="string", - help="compiler to build with") - parser.add_option("--pure", action="store_true", - help="use pure Python code instead of C extensions") - parser.add_option("-R", "--restart", action="store_true", - help="restart at last error") - parser.add_option("-r", "--retest", action="store_true", - help="retest failed tests") - parser.add_option("-S", "--noskips", action="store_true", - help="don't report skip tests verbosely") - parser.add_option("--shell", type="string", - help="shell to use (default: $%s or %s)" % defaults['shell']) - parser.add_option("-t", "--timeout", type="int", - help="kill errant tests after TIMEOUT seconds" - " (default: $%s or %d)" % defaults['timeout']) - parser.add_option("--slowtimeout", type="int", + " (default: $%s or %d)" % defaults["port"], + ) + harness.add_argument( + "--profile-runner", action="store_true", help="run statprof on run-tests" + ) + harness.add_argument( + "-R", "--restart", action="store_true", help="restart at last error" + ) + harness.add_argument( + "--runs-per-test", + type=int, + dest="runs_per_test", + help="run each test N times (default=1)", + default=1, + ) + harness.add_argument( + "--shell", help="shell to use (default: $%s or %s)" % defaults["shell"] + ) + harness.add_argument( + "--showchannels", action="store_true", help="show scheduling channels" + ) + harness.add_argument( + "--noprogress", action="store_true", help="do not show progress" + ) + harness.add_argument( + "--slowtimeout", + type=int, help="kill errant slow tests after SLOWTIMEOUT seconds" - " (default: $%s or %d)" % defaults['slowtimeout']) - parser.add_option("--time", action="store_true", - help="time how long each test takes") - parser.add_option("--json", action="store_true", - help="store test result data in 'report.json' file") - parser.add_option("--tmpdir", type="string", - help="run tests in the given temporary directory" - " (implies --keep-tmpdir)") - parser.add_option("-v", "--verbose", action="store_true", - help="output verbose messages") - parser.add_option("--xunit", type="string", - help="record xunit results at specified path") - parser.add_option("--view", type="string", - help="external diff viewer") - parser.add_option("--with-hg", type="string", + " (default: $%s or %d)" % defaults["slowtimeout"], + ) + harness.add_argument( + "-t", + "--timeout", + type=int, + help="kill errant tests after TIMEOUT seconds" + " (default: $%s or %d)" % defaults["timeout"], + ) + harness.add_argument( + "--tmpdir", + help="run tests in the given temporary directory (implies --keep-tmpdir)", + ) + harness.add_argument( + "-v", "--verbose", action="store_true", help="output verbose messages" + ) + + hgconf = parser.add_argument_group("Mercurial Configuration") + hgconf.add_argument( + "--chg", action="store_true", help="install and use chg wrapper in place of hg" + ) + hgconf.add_argument( + "--watchman", action="store_true", help="shortcut for --with-watchman=watchman" + ) + hgconf.add_argument("--compiler", help="compiler to build with") + hgconf.add_argument( + 
"--extra-config-opt", + action="append", + default=[], + help="set the given config opt in the test hgrc", + ) + hgconf.add_argument( + "--extra-rcpath", + action="append", + default=[], + help="load the given config file or directory in the test hgrc", + ) + hgconf.add_argument( + "-l", + "--local", + action="store_true", + help="shortcut for --with-hg=/../hg, " + "and --with-chg=/../contrib/chg/chg if --chg is set", + ) + hgconf.add_argument( + "-b", + "--rebuild", + dest="local", + action="store_false", + help="build and install to a temporary location before running tests, " + "the reverse of --local", + ) + hgconf.set_defaults(local=True) + hgconf.add_argument( + "--ipv6", + action="store_true", + help="prefer IPv6 to IPv4 for network related tests", + ) + hgconf.add_argument( + "--pure", + action="store_true", + help="use pure Python code instead of C extensions", + ) + hgconf.add_argument( + "-3", + "--py3k-warnings", + action="store_true", + help="enable Py3k warnings on Python 2.7+", + ) + hgconf.add_argument( + "--with-chg", metavar="CHG", help="use specified chg wrapper in place of hg" + ) + hgconf.add_argument( + "--with-hg", metavar="HG", - help="test using specified hg script rather than a " - "temporary installation") - parser.add_option("--chg", action="store_true", - help="install and use chg wrapper in place of hg") - parser.add_option("--with-chg", metavar="CHG", - help="use specified chg wrapper in place of hg") - parser.add_option("--ipv6", action="store_true", - help="prefer IPv6 to IPv4 for network related tests") - parser.add_option("-3", "--py3k-warnings", action="store_true", - help="enable Py3k warnings on Python 2.7+") + help="test using specified hg script rather than a temporary installation", + ) + hgconf.add_argument( + "--with-watchman", metavar="WATCHMAN", help="test using specified watchman" + ) # This option should be deleted once test-check-py3-compat.t and other # Python 3 tests run with Python 3. 
- parser.add_option("--with-python3", metavar="PYTHON3", - help="Python 3 interpreter (if running under Python 2)" - " (TEMPORARY)") - parser.add_option('--extra-config-opt', action="append", - help='set the given config opt in the test hgrc') - parser.add_option('--random', action="store_true", - help='run tests in random order') - parser.add_option('--profile-runner', action='store_true', - help='run statprof on run-tests') - parser.add_option('--allow-slow-tests', action='store_true', - help='allow extremely slow tests') - parser.add_option('--showchannels', action='store_true', - help='show scheduling channels') - parser.add_option('--known-good-rev', type="string", - metavar="known_good_rev", - help=("Automatically bisect any failures using this " - "revision as a known-good revision.")) + hgconf.add_argument( + "--with-python3", + metavar="PYTHON3", + help="Python 3 interpreter (if running under Python 2) (TEMPORARY)", + ) + + reporting = parser.add_argument_group("Results Reporting") + reporting.add_argument( + "-C", + "--annotate", + action="store_true", + help="output files annotated with coverage", + ) + reporting.add_argument( + "--color", + choices=["always", "auto", "never"], + default=os.environ.get("HGRUNTESTSCOLOR", "auto"), + help="colorisation: always|auto|never (default: auto)", + ) + reporting.add_argument( + "-c", "--cover", action="store_true", help="print a test coverage report" + ) + reporting.add_argument( + "--exceptions", + action="store_true", + help="log all exceptions and generate an exception report", + ) + reporting.add_argument( + "-H", + "--htmlcov", + action="store_true", + help="create an HTML report of the coverage of the files", + ) + reporting.add_argument( + "--json", + action="store_true", + help="store test result data in 'report.json' file", + ) + reporting.add_argument( + "--outputdir", help="directory to write error logs to (default=test directory)" + ) + reporting.add_argument( + "-n", "--nodiff", action="store_true", help="skip showing test changes" + ) + reporting.add_argument( + "--maxdifflines", + type=int, + help="maximum lines of diff output" + " (default: $%s or %d)" % defaults["maxdifflines"], + ) + reporting.add_argument( + "-S", "--noskips", action="store_true", help="don't report skip tests verbosely" + ) + + reporting.add_argument( + "--time", action="store_true", help="time how long each test takes" + ) + reporting.add_argument("--view", help="external diff viewer") + reporting.add_argument("--xunit", help="record xunit results at specified path") for option, (envvar, default) in defaults.items(): defaults[option] = type(default)(os.environ.get(envvar, default)) @@ -348,129 +603,152 @@ def getparser(): return parser + def parseargs(args, parser): """Parse arguments with our OptionParser and validate results.""" - (options, args) = parser.parse_args(args) + options = parser.parse_args(args) # jython is always pure - if 'java' in sys.platform or '__pypy__' in sys.modules: + if "java" in sys.platform or "__pypy__" in sys.modules: options.pure = True if options.with_hg: options.with_hg = canonpath(_bytespath(options.with_hg)) - if not (os.path.isfile(options.with_hg) and - os.access(options.with_hg, os.X_OK)): - parser.error('--with-hg must specify an executable hg script') + if not ( + os.path.isfile(options.with_hg) and os.access(options.with_hg, os.X_OK) + ): + parser.error("--with-hg must specify an executable hg script") if options.local: testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0]))) reporootdir = 
os.path.dirname(testdir) - pathandattrs = [(b'hg', 'with_hg')] + pathandattrs = [(b"hg", "with_hg")] if options.chg: - pathandattrs.append((b'contrib/chg/chg', 'with_chg')) + pathandattrs.append((b"contrib/chg/chg", "with_chg")) for relpath, attr in pathandattrs: + if getattr(options, attr, None): + continue binpath = os.path.join(reporootdir, relpath) - if os.name != 'nt' and not os.access(binpath, os.X_OK): - parser.error('--local specified, but %r not found or ' - 'not executable' % binpath) + if os.name != "nt" and not os.access(binpath, os.X_OK): + parser.error( + "--local specified, but %r not found or not executable" % binpath + ) setattr(options, attr, binpath) - if (options.chg or options.with_chg) and os.name == 'nt': - parser.error('chg does not work on %s' % os.name) + if (options.chg or options.with_chg) and os.name == "nt": + parser.error("chg does not work on %s" % os.name) if options.with_chg: options.chg = False # no installation to temporary location options.with_chg = canonpath(_bytespath(options.with_chg)) - if not (os.path.isfile(options.with_chg) and - os.access(options.with_chg, os.X_OK)): - parser.error('--with-chg must specify a chg executable') + if not ( + os.path.isfile(options.with_chg) and os.access(options.with_chg, os.X_OK) + ): + parser.error("--with-chg must specify a chg executable") if options.chg and options.with_hg: # chg shares installation location with hg - parser.error('--chg does not work when --with-hg is specified ' - '(use --with-chg instead)') + parser.error( + "--chg does not work when --with-hg is specified " + "(use --with-chg instead)" + ) + if options.watchman and options.with_watchman: + parser.error( + "--watchman does not work when --with-watchman is specified " + "(use --with-watchman instead)" + ) + + if options.color == "always" and not pygmentspresent: + sys.stderr.write( + "warning: --color=always ignored because pygments is not installed\n" + ) + + if options.bisect_repo and not options.known_good_rev: + parser.error("--bisect-repo cannot be used without --known-good-rev") global useipv6 if options.ipv6: - useipv6 = checksocketfamily('AF_INET6') + useipv6 = checksocketfamily("AF_INET6") else: # only use IPv6 if IPv4 is unavailable and IPv6 is available - useipv6 = ((not checksocketfamily('AF_INET')) - and checksocketfamily('AF_INET6')) + useipv6 = (not checksocketfamily("AF_INET")) and checksocketfamily("AF_INET6") options.anycoverage = options.cover or options.annotate or options.htmlcov if options.anycoverage: try: import coverage + covver = version.StrictVersion(coverage.__version__).version if covver < (3, 3): - parser.error('coverage options require coverage 3.3 or later') + parser.error("coverage options require coverage 3.3 or later") except ImportError: - parser.error('coverage options now require the coverage package') + parser.error("coverage options now require the coverage package") if options.anycoverage and options.local: # this needs some path mangling somewhere, I guess - parser.error("sorry, coverage options do not work when --local " - "is specified") + parser.error("sorry, coverage options do not work when --local is specified") if options.anycoverage and options.with_hg: - parser.error("sorry, coverage options do not work when --with-hg " - "is specified") + parser.error("sorry, coverage options do not work when --with-hg is specified") global verbose if options.verbose: - verbose = '' + verbose = "" if options.tmpdir: options.tmpdir = canonpath(options.tmpdir) if options.jobs < 1: - parser.error('--jobs must 
be positive') + parser.error("--jobs must be positive") if options.interactive and options.debug: parser.error("-i/--interactive and -d/--debug are incompatible") if options.debug: - if options.timeout != defaults['timeout']: - sys.stderr.write( - 'warning: --timeout option ignored with --debug\n') - if options.slowtimeout != defaults['slowtimeout']: - sys.stderr.write( - 'warning: --slowtimeout option ignored with --debug\n') + options.noprogress = True + if options.timeout != defaults["timeout"]: + sys.stderr.write("warning: --timeout option ignored with --debug\n") + if options.slowtimeout != defaults["slowtimeout"]: + sys.stderr.write("warning: --slowtimeout option ignored with --debug\n") options.timeout = 0 options.slowtimeout = 0 if options.py3k_warnings: if PYTHON3: - parser.error( - '--py3k-warnings can only be used on Python 2.7') + parser.error("--py3k-warnings can only be used on Python 2.7") if options.with_python3: if PYTHON3: - parser.error('--with-python3 cannot be used when executing with ' - 'Python 3') + parser.error("--with-python3 cannot be used when executing with Python 3") options.with_python3 = canonpath(options.with_python3) # Verify Python3 executable is acceptable. - proc = subprocess.Popen([options.with_python3, b'--version'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + proc = subprocess.Popen( + [options.with_python3, b"--version"], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) out, _err = proc.communicate() ret = proc.wait() if ret != 0: - parser.error('could not determine version of python 3') - if not out.startswith('Python '): - parser.error('unexpected output from python3 --version: %s' % - out) - vers = version.LooseVersion(out[len('Python '):]) - if vers < version.LooseVersion('3.5.0'): - parser.error('--with-python3 version must be 3.5.0 or greater; ' - 'got %s' % out) + parser.error("could not determine version of python 3") + if not out.startswith("Python "): + parser.error("unexpected output from python3 --version: %s" % out) + vers = version.LooseVersion(out[len("Python ") :]) + if vers < version.LooseVersion("3.5.0"): + parser.error( + "--with-python3 version must be 3.5.0 or greater; got %s" % out + ) if options.blacklist: - options.blacklist = parselistfiles(options.blacklist, 'blacklist') + options.blacklist = parselistfiles(options.blacklist, "blacklist") if options.whitelist: - options.whitelisted = parselistfiles(options.whitelist, 'whitelist') + options.whitelisted = parselistfiles(options.whitelist, "whitelist") else: options.whitelisted = {} if options.showchannels: options.nodiff = True + options.noprogress = True + if options.noprogress: + global showprogress + showprogress = False + + return options - return (options, args) def rename(src, dst): """Like os.rename(), trade atomicity and opened files friendliness @@ -479,27 +757,36 @@ def rename(src, dst): shutil.copy(src, dst) os.remove(src) + _unified_diff = difflib.unified_diff if PYTHON3: import functools + _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff) + def getdiff(expected, output, ref, err): servefail = False lines = [] - for line in _unified_diff(expected, output, ref, err): - if line.startswith(b'+++') or line.startswith(b'---'): - line = line.replace(b'\\', b'/') - if line.endswith(b' \n'): - line = line[:-2] + b'\n' + for line in _unified_diff( + expected, output, os.path.basename(ref), os.path.basename(err) + ): + if line.startswith(b"+++") or line.startswith(b"---"): + line = line.replace(b"\\", b"/") + if 
line.endswith(b" \n"): + line = line[:-2] + b"\n" lines.append(line) if not servefail and line.startswith( - b'+ abort: child process failed to start'): + b"+ abort: child process failed to start" + ): servefail = True return servefail, lines + verbose = False + + def vlog(*msg): """Log only when in verbose mode.""" if verbose is False: @@ -507,6 +794,7 @@ def vlog(*msg): return log(*msg) + # Bytes that break XML even in a CDATA block: control characters 0-31 # sans \t, \n and \r CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]") @@ -515,7 +803,8 @@ CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]") # list in group 2, and the preceeding line output in group 1: # # output..output (feature !)\n -optline = re.compile(b'(.+) \\((.+?) !\\)\n$') +optline = re.compile(b"(.*) \\((.+?) !\\)\n$") + def cdatasafe(data): """Make a string safe to include in a CDATA block. @@ -525,7 +814,8 @@ def cdatasafe(data): replaces illegal bytes with ? and adds a space between the ]] so that it won't break the CDATA block. """ - return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>') + return CDATA_EVIL.sub(b"?", data).replace(b"]]>", b"] ]>") + def log(*msg): """Log something to stdout. @@ -534,24 +824,118 @@ def log(*msg): """ with iolock: if verbose: - print(verbose, end=' ') + print(verbose, end=" ") for m in msg: - print(m, end=' ') + print(m, end=" ") print() sys.stdout.flush() + +def highlightdiff(line, color): + if not color: + return line + assert pygmentspresent + return pygments.highlight( + line.decode("latin1"), difflexer, terminal256formatter + ).encode("latin1") + + +def highlightmsg(msg, color): + if not color: + return msg + assert pygmentspresent + return pygments.highlight(msg, runnerlexer, runnerformatter) + + +_pgroups = {} + + +def track(proc): + """Register a process to a process group. So it can be killed later.""" + pgroup = ProcessGroup() + pid = proc.pid + pgroup.add(pid) + _pgroups[pid] = pgroup + + def terminate(proc): """Terminate subprocess""" - vlog('# Terminating process %d' % proc.pid) try: - proc.terminate() - except OSError: - pass + pgroup = _pgroups.pop(proc.pid) + vlog("# Terminating process %d recursively" % proc.pid) + pgroup.terminate() + except KeyError: + vlog("# Terminating process %d" % proc.pid) + try: + proc.terminate() + except OSError: + pass + def killdaemons(pidfile): import killdaemons as killmod - return killmod.killdaemons(pidfile, tryhard=False, remove=True, - logfn=vlog) + + return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog) + + +if os.name == "nt": + + class ProcessGroup(object): + """Process group backed by Windows JobObject. + + It provides a clean way to kill processes recursively. + """ + + def __init__(self): + self._hjob = _kernel32.CreateJobObjectA(None, None) + + def add(self, pid): + hprocess = _kernel32.OpenProcess( + PROCESS_SET_QUOTA | PROCESS_TERMINATE, 0, pid + ) + if not hprocess or hprocess == _INVALID_HANDLE_VALUE: + raise ctypes.WinError(_kernel32.GetLastError()) + try: + _kernel32.AssignProcessToJobObject(self._hjob, hprocess) + finally: + _kernel32.CloseHandle(hprocess) + + def terminate(self): + if self._hjob: + _kernel32.TerminateJobObject(self._hjob, 0) + _kernel32.CloseHandle(self._hjob) + self._hjob = 0 + + +else: + + class ProcessGroup(object): + """Fallback implementation on *nix. Kill process groups. + + This is less reliable than Windows' JobObject, because child processes + can change their process groups. But it's better than nothing. 
+ + On Linux, the "most correct" solution would be cgroup. But that + requires root permission. + """ + + def __init__(self): + self._pids = [] + + def add(self, pid): + self._pids.append(pid) + + def terminate(self): + for pid in self._pids: + try: + os.killpg(pid, signal.SIGKILL) + except OSError: + try: + os.kill(pid, signal.SIGKILL) + except OSError: + pass + self._pids = [] + class Test(unittest.TestCase): """Encapsulates a single, runnable test. @@ -564,13 +948,26 @@ class Test(unittest.TestCase): # Status code reserved for skipped tests (used by hghave). SKIPPED_STATUS = 80 - def __init__(self, path, outputdir, tmpdir, keeptmpdir=False, - debug=False, - timeout=defaults['timeout'], - startport=defaults['port'], extraconfigopts=None, - py3kwarnings=False, shell=None, hgcommand=None, - slowtimeout=defaults['slowtimeout'], usechg=False, - useipv6=False): + def __init__( + self, + path, + outputdir, + tmpdir, + keeptmpdir=False, + debug=False, + first=False, + timeout=None, + startport=None, + extraconfigopts=None, + extrarcpaths=None, + py3kwarnings=False, + shell=None, + hgcommand=None, + slowtimeout=None, + usechg=False, + useipv6=False, + watchman=None, + ): """Create a test from parameters. path is the full path to the file defining the test. @@ -597,30 +994,42 @@ class Test(unittest.TestCase): must have the form "key=value" (something understood by hgrc). Values of the form "foo.key=value" will result in "[foo] key=value". + extrarcpaths is an iterable for extra hgrc paths (files or + directories). + py3kwarnings enables Py3k warnings. shell is the shell to execute tests in. """ + if timeout is None: + timeout = defaults["timeout"] + if startport is None: + startport = defaults["port"] + if slowtimeout is None: + slowtimeout = defaults["slowtimeout"] self.path = path self.bname = os.path.basename(path) self.name = _strpath(self.bname) self._testdir = os.path.dirname(path) self._outputdir = outputdir self._tmpname = os.path.basename(path) - self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname) + self.errpath = os.path.join(self._outputdir, b"%s.err" % self.bname) self._threadtmp = tmpdir self._keeptmpdir = keeptmpdir self._debug = debug + self._first = first self._timeout = timeout self._slowtimeout = slowtimeout self._startport = startport self._extraconfigopts = extraconfigopts or [] + self._extrarcpaths = extrarcpaths or [] self._py3kwarnings = py3kwarnings self._shell = _bytespath(shell) - self._hgcommand = hgcommand or b'hg' + self._hgcommand = hgcommand or b"hg" self._usechg = usechg self._useipv6 = useipv6 + self._watchman = watchman self._aborted = False self._daemonpids = [] @@ -631,16 +1040,19 @@ class Test(unittest.TestCase): self._testtmp = None self._chgsockdir = None + self._refout = self.readrefout() + + def readrefout(self): + """read reference output""" # If we're not in --debug mode and reference output file exists, # check test output against it. 
- if debug: - self._refout = None # to match "out is None" + if self._debug: + return None # to match "out is None" elif os.path.exists(self.refpath): - f = open(self.refpath, 'rb') - self._refout = f.read().splitlines(True) - f.close() + with open(self.refpath, "rb") as f: + return f.read().splitlines(True) else: - self._refout = [] + return [] # needed to get base class __repr__ running @property @@ -682,10 +1094,81 @@ class Test(unittest.TestCase): raise if self._usechg: - self._chgsockdir = os.path.join(self._threadtmp, - b'%s.chgsock' % name) + self._chgsockdir = os.path.join(self._threadtmp, b"%s.chgsock" % name) os.mkdir(self._chgsockdir) + if self._watchman: + shortname = hashlib.sha1(b"%s" % name).hexdigest()[:6] + self._watchmandir = os.path.join( + self._threadtmp, b"%s.watchman" % shortname + ) + os.mkdir(self._watchmandir) + cfgfile = os.path.join(self._watchmandir, b"config.json") + + if os.name == "nt": + sockfile = "\\\\.\\pipe\\watchman-test-%s" % uuid.uuid4().hex + closefd = False + else: + sockfile = os.path.join(self._watchmandir, b"sock") + closefd = True + + self._watchmansock = sockfile + + clilogfile = os.path.join(self._watchmandir, "cli-log") + logfile = os.path.join(self._watchmandir, b"log") + pidfile = os.path.join(self._watchmandir, b"pid") + statefile = os.path.join(self._watchmandir, b"state") + + with open(cfgfile, "w") as f: + f.write(json.dumps({})) + + envb = osenvironb.copy() + envb[b"WATCHMAN_CONFIG_FILE"] = _bytespath(cfgfile) + envb[b"WATCHMAN_SOCK"] = _bytespath(sockfile) + + argv = [ + self._watchman, + "--sockname", + sockfile, + "--logfile", + logfile, + "--pidfile", + pidfile, + "--statefile", + statefile, + "--foreground", + "--log-level=2", # debug logging for watchman + ] + + with open(clilogfile, "wb") as f: + self._watchmanproc = subprocess.Popen( + argv, env=envb, stdin=None, stdout=f, stderr=f, close_fds=closefd + ) + + # Wait for watchman socket to become available + argv = [ + self._watchman, + "--no-spawn", + "--no-local", + "--sockname", + sockfile, + "version", + ] + deadline = time.time() + 30 + watchmanavailable = False + while not watchmanavailable and time.time() < deadline: + try: + # The watchman CLI can wait for a short time if sockfile + # is not ready. + subprocess.check_output(argv, env=envb, close_fds=closefd) + watchmanavailable = True + except Exception: + time.sleep(0.1) + if not watchmanavailable: + # tearDown needs to be manually called in this case. + self.tearDown() + raise RuntimeError("timed out waiting for watchman") + def run(self, result): """Run this test and report results against a TestResult instance.""" # This function is extremely similar to unittest.TestCase.run(). Once @@ -709,21 +1192,12 @@ class Test(unittest.TestCase): except KeyboardInterrupt: self._aborted = True raise - except SkipTest as e: + except unittest.SkipTest as e: result.addSkip(self, str(e)) # The base class will have already counted this as a # test we "ran", but we want to exclude skipped tests # from those we count towards those run. result.testsRun -= 1 - except IgnoreTest as e: - result.addIgnore(self, str(e)) - # As with skips, ignores also should be excluded from - # the number of tests executed. - result.testsRun -= 1 - except WarnTest as e: - result.addWarn(self, str(e)) - except ReportedTest as e: - pass except self.failureException as e: # This differs from unittest in that we don't capture # the stack trace. 
This is for historical reasons and @@ -756,10 +1230,11 @@ class Test(unittest.TestCase): This will return a tuple describing the result of the test. """ env = self._getenv() - self._daemonpids.append(env['DAEMON_PIDS']) - self._createhgrc(env['HGRCPATH']) + self._genrestoreenv(env) + self._daemonpids.append(env["DAEMON_PIDS"]) + self._createhgrc(env["HGRCPATH"].rsplit(os.pathsep, 1)[-1]) - vlog('# Test', self.name) + vlog("# Test", self.name) ret, out = self._run(env) self._finished = True @@ -768,48 +1243,54 @@ class Test(unittest.TestCase): def describe(ret): if ret < 0: - return 'killed by signal: %d' % -ret - return 'returned error code %d' % ret + return "killed by signal: %d" % -ret + return "returned error code %d" % ret self._skipped = False if ret == self.SKIPPED_STATUS: - if out is None: # Debug mode, nothing to parse. - missing = ['unknown'] + if out is None: # Debug mode, nothing to parse. + missing = ["unknown"] failed = None else: missing, failed = TTest.parsehghaveoutput(out) if not missing: - missing = ['skipped'] + missing = ["skipped"] if failed: - self.fail('hg have failed checking for %s' % failed[-1]) + self.fail("hg have failed checking for %s" % failed[-1]) else: self._skipped = True - raise SkipTest(missing[-1]) - elif ret == 'timeout': - self.fail('timed out') + raise unittest.SkipTest(missing[-1]) + elif ret == "timeout": + self.fail("timed out") elif ret is False: - raise WarnTest('no result code from test') + self.fail("no result code from test") elif out != self._refout: # Diff generation may rely on written .err file. - if (ret != 0 or out != self._refout) and not self._skipped \ - and not self._debug: - f = open(self.errpath, 'wb') - for line in out: - f.write(line) - f.close() + if ( + (ret != 0 or out != self._refout) + and not self._skipped + and not self._debug + ): + with open(self.errpath, "wb") as f: + for line in out: + f.write(line) # The result object handles diff calculation for us. 
- if self._result.addOutputMismatch(self, ret, out, self._refout): - # change was accepted, skip failing - return + with firstlock: + if self._result.addOutputMismatch(self, ret, out, self._refout): + # change was accepted, skip failing + return + if self._first: + global firsterror + firsterror = True if ret: - msg = 'output changed and ' + describe(ret) + msg = "output changed and " + describe(ret) else: - msg = 'output changed' + msg = "output changed" self.fail(msg) elif ret: @@ -822,9 +1303,10 @@ class Test(unittest.TestCase): self._daemonpids = [] if self._keeptmpdir: - log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' % - (self._testtmp.decode('utf-8'), - self._threadtmp.decode('utf-8'))) + log( + "\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s" + % (self._testtmp.decode("utf-8"), self._threadtmp.decode("utf-8")) + ) else: shutil.rmtree(self._testtmp, True) shutil.rmtree(self._threadtmp, True) @@ -834,26 +1316,42 @@ class Test(unittest.TestCase): # files are deleted shutil.rmtree(self._chgsockdir, True) - if (self._ret != 0 or self._out != self._refout) and not self._skipped \ - and not self._debug and self._out: - f = open(self.errpath, 'wb') - for line in self._out: - f.write(line) - f.close() + if self._watchman: + try: + self._watchmanproc.terminate() + self._watchmanproc.kill() + if self._keeptmpdir: + log( + "Keeping watchman dir: %s\n" % self._watchmandir.decode("utf-8") + ) + else: + shutil.rmtree(self._watchmandir, ignore_errors=True) + except Exception: + pass - vlog("# Ret was:", self._ret, '(%s)' % self.name) + if ( + (self._ret != 0 or self._out != self._refout) + and not self._skipped + and not self._debug + and self._out + ): + with open(self.errpath, "wb") as f: + for line in self._out: + f.write(line) + + vlog("# Ret was:", self._ret, "(%s)" % self.name) def _run(self, env): # This should be implemented in child classes to run tests. - raise SkipTest('unknown test type') + raise unittest.SkipTest("unknown test type") def abort(self): """Terminate execution of this test.""" self._aborted = True def _portmap(self, i): - offset = b'' if i == 0 else b'%d' % i - return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset) + offset = b"" if i == 0 else b"%d" % i + return (br":%d\b" % (self._startport + i), b":$HGPORT%s" % offset) def _getreplacements(self): """Obtain a mapping of text replacements to apply to test output. 
@@ -867,144 +1365,216 @@ class Test(unittest.TestCase): self._portmap(0), self._portmap(1), self._portmap(2), - (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$', - br'\1 (glob)'), - (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'), - (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'), - ] - r.append((self._escapepath(self._testtmp), b'$TESTTMP')) + (br"([^0-9])%s" % re.escape(self._localip()), br"\1$LOCALIP"), + (br"\bHG_TXNID=TXN:[a-f0-9]{40}\b", br"HG_TXNID=TXN:$ID$"), + ] + r.append((self._escapepath(self._testtmp), b"$TESTTMP")) + replacementfile = os.path.join(self._testdir, b"common-pattern.py") + + if os.path.exists(replacementfile): + data = {} + with open(replacementfile, mode="rb") as source: + # the intermediate 'compile' step help with debugging + code = compile(source.read(), replacementfile, "exec") + exec(code, data) + r.extend(data.get("substitutions", ())) return r def _escapepath(self, p): - if os.name == 'nt': - return ( - (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or - c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c - for c in p)) + if os.name == "nt": + return br"(?:[/\\]{2,4}\?[/\\]{1,2})?" + b"".join( + c.isalpha() + and b"[%s%s]" % (c.lower(), c.upper()) + or c in b"/\\" + and br"[/\\]{1,2}" + or c.isdigit() + and c + or b"\\" + c + for c in p ) else: return re.escape(p) def _localip(self): if self._useipv6: - return b'::1' + return b"::1" else: - return b'127.0.0.1' + return b"127.0.0.1" + + def _genrestoreenv(self, testenv): + """Generate a script that can be used by tests to restore the original + environment.""" + # Put the restoreenv script inside self._threadtmp + scriptpath = os.path.join(self._threadtmp, b"restoreenv.sh") + testenv["HGTEST_RESTOREENV"] = scriptpath + + # Only restore environment variable names that the shell allows + # us to export. + name_regex = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$") + + # Do not restore these variables; otherwise tests would fail. + reqnames = {"PYTHON", "TESTDIR", "TESTTMP"} + + with open(scriptpath, "w") as envf: + for name, value in origenviron.items(): + if not name_regex.match(name): + # Skip environment variables with unusual names not + # allowed by most shells. 
+ continue + if name in reqnames: + continue + envf.write("%s=%s\n" % (name, shellquote(value))) + + for name in testenv: + if name in origenviron or name in reqnames: + continue + envf.write("unset %s\n" % (name,)) def _getenv(self): """Obtain environment variables to use during test execution.""" + def defineport(i): - offset = '' if i == 0 else '%s' % i - env["HGPORT%s" % offset] = '%s' % (self._startport + i) + offset = "" if i == 0 else "%s" % i + env["HGPORT%s" % offset] = "%s" % (self._startport + i) + env = os.environ.copy() - env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') - env['HGEMITWARNINGS'] = '1' - env['TESTTMP'] = self._testtmp - env['HOME'] = self._testtmp + env["PYTHONUSERBASE"] = sysconfig.get_config_var("userbase") + env["HGEMITWARNINGS"] = "1" + env["TESTTMP"] = self._testtmp + env["HOME"] = self._testtmp + if not self._usechg: + env["CHGDISABLE"] = "1" # This number should match portneeded in _getport for port in xrange(3): # This list should be parallel to _portmap in _getreplacements defineport(port) - env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc') - env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids') - env["HGEDITOR"] = ('"' + sys.executable + '"' - + ' -c "import sys; sys.exit(0)"') + rcpath = os.path.join(self._threadtmp, b".hgrc") + rcpaths = self._extrarcpaths + [rcpath] + env["HGRCPATH"] = os.pathsep.join(rcpaths) + env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b"daemon.pids") + env["HGEDITOR"] = '"' + sys.executable + '"' + ' -c "import sys; sys.exit(0)"' env["HGMERGE"] = "internal:merge" - env["HGUSER"] = "test" + env["HGUSER"] = "test" env["HGENCODING"] = "ascii" env["HGENCODINGMODE"] = "strict" - env['HGIPV6'] = str(int(self._useipv6)) + env["HGIPV6"] = str(int(self._useipv6)) # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw # IP addresses. - env['LOCALIP'] = self._localip() + env["LOCALIP"] = self._localip() # Reset some environment variables to well-known values so that # the tests produce repeatable output. - env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C' - env['TZ'] = 'GMT' + env["LANG"] = env["LC_ALL"] = env["LANGUAGE"] = "C" + env["TZ"] = "GMT" env["EMAIL"] = "Foo Bar " - env['COLUMNS'] = '80' - env['TERM'] = 'xterm' + env["COLUMNS"] = "80" - for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' + - 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' + - 'NO_PROXY CHGDEBUG').split(): + # Claim that 256 colors is not supported. + env["HGCOLORS"] = "16" + + # Do not be affected by system legacy configs. 
+ env["HGLEGACY"] = "" + + for k in ( + "HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy " + + "HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER " + + "NO_PROXY CHGDEBUG HGDETECTRACE" + ).split(): if k in env: del env[k] # unset env related to hooks for k in env.keys(): - if k.startswith('HG_'): + if k.startswith("HG_"): del env[k] if self._usechg: - env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server') + env["CHGSOCKNAME"] = os.path.join(self._chgsockdir, b"server") + + if self._watchman: + env["WATCHMAN_SOCK"] = self._watchmansock + env["HGFSMONITOR_TESTS"] = "1" return env def _createhgrc(self, path): """Create an hgrc file for this test.""" - hgrc = open(path, 'wb') - hgrc.write(b'[ui]\n') - hgrc.write(b'slash = True\n') - hgrc.write(b'interactive = False\n') - hgrc.write(b'mergemarkers = detailed\n') - hgrc.write(b'promptecho = True\n') - hgrc.write(b'[defaults]\n') - hgrc.write(b'[devel]\n') - hgrc.write(b'all-warnings = true\n') - hgrc.write(b'default-date = 0 0\n') - hgrc.write(b'[largefiles]\n') - hgrc.write(b'usercache = %s\n' % - (os.path.join(self._testtmp, b'.cache/largefiles'))) - hgrc.write(b'[web]\n') - hgrc.write(b'address = localhost\n') - hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) + with open(path, "wb") as hgrc: + hgrc.write(b"[ui]\n") + hgrc.write(b"slash = True\n") + hgrc.write(b"interactive = False\n") + hgrc.write(b"mergemarkers = detailed\n") + hgrc.write(b"promptecho = True\n") + hgrc.write(b"[defaults]\n") + hgrc.write(b"[devel]\n") + hgrc.write(b"all-warnings = true\n") + hgrc.write(b"default-date = 0 0\n") + hgrc.write(b"[lfs]\n") + if self._watchman: + hgrc.write(b"[extensions]\nfsmonitor=\n") + hgrc.write(b"[fsmonitor]\ndetectrace=1\n") + hgrc.write(b"[web]\n") + hgrc.write(b"address = localhost\n") + hgrc.write(b"ipv6 = %s\n" % str(self._useipv6).encode("ascii")) - for opt in self._extraconfigopts: - section, key = opt.split('.', 1) - assert '=' in key, ('extra config opt %s must ' - 'have an = for assignment' % opt) - hgrc.write(b'[%s]\n%s\n' % (section, key)) - hgrc.close() + for opt in self._extraconfigopts: + section, key = opt.encode("utf-8").split(b".", 1) + assert b"=" in key, ( + "extra config opt %s must have an = for assignment" % opt + ) + hgrc.write(b"[%s]\n%s\n" % (section, key)) def fail(self, msg): # unittest differentiates between errored and failed. # Failed is denoted by AssertionError (by default at least). raise AssertionError(msg) - def _runcommand(self, cmd, env, normalizenewlines=False): + def _runcommand(self, cmd, env, normalizenewlines=False, linecallback=None): """Run command in a sub-process, capturing the output (stdout and stderr). Return a tuple (exitcode, output). output is None in debug mode. """ if self._debug: - proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp, - env=env) + proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp, env=env) ret = proc.wait() return (ret, None) proc = Popen4(cmd, self._testtmp, self._timeout, env) + track(proc) + def cleanup(): terminate(proc) ret = proc.wait() if ret == 0: ret = signal.SIGTERM << 8 - killdaemons(env['DAEMON_PIDS']) + killdaemons(env["DAEMON_PIDS"]) return ret - output = '' + output = "" proc.tochild.close() try: - output = proc.fromchild.read() + f = proc.fromchild + while True: + line = f.readline() + # Make the test abort faster if other tests are Ctrl+C-ed. 
+ # Code path: for test in runtests: test.abort() + if self._aborted: + raise KeyboardInterrupt() + if linecallback: + linecallback(line) + output += line + if not line: + break + except KeyboardInterrupt: - vlog('# Handling keyboard interrupt') + vlog("# Handling keyboard interrupt") cleanup() raise + finally: proc.fromchild.close() @@ -1013,37 +1583,68 @@ class Test(unittest.TestCase): ret = os.WEXITSTATUS(ret) if proc.timeout: - ret = 'timeout' + ret = "timeout" if ret: - killdaemons(env['DAEMON_PIDS']) + killdaemons(env["DAEMON_PIDS"]) for s, r in self._getreplacements(): output = re.sub(s, r, output) if normalizenewlines: - output = output.replace('\r\n', '\n') + output = output.replace("\r\n", "\n") return ret, output.splitlines(True) + class PythonTest(Test): """A Python-based test.""" @property def refpath(self): - return os.path.join(self._testdir, b'%s.out' % self.bname) + return os.path.join(self._testdir, b"%s.out" % self.bname) + + def _processoutput(self, output): + if os.path.exists(self.refpath): + expected = open(self.refpath, "r").readlines() + else: + return output + + processed = ["" for i in output] + i = 0 + while i < len(expected) and i < len(output): + line = expected[i].strip() + + # by default, processed output is the same as received output + processed[i] = output[i] + if line.endswith(" (re)"): + # pattern, should try to match + pattern = line[:-5] + if not pattern.endswith("$"): + pattern += "$" + if re.match(pattern, output[i].strip()): + processed[i] = expected[i] + i = i + 1 + + # output is longer than expected, we don't need to process + # the tail + while i < len(output): + processed[i] = output[i] + i = i + 1 + + return processed def _run(self, env): - py3kswitch = self._py3kwarnings and b' -3' or b'' + py3kswitch = self._py3kwarnings and b" -3" or b"" cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path) vlog("# Running", cmd) - normalizenewlines = os.name == 'nt' - result = self._runcommand(cmd, env, - normalizenewlines=normalizenewlines) + normalizenewlines = os.name == "nt" + result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines) if self._aborted: raise KeyboardInterrupt() - return result + return result[0], self._processoutput(result[1]) + # Some glob patterns apply only in some circumstances, so the script # might want to remove (glob) annotations that otherwise should be @@ -1051,64 +1652,76 @@ class PythonTest(Test): checkcodeglobpats = [ # On Windows it looks like \ doesn't require a (glob), but we know # better. - re.compile(br'^pushing to \$TESTTMP/.*[^)]$'), - re.compile(br'^moving \S+/.*[^)]$'), - re.compile(br'^pulling from \$TESTTMP/.*[^)]$'), + re.compile(br"^pushing to \$TESTTMP/.*[^)]$"), + re.compile(br"^moving \S+/.*[^)]$"), + re.compile(br"^pulling from \$TESTTMP/.*[^)]$"), # Not all platforms have 127.0.0.1 as loopback (though most do), # so we always glob that too. 
- re.compile(br'.*\$LOCALIP.*$'), + re.compile(br".*\$LOCALIP.*$"), ] bchr = chr if PYTHON3: bchr = lambda x: bytes([x]) + class TTest(Test): """A "t test" is a test backed by a .t file.""" - SKIPPED_PREFIX = b'skipped: ' - FAILED_PREFIX = b'hghave check failed: ' - NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search + SKIPPED_PREFIX = b"skipped: " + FAILED_PREFIX = b"hghave check failed: " + NEEDESCAPE = re.compile(br"[\x00-\x08\x0b-\x1f\x7f-\xff]").search - ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub - ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256)) - ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'}) + ESCAPESUB = re.compile(br"[\x00-\x08\x0b-\x1f\\\x7f-\xff]").sub + ESCAPEMAP = dict((bchr(i), br"\x%02x" % i) for i in range(256)) + ESCAPEMAP.update({b"\\": b"\\\\", b"\r": br"\r"}) def __init__(self, path, *args, **kwds): # accept an extra "case" parameter - case = None - if 'case' in kwds: - case = kwds.pop('case') + case = kwds.pop("case", None) self._case = case self._allcases = parsettestcases(path) super(TTest, self).__init__(path, *args, **kwds) if case: - self.name = '%s (case %s)' % (self.name, _strpath(case)) - self.errpath = b'%s.%s.err' % (self.errpath[:-4], case) - self._tmpname += b'-%s' % case + self.name = "%s (case %s)" % (self.name, _strpath(case)) + self.errpath = b"%s.%s.err" % (self.errpath[:-4], case) + self._tmpname += b"-%s" % case + self._hghavecache = {} @property def refpath(self): return os.path.join(self._testdir, self.bname) def _run(self, env): - f = open(self.path, 'rb') - lines = f.readlines() - f.close() + with open(self.path, "rb") as f: + lines = f.readlines() - salt, script, after, expected = self._parsetest(lines) + # .t file is both reference output and the test input, keep reference + # output updated with the the test input. This avoids some race + # conditions where the reference output does not match the actual test. + if self._refout is not None: + self._refout = lines + + salt, saltcount, script, after, expected = self._parsetest(lines) + self.progress = (0, saltcount) # Write out the generated script. - fname = b'%s.sh' % self._testtmp - f = open(fname, 'wb') - for l in script: - f.write(l) - f.close() + fname = b"%s.sh" % self._testtmp + with open(fname, "wb") as f: + for l in script: + f.write(l) cmd = b'%s "%s"' % (self._shell, fname) vlog("# Running", cmd) - exitcode, output = self._runcommand(cmd, env) + saltseen = [0] + + def linecallback(line): + if salt in line: + saltseen[0] += 1 + self.progress = (saltseen[0], saltcount) + + exitcode, output = self._runcommand(cmd, env, linecallback=linecallback) if self._aborted: raise KeyboardInterrupt() @@ -1121,24 +1734,40 @@ class TTest(Test): return self._processoutput(exitcode, output, salt, after, expected) def _hghave(self, reqs): + # Cache the results of _hghave() checks. + # In some cases the same _hghave() call can be repeated hundreds of + # times in a row. (For instance, if a linematch check with a hghave + # requirement does not match, the _hghave() call will be repeated for + # each remaining line in the test output.) + key = tuple(reqs) + result = self._hghavecache.get(key) + if result is None: + result = self._computehghave(reqs) + self._hghavecache[key] = result + return result + + def _computehghave(self, reqs): # TODO do something smarter when all other uses of hghave are gone. 
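
# NOTE: illustrative sketch, not part of the patch. The progress plumbing in
# this hunk is built on "salt" markers: _parsetest() appends one
# "echo <salt> <lineno> $?" after every command and reports how many it
# emitted, and the linecallback handed to _runcommand() counts how many of
# those markers have appeared in the output so far, yielding a (seen, total)
# pair for the progress renderer. A tiny self-contained model (names are
# hypothetical):
import time

def makesalt():
    return b"SALT%d" % int(time.time())

def makelinecallback(salt, progress):
    """Return a callback that bumps progress = [seen, total] in place."""
    def callback(line):
        if salt in line:
            progress[0] += 1
    return callback

# usage:
#   progress = [0, saltcount]
#   run the generated script with linecallback=makelinecallback(salt, progress)
#   and read progress from the rendering thread.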
runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__))) - tdir = runtestdir.replace(b'\\', b'/') - proc = Popen4(b'%s -c "%s/hghave %s"' % - (self._shell, tdir, b' '.join(reqs)), - self._testtmp, 0, self._getenv()) + tdir = runtestdir.replace(b"\\", b"/") + proc = Popen4( + b'%s -c "%s/hghave %s"' % (self._shell, tdir, b" ".join(reqs)), + self._testtmp, + 0, + self._getenv(), + ) stdout, stderr = proc.communicate() ret = proc.wait() if wifexited(ret): ret = os.WEXITSTATUS(ret) if ret == 2: - print(stdout.decode('utf-8')) + print(stdout.decode("utf-8")) sys.exit(1) if ret != 0: return False, stdout - if 'slow' in reqs: + if b"slow" in reqs: self._timeout = self._slowtimeout return True, None @@ -1146,7 +1775,7 @@ class TTest(Test): # implements "#if" reqs = [] for arg in args: - if arg.startswith(b'no-') and arg[3:] in self._allcases: + if arg.startswith(b"no-") and arg[3:] in self._allcases: if arg[3:] == self._case: return False elif arg in self._allcases: @@ -1161,11 +1790,14 @@ class TTest(Test): # up script results with our source. These markers include input # line number and the last return code. salt = b"SALT%d" % time.time() + saltcount = [0] + def addsalt(line, inpython): + saltcount[0] += 1 if inpython: - script.append(b'%s %d 0\n' % (salt, line)) + script.append(b"%s %d 0\n" % (salt, line)) else: - script.append(b'echo %s %d $?\n' % (salt, line)) + script.append(b"echo %s %d $?\n" % (salt, line)) script = [] @@ -1188,93 +1820,97 @@ class TTest(Test): inpython = False if self._debug: - script.append(b'set -x\n') - if os.getenv('MSYSTEM'): - script.append(b'alias pwd="pwd -W"\n') + script.append(b"set -x\n") + if os.getenv("MSYSTEM"): + script.append(b'pwd() { builtin pwd -W "$@"; }\n') + + # Source $RUNTESTDIR/tinit.sh for utility functions + script.append(b'source "$RUNTESTDIR/tinit.sh"\n') n = 0 for n, l in enumerate(lines): - if not l.endswith(b'\n'): - l += b'\n' - if l.startswith(b'#require'): + if not l.endswith(b"\n"): + l += b"\n" + if l.startswith(b"#require"): lsplit = l.split() - if len(lsplit) < 2 or lsplit[0] != b'#require': - after.setdefault(pos, []).append(' !!! invalid #require\n') - haveresult, message = self._hghave(lsplit[1:]) - if not haveresult: - script = [b'echo "%s"\nexit 80\n' % message] - break + if len(lsplit) < 2 or lsplit[0] != b"#require": + after.setdefault(pos, []).append(" !!! invalid #require\n") + if not skipping: + haveresult, message = self._hghave(lsplit[1:]) + if not haveresult: + script = [b'echo "%s"\nexit 80\n' % message] + break after.setdefault(pos, []).append(l) - elif l.startswith(b'#if'): + elif l.startswith(b"#if"): lsplit = l.split() - if len(lsplit) < 2 or lsplit[0] != b'#if': - after.setdefault(pos, []).append(' !!! invalid #if\n') + if len(lsplit) < 2 or lsplit[0] != b"#if": + after.setdefault(pos, []).append(" !!! invalid #if\n") if skipping is not None: - after.setdefault(pos, []).append(' !!! nested #if\n') + after.setdefault(pos, []).append(" !!! nested #if\n") skipping = not self._iftest(lsplit[1:]) after.setdefault(pos, []).append(l) - elif l.startswith(b'#else'): + elif l.startswith(b"#else"): if skipping is None: - after.setdefault(pos, []).append(' !!! missing #if\n') + after.setdefault(pos, []).append(" !!! missing #if\n") skipping = not skipping after.setdefault(pos, []).append(l) - elif l.startswith(b'#endif'): + elif l.startswith(b"#endif"): if skipping is None: - after.setdefault(pos, []).append(' !!! missing #if\n') + after.setdefault(pos, []).append(" !!! 
missing #if\n") skipping = None after.setdefault(pos, []).append(l) elif skipping: after.setdefault(pos, []).append(l) - elif l.startswith(b' >>> '): # python inlines + elif l.startswith(b" >>> "): # python inlines after.setdefault(pos, []).append(l) prepos = pos pos = n if not inpython: # We've just entered a Python block. Add the header. inpython = True - addsalt(prepos, False) # Make sure we report the exit code. - script.append(b'%s -m heredoctest < '): # continuations + elif l.startswith(b" > "): # continuations after.setdefault(prepos, []).append(l) script.append(l[4:]) - elif l.startswith(b' '): # results + elif l.startswith(b" "): # results # Queue up a list of expected results. expected.setdefault(pos, []).append(l[2:]) else: if inpython: - script.append(b'EOF\n') + script.append(b"EOF\n") inpython = False # Non-command/result. Queue up for merged output. after.setdefault(pos, []).append(l) if inpython: - script.append(b'EOF\n') + script.append(b"EOF\n") if skipping is not None: - after.setdefault(pos, []).append(' !!! missing #endif\n') + after.setdefault(pos, []).append(" !!! missing #endif\n") addsalt(n + 1, False) - return salt, script, after, expected + return salt, saltcount[0], script, after, expected def _processoutput(self, exitcode, output, salt, after, expected): # Merge the script output back into a unified test. - warnonly = 1 # 1: not yet; 2: yes; 3: for sure not + warnonly = 1 # 1: not yet; 2: yes; 3: for sure not if exitcode != 0: warnonly = 3 @@ -1286,8 +1922,8 @@ class TTest(Test): lout, lcmd = l.split(salt, 1) while lout: - if not lout.endswith(b'\n'): - lout += b' (no-eol)\n' + if not lout.endswith(b"\n"): + lout += b" (no-eol)\n" # Find the expected output at the current position. els = [None] @@ -1299,20 +1935,17 @@ class TTest(Test): while i < len(els): el = els[i] - r = TTest.linematch(el, lout) + r = self.linematch(el, lout) if isinstance(r, str): - if r == '+glob': - lout = el[:-1] + ' (glob)\n' - r = '' # Warn only this line. - elif r == '-glob': - lout = ''.join(el.rsplit(' (glob)', 1)) - r = '' # Warn only this line. + if r == "-glob": + lout = "".join(el.rsplit(" (glob)", 1)) + r = "" # Warn only this line. elif r == "retry": - postout.append(b' ' + el) + postout.append(b" " + el) els.pop(i) break else: - log('\ninfo, unknown linematch result: %r\n' % r) + log("\ninfo, unknown linematch result: %r\n" % r) r = False if r: els.pop(i) @@ -1323,11 +1956,9 @@ class TTest(Test): else: m = optline.match(el) if m: - conditions = [c for c in m.group(2).split(' ')] + conditions = [c for c in m.group(2).split(b" ")] - if self._hghave(conditions)[0]: - lout = el - else: + if not self._iftest(conditions): optional.append(i) i += 1 @@ -1337,35 +1968,41 @@ class TTest(Test): continue # clean up any optional leftovers for i in optional: - postout.append(b' ' + els[i]) + postout.append(b" " + els[i]) for i in reversed(optional): del els[i] - postout.append(b' ' + el) + postout.append(b" " + el) else: if self.NEEDESCAPE(lout): - lout = TTest._stringescape(b'%s (esc)\n' % - lout.rstrip(b'\n')) - postout.append(b' ' + lout) # Let diff deal with it. - if r != '': # If line failed. - warnonly = 3 # for sure not - elif warnonly == 1: # Is "not yet" and line is warn only. - warnonly = 2 # Yes do warn. + lout = TTest._stringescape(b"%s (esc)\n" % lout.rstrip(b"\n")) + postout.append(b" " + lout) # Let diff deal with it. + if r != "": # If line failed. + warnonly = 3 # for sure not + elif warnonly == 1: # Is "not yet" and line is warn only. + warnonly = 2 # Yes do warn. 
break else: # clean up any optional leftovers while expected.get(pos, None): el = expected[pos].pop(0) if el: - if (not optline.match(el) - and not el.endswith(b" (?)\n")): - break - postout.append(b' ' + el) + if not el.endswith(b" (?)\n"): + m = optline.match(el) + if m: + conditions = [c for c in m.group(2).split(b" ")] + + if self._iftest(conditions): + # Don't append as optional line + continue + else: + continue + postout.append(b" " + el) if lcmd: # Add on last return code. ret = int(lcmd.split()[1]) if ret != 0: - postout.append(b' [%d]\n' % ret) + postout.append(b" [%d]\n" % ret) if pos in after: # Merge in non-active test bits. postout += after.pop(pos) @@ -1375,17 +2012,18 @@ class TTest(Test): postout += after.pop(pos) if warnonly == 2: - exitcode = False # Set exitcode to warned. + exitcode = False # Set exitcode to warned. return exitcode, postout @staticmethod def rematch(el, l): try: + el = b"(?:" + el + b")" # use \Z to ensure that the regex matches to the end of the string - if os.name == 'nt': - return re.match(el + br'\r?\n\Z', l) - return re.match(el + br'\n\Z', l) + if os.name == "nt": + return re.match(el + br"\r?\n\Z", l) + return re.match(el + br"\n\Z", l) except re.error: # el is an invalid regex return False @@ -1394,37 +2032,40 @@ class TTest(Test): def globmatch(el, l): # The only supported special characters are * and ? plus / which also # matches \ on windows. Escaping of these characters is supported. - if el + b'\n' == l: + if el + b"\n" == l: if os.altsep: # matching on "/" is not needed for this line for pat in checkcodeglobpats: if pat.match(el): return True - return b'-glob' + return b"-glob" return True - el = el.replace(b'$LOCALIP', b'*') + el = el.replace(b"$LOCALIP", b"*") + # $HGPORT might be changed in test. Do a fuzzy match. + el = el.replace(b"$HGPORT1", b"*") + el = el.replace(b"$HGPORT2", b"*") + el = el.replace(b"$HGPORT", b"*") i, n = 0, len(el) - res = b'' + res = b"" while i < n: - c = el[i:i + 1] + c = el[i : i + 1] i += 1 - if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/': - res += el[i - 1:i + 1] + if c == b"\\" and i < n and el[i : i + 1] in b"*?\\/": + res += el[i - 1 : i + 1] i += 1 - elif c == b'*': - res += b'.*' - elif c == b'?': - res += b'.' - elif c == b'/' and os.altsep: - res += b'[/\\\\]' + elif c == b"*": + res += b".*" + elif c == b"?": + res += b"." 
+ elif c == b"/" and os.altsep: + res += b"[/\\\\]" else: res += re.escape(c) return TTest.rematch(res, l) - @staticmethod - def linematch(el, l): + def linematch(self, el, l): retry = False - if el == l: # perfect match (fast) + if el == l: # perfect match (fast) return True if el: if el.endswith(b" (?)\n"): @@ -1433,16 +2074,19 @@ class TTest(Test): else: m = optline.match(el) if m: + conditions = [c for c in m.group(2).split(b" ")] + el = m.group(1) + b"\n" - retry = "retry" + if not self._iftest(conditions): + retry = "retry" # Not required by listed features if el.endswith(b" (esc)\n"): if PYTHON3: - el = el[:-7].decode('unicode_escape') + '\n' - el = el.encode('utf-8') + el = el[:-7].decode("unicode_escape") + "\n" + el = el.encode("utf-8") else: - el = el[:-7].decode('string-escape') + '\n' - if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l: + el = el[:-7].decode("string-escape") + "\n" + if el == l or os.name == "nt" and el[:-1] + b"\r\n" == l: return True if el.endswith(b" (re)\n"): return TTest.rematch(el[:-6], l) or retry @@ -1451,26 +2095,28 @@ class TTest(Test): if l.endswith(b" (glob)\n"): l = l[:-8] + b"\n" return TTest.globmatch(el[:-8], l) or retry - if os.altsep and l.replace(b'\\', b'/') == el: - return b'+glob' + if os.altsep: + _l = l.replace(b"\\", b"/") + if el == _l or os.name == "nt" and el[:-1] + b"\r\n" == _l: + return True return retry @staticmethod def parsehghaveoutput(lines): - '''Parse hghave log lines. + """Parse hghave log lines. Return tuple of lists (missing, failed): * the missing/unknown features - * the features for which existence check failed''' + * the features for which existence check failed""" missing = [] failed = [] for line in lines: if line.startswith(TTest.SKIPPED_PREFIX): line = line.splitlines()[0] - missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8')) + missing.append(line[len(TTest.SKIPPED_PREFIX) :].decode("utf-8")) elif line.startswith(TTest.FAILED_PREFIX): line = line.splitlines()[0] - failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8')) + failed.append(line[len(TTest.FAILED_PREFIX) :].decode("utf-8")) return missing, failed @@ -1482,22 +2128,120 @@ class TTest(Test): def _stringescape(s): return TTest.ESCAPESUB(TTest._escapef, s) -iolock = threading.RLock() -class SkipTest(Exception): - """Raised to indicate that a test is to be skipped.""" +firstlock = RLock() +firsterror = False -class IgnoreTest(Exception): - """Raised to indicate that a test is to be ignored.""" +_iolock = RLock() -class WarnTest(Exception): - """Raised to indicate that a test warned.""" -class ReportedTest(Exception): - """Raised to indicate that a test already reported.""" +class Progress(object): + def __init__(self): + self.lines = [] + self.out = sys.stderr + + def clear(self): + self.update([]) + + def update(self, lines): + content = "" + toclear = len(self.lines) - len(lines) + moveup = len(self.lines) - 1 + if toclear > 0: + content += "\r\033[K\033[1A" * toclear + moveup -= toclear + if moveup > 0: + content += "\033[%dA" % moveup + content += "\n".join("\r\033[K%s" % line.rstrip() for line in lines) + self._write(content) + self.lines = lines + + def setup(self): + # Disable line wrapping + self._write("\x1b[?7l") + + def finalize(self): + # Re-enable line wrapping + self._write("\x1b[?7h") + + def _write(self, content): + with _iolock: + self.out.write(content) + self.out.flush() + + +progress = Progress() +showprogress = sys.stderr.isatty() + +if os.name == "nt": + import ctypes + + _HANDLE = ctypes.c_void_p + _DWORD = 
ctypes.c_ulong + _INVALID_HANDLE_VALUE = _HANDLE(-1).value + _STD_ERROR_HANDLE = _DWORD(-12).value + + _LPVOID = ctypes.c_void_p + _BOOL = ctypes.c_long + _UINT = ctypes.c_uint + _HANDLE = ctypes.c_void_p + + _INVALID_HANDLE_VALUE = _HANDLE(-1).value + + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + + PROCESS_SET_QUOTA = 0x0100 + PROCESS_TERMINATE = 0x0001 + + _kernel32 = ctypes.WinDLL("kernel32", use_last_error=True) + + _kernel32.CreateJobObjectA.argtypes = [_LPVOID, _LPVOID] + _kernel32.CreateJobObjectA.restype = _HANDLE + + _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD] + _kernel32.OpenProcess.restype = _HANDLE + + _kernel32.AssignProcessToJobObject.argtypes = [_HANDLE, _HANDLE] + _kernel32.AssignProcessToJobObject.restype = _BOOL + + _kernel32.TerminateJobObject.argtypes = [_HANDLE, _UINT] + _kernel32.TerminateJobObject.restype = _BOOL + + _kernel32.CloseHandle.argtypes = [_HANDLE] + _kernel32.CloseHandle.restype = _BOOL + + +if showprogress and os.name == "nt": + # From mercurial/color.py. + # Enable virtual terminal mode for the associated console. + + handle = _kernel32.GetStdHandle(_STD_ERROR_HANDLE) # don't close the handle + if handle == _INVALID_HANDLE_VALUE: + showprogress = False + else: + mode = _DWORD(0) + if _kernel32.GetConsoleMode(handle, ctypes.byref(mode)): + if (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0: + mode.value |= ENABLE_VIRTUAL_TERMINAL_PROCESSING + if not _kernel32.SetConsoleMode(handle, mode): + showprogress = False + + +class IOLockWithProgress(object): + def __enter__(self): + _iolock.acquire() + progress.clear() + + def __exit__(self, exc_type, exc_value, traceback): + _iolock.release() + + +iolock = IOLockWithProgress() + class TestResult(unittest._TextTestResult): """Holds results when executing via unittest.""" + # Don't worry too much about accessing the non-public _TextTestResult. # It is relatively common in Python testing tools. def __init__(self, options, *args, **kwargs): @@ -1514,81 +2258,88 @@ class TestResult(unittest._TextTestResult): # sense to map it into skip some day. self.ignored = [] - # We have a custom "warned" result that isn't present in any Python - # unittest implementation. It is very similar to failed. It may make - # sense to map it into fail some day. - self.warned = [] - self.times = [] self._firststarttime = None # Data stored for the benefit of generating xunit reports. 
self.successes = [] self.faildata = {} + if options.color == "auto": + self.color = pygmentspresent and self.stream.isatty() + elif options.color == "never": + self.color = False + else: # 'always', for testing purposes + self.color = pygmentspresent + def addFailure(self, test, reason): self.failures.append((test, reason)) if self._options.first: self.stop() else: - with iolock: - if reason == "timed out": - self.stream.write('t') - else: - if not self._options.nodiff: - self.stream.write('\nERROR: %s output changed\n' % test) - self.stream.write('!') + if reason == "timed out": + if not showprogress: + with iolock: + self.stream.write("t") + else: + if not self._options.nodiff: + with iolock: + self.stream.write("\n") + # Exclude the '\n' from highlighting to lex correctly + formatted = "ERROR: %s output changed\n" % test + self.stream.write(highlightmsg(formatted, self.color)) + if not showprogress: + with iolock: + self.stream.write("!") - self.stream.flush() + self.stream.flush() def addSuccess(self, test): - with iolock: - super(TestResult, self).addSuccess(test) + if showprogress and not self.showAll: + super(unittest._TextTestResult, self).addSuccess(test) + else: + with iolock: + super(TestResult, self).addSuccess(test) self.successes.append(test) def addError(self, test, err): - super(TestResult, self).addError(test, err) + if showprogress and not self.showAll: + super(unittest._TextTestResult, self).addError(test, err) + else: + with iolock: + super(TestResult, self).addError(test, err) if self._options.first: self.stop() # Polyfill. def addSkip(self, test, reason): self.skipped.append((test, reason)) - with iolock: - if self.showAll: - self.stream.writeln('skipped %s' % reason) - else: - self.stream.write('s') - self.stream.flush() + if self.showAll: + with iolock: + self.stream.writeln("skipped %s" % reason) + else: + if not showprogress: + with iolock: + self.stream.write("s") + self.stream.flush() def addIgnore(self, test, reason): self.ignored.append((test, reason)) - with iolock: - if self.showAll: - self.stream.writeln('ignored %s' % reason) + if self.showAll: + with iolock: + self.stream.writeln("ignored %s" % reason) + else: + if reason not in ("not retesting", "doesn't match keyword"): + if not showprogress: + with iolock: + self.stream.write("i") else: - if reason not in ('not retesting', "doesn't match keyword"): - self.stream.write('i') - else: - self.testsRun += 1 - self.stream.flush() - - def addWarn(self, test, reason): - self.warned.append((test, reason)) - - if self._options.first: - self.stop() - - with iolock: - if self.showAll: - self.stream.writeln('warned %s' % reason) - else: - self.stream.write('~') - self.stream.flush() + self.testsRun += 1 + self.stream.flush() def addOutputMismatch(self, test, ret, got, expected): """Record a mismatch in test output for a particular test.""" - if self.shouldStop: + if self.shouldStop or firsterror: # don't print, some other test case already failed and # printed, we're just stale and probably failed due to our # temp dir getting cleaned up. 
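
# NOTE: illustrative sketch, not part of the patch. The Progress class above
# repaints a multi-line status block in place on stderr: "\r\033[K" clears a
# line and "\033[<n>A" moves the cursor up n lines, so every update() erases
# the previously drawn block and rewrites it. The same redraw trick, reduced
# to a stand-alone toy:
import sys
import time

class BlockRenderer(object):
    def __init__(self, out=sys.stderr):
        self.out = out
        self.lines = []

    def update(self, lines):
        buf = ""
        toclear = len(self.lines) - len(lines)
        moveup = len(self.lines) - 1
        if toclear > 0:
            buf += "\r\033[K\033[1A" * toclear   # wipe lines that went away
            moveup -= toclear
        if moveup > 0:
            buf += "\033[%dA" % moveup           # jump back to the top
        buf += "\n".join("\r\033[K%s" % l for l in lines)
        self.out.write(buf)
        self.out.flush()
        self.lines = lines

if __name__ == "__main__":
    r = BlockRenderer()
    for i in range(5):
        r.update(["tick %d" % i, "still running ..."])
        time.sleep(0.2)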
@@ -1604,19 +2355,23 @@ class TestResult(unittest._TextTestResult): v = self._options.view if PYTHON3: v = _bytespath(v) - os.system(b"%s %s %s" % - (v, test.refpath, test.errpath)) + os.system(b"%s %s %s" % (v, test.refpath, test.errpath)) else: - servefail, lines = getdiff(expected, got, - test.refpath, test.errpath) + servefail, lines = getdiff(expected, got, test.refpath, test.errpath) if servefail: - self.addFailure( - test, - 'server failed to start (HGPORT=%s)' % test._startport) - raise ReportedTest('server failed to start') + raise test.failureException( + "server failed to start (HGPORT=%s)" % test._startport + ) else: - self.stream.write('\n') + self.stream.write("\n") + if len(lines) > self._options.maxdifflines: + omitted = len(lines) - self._options.maxdifflines + lines = lines[: self._options.maxdifflines] + [ + "... (%d lines omitted. set --maxdifflines to see more) ..." + % omitted + ] for line in lines: + line = highlightdiff(line, self.color) if PYTHON3: self.stream.flush() self.stream.buffer.write(line) @@ -1627,17 +2382,21 @@ class TestResult(unittest._TextTestResult): # handle interactive prompt without releasing iolock if self._options.interactive: - self.stream.write('Accept this change? [n] ') - self.stream.flush() - answer = sys.stdin.readline().strip() - if answer.lower() in ('y', 'yes'): - if test.path.endswith(b'.t'): - rename(test.errpath, test.path) - else: - rename(test.errpath, '%s.out' % test.path) - accepted = True + if test.readrefout() != expected: + self.stream.write( + "Reference output has changed (run again to prompt changes)" + ) + else: + self.stream.write("Accept this change? [n] ") + answer = sys.stdin.readline().strip() + if answer.lower() in ("y", "yes"): + if test.path.endswith(b".t"): + rename(test.errpath, test.path) + else: + rename(test.errpath, "%s.out" % test.path) + accepted = True if not accepted: - self.faildata[test.name] = b''.join(lines) + self.faildata[test.name] = b"".join(lines) return accepted @@ -1649,7 +2408,7 @@ class TestResult(unittest._TextTestResult): # This module has one limitation. It can only work for Linux user # and not for Windows. 
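
# NOTE: illustrative sketch, not part of the patch. startTest()/stopTest()
# around this hunk time each test with os.times() snapshots: indexes 2 and 3
# are the accumulated user and system CPU time of child processes and index 4
# is elapsed real time, so the deltas give the cuser/csys/real columns of the
# time report. For example:
import os
import subprocess

def timedrun(cmd):
    start = os.times()
    subprocess.call(cmd)
    end = os.times()
    return {
        "cuser": end[2] - start[2],   # children's user CPU time
        "csys": end[3] - start[3],    # children's system CPU time
        "real": end[4] - start[4],    # wall-clock time
    }

# timedrun(["sleep", "1"]) -> roughly {"cuser": 0.0, "csys": 0.0, "real": 1.0}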
test.started = os.times() - if self._firststarttime is None: # thread racy but irrelevant + if self._firststarttime is None: # thread racy but irrelevant self._firststarttime = test.started[4] def stopTest(self, test, interrupted=False): @@ -1660,26 +2419,43 @@ class TestResult(unittest._TextTestResult): starttime = test.started endtime = test.stopped origin = self._firststarttime - self.times.append((test.name, - endtime[2] - starttime[2], # user space CPU time - endtime[3] - starttime[3], # sys space CPU time - endtime[4] - starttime[4], # real time - starttime[4] - origin, # start date in run context - endtime[4] - origin, # end date in run context - )) + self.times.append( + ( + test.name, + endtime[2] - starttime[2], # user space CPU time + endtime[3] - starttime[3], # sys space CPU time + endtime[4] - starttime[4], # real time + starttime[4] - origin, # start date in run context + endtime[4] - origin, # end date in run context + ) + ) if interrupted: with iolock: - self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % ( - test.name, self.times[-1][3])) + self.stream.writeln( + "INTERRUPTED: %s (after %d seconds)" + % (test.name, self.times[-1][3]) + ) + class TestSuite(unittest.TestSuite): """Custom unittest TestSuite that knows how to execute Mercurial tests.""" - def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None, - retest=False, keywords=None, loop=False, runs_per_test=1, - loadtest=None, showchannels=False, - *args, **kwargs): + def __init__( + self, + testdir, + jobs=1, + whitelist=None, + blacklist=None, + retest=False, + keywords=None, + loop=False, + runs_per_test=1, + loadtest=None, + showchannels=False, + *args, + **kwargs + ): """Create a new instance that can run tests with a configuration. testdir specifies the directory where tests are executed from. 
This @@ -1726,28 +2502,29 @@ class TestSuite(unittest.TestSuite): tests = [] num_tests = [0] for test in self._tests: + def get(): num_tests[0] += 1 - if getattr(test, 'should_reload', False): + if getattr(test, "should_reload", False): return self._loadtest(test, num_tests[0]) return test + if not os.path.exists(test.path): result.addSkip(test, "Doesn't exist") continue - if not (self._whitelist and test.name in self._whitelist): + if not (self._whitelist and test.bname in self._whitelist): if self._blacklist and test.bname in self._blacklist: - result.addSkip(test, 'blacklisted') + result.addSkip(test, "blacklisted") continue if self._retest and not os.path.exists(test.errpath): - result.addIgnore(test, 'not retesting') + result.addIgnore(test, "not retesting") continue if self._keywords: - f = open(test.path, 'rb') - t = f.read().lower() + test.bname.lower() - f.close() + with open(test.path, "rb") as f: + t = f.read().lower() + test.bname.lower() ignored = False for k in self._keywords.lower().split(): if k not in t: @@ -1765,6 +2542,7 @@ class TestSuite(unittest.TestSuite): running = 0 channels = [""] * self._jobs + runningtests = collections.OrderedDict() # {test name: (test, start time)} def job(test, result): for n, v in enumerate(channels): @@ -1772,46 +2550,103 @@ class TestSuite(unittest.TestSuite): channel = n break else: - raise ValueError('Could not find output channel') + raise ValueError("Could not find output channel") + runningtests[test.name] = (test, time.time()) channels[channel] = "=" + test.name[5:].split(".")[0] try: test(result) done.put(None) except KeyboardInterrupt: pass - except: # re-raises - done.put(('!', test, 'run-test raised an error, see traceback')) + except: # re-raises + done.put(("!", test, "run-test raised an error, see traceback")) raise finally: + del runningtests[test.name] try: - channels[channel] = '' + channels[channel] = "" except IndexError: pass def stat(): count = 0 while channels: - d = '\n%03s ' % count + d = "\n%03s " % count for n, v in enumerate(channels): if v: d += v[0] - channels[n] = v[1:] or '.' + channels[n] = v[1:] or "." 
else: - d += ' ' - d += ' ' + d += " " + d += " " with iolock: - sys.stdout.write(d + ' ') + sys.stdout.write(d + " ") sys.stdout.flush() for x in xrange(10): if channels: - time.sleep(.1) + time.sleep(0.1) count += 1 + def singleprogressbar(value, total, char="="): + if total: + if value > total: + value = total + progresschars = char * int(value * 20 / total) + if progresschars and len(progresschars) < 20: + progresschars += ">" + return "[%-20s]" % progresschars + else: + return " " * 22 + + blacklisted = len(result.skipped) + initialtestsrun = result.testsRun + + def progressrenderer(): + lines = [] + suitestart = time.time() + total = len(runtests) + while channels: + failed = len(result.failures) + len(result.errors) + skipped = len(result.skipped) - blacklisted + testsrun = result.testsRun - initialtestsrun + remaining = total - testsrun - skipped + passed = testsrun - failed - len(runningtests) + now = time.time() + timepassed = now - suitestart + lines = [] + runningfrac = 0.0 + for name, (test, teststart) in runningtests.iteritems(): + try: + saltseen, saltcount = getattr(test, "progress") + runningfrac += saltseen * 1.0 / saltcount + testprogress = singleprogressbar(saltseen, saltcount, char="-") + except Exception: + testprogress = singleprogressbar(0, 0) + lines.append( + "%s %-52s %.1fs" % (testprogress, name[:52], now - teststart) + ) + lines[0:0] = [ + "%s %-52s %.1fs" + % ( + singleprogressbar( + runningfrac + failed + passed + skipped, total + ), + "%s Passed. %s Failed. %s Skipped. %s Remaining" + % (passed, failed, skipped, remaining), + timepassed, + ) + ] + progress.update(lines) + time.sleep(0.1) + stoppedearly = False if self._showchannels: statthread = threading.Thread(target=stat, name="stat") statthread.start() + elif showprogress: + progressthread = threading.Thread(target=progressrenderer, name="progress") + progressthread.start() try: while tests or running: @@ -1827,17 +2662,17 @@ class TestSuite(unittest.TestSuite): if tests and not running == self._jobs: test = tests.pop(0) if self._loop: - if getattr(test, 'should_reload', False): + if getattr(test, "should_reload", False): num_tests[0] += 1 - tests.append( - self._loadtest(test, num_tests[0])) + tests.append(self._loadtest(test, num_tests[0])) else: tests.append(test) if self._jobs == 1: job(test, result) else: - t = threading.Thread(target=job, name=test.name, - args=(test, result)) + t = threading.Thread( + target=job, name=test.name, args=(test, result) + ) t.start() running += 1 @@ -1860,15 +2695,17 @@ class TestSuite(unittest.TestSuite): return result + # Save the most recent 5 wall-clock runtimes of each test to a # human-readable text file named .testtimes. Tests are sorted # alphabetically, while times for each test are listed from oldest to # newest. 
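
# NOTE: illustrative sketch, not part of the patch. loadtimes()/savetimes()
# below maintain the ".testtimes" history described in the comment above: one
# line per test, holding up to the five most recent wall-clock runtimes from
# oldest to newest. A minimal round-trip of that format:
def readtimes(path):
    times = {}
    with open(path) as fp:
        for line in fp:
            parts = line.split()
            times[parts[0]] = [float(t) for t in parts[1:]]
    return times

def recordtime(times, name, real, maxruns=5):
    ts = times.setdefault(name, [])
    ts.append(real)
    ts[:] = ts[-maxruns:]                     # keep only the newest runs

def writetimes(path, times):
    with open(path, "w") as fp:
        for name, ts in sorted(times.items()):
            fp.write("%s %s\n" % (name, " ".join("%.3f" % t for t in ts)))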
+ def loadtimes(outputdir): times = [] try: - with open(os.path.join(outputdir, b'.testtimes-')) as fp: + with open(os.path.join(outputdir, b".testtimes-")) as fp: for line in fp: ts = line.split() times.append((ts[0], [float(t) for t in ts[1:]])) @@ -1877,6 +2714,7 @@ def loadtimes(outputdir): raise return times + def savetimes(outputdir, result): saved = dict(loadtimes(outputdir)) maxruns = 5 @@ -1888,12 +2726,11 @@ def savetimes(outputdir, result): ts.append(real) ts[:] = ts[-maxruns:] - fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes', - dir=outputdir, text=True) - with os.fdopen(fd, 'w') as fp: + fd, tmpname = tempfile.mkstemp(prefix=b".testtimes", dir=outputdir, text=True) + with os.fdopen(fd, "w") as fp: for name, ts in sorted(saved.items()): - fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts]))) - timepath = os.path.join(outputdir, b'.testtimes') + fp.write("%s %s\n" % (name, " ".join(["%.3f" % (t,) for t in ts]))) + timepath = os.path.join(outputdir, b".testtimes") try: os.unlink(timepath) except OSError: @@ -1903,6 +2740,7 @@ def savetimes(outputdir, result): except OSError: pass + class TextTestRunner(unittest.TextTestRunner): """Custom unittest test runner that uses appropriate settings.""" @@ -1912,8 +2750,7 @@ class TextTestRunner(unittest.TextTestRunner): self._runner = runner def listtests(self, test): - result = TestResult(self._runner.options, self.stream, - self.descriptions, 0) + result = TestResult(self._runner.options, self.stream, self.descriptions, 0) test = sorted(test, key=lambda t: t.name) for t in test: print(t.name) @@ -1924,101 +2761,135 @@ class TextTestRunner(unittest.TextTestRunner): self._writexunit(result, xuf) if self._runner.options.json: - jsonpath = os.path.join(self._runner._outputdir, b'report.json') - with open(jsonpath, 'w') as fp: + jsonpath = os.path.join(self._runner._outputdir, b"report.json") + with open(jsonpath, "w") as fp: self._writejson(result, fp) return result def run(self, test): - result = TestResult(self._runner.options, self.stream, - self.descriptions, self.verbosity) + result = TestResult( + self._runner.options, self.stream, self.descriptions, self.verbosity + ) test(result) failed = len(result.failures) - warned = len(result.warned) skipped = len(result.skipped) ignored = len(result.ignored) with iolock: - self.stream.writeln('') + self.stream.writeln("") if not self._runner.options.noskips: for test, msg in result.skipped: - self.stream.writeln('Skipped %s: %s' % (test.name, msg)) - for test, msg in result.warned: - self.stream.writeln('Warned %s: %s' % (test.name, msg)) + formatted = "Skipped %s: %s\n" % (test.name, msg) + self.stream.write(highlightmsg(formatted, result.color)) for test, msg in result.failures: - self.stream.writeln('Failed %s: %s' % (test.name, msg)) + formatted = "Failed %s: %s\n" % (test.name, msg) + self.stream.write(highlightmsg(formatted, result.color)) for test, msg in result.errors: - self.stream.writeln('Errored %s: %s' % (test.name, msg)) + self.stream.writeln("Errored %s: %s" % (test.name, msg)) if self._runner.options.xunit: with open(self._runner.options.xunit, "wb") as xuf: self._writexunit(result, xuf) if self._runner.options.json: - jsonpath = os.path.join(self._runner._outputdir, b'report.json') - with open(jsonpath, 'w') as fp: + jsonpath = os.path.join(self._runner._outputdir, b"report.json") + with open(jsonpath, "w") as fp: self._writejson(result, fp) - self._runner._checkhglib('Tested') + self._runner._checkhglib("Tested") savetimes(self._runner._outputdir, result) if 
failed and self._runner.options.known_good_rev: - def nooutput(args): - p = subprocess.Popen(args, stderr=subprocess.STDOUT, - stdout=subprocess.PIPE) - p.stdout.read() - p.wait() - for test, msg in result.failures: - nooutput(['hg', 'bisect', '--reset']), - nooutput(['hg', 'bisect', '--bad', '.']) - nooutput(['hg', 'bisect', '--good', - self._runner.options.known_good_rev]) - # TODO: we probably need to forward some options - # that alter hg's behavior inside the tests. - rtc = '%s %s %s' % (sys.executable, sys.argv[0], test) - sub = subprocess.Popen(['hg', 'bisect', '--command', rtc], - stderr=subprocess.STDOUT, - stdout=subprocess.PIPE) - data = sub.stdout.read() - sub.wait() - m = re.search( - (r'\nThe first (?Pbad|good) revision ' - r'is:\nchangeset: +\d+:(?P[a-f0-9]+)\n.*\n' - r'summary: +(?P[^\n]+)\n'), - data, (re.MULTILINE | re.DOTALL)) - if m is None: - self.stream.writeln( - 'Failed to identify failure point for %s' % test) - continue - dat = m.groupdict() - verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed' - self.stream.writeln( - '%s %s by %s (%s)' % ( - test, verb, dat['node'], dat['summary'])) + self._bisecttests(t for t, m in result.failures) self.stream.writeln( - '# Ran %d tests, %d skipped, %d warned, %d failed.' - % (result.testsRun, - skipped + ignored, warned, failed)) + "# Ran %d tests, %d skipped, %d failed." + % (result.testsRun, skipped + ignored, failed) + ) + result.testsSkipped = skipped + ignored if failed: - self.stream.writeln('python hash seed: %s' % - os.environ['PYTHONHASHSEED']) + self.stream.writeln( + "python hash seed: %s" % os.environ["PYTHONHASHSEED"] + ) if self._runner.options.time: self.printtimes(result.times) + if self._runner.options.exceptions: + exceptions = aggregateexceptions( + os.path.join(self._runner._outputdir, b"exceptions") + ) + total = sum(exceptions.values()) + + self.stream.writeln("Exceptions Report:") + self.stream.writeln( + "%d total from %d frames" % (total, len(exceptions)) + ) + for (frame, line, exc), count in exceptions.most_common(): + self.stream.writeln("%d\t%s: %s" % (count, frame, exc)) + + self.stream.flush() + return result + def _bisecttests(self, tests): + bisectcmd = ["hg", "bisect"] + bisectrepo = self._runner.options.bisect_repo + if bisectrepo: + bisectcmd.extend(["-R", os.path.abspath(bisectrepo)]) + + def pread(args): + env = os.environ.copy() + env["HGPLAIN"] = "1" + p = subprocess.Popen( + args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env + ) + data = p.stdout.read() + p.wait() + return data + + for test in tests: + pread(bisectcmd + ["--reset"]), + pread(bisectcmd + ["--bad", "."]) + pread(bisectcmd + ["--good", self._runner.options.known_good_rev]) + # TODO: we probably need to forward more options + # that alter hg's behavior inside the tests. 
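
# NOTE: illustrative sketch, not part of the patch. _bisecttests() in this
# hunk drives "hg bisect" non-interactively: reset, mark the working parent
# bad, mark the known-good revision good, then let bisect re-run the failing
# test through --command and parse the changeset it blames. The same flow in
# isolation (revision and command below are placeholders):
import os
import subprocess

def pread(args):
    env = dict(os.environ, HGPLAIN="1")       # keep hg output stable
    p = subprocess.Popen(args, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, env=env)
    data = p.stdout.read()
    p.wait()
    return data

def bisecttest(goodrev, testcmd, repo="."):
    bisect = ["hg", "-R", repo, "bisect"]
    pread(bisect + ["--reset"])
    pread(bisect + ["--bad", "."])
    pread(bisect + ["--good", goodrev])
    # bisect re-runs testcmd on each candidate; exit code 0 means "good"
    return pread(bisect + ["--command", testcmd])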
+ opts = "" + withhg = self._runner.options.with_hg + if withhg: + opts += " --with-hg=%s " % shellquote(_strpath(withhg)) + rtc = "%s %s %s %s" % (sys.executable, sys.argv[0], opts, test) + data = pread(bisectcmd + ["--command", rtc]) + m = re.search( + ( + br"\nThe first (?Pbad|good) revision " + br"is:\nchangeset: +\d+:(?P[a-f0-9]+)\n.*\n" + br"summary: +(?P[^\n]+)\n" + ), + data, + (re.MULTILINE | re.DOTALL), + ) + if m is None: + self.stream.writeln("Failed to identify failure point for %s" % test) + continue + dat = m.groupdict() + verb = "broken" if dat["goodbad"] == "bad" else "fixed" + self.stream.writeln( + "%s %s by %s (%s)" % (test, verb, dat["node"], dat["summary"]) + ) + def printtimes(self, times): # iolock held by run - self.stream.writeln('# Producing time report') + self.stream.writeln("# Producing time report") times.sort(key=lambda t: (t[3])) - cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s' - self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' % - ('start', 'end', 'cuser', 'csys', 'real', 'Test')) + cols = "%7.3f %7.3f %7.3f %7.3f %7.3f %s" + self.stream.writeln( + "%-7s %-7s %-7s %-7s %-7s %s" + % ("start", "end", "cuser", "csys", "real", "Test") + ) for tdata in times: test = tdata[0] cuser, csys, real, start, end = tdata[1:6] @@ -2029,52 +2900,52 @@ class TextTestRunner(unittest.TextTestRunner): # See http://llg.cubic.org/docs/junit/ for a reference. timesd = dict((t[0], t[3]) for t in result.times) doc = minidom.Document() - s = doc.createElement('testsuite') - s.setAttribute('name', 'run-tests') - s.setAttribute('tests', str(result.testsRun)) - s.setAttribute('errors', "0") # TODO - s.setAttribute('failures', str(len(result.failures))) - s.setAttribute('skipped', str(len(result.skipped) + - len(result.ignored))) + s = doc.createElement("testsuite") + s.setAttribute("name", "run-tests") + s.setAttribute("tests", str(result.testsRun)) + s.setAttribute("errors", "0") # TODO + s.setAttribute("failures", str(len(result.failures))) + s.setAttribute("skipped", str(len(result.skipped) + len(result.ignored))) doc.appendChild(s) for tc in result.successes: - t = doc.createElement('testcase') - t.setAttribute('name', tc.name) + t = doc.createElement("testcase") + t.setAttribute("name", tc.name) tctime = timesd.get(tc.name) if tctime is not None: - t.setAttribute('time', '%.3f' % tctime) + t.setAttribute("time", "%.3f" % tctime) s.appendChild(t) for tc, err in sorted(result.faildata.items()): - t = doc.createElement('testcase') - t.setAttribute('name', tc) + t = doc.createElement("testcase") + t.setAttribute("name", tc) tctime = timesd.get(tc) if tctime is not None: - t.setAttribute('time', '%.3f' % tctime) + t.setAttribute("time", "%.3f" % tctime) # createCDATASection expects a unicode or it will # convert using default conversion rules, which will # fail if string isn't ASCII. - err = cdatasafe(err).decode('utf-8', 'replace') + err = cdatasafe(err).decode("utf-8", "replace") cd = doc.createCDATASection(err) # Use 'failure' here instead of 'error' to match errors = 0, # failures = len(result.failures) in the testsuite element. - failelem = doc.createElement('failure') - failelem.setAttribute('message', 'output changed') - failelem.setAttribute('type', 'output-mismatch') + failelem = doc.createElement("failure") + failelem.setAttribute("message", "output changed") + failelem.setAttribute("type", "output-mismatch") failelem.appendChild(cd) t.appendChild(failelem) s.appendChild(t) for tc, message in result.skipped: # According to the schema, 'skipped' has no attributes. 
So store # the skip message as a text node instead. - t = doc.createElement('testcase') - t.setAttribute('name', tc.name) - message = cdatasafe(message).decode('utf-8', 'replace') + t = doc.createElement("testcase") + t.setAttribute("name", tc.name) + binmessage = message.encode("utf-8") + message = cdatasafe(binmessage).decode("utf-8", "replace") cd = doc.createCDATASection(message) - skipelem = doc.createElement('skipped') + skipelem = doc.createElement("skipped") skipelem.appendChild(cd) t.appendChild(skipelem) s.appendChild(t) - outf.write(doc.toprettyxml(indent=' ', encoding='utf-8')) + outf.write(doc.toprettyxml(indent=" ", encoding="utf-8")) @staticmethod def _writejson(result, outf): @@ -2084,30 +2955,37 @@ class TextTestRunner(unittest.TextTestRunner): timesd[test] = tdata[1:] outcome = {} - groups = [('success', ((tc, None) - for tc in result.successes)), - ('failure', result.failures), - ('skip', result.skipped)] + groups = [ + ("success", ((tc, None) for tc in result.successes)), + ("failure", result.failures), + ("skip", result.skipped), + ] for res, testcases in groups: for tc, __ in testcases: if tc.name in timesd: - diff = result.faildata.get(tc.name, b'') - tres = {'result': res, - 'time': ('%0.3f' % timesd[tc.name][2]), - 'cuser': ('%0.3f' % timesd[tc.name][0]), - 'csys': ('%0.3f' % timesd[tc.name][1]), - 'start': ('%0.3f' % timesd[tc.name][3]), - 'end': ('%0.3f' % timesd[tc.name][4]), - 'diff': diff.decode('unicode_escape'), - } + diff = result.faildata.get(tc.name, b"") + try: + diff = diff.decode("unicode_escape") + except UnicodeDecodeError as e: + diff = "%r decoding diff, sorry" % e + tres = { + "result": res, + "time": ("%0.3f" % timesd[tc.name][2]), + "cuser": ("%0.3f" % timesd[tc.name][0]), + "csys": ("%0.3f" % timesd[tc.name][1]), + "start": ("%0.3f" % timesd[tc.name][3]), + "end": ("%0.3f" % timesd[tc.name][4]), + "diff": diff, + } else: # blacklisted test - tres = {'result': res} + tres = {"result": res} outcome[tc.name] = tres - jsonout = json.dumps(outcome, sort_keys=True, indent=4, - separators=(',', ': ')) - outf.writelines(("testreport =", jsonout)) + outf.write( + json.dumps(outcome, sort_keys=True, indent=4, separators=(",", ": ")) + ) + class TestRunner(object): """Holds context for executing tests. @@ -2117,20 +2995,18 @@ class TestRunner(object): # Programs required to run tests. REQUIREDTOOLS = [ - os.path.basename(_bytespath(sys.executable)), - b'diff', - b'grep', - b'unzip', - b'gunzip', - b'bunzip2', - b'sed', + b"diff", + b"grep", + b"unzip", + b"gunzip", + b"bunzip2", + b"sed", + b"cmp", + b"dd", ] # Maps file extensions to test class. 
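
# NOTE: illustrative sketch, not part of the patch. _writejson() above emits
# one object per test, keyed by test name, carrying the outcome plus the
# timing fields gathered by TestResult; tests without timing data (the
# blacklisted case) only get the result field. Roughly, report.json is shaped
# like this (values made up, key names taken from the code):
import json

sample = {
    "test-foo.t": {
        "result": "success",
        "time": "1.234", "cuser": "0.500", "csys": "0.100",
        "start": "0.000", "end": "1.234",
        "diff": "",
    },
    "test-bar.t": {"result": "skip"},
}
print(json.dumps(sample, sort_keys=True, indent=4, separators=(",", ": ")))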
- TESTTYPES = [ - (b'.py', PythonTest), - (b'.t', TTest), - ] + TESTTYPES = [(b".py", PythonTest), (b".t", TTest)] def __init__(self): self.options = None @@ -2153,17 +3029,22 @@ class TestRunner(object): """Run the test suite.""" oldmask = os.umask(0o22) try: + if showprogress: + progress.setup() parser = parser or getparser() - options, args = parseargs(args, parser) - # positional arguments are paths to test files to run, so - # we make sure they're all bytestrings - args = [_bytespath(a) for a in args] + options = parseargs(args, parser) + tests = [_bytespath(a) for a in options.tests] + if options.test_list is not None: + for listfile in options.test_list: + with open(listfile, "rb") as f: + tests.extend(t for t in f.read().splitlines() if t) self.options = options self._checktools() - testdescs = self.findtests(args) + testdescs = self.findtests(tests) if options.profile_runner: import statprof + statprof.start() result = self._run(testdescs) if options.profile_runner: @@ -2172,6 +3053,8 @@ class TestRunner(object): return result finally: + if showprogress: + progress.finalize() os.umask(oldmask) def _run(self, testdescs): @@ -2179,22 +3062,23 @@ class TestRunner(object): random.shuffle(testdescs) else: # keywords for slow tests - slow = {b'svn': 10, - b'cvs': 10, - b'hghave': 10, - b'largefiles-update': 10, - b'run-tests': 10, - b'corruption': 10, - b'race': 10, - b'i18n': 10, - b'check': 100, - b'gendoc': 100, - b'contrib-perf': 200, - } + slow = { + b"svn": 10, + b"cvs": 10, + b"hghave": 10, + b"run-tests": 10, + b"corruption": 10, + b"race": 10, + b"i18n": 10, + b"check": 100, + b"gendoc": 100, + b"contrib-perf": 200, + } perf = {} + def sortkey(f): # run largest tests first, as they tend to take the longest - f = f['path'] + f = f["path"] try: return perf[f] except KeyError: @@ -2203,28 +3087,35 @@ class TestRunner(object): except OSError as e: if e.errno != errno.ENOENT: raise - perf[f] = -1e9 # file does not exist, tell early + perf[f] = -1e9 # file does not exist, tell early return -1e9 for kw, mul in slow.items(): if kw in f: val *= mul - if f.endswith(b'.py'): + if f.endswith(b".py"): val /= 10.0 perf[f] = val / 1000.0 return perf[f] + testdescs.sort(key=sortkey) - self._testdir = osenvironb[b'TESTDIR'] = getattr( - os, 'getcwdb', os.getcwd)() + self._testdir = osenvironb[b"TESTDIR"] = getattr(os, "getcwdb", os.getcwd)() + # assume all tests in same folder for now + if testdescs: + pathname = os.path.dirname(testdescs[0]["path"]) + if pathname: + osenvironb[b"TESTDIR"] = os.path.join(osenvironb[b"TESTDIR"], pathname) if self.options.outputdir: self._outputdir = canonpath(_bytespath(self.options.outputdir)) else: self._outputdir = self._testdir + if testdescs and pathname: + self._outputdir = os.path.join(self._outputdir, pathname) - if 'PYTHONHASHSEED' not in os.environ: + if "PYTHONHASHSEED" not in os.environ: # use a random python hash seed all the time # we do the randomness ourself to know what seed is used - os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32)) + os.environ["PYTHONHASHSEED"] = str(random.getrandbits(32)) if self.options.tmpdir: self.options.keep_tmpdir = True @@ -2239,19 +3130,18 @@ class TestRunner(object): # Automatically removing tmpdir sounds convenient, but could # really annoy anyone in the habit of using "--tmpdir=/tmp" # or "--tmpdir=$HOME". 
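
# NOTE: illustrative sketch, not part of the patch. The sortkey() helper
# earlier in this hunk schedules the most expensive tests first: the cost
# estimate starts from the file size, is multiplied when the name contains a
# known-slow keyword, and is divided for .py tests. Reduced to its core (the
# keyword table is abbreviated, and the sort call is my own phrasing rather
# than the runner's exact expression):
import os

SLOW = {"svn": 10, "run-tests": 10, "check": 100, "contrib-perf": 200}

def estimatedcost(path):
    try:
        cost = os.stat(path).st_size
    except OSError:
        return -1e9               # missing file: flag it early
    for keyword, mul in SLOW.items():
        if keyword in path:
            cost *= mul
    if path.endswith(".py"):
        cost /= 10.0              # .py tests tend to be cheaper than .t tests
    return cost

# tests.sort(key=estimatedcost, reverse=True)   # biggest estimate first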
- #vlog("# Removing temp dir", tmpdir) - #shutil.rmtree(tmpdir) + # vlog("# Removing temp dir", tmpdir) + # shutil.rmtree(tmpdir) os.makedirs(tmpdir) else: d = None - if os.name == 'nt': + if os.name == "nt": # without this, we get the default temp dir location, but # in all lowercase, which causes troubles with paths (issue3490) - d = osenvironb.get(b'TMP', None) - tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d) + d = osenvironb.get(b"TMP", None) + tmpdir = tempfile.mkdtemp(b"", b"hgtests.", d) - self._hgtmp = osenvironb[b'HGTMP'] = ( - os.path.realpath(tmpdir)) + self._hgtmp = osenvironb[b"HGTMP"] = os.path.realpath(tmpdir) if self.options.with_hg: self._installdir = None @@ -2260,7 +3150,7 @@ class TestRunner(object): assert isinstance(self._bindir, bytes) # use full path, since _hgcommand will also be used as ui.remotecmd self._hgcommand = os.path.realpath(whg) - self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin') + self._tmpbindir = os.path.join(self._hgtmp, b"install", b"bin") os.makedirs(self._tmpbindir) # This looks redundant with how Python initializes sys.path from @@ -2272,31 +3162,39 @@ class TestRunner(object): else: self._installdir = os.path.join(self._hgtmp, b"install") self._bindir = os.path.join(self._installdir, b"bin") - self._hgcommand = os.path.join(self._bindir, b'hg') + self._hgcommand = os.path.join(self._bindir, b"hg") self._tmpbindir = self._bindir self._pythondir = os.path.join(self._installdir, b"lib", b"python") # set CHGHG, then replace "hg" command by "chg" chgbindir = self._bindir if self.options.chg or self.options.with_chg: - osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand) + osenvironb[b"CHGHG"] = os.path.join(self._bindir, self._hgcommand) else: - osenvironb.pop(b'CHGHG', None) # drop flag for hghave + osenvironb.pop(b"CHGHG", None) # drop flag for hghave if self.options.chg: - self._hgcommand = b'chg' + self._hgcommand = b"chg" elif self.options.with_chg: chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg)) self._hgcommand = os.path.basename(self.options.with_chg) + if self.options.with_watchman or self.options.watchman: + self._watchman = self.options.with_watchman or "watchman" + osenvironb[b"HGFSMONITOR_TESTS"] = b"1" + else: + osenvironb[b"BINDIR"] = self._bindir + self._watchman = None + if b"HGFSMONITOR_TESTS" in osenvironb: + del osenvironb[b"HGFSMONITOR_TESTS"] osenvironb[b"BINDIR"] = self._bindir osenvironb[b"PYTHON"] = PYTHON if self.options.with_python3: - osenvironb[b'PYTHON3'] = self.options.with_python3 + osenvironb[b"PYTHON3"] = self.options.with_python3 fileb = _bytespath(__file__) runtestdir = os.path.abspath(os.path.dirname(fileb)) - osenvironb[b'RUNTESTDIR'] = runtestdir + osenvironb[b"RUNTESTDIR"] = runtestdir if PYTHON3: sepb = _bytespath(os.pathsep) else: @@ -2319,6 +3217,7 @@ class TestRunner(object): # can run .../tests/run-tests.py test-foo where test-foo # adds an extension to HGRC. Also include run-test.py directory to # import modules like heredoctest. + # self._pythondir should make "import mercurial" do the right thing. 
pypath = [self._pythondir, self._testdir, runtestdir] # We have to augment PYTHONPATH, rather than simply replacing # it, in case external libraries are only available via current @@ -2335,22 +3234,42 @@ class TestRunner(object): if self.options.allow_slow_tests: os.environ["HGTEST_SLOW"] = "slow" - elif 'HGTEST_SLOW' in os.environ: - del os.environ['HGTEST_SLOW'] + elif "HGTEST_SLOW" in os.environ: + del os.environ["HGTEST_SLOW"] - self._coveragefile = os.path.join(self._testdir, b'.coverage') + self._coveragefile = os.path.join(self._testdir, b".coverage") + + if self.options.exceptions: + exceptionsdir = os.path.join(self._outputdir, b"exceptions") + try: + os.makedirs(exceptionsdir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + # Remove all existing exception reports. + for f in os.listdir(exceptionsdir): + os.unlink(os.path.join(exceptionsdir, f)) + + osenvironb[b"HGEXCEPTIONSDIR"] = exceptionsdir + logexceptions = os.path.join(self._testdir, b"logexceptions.py") + self.options.extra_config_opt.append( + "extensions.logexceptions=%s" % logexceptions.decode("utf-8") + ) vlog("# Using TESTDIR", self._testdir) - vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR']) + vlog("# Using RUNTESTDIR", osenvironb[b"RUNTESTDIR"]) vlog("# Using HGTMP", self._hgtmp) vlog("# Using PATH", os.environ["PATH"]) vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH]) + if self._watchman: + vlog("# Using watchman", self._watchman) vlog("# Writing to directory", self._outputdir) try: return self._runtests(testdescs) or 0 finally: - time.sleep(.1) + time.sleep(0.1) self._cleanup() def findtests(self, args): @@ -2361,48 +3280,64 @@ class TestRunner(object): """ if not args: if self.options.changed: - proc = Popen4('hg st --rev "%s" -man0 .' % - self.options.changed, None, 0) + proc = Popen4( + 'hg st --rev "%s" -man0 .' 
% self.options.changed, None, 0 + ) stdout, stderr = proc.communicate() - args = stdout.strip(b'\0').split(b'\0') + args = stdout.strip(b"\0").split(b"\0") else: - args = os.listdir(b'.') + args = os.listdir(b".") + + expanded_args = [] + for arg in args: + if os.path.isdir(arg): + if not arg.endswith(b"/"): + arg += b"/" + expanded_args.extend([arg + a for a in os.listdir(arg)]) + else: + expanded_args.append(arg) + args = expanded_args tests = [] for t in args: - if not (os.path.basename(t).startswith(b'test-') - and (t.endswith(b'.py') or t.endswith(b'.t'))): + if not ( + os.path.basename(t).startswith(b"test-") + and (t.endswith(b".py") or t.endswith(b".t")) + ): continue - if t.endswith(b'.t'): + if t.endswith(b".t"): # .t file may contain multiple test cases cases = sorted(parsettestcases(t)) if cases: - tests += [{'path': t, 'case': c} for c in sorted(cases)] + tests += [{"path": t, "case": c} for c in sorted(cases)] else: - tests.append({'path': t}) + tests.append({"path": t}) else: - tests.append({'path': t}) + tests.append({"path": t}) return tests def _runtests(self, testdescs): def _reloadtest(test, i): # convert a test back to its description dict - desc = {'path': test.path} - case = getattr(test, '_case', None) + desc = {"path": test.path} + case = getattr(test, "_case", None) if case: - desc['case'] = case + desc["case"] = case return self._gettest(desc, i) + failed = False + allskipped = False + errored = False try: if self.options.restart: orig = list(testdescs) while testdescs: desc = testdescs[0] # desc['path'] is a relative path - if 'case' in desc: - errpath = b'%s.%s.err' % (desc['path'], desc['case']) + if "case" in desc: + errpath = b"%s.%s.err" % (desc["path"], desc["case"]) else: - errpath = b'%s.err' % desc['path'] + errpath = b"%s.err" % desc["path"] errpath = os.path.join(self._outputdir, errpath) if os.path.exists(errpath): break @@ -2413,22 +3348,24 @@ class TestRunner(object): tests = [self._gettest(d, i) for i, d in enumerate(testdescs)] - failed = False - warned = False kws = self.options.keywords if kws is not None and PYTHON3: - kws = kws.encode('utf-8') + kws = kws.encode("utf-8") - suite = TestSuite(self._testdir, - jobs=self.options.jobs, - whitelist=self.options.whitelisted, - blacklist=self.options.blacklist, - retest=self.options.retest, - keywords=kws, - loop=self.options.loop, - runs_per_test=self.options.runs_per_test, - showchannels=self.options.showchannels, - tests=tests, loadtest=_reloadtest) + vlog("# Running TestSuite with %d jobs" % self.options.jobs) + suite = TestSuite( + self._testdir, + jobs=self.options.jobs, + whitelist=self.options.whitelisted, + blacklist=self.options.blacklist, + retest=self.options.retest, + keywords=kws, + loop=self.options.loop, + runs_per_test=self.options.runs_per_test, + showchannels=self.options.showchannels, + tests=tests, + loadtest=_reloadtest, + ) verbosity = 1 if self.options.verbose: verbosity = 2 @@ -2449,11 +3386,13 @@ class TestRunner(object): self._usecorrecthg() result = runner.run(suite) + if tests and result.testsSkipped == len(tests): + allskipped = True + if tests and result.errors: + errored = True if result.failures: failed = True - if result.warned: - warned = True if self.options.anycoverage: self._outputcoverage() @@ -2463,11 +3402,13 @@ class TestRunner(object): if failed: return 1 - if warned: - return 80 + elif allskipped: + return Test.SKIPPED_STATUS + elif errored: + return 2 def _getport(self, count): - port = self._ports.get(count) # do we have a cached entry? 
+ port = self._ports.get(count) # do we have a cached entry? if port is None: portneeded = 3 # above 100 tries we just give up and let test reports failure @@ -2490,7 +3431,7 @@ class TestRunner(object): Returns a Test instance. The Test may not be runnable if it doesn't map to a known type. """ - path = testdesc['path'] + path = testdesc["path"] lctest = path.lower() testcls = Test @@ -2500,22 +3441,30 @@ class TestRunner(object): break refpath = os.path.join(self._testdir, path) - tmpdir = os.path.join(self._hgtmp, b'child%d' % count) + tmpdir = os.path.join(self._hgtmp, b"child%d" % count) # extra keyword parameters. 'case' is used by .t tests - kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc) + kwds = dict((k, testdesc[k]) for k in ["case"] if k in testdesc) - t = testcls(refpath, self._outputdir, tmpdir, - keeptmpdir=self.options.keep_tmpdir, - debug=self.options.debug, - timeout=self.options.timeout, - startport=self._getport(count), - extraconfigopts=self.options.extra_config_opt, - py3kwarnings=self.options.py3k_warnings, - shell=self.options.shell, - hgcommand=self._hgcommand, - usechg=bool(self.options.with_chg or self.options.chg), - useipv6=useipv6, **kwds) + t = testcls( + refpath, + self._outputdir, + tmpdir, + keeptmpdir=self.options.keep_tmpdir, + debug=self.options.debug, + first=self.options.first, + timeout=self.options.timeout, + startport=self._getport(count), + extraconfigopts=self.options.extra_config_opt, + extrarcpaths=self.options.extra_rcpath, + py3kwarnings=self.options.py3k_warnings, + shell=self.options.shell, + hgcommand=self._hgcommand, + usechg=bool(self.options.with_chg or self.options.chg), + useipv6=useipv6, + watchman=self._watchman, + **kwds + ) t.should_reload = True return t @@ -2535,10 +3484,12 @@ class TestRunner(object): def _usecorrectpython(self): """Configure the environment to use the appropriate Python in tests.""" # Tests must use the same interpreter as us or bad things will happen. 
- pyexename = sys.platform == 'win32' and b'python.exe' or b'python' - if getattr(os, 'symlink', None): - vlog("# Making python executable in test path a symlink to '%s'" % - sys.executable) + pyexename = sys.platform == "win32" and b"python.exe" or b"python" + if getattr(os, "symlink", None): + vlog( + "# Making python executable in test path a symlink to '%s'" + % sys.executable + ) mypython = os.path.join(self._tmpbindir, pyexename) try: if os.readlink(mypython) == sys.executable: @@ -2557,23 +3508,34 @@ class TestRunner(object): raise else: exedir, exename = os.path.split(sys.executable) - vlog("# Modifying search path to find %s as %s in '%s'" % - (exename, pyexename, exedir)) - path = os.environ['PATH'].split(os.pathsep) + vlog( + "# Modifying search path to find %s as %s in '%s'" + % (exename, pyexename, exedir) + ) + path = os.environ["PATH"].split(os.pathsep) while exedir in path: path.remove(exedir) - os.environ['PATH'] = os.pathsep.join([exedir] + path) + os.environ["PATH"] = os.pathsep.join([exedir] + path) if not self._findprogram(pyexename): print("WARNING: Cannot find %s in search path" % pyexename) def _usecorrecthg(self): """Configure the environment to use the appropriate hg in tests.""" - if os.path.basename(self._hgcommand) in ('hg', 'hg.exe'): + if os.path.basename(self._hgcommand) in ("hg", "hg.exe"): # No correction is needed return - if getattr(os, 'symlink', None): - tmphgpath = os.path.join(self._tmpbindir, b'hg') + if getattr(os, "symlink", None): + tmphgpath = os.path.join(self._tmpbindir, "hg") vlog("# Symlink %s to %s" % (self._hgcommand, tmphgpath)) + entrypointpath = os.path.join( + os.path.dirname(os.path.realpath(self._hgcommand)), + "mercurial", + "entrypoint.py", + ) + if os.path.exists(entrypointpath): + vlog("# HGPYENTRYPOINT=%s" % entrypointpath) + os.environ["HGPYENTRYPOINT"] = entrypointpath + try: os.symlink(self._hgcommand, tmphgpath) self._createdfiles.append(tmphgpath) @@ -2582,8 +3544,7 @@ class TestRunner(object): if err.errno != errno.EEXIST: raise else: - raise SystemExit('%s could not be put in search path' - % self._hgcommand) + raise SystemExit("%s could not be put in search path" % self._hgcommand) def _installhg(self): """Install hg into the test environment. @@ -2592,9 +3553,9 @@ class TestRunner(object): """ vlog("# Performing temporary installation of HG") installerrs = os.path.join(self._hgtmp, b"install.err") - compiler = '' + compiler = "" if self.options.compiler: - compiler = '--compiler ' + self.options.compiler + compiler = "--compiler " + self.options.compiler if self.options.pure: pure = b"--pure" else: @@ -2611,23 +3572,30 @@ class TestRunner(object): self._hgroot = hgroot os.chdir(hgroot) nohome = b'--home=""' - if os.name == 'nt': + if os.name == "nt": # The --home="" trick works only on OS where os.sep == '/' # because of a distutils convert_path() fast-path. Avoid it at # least on Windows for now, deal with .pydistutils.cfg bugs # when they happen. 
- nohome = b'' - cmd = (b'%(exe)s setup.py %(pure)s clean --all' - b' build %(compiler)s --build-base="%(base)s"' - b' install --force --prefix="%(prefix)s"' - b' --install-lib="%(libdir)s"' - b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' - % {b'exe': exe, b'pure': pure, - b'compiler': compiler, - b'base': os.path.join(self._hgtmp, b"build"), - b'prefix': self._installdir, b'libdir': self._pythondir, - b'bindir': self._bindir, - b'nohome': nohome, b'logfile': installerrs}) + nohome = b"" + cmd = ( + b"%(exe)s setup.py %(pure)s clean --all" + b' build %(compiler)s --build-base="%(base)s"' + b' install --force --prefix="%(prefix)s"' + b' --install-lib="%(libdir)s"' + b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' + % { + b"exe": exe, + b"pure": pure, + b"compiler": compiler, + b"base": os.path.join(self._hgtmp, b"build"), + b"prefix": self._installdir, + b"libdir": self._pythondir, + b"bindir": self._bindir, + b"nohome": nohome, + b"logfile": installerrs, + } + ) # setuptools requires install directories to exist. def makedirs(p): @@ -2636,6 +3604,7 @@ class TestRunner(object): except OSError as e: if e.errno != errno.EEXIST: raise + makedirs(self._pythondir) makedirs(self._bindir) @@ -2648,13 +3617,12 @@ class TestRunner(object): if e.errno != errno.ENOENT: raise else: - f = open(installerrs, 'rb') - for line in f: - if PYTHON3: - sys.stdout.buffer.write(line) - else: - sys.stdout.write(line) - f.close() + with open(installerrs, "rb") as f: + for line in f: + if PYTHON3: + sys.stdout.buffer.write(line) + else: + sys.stdout.write(line) sys.exit(1) os.chdir(self._testdir) @@ -2662,62 +3630,60 @@ class TestRunner(object): if self.options.py3k_warnings and not self.options.anycoverage: vlog("# Updating hg command to enable Py3k Warnings switch") - f = open(os.path.join(self._bindir, 'hg'), 'rb') - lines = [line.rstrip() for line in f] - lines[0] += ' -3' - f.close() - f = open(os.path.join(self._bindir, 'hg'), 'wb') - for line in lines: - f.write(line + '\n') - f.close() + with open(os.path.join(self._bindir, "hg"), "rb") as f: + lines = [line.rstrip() for line in f] + lines[0] += " -3" + with open(os.path.join(self._bindir, "hg"), "wb") as f: + for line in lines: + f.write(line + "\n") - hgbat = os.path.join(self._bindir, b'hg.bat') + hgbat = os.path.join(self._bindir, b"hg.bat") if os.path.isfile(hgbat): # hg.bat expects to be put in bin/scripts while run-tests.py # installation layout put it in bin/ directly. 
Fix it - f = open(hgbat, 'rb') - data = f.read() - f.close() + with open(hgbat, "rb") as f: + data = f.read() if b'"%~dp0..\\python" "%~dp0hg" %*' in data: - data = data.replace(b'"%~dp0..\\python" "%~dp0hg" %*', - b'"%~dp0python" "%~dp0hg" %*') - f = open(hgbat, 'wb') - f.write(data) - f.close() + data = data.replace( + b'"%~dp0..\\python" "%~dp0hg" %*', b'"%~dp0python" "%~dp0hg" %*' + ) + with open(hgbat, "wb") as f: + f.write(data) else: - print('WARNING: cannot fix hg.bat reference to python.exe') + print("WARNING: cannot fix hg.bat reference to python.exe") if self.options.anycoverage: - custom = os.path.join(self._testdir, 'sitecustomize.py') - target = os.path.join(self._pythondir, 'sitecustomize.py') - vlog('# Installing coverage trigger to %s' % target) + custom = os.path.join(self._testdir, "sitecustomize.py") + target = os.path.join(self._pythondir, "sitecustomize.py") + vlog("# Installing coverage trigger to %s" % target) shutil.copyfile(custom, target) - rc = os.path.join(self._testdir, '.coveragerc') - vlog('# Installing coverage rc to %s' % rc) - os.environ['COVERAGE_PROCESS_START'] = rc - covdir = os.path.join(self._installdir, '..', 'coverage') + rc = os.path.join(self._testdir, ".coveragerc") + vlog("# Installing coverage rc to %s" % rc) + os.environ["COVERAGE_PROCESS_START"] = rc + covdir = os.path.join(self._installdir, "..", "coverage") try: os.mkdir(covdir) except OSError as e: if e.errno != errno.EEXIST: raise - os.environ['COVERAGE_DIR'] = covdir + os.environ["COVERAGE_DIR"] = covdir def _checkhglib(self, verb): """Ensure that the 'mercurial' package imported by python is the one we expect it to be. If not, print a warning to stderr.""" - if ((self._bindir == self._pythondir) and - (self._bindir != self._tmpbindir)): + if (self._bindir == self._pythondir) and (self._bindir != self._tmpbindir): # The pythondir has been inferred from --with-hg flag. # We cannot expect anything sensible here. return - expecthg = os.path.join(self._pythondir, b'mercurial') + expecthg = os.path.join(self._pythondir, b"mercurial") actualhg = self._gethgpath() if os.path.abspath(actualhg) != os.path.abspath(expecthg): - sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n' - ' (expected %s)\n' - % (verb, actualhg, expecthg)) + sys.stderr.write( + "warning: %s with unexpected mercurial lib: %s\n" + " (expected %s)\n" % (verb, actualhg, expecthg) + ) + def _gethgpath(self): """Return the path to the mercurial package that is actually found by the current Python interpreter.""" @@ -2738,17 +3704,23 @@ class TestRunner(object): def _installchg(self): """Install chg into the test environment""" - vlog('# Performing temporary installation of CHG') + vlog("# Performing temporary installation of CHG") assert os.path.dirname(self._bindir) == self._installdir - assert self._hgroot, 'must be called after _installhg()' - cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"' - % {b'make': 'make', # TODO: switch by option or environment? - b'prefix': self._installdir}) - cwd = os.path.join(self._hgroot, b'contrib', b'chg') + assert self._hgroot, "must be called after _installhg()" + cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % { + b"make": "make", # TODO: switch by option or environment? 
+ b"prefix": self._installdir, + } + cwd = os.path.join(self._hgroot, b"contrib", b"chg") vlog("# Running", cmd) - proc = subprocess.Popen(cmd, shell=True, cwd=cwd, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + proc = subprocess.Popen( + cmd, + shell=True, + cwd=cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) out, _err = proc.communicate() if proc.returncode != 0: if PYTHON3: @@ -2760,28 +3732,29 @@ class TestRunner(object): def _outputcoverage(self): """Produce code coverage output.""" import coverage + coverage = coverage.coverage - vlog('# Producing coverage report') + vlog("# Producing coverage report") # chdir is the easiest way to get short, relative paths in the # output. os.chdir(self._hgroot) - covdir = os.path.join(self._installdir, '..', 'coverage') - cov = coverage(data_file=os.path.join(covdir, 'cov')) + covdir = os.path.join(self._installdir, "..", "coverage") + cov = coverage(data_file=os.path.join(covdir, "cov")) # Map install directory paths back to source directory. - cov.config.paths['srcdir'] = ['.', self._pythondir] + cov.config.paths["srcdir"] = [".", self._pythondir] cov.combine() - omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]] + omit = [os.path.join(x, "*") for x in [self._bindir, self._testdir]] cov.report(ignore_errors=True, omit=omit) if self.options.htmlcov: - htmldir = os.path.join(self._outputdir, 'htmlcov') + htmldir = os.path.join(self._outputdir, "htmlcov") cov.html_report(directory=htmldir, omit=omit) if self.options.annotate: - adir = os.path.join(self._outputdir, 'annotated') + adir = os.path.join(self._outputdir, "annotated") if not os.path.isdir(adir): os.mkdir(adir) cov.annotate(directory=adir, omit=omit) @@ -2790,29 +3763,76 @@ class TestRunner(object): """Search PATH for a executable program""" dpb = _bytespath(os.defpath) sepb = _bytespath(os.pathsep) - for p in osenvironb.get(b'PATH', dpb).split(sepb): + for p in osenvironb.get(b"PATH", dpb).split(sepb): name = os.path.join(p, program) - if os.name == 'nt' or os.access(name, os.X_OK): + if os.name == "nt" or os.access(name, os.X_OK): return name return None def _checktools(self): """Ensure tools required to run tests are present.""" for p in self.REQUIREDTOOLS: - if os.name == 'nt' and not p.endswith('.exe'): - p += '.exe' + if os.name == "nt" and not p.endswith(".exe"): + p += ".exe" found = self._findprogram(p) if found: vlog("# Found prerequisite", p, "at", found) else: - print("WARNING: Did not find prerequisite tool: %s " % - p.decode("utf-8")) + print( + "WARNING: Did not find prerequisite tool: %s " % p.decode("utf-8") + ) -if __name__ == '__main__': + +def aggregateexceptions(path): + exceptions = collections.Counter() + + for f in os.listdir(path): + with open(os.path.join(path, f), "rb") as fh: + data = fh.read().split(b"\0") + if len(data) != 4: + continue + + exc, mainframe, hgframe, hgline = data + exc = exc.decode("utf-8") + mainframe = mainframe.decode("utf-8") + hgframe = hgframe.decode("utf-8") + hgline = hgline.decode("utf-8") + exceptions[(hgframe, hgline, exc)] += 1 + + return exceptions + + +def ensureenv(): + """Load build/env's environment variables. + + If build/env has specified a different set of environment variables, + restart the current command. Otherwise do nothing. 
+ """ + hgdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + envpath = os.path.join(hgdir, "build", "env") + if not os.path.exists(envpath): + return + with open(envpath, "r") as f: + env = dict(l.split("=", 1) for l in f.read().splitlines() if "=" in l) + if all(os.environ.get(k) == v for k, v in env.items()): + # No restart needed + return + # Restart with new environment + newenv = os.environ.copy() + newenv.update(env) + # Pick the right Python interpreter + python = env.get("PYTHON_SYS_EXECUTABLE", sys.executable) + p = subprocess.Popen([python] + sys.argv, env=newenv) + sys.exit(p.wait()) + + +if __name__ == "__main__": + ensureenv() runner = TestRunner() try: import msvcrt + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) diff --git a/tests/integration/third_party/tinit.sh b/tests/integration/third_party/tinit.sh new file mode 100644 index 0000000000..8ac25b628f --- /dev/null +++ b/tests/integration/third_party/tinit.sh @@ -0,0 +1,91 @@ +# This file will be sourced by all .t tests. Put general purposed functions +# here. + +_repocount=0 + +# Create a new repo +newrepo() { + reponame="$1" + if [ -z "$reponame" ]; then + _repocount=$((_repocount+1)) + reponame=repo$_repocount + fi + mkdir "$TESTTMP/$reponame" + cd "$TESTTMP/$reponame" + hg init +} + +switchrepo() { + reponame="$1" + cd $TESTTMP/$reponame +} + +# Enable extensions or features +enable() { + local rcpath + # .hg/hgrc may not exist yet, so just check for requires + if [ -f .hg/requires ]; then + rcpath=.hg/hgrc + else + rcpath="$HGRCPATH" + fi + for name in "$@"; do + if [ "$name" = obsstore ]; then + cat >> $rcpath << EOF +[experimental] +evolution = createmarkers, allowunstable +EOF + else + cat >> $rcpath << EOF +[extensions] +$name= +EOF + fi + done +} + +# Like "hg debugdrawdag", but do not leave local tags in the repo and define +# nodes as environment variables. +# This is useful if the test wants to hide those commits because tags would +# make commits visible. The function will set environment variables so +# commits can still be referred as $TAGNAME. +drawdag() { + hg debugdrawdag "$@" + eval `hg tags -T '{tag}={node}\n'` + rm -f .hg/localtags +} + +# Simplify error reporting so crash does not show a traceback. +# This is useful to match error messages without the traceback. +shorttraceback() { + enable errorredirect + setconfig errorredirect.script='printf "%s" "$TRACE" | tail -1 1>&2' +} + +# Set config items like --config way, instead of using cat >> $HGRCPATH +setconfig() { + python "$RUNTESTDIR/setconfig.py" "$@" +} + +# Create a new extension +newext() { + extname="$1" + if [ -z "$extname" ]; then + _extcount=$((_extcount+1)) + extname=ext$_extcount + fi + cat > "$TESTTMP/$extname.py" + setconfig "extensions.$extname=$TESTTMP/$extname.py" +} + +showgraph() { + hg log --graph -T "{rev} {node|short} {desc|firstline}" | sed \$d +} + +tglog() { + hg log -G -T "{rev}: {node|short} '{desc}' {bookmarks} {branches}" "$@" +} + +tglogp() { + hg log -G -T "{rev}: {node|short} {phase} '{desc}' {bookmarks} {branches}" "$@" +}