Mirror of https://github.com/sharkdp/hyperfine.git, synced 2024-11-22 11:43:03 +03:00

Update formatting, use f-strings

commit 65dd84fb07 (parent b0d29f01b0)
@@ -22,18 +22,15 @@ for command, ts in zip(commands, times):
     iqr = p75 - p25

-    print("Command '{}'".format(command))
+    print(f"Command '{command}'")
-    print(" runs: {:8d}".format(len(ts)))
+    print(f" runs: {len(ts):8d}")
-    print(" mean: {:8.3f} s".format(np.mean(ts)))
+    print(f" mean: {np.mean(ts):8.3f} s")
-    print(" stddev: {:8.3f} s".format(np.std(ts, ddof=1)))
+    print(f" stddev: {np.std(ts, ddof=1):8.3f} s")
-    print(" median: {:8.3f} s".format(np.median(ts)))
+    print(f" median: {np.median(ts):8.3f} s")
-    print(" min: {:8.3f} s".format(np.min(ts)))
+    print(f" min: {np.min(ts):8.3f} s")
-    print(" max: {:8.3f} s".format(np.max(ts)))
+    print(f" max: {np.max(ts):8.3f} s")
     print()
     print(" percentiles:")
-    print(" P_05 .. P_95: {:.3f} s .. {:.3f} s".format(p05, p95))
+    print(f" P_05 .. P_95: {p05:.3f} s .. {p95:.3f} s")
-    print(
-        " P_25 .. P_75: {:.3f} s .. {:.3f} s "
-        "(IQR = {:.3f} s)".format(p25, p75, iqr)
-    )
+    print(f" P_25 .. P_75: {p25:.3f} s .. {p75:.3f} s " f"(IQR = {iqr:.3f} s)")
     print()

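Side note: format specifications carry over unchanged when moving from str.format() to f-strings; only the value moves in front of the colon. A minimal, self-contained sketch of the same statistics output, using a made-up ts sample purely for illustration:

import numpy as np

# Made-up sample of benchmark times (seconds), for illustration only.
ts = [1.021, 0.998, 1.043, 1.007, 0.985]

p05, p25, p75, p95 = np.percentile(ts, [5, 25, 75, 95])
iqr = p75 - p25

# "{:8d}".format(len(ts)) becomes f"{len(ts):8d}": same spec, expression inlined.
print(f" runs: {len(ts):8d}")
print(f" mean: {np.mean(ts):8.3f} s")
print(f" stddev: {np.std(ts, ddof=1):8.3f} s")
print(f" P_25 .. P_75: {p25:.3f} s .. {p75:.3f} s (IQR = {iqr:.3f} s)")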
@@ -6,7 +6,6 @@ import argparse
 import json
 import numpy as np
 import matplotlib.pyplot as plt
-import matplotlib.font_manager

 parser = argparse.ArgumentParser(description=__doc__)
 parser.add_argument("file", help="JSON file with benchmark results")
@@ -16,10 +15,24 @@ parser.add_argument(
 )
 parser.add_argument("--bins", help="Number of bins (default: auto)")
 parser.add_argument(
-    "--legend-location", help="Location of the legend on plot (default: upper center)",
-    choices=["upper center", "lower center", "right", "left", "best", "upper left", "upper right", "lower left", "lower right", "center left", "center right", "center"],
-    default="upper center"
+    "--legend-location",
+    help="Location of the legend on plot (default: upper center)",
+    choices=[
+        "upper center",
+        "lower center",
+        "right",
+        "left",
+        "best",
+        "upper left",
+        "upper right",
+        "lower left",
+        "lower right",
+        "center left",
+        "center right",
+        "center",
+    ],
+    default="upper center",
 )
 parser.add_argument(
     "--type", help="Type of histogram (*bar*, barstacked, step, stepfilled)"
 )
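Side note: splitting each choices entry onto its own line is purely cosmetic (Black-style formatting); argparse behaves identically either way. A standalone sketch with a hypothetical, shortened option:

import argparse

parser = argparse.ArgumentParser(description="demo")
parser.add_argument(
    "--legend-location",
    help="Location of the legend on plot (default: upper center)",
    choices=[
        "upper center",
        "lower center",
        "best",
    ],  # shortened list, for illustration only
    default="upper center",
)

# argparse maps the dashed option name to the attribute legend_location.
args = parser.parse_args(["--legend-location", "best"])
print(args.legend_location)  # -> best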
@@ -62,11 +75,11 @@ plt.hist(
     range=(t_min, t_max),
 )
 plt.legend(
     loc=args.legend_location,
     fancybox=True,
     shadow=True,
-    prop={"size": 7, "family": ["Source Code Pro", "Fira Mono", "Courier New"]}
+    prop={"size": 10, "family": ["Source Code Pro", "Fira Mono", "Courier New"]},
 )

 plt.xlabel("Time [s]")
 if args.title:
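The only behavioural change in this hunk is the legend font size (7 to 10). prop is Matplotlib's mechanism for passing font properties to a legend, and family accepts a list of fallback font names. A minimal sketch, assuming Matplotlib is installed and rendering off-screen:

import matplotlib

matplotlib.use("Agg")  # headless backend; no display required
import matplotlib.pyplot as plt

plt.plot([0, 1], [0, 1], label="example")
plt.legend(
    loc="upper center",
    fancybox=True,
    shadow=True,
    prop={"size": 10, "family": ["Source Code Pro", "Fira Mono", "Courier New"]},
)
plt.savefig("legend_demo.png")  # hypothetical output filename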
@@ -25,9 +25,7 @@ parser.add_argument(
 parser.add_argument(
     "--titles", help="Comma-separated list of titles for the plot legend"
 )
-parser.add_argument(
-    "-o", "--output", help="Save image to the given filename."
-)
+parser.add_argument("-o", "--output", help="Save image to the given filename.")

 args = parser.parse_args()
 if args.parameter_name is not None:
@@ -38,7 +36,7 @@ if args.parameter_name is not None:


 def die(msg):
-    sys.stderr.write("fatal: %s\n" % (msg,))
+    sys.stderr.write(f"fatal: {msg}\n")
     sys.exit(1)


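Side note: the f-string is equivalent here, and it also sidesteps the %-formatting pitfall that motivated the (msg,) tuple: "fatal: %s\n" % msg misbehaves if msg itself is a tuple, while the f-string simply applies str() to whatever it gets. A tiny sketch of the helper:

import sys


def die(msg):
    # Equivalent to: sys.stderr.write("fatal: %s\n" % (msg,))
    sys.stderr.write(f"fatal: {msg}\n")
    sys.exit(1)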
@@ -50,8 +48,7 @@ def extract_parameters(results):
     names = frozenset(names)
     if len(names) != 1:
         die(
-            "benchmarks must all have the same parameter name, but found: %s"
-            % sorted(names)
+            f"benchmarks must all have the same parameter name, but found: {sorted(names)}"
         )
     return (next(iter(names)), list(values))

@@ -63,8 +60,7 @@ def unique_parameter(benchmark):
         die("benchmarks must have exactly one parameter, but found none")
     if len(params_dict) > 1:
         die(
-            "benchmarks must have exactly one parameter, but found multiple: %s"
-            % sorted(params_dict)
+            f"benchmarks must have exactly one parameter, but found multiple: {sorted(params_dict)}"
         )
     [(name, value)] = params_dict.items()
     return (name, float(value))
@@ -79,8 +75,7 @@ for filename in args.file:
     (this_parameter_name, parameter_values) = extract_parameters(results)
     if parameter_name is not None and this_parameter_name != parameter_name:
         die(
-            "files must all have the same parameter name, but found %r vs. %r"
-            % (parameter_name, this_parameter_name)
+            f"files must all have the same parameter name, but found {parameter_name!r} vs. {this_parameter_name!r}"
         )
     parameter_name = this_parameter_name

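The !r conversion is the f-string counterpart of %r: the value is rendered with repr(), so the parameter names appear quoted in the error message. A quick illustration with hypothetical values:

parameter_name = "threads"
this_parameter_name = "jobs"

# %-style equivalent: "... but found %r vs. %r" % (parameter_name, this_parameter_name)
print(
    f"files must all have the same parameter name, "
    f"but found {parameter_name!r} vs. {this_parameter_name!r}"
)
# -> files must all have the same parameter name, but found 'threads' vs. 'jobs'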
@@ -15,17 +15,15 @@ import matplotlib.pyplot as plt
 parser = argparse.ArgumentParser(description=__doc__)
 parser.add_argument("file", help="JSON file with benchmark results")
 parser.add_argument("--title", help="Plot Title")
-parser.add_argument("--sort-by", choices=['median'], help="Sort method")
+parser.add_argument("--sort-by", choices=["median"], help="Sort method")
 parser.add_argument(
     "--labels", help="Comma-separated list of entries for the plot legend"
 )
-parser.add_argument(
-    "-o", "--output", help="Save image to the given filename."
-)
+parser.add_argument("-o", "--output", help="Save image to the given filename.")

 args = parser.parse_args()

-with open(args.file, encoding='utf-8') as f:
+with open(args.file, encoding="utf-8") as f:
     results = json.load(f)["results"]

 if args.labels:
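For context, args.file here is a JSON file written by hyperfine's --export-json option; the script only reads the results array, whose entries include at least command, times, and median. A minimal sketch of the same loading step, with a hypothetical filename:

import json

# Hypothetical path, e.g. produced by: hyperfine --export-json results.json 'sleep 0.5'
with open("results.json", encoding="utf-8") as f:
    results = json.load(f)["results"]

labels = [b["command"] for b in results]
times = [b["times"] for b in results]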
@@ -34,7 +32,7 @@ else:
     labels = [b["command"] for b in results]
 times = [b["times"] for b in results]

-if args.sort_by == 'median':
+if args.sort_by == "median":
     medians = [b["median"] for b in results]
     indices = sorted(range(len(labels)), key=lambda k: medians[k])
     labels = [labels[i] for i in indices]
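The sort works by computing a permutation of indices ordered by the median and then applying that permutation to each parallel list, so labels stay paired with their data. A small illustration with made-up numbers:

labels = ["cmd-a", "cmd-b", "cmd-c"]
medians = [0.42, 0.17, 0.31]
times = [[0.41, 0.43], [0.16, 0.18], [0.30, 0.32]]

# Indices of medians in ascending order: [1, 2, 0]
indices = sorted(range(len(labels)), key=lambda k: medians[k])

labels = [labels[i] for i in indices]  # ['cmd-b', 'cmd-c', 'cmd-a']
times = [times[i] for i in indices]    # reordered to stay aligned with labels
print(labels)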
@@ -53,7 +51,7 @@ if args.title:
 plt.legend(handles=boxplot["boxes"], labels=labels, loc="best", fontsize="medium")
 plt.ylabel("Time [s]")
 plt.ylim(0, None)
-plt.xticks(list(range(1, len(labels)+1)), labels, rotation=45)
+plt.xticks(list(range(1, len(labels) + 1)), labels, rotation=45)
 if args.output:
     plt.savefig(args.output)
 else:
@@ -20,19 +20,19 @@ if len(results) != 2:
     print("The input file has to contain exactly two benchmarks")
     sys.exit(1)

-a, b = [x["command"] for x in results[:2]]
+a, b = (x["command"] for x in results[:2])
-X, Y = [x["times"] for x in results[:2]]
+X, Y = (x["times"] for x in results[:2])

-print("Command 1: {}".format(a))
+print(f"Command 1: {a}")
-print("Command 2: {}\n".format(b))
+print(f"Command 2: {b}\n")

 t, p = stats.ttest_ind(X, Y, equal_var=False)
 th = 0.05
 dispose = p < th
-print("t = {:.3}, p = {:.3}".format(t, p))
+print(f"t = {t:.3}, p = {p:.3}")
 print()

 if dispose:
-    print("There is a difference between the two benchmarks (p < {}).".format(th))
+    print(f"There is a difference between the two benchmarks (p < {th}).")
 else:
-    print("The two benchmarks are almost the same (p >= {}).".format(th))
+    print(f"The two benchmarks are almost the same (p >= {th}).")
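stats.ttest_ind(X, Y, equal_var=False) is SciPy's Welch's t-test (it does not assume equal variances), and the script treats p < 0.05 as evidence that the two timing distributions differ. A self-contained sketch with synthetic timing data:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# Synthetic timing samples (seconds), for illustration only.
X = rng.normal(loc=1.00, scale=0.02, size=30)
Y = rng.normal(loc=1.05, scale=0.05, size=30)

# Welch's t-test: the two samples may have unequal variances.
t, p = stats.ttest_ind(X, Y, equal_var=False)

th = 0.05
print(f"t = {t:.3}, p = {p:.3}")
if p < th:
    print(f"There is a difference between the two benchmarks (p < {th}).")
else:
    print(f"The two benchmarks are almost the same (p >= {th}).")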