add --remote flag

Jörg Thalheim 2023-09-16 22:22:58 +02:00
parent ae50c356c2
commit 1f5042aa62
5 changed files with 419 additions and 191 deletions
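This change adds a `--remote` option so evaluation and builds can be offloaded to another machine over SSH. A hypothetical invocation (the host name is illustrative, not part of the commit):

$ nix-ci-build --flake '.#checks' --remote builder.example.com

With `--remote` set, commands are wrapped in `ssh <host> -- ...`, sources are first uploaded with `nix flake archive --to ssh://<host>`, and finished outputs are fetched back with `nix copy --from ssh://<host> --no-check-sigs`.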

.gitignore

@@ -1,4 +1,5 @@
 result
+result-*

 # source: https://raw.githubusercontent.com/github/gitignore/main/Python.gitignore
 # Byte-compiled / optimized / DLL files

bin/nix-ci-build

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3

 import os
 import sys
+import asyncio
 sys.path.insert(
     0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
@@ -9,5 +10,5 @@ sys.path.insert(
 from nix_ci_build import main  # NOQA

 if __name__ == "__main__":
-    main()
+    asyncio.run(main())

default.nix

@@ -1,13 +1,13 @@
-{ python3, makeWrapper, nix, nix-eval-jobs, nix-output-monitor, lib }:
+{ python311, makeWrapper, nix, nix-eval-jobs, nix-output-monitor, lib, bashInteractive }:
 let
   path = lib.makeBinPath [ nix nix-eval-jobs nix-output-monitor ];
 in
-python3.pkgs.buildPythonApplication {
+python311.pkgs.buildPythonApplication {
   pname = "nix-ci-build";
   version = "0.1.0";
   format = "pyproject";
   src = ./.;
-  buildInputs = with python3.pkgs; [ setuptools ];
+  buildInputs = with python311.pkgs; [ setuptools bashInteractive ];
   nativeBuildInputs = [ makeWrapper ];
   preFixup = ''
     makeWrapperArgs+=(--prefix PATH : ${path})

nix_ci_build/__init__.py

@@ -1,15 +1,20 @@
 import argparse
+import asyncio
 import json
 import multiprocessing
 import os
-import select
+import shlex
+import shutil
 import subprocess
 import sys
-import time
-from contextlib import ExitStack, contextmanager
+from abc import ABC
+from asyncio import Queue, TaskGroup
+from asyncio.subprocess import Process
+from collections import defaultdict
+from contextlib import AsyncExitStack, asynccontextmanager
 from dataclasses import dataclass, field
 from tempfile import TemporaryDirectory
-from typing import IO, Any, Iterator, NoReturn
+from typing import IO, Any, AsyncIterator, Coroutine, DefaultDict, NoReturn


 def die(msg: str) -> NoReturn:
@@ -17,10 +22,26 @@ def die(msg: str) -> NoReturn:
     sys.exit(1)


+class Pipe:
+    def __init__(self) -> None:
+        fds = os.pipe()
+        self.read_file = os.fdopen(fds[0], "rb")
+        self.write_file = os.fdopen(fds[1], "wb")
+
+    def __enter__(self) -> "Pipe":
+        return self
+
+    def __exit__(self, _exc_type: Any, _exc_value: Any, _traceback: Any) -> None:
+        self.read_file.close()
+        self.write_file.close()
+
+
 @dataclass
 class Options:
-    flake: str = ""
+    flake_url: str = ""
+    flake_fragment: str = ""
     options: list[str] = field(default_factory=list)
+    remote: str | None = None
     systems: set[str] = field(default_factory=set)
     eval_max_memory_size: int = 4096
     skip_cached: bool = False
@@ -29,32 +50,48 @@ class Options:
     retries: int = 0
     verbose: bool = False
     copy_to: str | None = None
+    nom: bool = False
+
+    @property
+    def remote_url(self) -> None | str:
+        if self.remote is None:
+            return None
+        return f"ssh://{self.remote}"


-def run_nix(args: list[str]) -> subprocess.CompletedProcess[str]:
+async def run_nix(args: list[str]) -> Process:
     try:
-        proc = subprocess.run(["nix"] + args, text=True, capture_output=True)
+        proc = await asyncio.create_subprocess_exec(
+            "nix", *args, stdout=asyncio.subprocess.PIPE
+        )
     except FileNotFoundError:
-        die("nix not found in PATH")
+        die(f"nix not found in PATH, try to run {shlex.join(args)}")
     return proc


-def current_system() -> str:
-    proc = run_nix(["eval", "--impure", "--raw", "--expr", "builtins.currentSystem"])
-    if proc.returncode != 0:
-        die(f"Failed to determine current system: {proc.stderr}")
-    return proc.stdout.strip()
-
-
-def max_jobs() -> int:
-    proc = run_nix(["show-config", "max-jobs"])
-    if proc.returncode != 0:
-        die(f"Failed to determine number of CPUs: {proc.stderr}")
-    return int(proc.stdout.strip())
-
-
-def parse_args(args: list[str]) -> Options:
+async def get_nix_config() -> dict[str, str]:
+    proc = await run_nix(["show-config"])
+    assert proc.stdout is not None
+    config = {}
+    async for line in proc.stdout:
+        cols = line.split(b" = ", 1)
+        if len(cols) != 2:
+            continue
+        key, value = cols
+        config[key.decode()] = value.decode().strip()
+    try:
+        returncode = await proc.wait()
+        if returncode != 0:
+            die(f"Failed to get nix config: {returncode}")
+    finally:
+        if proc.returncode is None:
+            proc.kill()
+    return config
+
+
+def parse_args(args: list[str], nix_config: dict[str, str]) -> Options:
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "-f",
         "--flake",
@@ -65,7 +102,7 @@ def parse_args(args: list[str]) -> Options:
         "-j",
         "--max-jobs",
         type=int,
-        default=max_jobs(),
+        default=nix_config.get("max-jobs", 0),
         help="Maximum number of build jobs to run in parallel (0 for unlimited)",
     )
     parser.add_argument(
@@ -76,10 +113,19 @@ def parse_args(args: list[str]) -> Options:
         metavar=("name", "value"),
         default=[],
     )
+    parser.add_argument(
+        "--no-nom",
+        help="Use nix-output-monitor to print build output (default: false)",
+        action="store_true",
+        default=shutil.which("nom") is None,
+    )
+    system = nix_config.get("system")
+    if system is None:
+        die("Failed to determine system from nix config")
     parser.add_argument(
         "--systems",
         help="Comma-separated list of systems to build for (default: current system)",
-        default=current_system(),
+        default=system,
     )
     parser.add_argument(
         "--retries",
@@ -87,6 +133,11 @@ def parse_args(args: list[str]) -> Options:
         default=0,
         help="Number of times to retry failed builds",
     )
+    parser.add_argument(
+        "--remote",
+        type=str,
+        help="Remote machine to build on",
+    )
     parser.add_argument(
         "--skip-cached",
         help="Skip builds that are already present in the binary cache (default: false)",
@@ -117,14 +168,23 @@ def parse_args(args: list[str]) -> Options:
     a = parser.parse_args(args)
     systems = set(a.systems.split(","))
+    flake_parts = a.flake.split("#")
+    flake_url = flake_parts[0]
+    flake_fragment = ""
+    if len(flake_parts) == 2:
+        flake_fragment = flake_parts[1]
     options = []
     for name, value in a.option:
         options.extend(["--option", name, value])
     return Options(
-        flake=a.flake,
+        flake_url=flake_url,
+        flake_fragment=flake_fragment,
+        remote=a.remote,
         skip_cached=a.skip_cached,
         options=options,
         max_jobs=a.max_jobs,
+        nom=not a.no_nom,
         verbose=a.verbose,
         systems=systems,
         eval_max_memory_size=a.eval_max_memory_size,
@@ -133,51 +193,108 @@ def parse_args(args: list[str]) -> Options:
     )


-@contextmanager
-def nix_eval_jobs(opts: Options) -> Iterator[subprocess.Popen[str]]:
+def upload_sources(remote_url: str, flake_url: str) -> str:
+    cmd = [
+        "nix",
+        "flake",
+        "archive",
+        "--to",
+        remote_url,
+        "--json",
+        flake_url,
+    ]
+    print("$ " + shlex.join(cmd))
+    proc = subprocess.run(cmd, stdout=subprocess.PIPE)
+    if proc.returncode != 0:
+        die(
+            f"failed to upload sources: {shlex.join(cmd)} failed with {proc.returncode}"
+        )
+    try:
+        return json.loads(proc.stdout)["path"]
+    except Exception as e:
+        die(
+            f"failed to parse output of {shlex.join(cmd)}: {e}\nGot: {proc.stdout.decode('utf-8', 'replace')}"
+        )
+
+
+def maybe_remote(cmd: list[str], opts: Options) -> list[str]:
+    if opts.remote:
+        return ["ssh", opts.remote, "--", shlex.join(cmd)]
+    else:
+        return cmd
+
+
+def nix_shell(packages: list[str]) -> list[str]:
+    return (
+        [
+            "nix",
+            "shell",
+            "--extra-experimental-features",
+            "nix-command",
+            "--extra-experimental-features",
+            "flakes",
+        ]
+        + packages
+        + ["-c"]
+    )
+
+
+@asynccontextmanager
+async def ensure_stop(
+    proc: Process, cmd: list[str], timeout: float = 3.0
+) -> AsyncIterator[Process]:
+    try:
+        yield proc
+    finally:
+        if proc.returncode is not None:
+            return
+        proc.terminate()
+        try:
+            await asyncio.wait_for(proc.wait(), timeout=timeout)
+        except asyncio.TimeoutError:
+            print(f"Failed to stop process {shlex.join(cmd)}. Killing it.")
+            proc.kill()
+            await proc.wait()
+
+
+@asynccontextmanager
+async def nix_eval_jobs(opts: Options) -> AsyncIterator[Process]:
     with TemporaryDirectory() as d:
+        temp = d
+        if opts.remote:
+            # TODO: This is bad
+            temp = "/tmp/gc-roots"
         args = [
             "nix-eval-jobs",
             "--gc-roots-dir",
-            d,
+            temp,
             "--force-recurse",
             "--max-memory-size",
             str(opts.eval_max_memory_size),
             "--workers",
             str(opts.eval_workers),
             "--flake",
-            opts.flake,
+            f"{opts.flake_url}#{opts.flake_fragment}",
         ] + opts.options
         if opts.skip_cached:
             args.append("--check-cache-status")
-        print("$ " + " ".join(args))
-        with subprocess.Popen(args, text=True, stdout=subprocess.PIPE) as proc:
-            try:
-                yield proc
-            finally:
-                proc.kill()
+        if opts.remote:
+            args = nix_shell(["nixpkgs#nix-eval-jobs"]) + args
+        args = maybe_remote(args, opts)
+        print("$ " + shlex.join(args))
+        proc = await asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE)
+        async with ensure_stop(proc, args) as proc:
+            yield proc


-@contextmanager
-def nix_build(
-    installable: str, stdout: IO[Any] | None, opts: Options
-) -> Iterator[subprocess.Popen]:
-    log_format = "raw"
-    args = [
-        "nix",
-        "build",
-        installable,
-        "--log-format",
-        log_format,
-        "--keep-going",
-    ] + opts.options
-    if opts.verbose:
-        print("$ " + " ".join(args))
-    with subprocess.Popen(args, text=True, stderr=stdout) as proc:
-        try:
-            yield proc
-        finally:
-            proc.kill()
+@asynccontextmanager
+async def nix_output_monitor(fd: int, opts: Options) -> AsyncIterator[Process]:
+    cmd = maybe_remote(nix_shell(["nixpkgs#nix-output-monitor"]) + ["nom"], opts)
+    proc = await asyncio.create_subprocess_exec(*cmd, stdin=fd)
+    async with ensure_stop(proc, cmd) as proc:
+        yield proc


 @dataclass
@@ -185,133 +302,127 @@ class Build:
     attr: str
     drv_path: str
     outputs: dict[str, str]
-    proc: subprocess.Popen[str]
-    retries: int
-    rc: int | None = None
+
+    async def build(
+        self, stack: AsyncExitStack, build_output: IO[str], opts: Options
+    ) -> int:
+        proc = await stack.enter_async_context(
+            nix_build(self.drv_path + "^*", build_output, opts)
+        )
+        rc = 0
+        for _ in range(opts.retries + 1):
+            rc = await proc.wait()
+            if rc == 0:
+                if opts.verbose:
+                    print(f"build {self.attr} succeeded")
+                return rc
+            print(f"build {self.attr} exited with {rc}", file=sys.stderr)
+        return rc
+
+    async def nix_copy(
+        self, args: list[str], exit_stack: AsyncExitStack, opts: Options
+    ) -> int:
+        cmd = maybe_remote(["nix", "copy", "--log-format", "raw"] + args, opts)
+        if opts.verbose:
+            print("$ " + shlex.join(cmd))
+        proc = await asyncio.create_subprocess_exec(*cmd)
+        await exit_stack.enter_async_context(ensure_stop(proc, cmd))
+        return await proc.wait()
+
+    async def upload(self, exit_stack: AsyncExitStack, opts: Options) -> int:
+        if not opts.copy_to:
+            return 0
+        cmd = ["nix", "copy", "--log-format", "raw", "--to", opts.copy_to] + list(
+            self.outputs.values()
+        )
+        cmd = maybe_remote(cmd, opts)
+        if opts.verbose:
+            print("$ " + shlex.join(cmd))
+        proc = await asyncio.create_subprocess_exec(*cmd)
+        await exit_stack.enter_async_context(ensure_stop(proc, cmd))
+        return await proc.wait()
+
+    async def download(self, exit_stack: AsyncExitStack, opts: Options) -> int:
+        if not opts.remote_url:
+            return 0
+        cmd = [
+            "nix",
+            "copy",
+            "--log-format",
+            "raw",
+            "--no-check-sigs",
+            "--from",
+            opts.remote_url,
+        ] + list(self.outputs.values())
+        if opts.verbose:
+            print("$ " + shlex.join(cmd))
+        proc = await asyncio.create_subprocess_exec(*cmd)
+        await exit_stack.enter_async_context(ensure_stop(proc, cmd))
+        return await proc.wait()


 @dataclass
-class EvalError:
+class Failure(ABC):
     attr: str
-    error: str
-
-
-def wait_for_any_build(builds: list[Build]) -> Build:
-    while True:
-        for i, build in enumerate(builds):
-            rc = build.proc.poll()
-            if rc is not None:
-                del builds[i]
-                build.rc = rc
-                return build
-        time.sleep(0.05)
-
-
-def drain_builds(
-    builds: list[Build], stdout: IO[Any] | None, stack: ExitStack, opts: Options
-) -> list[Build]:
-    build = wait_for_any_build(builds)
-    if build.rc != 0:
-        print(f"build {build.attr} exited with {build.rc}", file=sys.stderr)
-        if build.retries < opts.retries:
-            print(f"retrying build {build.attr} [{build.retries + 1}/{opts.retries}]")
-            builds.append(
-                create_build(
-                    build.attr,
-                    build.drv_path,
-                    build.outputs,
-                    stdout,
-                    stack,
-                    opts,
-                    build.retries + 1,
-                )
-            )
-        else:
-            return [build]
-    return []
-
-
-def create_build(
-    attr: str,
-    drv_path: str,
-    outputs: dict[str, str],
-    stdout: IO[Any] | None,
-    exit_stack: ExitStack,
-    opts: Options,
-    retries: int = 0,
-) -> Build:
-    nix_build_proc = exit_stack.enter_context(nix_build(drv_path + "^*", stdout, opts))
-    if opts.copy_to:
-        if opts.verbose:
-            print(f"copying {attr} to {opts.copy_to}")
-        exit_stack.enter_context(
-            subprocess.Popen(
-                [
-                    "nix",
-                    "copy",
-                    "--to",
-                    opts.copy_to,
-                ]
-                + list(outputs.values()),
-            )
-        )
-    return Build(attr, drv_path, outputs, nix_build_proc, retries=retries)
-
-
-class Pipe:
-    def __init__(self) -> None:
-        fds = os.pipe()
-        self.read_file = os.fdopen(fds[0], "rb")
-        self.write_file = os.fdopen(fds[1], "wb")
-
-    def __enter__(self) -> "Pipe":
-        return self
-
-    def __exit__(self, _exc_type: Any, _exc_value: Any, _traceback: Any) -> None:
-        self.read_file.close()
-        self.write_file.close()
-
-
-def stop_gracefully(proc: subprocess.Popen, timeout: int = 1) -> None:
-    proc.terminate()
-    try:
-        proc.wait(timeout=timeout)
-    except subprocess.TimeoutExpired:
-        proc.kill()
-
-
-@contextmanager
-def nix_output_monitor(fd: int) -> Iterator[subprocess.Popen]:
-    proc = subprocess.Popen(["nom"], stdin=fd)
-    try:
-        yield proc
-    finally:
-        stop_gracefully(proc)
-
-
-def run_builds(stack: ExitStack, opts: Options) -> int:
-    eval_error = []
-    build_failures = []
-    drv_paths = set()
-    proc = stack.enter_context(nix_eval_jobs(opts))
-    assert proc.stdout
-    pipe = stack.enter_context(Pipe())
-    nom_proc: subprocess.Popen | None = None
-    stdout = pipe.write_file
-    builds: list[Build] = []
-    for line in proc.stdout:
+    error_message: str
+
+
+class EvalFailure(Failure):
+    pass
+
+
+class BuildFailure(Failure):
+    pass
+
+
+class UploadFailure(Failure):
+    pass
+
+
+class DownloadFailure(Failure):
+    pass
+
+
+@asynccontextmanager
+async def nix_build(
+    installable: str, stderr: IO[Any] | None, opts: Options
+) -> AsyncIterator[Process]:
+    args = [
+        "nix",
+        "build",
+        installable,
+        "--log-format",
+        "raw",
+        "--keep-going",
+    ] + opts.options
+    args = maybe_remote(args, opts)
+    if opts.verbose:
+        print("$ " + shlex.join(args))
+    proc = await asyncio.create_subprocess_exec(*args, stderr=stderr)
+    try:
+        yield proc
+    finally:
+        proc.kill()
+
+
+async def run_evaluation(
+    eval_proc: Process,
+    build_queue: Queue[tuple[str, str, str]],
+    failures: list[Failure],
+    opts: Options,
+) -> None:
+    assert eval_proc.stdout
+    async for line in eval_proc.stdout:
         if opts.verbose:
             print(line, end="")
-        if nom_proc is None:
-            nom_proc = stack.enter_context(nix_output_monitor(pipe.read_file.fileno()))
         try:
             job = json.loads(line)
         except json.JSONDecodeError:
-            die(f"Failed to parse line of nix-eval-jobs output: {line}")
+            die(f"Failed to parse line of nix-eval-jobs output: {line.decode()}")
         error = job.get("error")
         attr = job.get("attr", "unknown-flake-attribute")
         if error:
-            eval_error.append(EvalError(attr, error))
+            failures.append(EvalFailure(attr, error))
             continue
         is_cached = job.get("isCached", False)
         if is_cached:
@@ -321,48 +432,164 @@ def run_builds(stack: ExitStack, opts: Options) -> int:
             continue
         drv_path = job.get("drvPath")
         if not drv_path:
-            die(f"nix-eval-jobs did not return a drvPath: {line}")
-        while len(builds) >= opts.max_jobs and opts.max_jobs != 0:
-            build_failures += drain_builds(builds, stdout, stack, opts)
+            die(f"nix-eval-jobs did not return a drvPath: {line.decode()}")
+        outputs = job.get("outputs", {})
+        build_queue.put_nowait((attr, drv_path, outputs))
+
+
+async def run_builds(
+    stack: AsyncExitStack,
+    build_output: IO,
+    build_queue: Queue,
+    upload_queue: Queue,
+    download_queue: Queue,
+    failures: list[Failure],
+    opts: Options,
+) -> NoReturn:
+    drv_paths: set[Any] = set()
+    while True:
+        attr, drv_path, outputs = await build_queue.get()
         print(f" building {attr}")
         if drv_path in drv_paths:
             continue
         drv_paths.add(drv_path)
-        outputs = job.get("outputs", {})
-        builds.append(create_build(attr, drv_path, outputs, stdout, stack, opts))
-
-    while builds:
-        build_failures += drain_builds(builds, stdout, stack, opts)
-
-    if nom_proc is not None:
-        stop_gracefully(nom_proc)
-
-    eval_rc = proc.wait()
-    if eval_rc != 0:
-        print(
-            f"nix-eval-jobs exited with {eval_rc}, check logs for details",
-            file=sys.stderr,
-        )
-
-    for error in eval_error:
-        print(f"{error.attr}: {error.error}", file=sys.stderr)
-    for build in build_failures:
-        print(f"{build.attr}: build failed with {build.rc}", file=sys.stderr)
-
-    if len(build_failures) > 0 or len(eval_error) > 0 or eval_rc != 0:
-        return 1
-    else:
-        return 0
-
-
-def main() -> None:
-    opts = parse_args(sys.argv[1:])
+        build = Build(attr, drv_path, outputs)
+        rc = await build.build(stack, build_output, opts)
+        build_queue.task_done()
+        if rc == 0:
+            upload_queue.put_nowait(build)
+            download_queue.put_nowait(build)
+        else:
+            failures.append(BuildFailure(build.attr, f"build exited with {rc}"))
+
+
+async def run_uploads(
+    stack: AsyncExitStack,
+    upload_queue: Queue[Build],
+    failures: list[Failure],
+    opts: Options,
+) -> NoReturn:
+    while True:
+        build = await upload_queue.get()
+        rc = await build.upload(stack, opts)
+        if rc != 0:
+            failures.append(UploadFailure(build.attr, f"upload exited with {rc}"))
+        upload_queue.task_done()
+
+
+async def run_downloads(
+    stack: AsyncExitStack,
+    download_queue: Queue[Build],
+    failures: list[Failure],
+    opts: Options,
+) -> NoReturn:
+    while True:
+        build = await download_queue.get()
+        rc = await build.download(stack, opts)
+        if rc != 0:
+            failures.append(DownloadFailure(build.attr, f"download exited with {rc}"))
+        download_queue.task_done()
+
+
+async def report_progress(
+    build_queue: Queue,
+    upload_queue: Queue,
+    download_queue: Queue,
+) -> NoReturn:
+    old_status = ""
+    while True:
+        new_status = f"builds: {build_queue.qsize()}, uploads: {upload_queue.qsize()}, downloads: {download_queue.qsize()}"
+        if new_status != old_status:
+            print(new_status)
+            old_status = new_status
+        await asyncio.sleep(0.5)
+
+
+async def run(stack: AsyncExitStack, opts: Options) -> int:
+    eval_proc_future = stack.enter_async_context(nix_eval_jobs(opts))
+    pipe: Pipe | None = None
+    output_monitor_future: Coroutine[None, None, Process] | None = None
+    if opts.nom:
+        pipe = stack.enter_context(Pipe())
+        output_monitor_future = stack.enter_async_context(
+            nix_output_monitor(pipe.read_file.fileno(), opts)
+        )
+    eval_proc = await eval_proc_future
+    output_monitor: Process | None = None
+    if output_monitor_future:
+        output_monitor = await output_monitor_future
+    failures: DefaultDict[type, list[Failure]] = defaultdict(list)
+    build_queue: Queue[tuple[str, str, str]] = Queue()
+    upload_queue: Queue[Build] = Queue()
+    download_queue: Queue[Build] = Queue()
+
+    evaluation = run_evaluation(eval_proc, build_queue, failures[EvalFailure], opts)
+    async with TaskGroup() as tg:
+        build_output = sys.stdout.buffer
+        if pipe:
+            build_output = pipe.write_file
+        tasks = []
+        for _ in range(opts.max_jobs):
+            tasks.append(
+                tg.create_task(
+                    run_builds(
+                        stack,
+                        build_output,
+                        build_queue,
+                        upload_queue,
+                        download_queue,
+                        failures[BuildFailure],
+                        opts,
+                    )
+                )
+            )
+        tasks.append(
+            tg.create_task(
+                run_uploads(stack, upload_queue, failures[UploadFailure], opts)
+            )
+        )
+        tasks.append(
+            tg.create_task(
+                run_downloads(stack, download_queue, failures[DownloadFailure], opts)
+            )
+        )
+        if not opts.nom:
+            tasks.append(
+                tg.create_task(
+                    report_progress(build_queue, upload_queue, download_queue)
+                )
+            )
+        await evaluation
+        await build_queue.join()
+        await upload_queue.join()
+        await download_queue.join()
+        for task in tasks:
+            task.cancel()
+
+    for failure_type in [EvalFailure, BuildFailure, UploadFailure, DownloadFailure]:
+        for failure in failures[failure_type]:
+            print(
+                f"{failure_type.__name__} for {failure.attr}: {failure.error_message}"
+            )
+    if eval_proc.returncode != 0 and eval_proc.returncode is not None:
+        print(f"nix-eval-jobs exited with {eval_proc.returncode}")
+    if (
+        output_monitor
+        and output_monitor.returncode != 0
+        and output_monitor.returncode is not None
+    ):
+        print(f"nix-output-monitor exited with {output_monitor.returncode}")
+    return 0
+
+
+async def main() -> None:
+    nix_config = await get_nix_config()
+    opts = parse_args(sys.argv[1:], nix_config)
     rc = 0
-    with ExitStack() as stack:
-        rc = run_builds(stack, opts)
+    async with AsyncExitStack() as stack:
+        if opts.remote_url:
+            opts.flake_url = upload_sources(opts.remote_url, opts.flake_url)
+        rc = await run(stack, opts)
     sys.exit(rc)
-
-
-if __name__ == "__main__":
-    main()
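A minimal standalone sketch of how the remote wrapping composes, trimmed down from the `Options.remote_url` property and `maybe_remote` helper in this diff (the host name is illustrative, not part of the commit):

    import shlex
    from dataclasses import dataclass


    @dataclass
    class Options:  # reduced to the fields used here
        remote: str | None = None

        @property
        def remote_url(self) -> None | str:
            return None if self.remote is None else f"ssh://{self.remote}"


    def maybe_remote(cmd: list[str], opts: Options) -> list[str]:
        # With --remote set, the whole command is re-quoted and run over ssh.
        if opts.remote:
            return ["ssh", opts.remote, "--", shlex.join(cmd)]
        return cmd


    opts = Options(remote="builder.example.com")  # hypothetical host
    print(maybe_remote(["nix", "build", ".#hello"], opts))
    # ['ssh', 'builder.example.com', '--', "nix build '.#hello'"]
    print(opts.remote_url)  # ssh://builder.example.com

Quoting the full command through `shlex.join` means a single SSH argument carries the whole invocation, so nested flags like `--log-format raw` survive the remote shell intact.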

pyproject.toml

@@ -13,7 +13,6 @@ classifiers = [
     "Environment :: Console",
     "Topic :: Utilities",
     "Intended Audience :: Developers",
-    "Programming Language :: Python :: 3.6",
 ]

 [project.urls]
@@ -58,7 +57,7 @@ exclude = '''
 '''

 [tool.mypy]
-python_version = "3.10"
+python_version = "3.11"
 warn_redundant_casts = true
 disallow_untyped_calls = true
 disallow_untyped_defs = true