Merge pull request #256230 from SomeoneSerge/feat/gpu-tests-py

GPU access in the sandbox
This commit is contained in:
Someone 2024-06-26 19:16:53 +00:00 committed by GitHub
commit cb69dc5b8d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 741 additions and 3 deletions

View File

@ -243,6 +243,7 @@
./programs/nh.nix
./programs/nix-index.nix
./programs/nix-ld.nix
./programs/nix-required-mounts.nix
./programs/nm-applet.nix
./programs/nncp.nix
./programs/noisetorch.nix

View File

@ -0,0 +1,118 @@
# NixOS module for `nix-required-mounts`: a Nix pre-build hook that exposes
# extra host paths in the build sandbox for derivations whose
# requiredSystemFeatures match a configured pattern (e.g. "cuda").
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.programs.nix-required-mounts;
  package = pkgs.nix-required-mounts;

  # A single host -> guest bind-mount description.
  Mount =
    with lib;
    types.submodule {
      options.host = mkOption {
        type = types.str;
        description = "Host path to mount";
      };
      options.guest = mkOption {
        type = types.str;
        description = "Location in the sandbox to mount the host path at";
      };
    };

  # One named pattern: which requiredSystemFeatures trigger it, and which
  # paths (globs or explicit Mounts) are exposed when it triggers.
  Pattern =
    with lib.types;
    types.submodule (
      { config, name, ... }:
      {
        options.onFeatures = lib.mkOption {
          type = listOf types.str;
          description = "Which requiredSystemFeatures should trigger relaxation of the sandbox";
          # By default the pattern's attribute name doubles as the feature name.
          default = [ name ];
        };
        options.paths = lib.mkOption {
          type = listOf (oneOf [
            path
            Mount
          ]);
          description = "A list of glob patterns, indicating which paths to expose to the sandbox";
        };
        options.unsafeFollowSymlinks = lib.mkEnableOption ''
          Instructs the hook to mount the symlink targets as well, when any of
          the `paths` contain symlinks. This may not work correctly with glob
          patterns.
        '';
      }
    );

  # Userspace GPU driver locations used by the nvidia-gpu preset.
  driverPaths = [
    pkgs.addOpenGLRunpath.driverLink
    # mesa:
    config.hardware.opengl.package
    # nvidia_x11, etc:
  ] ++ config.hardware.opengl.extraPackages; # nvidia_x11

  # Preset pattern(s) merged into allowedPatterns when
  # presets.nvidia-gpu.enable is set (cf. config section below).
  defaults = {
    nvidia-gpu.onFeatures = package.allowedPatterns.nvidia-gpu.onFeatures;
    nvidia-gpu.paths = package.allowedPatterns.nvidia-gpu.paths ++ driverPaths;
    nvidia-gpu.unsafeFollowSymlinks = false;
  };
in
{
  meta.maintainers = with lib.maintainers; [ SomeoneSerge ];
  options.programs.nix-required-mounts = {
    enable = lib.mkEnableOption "Expose extra paths to the sandbox depending on derivations' requiredSystemFeatures";
    presets.nvidia-gpu.enable = lib.mkEnableOption ''
      Declare the support for derivations that require an Nvidia GPU to be
      available, e.g. derivations with `requiredSystemFeatures = [ "cuda" ]`.
      This mounts the corresponding userspace drivers and device nodes in the
      sandbox, but only for derivations that request these special features.
      You may extend or override the exposed paths via the
      `programs.nix-required-mounts.allowedPatterns.nvidia-gpu.paths` option.
    '';
    allowedPatterns =
      with lib.types;
      # NOTE(review): defaultText below describes an `opengl` pattern, while
      # the actual default is `{ }` (presets merge in `nvidia-gpu`) — confirm
      # whether defaultText is stale.
      lib.mkOption rec {
        type = attrsOf Pattern;
        description = "The hook config, describing which paths to mount for which system features";
        default = { };
        defaultText = lib.literalExpression ''
          {
            opengl.paths = config.hardware.opengl.extraPackages ++ [
              config.hardware.opengl.package
              pkgs.addOpenGLRunpath.driverLink
              "/dev/dri"
            ];
          }
        '';
        example.require-ipfs.paths = [ "/ipfs" ];
        # NOTE(review): "ifps" looks like a typo for "ipfs" — confirm the
        # intended feature name in this example.
        example.require-ipfs.onFeatures = [ "ifps" ];
      };
    extraWrapperArgs = lib.mkOption {
      type = with lib.types; listOf str;
      default = [ ];
      description = "List of extra arguments (such as `--add-flags -v`) to pass to the hook's wrapper";
    };
    package = lib.mkOption {
      type = lib.types.package;
      default = package.override { inherit (cfg) allowedPatterns extraWrapperArgs; };
      description = "The final package with the final config applied";
      internal = true;
    };
  };
  config = lib.mkIf cfg.enable (
    lib.mkMerge [
      # Register the wrapped hook with the Nix daemon.
      { nix.settings.pre-build-hook = lib.getExe cfg.package; }
      (lib.mkIf cfg.presets.nvidia-gpu.enable {
        # Advertise the features so Nix schedules such builds on this machine.
        nix.settings.system-features = cfg.allowedPatterns.nvidia-gpu.onFeatures;
        programs.nix-required-mounts.allowedPatterns = {
          inherit (defaults) nvidia-gpu;
        };
      })
    ]
  );
}

View File

@ -651,6 +651,7 @@ in {
nix-config = handleTest ./nix-config.nix {};
nix-ld = handleTest ./nix-ld.nix {};
nix-misc = handleTest ./nix/misc.nix {};
nix-required-mounts = runTest ./nix-required-mounts;
nix-serve = handleTest ./nix-serve.nix {};
nix-serve-ssh = handleTest ./nix-serve-ssh.nix {};
nixops = handleTest ./nixops/default.nix {};

View File

@ -0,0 +1,58 @@
# NixOS VM test for the nix-required-mounts pre-build hook: verifies that
# configured paths (plain paths, host != guest mounts, and symlinked file
# content) are exposed only to builds requesting the matching system feature.
{ pkgs, ... }:
let
  inherit (pkgs) lib;
in
{
  name = "nix-required-mounts";
  meta.maintainers = with lib.maintainers; [ SomeoneSerge ];
  nodes.machine =
    { config, pkgs, ... }:
    {
      virtualisation.writableStore = true;
      # Pre-seed the store so the in-VM nix-builds can run without substituters.
      system.extraDependencies = [ (pkgs.runCommand "deps" { } "mkdir $out").inputDerivation ];
      nix.nixPath = [ "nixpkgs=${../../..}" ];
      nix.settings.substituters = lib.mkForce [ ];
      nix.settings.system-features = [ "supported-feature" ];
      nix.settings.experimental-features = [ "nix-command" ];
      programs.nix-required-mounts.enable = true;
      programs.nix-required-mounts.allowedPatterns.supported-feature = {
        onFeatures = [ "supported-feature" ];
        paths = [
          # A plain path, and an explicit mount exercising hostPath != guestPath.
          "/supported-feature-files"
          {
            host = "/usr/lib/imaginary-fhs-drivers";
            guest = "/run/opengl-driver/lib";
          }
        ];
        unsafeFollowSymlinks = true;
      };
      users.users.person.isNormalUser = true;
      # Fake "driver" files: a regular file and a symlink pointing at it,
      # created at boot via tmpfiles.
      systemd.tmpfiles.rules = [
        "d /supported-feature-files 0755 person users -"
        "f /usr/lib/libcuda.so 0444 root root - fakeContent"
        "L /usr/lib/imaginary-fhs-drivers/libcuda.so 0444 root root - /usr/lib/libcuda.so"
      ];
    };
  testScript = ''
    import shlex

    def person_do(cmd, succeed=True):
        cmd = shlex.quote(cmd)
        cmd = f"su person -l -c {cmd} &>/dev/console"
        if succeed:
            return machine.succeed(cmd)
        else:
            return machine.fail(cmd)

    start_all()

    person_do("nix-build ${./ensure-path-not-present.nix} --argstr feature supported-feature")
    person_do("nix-build ${./test-require-feature.nix} --argstr feature supported-feature")
    person_do("nix-build ${./test-require-feature.nix} --argstr feature unsupported-feature", succeed=False)
    person_do("nix-build ${./test-structured-attrs.nix} --argstr feature supported-feature")
    person_do("nix-build ${./test-structured-attrs-empty.nix}")
  '';
}

View File

@ -0,0 +1,13 @@
# Negative control: build WITHOUT requiredSystemFeatures and assert that the
# feature's paths were NOT mounted into the sandbox.
{
  pkgs ? import <nixpkgs> { },
  feature,
}:
pkgs.runCommandNoCC "${feature}-not-present" { } ''
  if [[ -e /${feature}-files ]]; then
    echo "No ${feature} in requiredSystemFeatures, but /${feature}-files was mounted anyway"
    exit 1
  else
    touch $out
  fi
''

View File

@ -0,0 +1,26 @@
# Positive check: a build that requests `feature` via requiredSystemFeatures
# must see the plain path, the remapped (host != guest) driver directory, and
# the content behind the symlinked libcuda.so.
{
  pkgs ? import <nixpkgs> { },
  feature,
}:
pkgs.runCommandNoCC "${feature}-present" { requiredSystemFeatures = [ feature ]; } ''
  if [[ ! -e /${feature}-files ]]; then
    echo "The host declares ${feature} support, but doesn't expose /${feature}-files" >&2
    exit 1
  fi
  libcudaLocation=/run/opengl-driver/lib/libcuda.so
  if [[ -e "$libcudaLocation" || -h "$libcudaLocation" ]] ; then
    true # we're good
  else
    # Fix: the original message read "but it the hook fails" (garbled grammar).
    echo "The host declares ${feature} support, but the hook fails to handle the hostPath != guestPath cases" >&2
    exit 1
  fi
  if cat "$libcudaLocation" | xargs test fakeContent = ; then
    true # we're good
  else
    echo "The host declares ${feature} support, but it seems to fail to follow symlinks" >&2
    echo "The content of /run/opengl-driver/lib/libcuda.so is: $(cat /run/opengl-driver/lib/libcuda.so)" >&2
    exit 1
  fi
  touch $out
''

View File

@ -0,0 +1,8 @@
# Regression check: the hook must cope with __structuredAttrs = true
# derivations that declare no requiredSystemFeatures at all.
{
  pkgs ? import <nixpkgs> { },
}:
pkgs.runCommandNoCC "nix-required-mounts-structured-attrs-no-features" { __structuredAttrs = true; }
  ''
    touch $out
  ''

View File

@ -0,0 +1,18 @@
# Check that the hook parses requiredSystemFeatures out of
# __structuredAttrs = true derivations (where the env carries "__json").
{
  pkgs ? import <nixpkgs> { },
  feature,
}:
pkgs.runCommandNoCC "${feature}-present-structured"
  {
    __structuredAttrs = true;
    requiredSystemFeatures = [ feature ];
  }
  ''
    if [[ -e /${feature}-files ]]; then
      touch $out
    else
      echo "The host declares ${feature} support, but doesn't expose /${feature}-files" >&2
      echo "Do we fail to parse __structuredAttrs=true derivations?" >&2
      # Fix: fail explicitly; previously the builder exited 0 and the build
      # only failed because $out was never produced.
      exit 1
    fi
  ''

View File

@ -7,6 +7,7 @@
SDL,
addOpenGLRunpath,
alembic,
blender,
boost,
brotli,
callPackage,
@ -372,6 +373,20 @@ stdenv.mkDerivation (finalAttrs: {
--render-frame 1
done
'';
tester-cudaAvailable = cudaPackages.writeGpuTestPython { } ''
import subprocess
subprocess.run([${
lib.concatMapStringsSep ", " (x: ''"${x}"'') [
(lib.getExe (blender.override { cudaSupport = true; }))
"--background"
"-noaudio"
"--python-exit-code"
"1"
"--python"
"${./test-cuda.py}"
]
}], check=True) # noqa: E501
'';
};
};
@ -381,7 +396,8 @@ stdenv.mkDerivation (finalAttrs: {
# They comment two licenses: GPLv2 and Blender License, but they
# say: "We've decided to cancel the BL offering for an indefinite period."
# OptiX, enabled with cudaSupport, is non-free.
license = with lib.licenses; [ gpl2Plus ] ++ lib.optional cudaSupport unfree;
license = with lib.licenses; [ gpl2Plus ] ++ lib.optional cudaSupport (unfree // { shortName = "NVidia OptiX EULA"; });
platforms = [
"aarch64-linux"
"x86_64-darwin"

View File

@ -0,0 +1,8 @@
"""Blender smoke test: assert that Cycles can enumerate a CUDA device."""
import bpy

cycles_prefs = bpy.context.preferences.addons["cycles"].preferences
cuda_devices = cycles_prefs.get_devices_for_type("CUDA")
ids = [device.id for device in cuda_devices]
assert any("CUDA" in i for i in ids), f"CUDA not present in {ids}"
print("CUDA is available")

View File

@ -0,0 +1,37 @@
# Use exportReferencesGraph to capture the possible dependencies of the
# drivers (e.g. libc linked through DT_RUNPATH) and ensure they are mounted
# in the sandbox as well. In practice, things seemed to have worked without
# this as well, but we go with the safe option until we understand why.
#
# Output: allowed-patterns.json — the user's `allowedPatterns`, with each
# store path's references merged into the corresponding `paths` list by the
# build-time python script.
{
  lib,
  runCommand,
  python3Packages,
  allowedPatterns,
}:
runCommand "allowed-patterns.json"
  {
    nativeBuildInputs = [ python3Packages.python ];
    # Emit one "references-<basename>" graph file per store path mentioned in
    # any pattern; non-store paths (e.g. /dev/dri, globs) are skipped.
    exportReferencesGraph = builtins.concatMap (
      name:
      builtins.concatMap (
        path:
        let
          prefix = "${builtins.storeDir}/";
          # Has to start with a letter: https://github.com/NixOS/nix/blob/516e7ddc41f39ff939b5d5b5dc71e590f24890d4/src/libstore/build/local-derivation-goal.cc#L568
          exportName = ''references-${lib.strings.removePrefix prefix "${path}"}'';
          isStorePath = lib.isStorePath path && (lib.hasPrefix prefix "${path}");
        in
        lib.optionals isStorePath [
          exportName
          path
        ]
      ) allowedPatterns.${name}.paths
    ) (builtins.attrNames allowedPatterns);
    env.storeDir = "${builtins.storeDir}/";
    # The raw config, passed to the script as a file via shallowConfigPath.
    shallowConfig = builtins.toJSON allowedPatterns;
    passAsFile = [ "shallowConfig" ];
  }
  ''
    python ${./scripts/nix_required_mounts_closure.py}
  ''

View File

@ -0,0 +1,201 @@
#!/usr/bin/env python3
# Nix pre-build hook: inspects a derivation's requiredSystemFeatures and, for
# every configured pattern that matches, prints "extra-sandbox-paths"
# followed by `guest=host` mount lines (cf. Nix's pre-build-hook protocol).
import glob
import json
import subprocess
import textwrap
from argparse import ArgumentParser
from collections import deque
from itertools import chain
from pathlib import Path
from typing import Deque, Dict, List, Set, Tuple, TypeAlias, TypedDict

import logging

Glob: TypeAlias = str
PathString: TypeAlias = str


class Mount(TypedDict):
    # Host path to expose, and where to mount it inside the sandbox.
    host: PathString
    guest: PathString


class Pattern(TypedDict):
    # Features that trigger this pattern.
    onFeatures: List[str]
    # Glob patterns and explicit host->guest mounts to expose.
    paths: List[Glob | Mount]
    # Whether to also mount targets of symlinks found under `paths`.
    unsafeFollowSymlinks: bool


AllowedPatterns: TypeAlias = Dict[str, Pattern]


# CLI: Nix invokes the hook as `hook <drv-path> [<sandbox-path>]`; the
# remaining flags are baked in by the package's wrapper (cf. default.nix).
parser = ArgumentParser("pre-build-hook")
parser.add_argument("derivation_path")
parser.add_argument("sandbox_path", nargs="?")
parser.add_argument("--patterns", type=Path, required=True)
parser.add_argument("--nix-exe", type=Path, required=True)
parser.add_argument(
    "--issue-command",
    choices=("always", "conditional", "never"),
    default="conditional",
    help="Whether to print extra-sandbox-paths",
)
parser.add_argument(
    "--issue-stop",
    choices=("always", "conditional", "never"),
    default="conditional",
    help="Whether to print the final empty line",
)
parser.add_argument("-v", "--verbose", action="count", default=0)
def symlink_parents(p: Path) -> List[Path]:
    """Follow a chain of symlinks starting at ``p``.

    Returns every path traversed after ``p`` itself (intermediate links and
    the final target).  The ``p not in out`` guard breaks symlink cycles.
    """
    out: List[Path] = []
    while p.is_symlink() and p not in out:
        target = p.readlink()
        if target.is_relative_to("."):
            # Bugfix: a relative symlink target resolves against the directory
            # CONTAINING the link; the original `p = p / target` appended the
            # target to the link path itself, yielding a non-existent path.
            p = p.parent / target
        else:
            p = target
        out.append(p)
    return out
def get_strings(drv_env: dict, name: str) -> List[str]:
    """Read a list-of-strings attribute out of a derivation's "env".

    Structured-attrs derivations keep their attributes as JSON under the
    "__json" key; plain derivations store them whitespace-separated.
    """
    if "__json" in drv_env:
        structured = json.loads(drv_env["__json"])
        return list(structured.get(name, []))
    return drv_env.get(name, "").split()
def validate_mounts(pattern: Pattern) -> List[Tuple[PathString, PathString, bool]]:
    """Expand a pattern's paths into (guest, host, follow_symlinks) triples.

    Glob entries are expanded against the host filesystem and must match at
    least one path; explicit {host, guest} mounts must point at an existing
    host path.
    """
    follow = pattern["unsafeFollowSymlinks"]
    roots: List[Tuple[PathString, PathString, bool]] = []
    for mount in pattern["paths"]:
        if isinstance(mount, str):
            expanded = glob.glob(mount)
            assert expanded, f"Specified host paths do not exist: {mount}"
            for match in expanded:
                roots.append((match, match, follow))
        else:
            assert isinstance(mount, dict) and "host" in mount, mount
            host = mount["host"]
            assert Path(
                host
            ).exists(), f"Specified host paths do not exist: {host}"
            roots.append((mount["guest"], host, follow))
    return roots
def entrypoint():
    """Run the hook: parse args, query the derivation, print the mounts.

    Protocol: print "extra-sandbox-paths", then one `guest=host` line per
    mount, terminated by an empty line.  On any failure the hook logs and
    returns quietly, declining to add paths rather than failing the build.
    """
    args = parser.parse_args()

    VERBOSITY_LEVELS = [logging.ERROR, logging.INFO, logging.DEBUG]
    level_index = min(args.verbose, len(VERBOSITY_LEVELS) - 1)
    logging.basicConfig(level=VERBOSITY_LEVELS[level_index])

    drv_path = args.derivation_path

    with open(args.patterns, "r") as f:
        allowed_patterns = json.load(f)

    if not Path(drv_path).exists():
        logging.error(
            f"{drv_path} doesn't exist."
            " Cf. https://github.com/NixOS/nix/issues/9272"
            " Exiting the hook",
        )
        # Bugfix: actually exit, as the message promises; previously control
        # fell through and `nix show-derivation` was invoked on a path known
        # to be missing.
        return

    proc = subprocess.run(
        [
            args.nix_exe,
            "show-derivation",
            drv_path,
        ],
        capture_output=True,
    )
    try:
        parsed_drv = json.loads(proc.stdout)
    except json.JSONDecodeError:
        logging.error(
            "Couldn't parse the output of"
            "`nix show-derivation`"
            f". Expected JSON, observed: {proc.stdout}",
        )
        logging.error(textwrap.indent(proc.stdout.decode("utf8"), prefix=" " * 4))
        logging.info("Exiting the nix-required-binds hook")
        return
    [canon_drv_path] = parsed_drv.keys()

    # Union of all features any pattern reacts to; other features are ignored.
    known_features = set(
        chain.from_iterable(
            pattern["onFeatures"] for pattern in allowed_patterns.values()
        )
    )

    parsed_drv = parsed_drv[canon_drv_path]
    drv_env = parsed_drv.get("env", {})
    required_features = get_strings(drv_env, "requiredSystemFeatures")
    required_features = list(filter(known_features.__contains__, required_features))

    # Fix: the original comprehension also iterated `for path in
    # pattern["paths"]`, adding the same pattern once per path and causing
    # redundant validate_mounts() calls (results were deduplicated later,
    # so observable mounts are unchanged).
    patterns: List[Pattern] = [
        pattern
        for pattern in allowed_patterns.values()
        if any(feature in required_features for feature in pattern["onFeatures"])
    ]

    # BFS over the configured mounts, optionally chasing symlink targets.
    queue: Deque[Tuple[PathString, PathString, bool]] = deque(
        (mnt for pattern in patterns for mnt in validate_mounts(pattern))
    )
    unique_mounts: Set[Tuple[PathString, PathString]] = set()
    mounts: List[Tuple[PathString, PathString]] = []

    while queue:
        guest_path_str, host_path_str, follow_symlinks = queue.popleft()
        if (guest_path_str, host_path_str) not in unique_mounts:
            mounts.append((guest_path_str, host_path_str))
            unique_mounts.add((guest_path_str, host_path_str))
        if not follow_symlinks:
            continue
        host_path = Path(host_path_str)
        if not (host_path.is_dir() or host_path.is_symlink()):
            continue
        # assert host_path_str == guest_path_str, (host_path_str, guest_path_str)
        for child in host_path.iterdir() if host_path.is_dir() else [host_path]:
            for parent in symlink_parents(child):
                parent_str = parent.absolute().as_posix()
                queue.append((parent_str, parent_str, follow_symlinks))

    # the pre-build-hook command
    if args.issue_command == "always" or (
        args.issue_command == "conditional" and mounts
    ):
        print("extra-sandbox-paths")
        print_paths = True
    else:
        print_paths = False

    # arguments, one per line
    for guest_path_str, host_path_str in mounts if print_paths else []:
        print(f"{guest_path_str}={host_path_str}")

    # terminated by an empty line
    something_to_terminate = args.issue_stop == "conditional" and mounts
    if args.issue_stop == "always" or something_to_terminate:
        print()


if __name__ == "__main__":
    entrypoint()

View File

@ -0,0 +1,67 @@
# Package for the nix-required-mounts pre-build hook: a small Python
# application wrapped with its pre-computed pattern config and a nix CLI.
{
  addOpenGLRunpath,
  # JSON file containing `allowedPatterns` with the store closures of the
  # listed paths merged in (built by ./closure.nix).
  allowedPatternsPath ? callPackage ./closure.nix { inherit allowedPatterns; },
  allowedPatterns ? rec {
    # This config is just an example.
    # When the hook observes either of the following requiredSystemFeatures:
    nvidia-gpu.onFeatures = [
      "gpu"
      "nvidia-gpu"
      "opengl"
      "cuda"
    ];
    # It exposes these paths in the sandbox:
    nvidia-gpu.paths = [
      addOpenGLRunpath.driverLink
      "/dev/dri"
      "/dev/nvidia*"
    ];
    nvidia-gpu.unsafeFollowSymlinks = true;
  },
  callPackage,
  extraWrapperArgs ? [ ],
  lib,
  makeWrapper,
  nix,
  nixosTests,
  python3Packages,
}:
let
  # Single source of truth for name/version/description: pyproject.toml.
  attrs = builtins.fromTOML (builtins.readFile ./pyproject.toml);
  pname = attrs.project.name;
  inherit (attrs.project) version;
in
python3Packages.buildPythonApplication {
  inherit pname version;
  pyproject = true;
  src = lib.cleanSource ./.;
  nativeBuildInputs = [
    makeWrapper
    python3Packages.setuptools
  ];
  # Bake the config and the nix executable into the installed script.
  postFixup = ''
    wrapProgram $out/bin/${pname} \
      --add-flags "--patterns ${allowedPatternsPath}" \
      --add-flags "--nix-exe ${lib.getExe nix}" \
      ${builtins.concatStringsSep " " extraWrapperArgs}
  '';
  passthru = {
    inherit allowedPatterns;
    tests = {
      inherit (nixosTests) nix-required-mounts;
    };
  };
  meta = {
    inherit (attrs.project) description;
    homepage = attrs.project.urls.Homepage;
    license = lib.licenses.mit;
    mainProgram = attrs.project.name;
    maintainers = with lib.maintainers; [ SomeoneSerge ];
  };
}

View File

@ -0,0 +1,20 @@
[build-system]
build-backend = "setuptools.build_meta"
requires = [ "setuptools" ]
[project]
name = "nix-required-mounts"
version = "0.0.1"
description = """
A --pre-build-hook for Nix, \
that allows to expose extra paths in the build sandbox \
based on derivations' requiredSystemFeatures"""
[project.urls]
Homepage = "https://github.com/NixOS/nixpkgs/tree/master/pkgs/by-name/ni/nix-required-mounts"
[project.scripts]
nix-required-mounts = "nix_required_mounts:entrypoint"
[tool.black]
line-length = 79

View File

@ -0,0 +1,45 @@
# Runs inside the closure.nix builder: merges each pattern's store-path
# references (read from exportReferencesGraph files in the build directory)
# into its `paths` list, then writes the result to $out as JSON.
import json
import os

# Trailing-slash store prefix, e.g. "/nix/store/" (set by closure.nix).
store_dir = os.environ["storeDir"]

# The raw allowedPatterns config, passed via passAsFile.
with open(os.environ["shallowConfigPath"], "r") as f:
    config = json.load(f)

# store path -> parsed reference list (consulted by read_edges).
cache = {}
def read_edges(path: str | dict) -> list[str | dict]:
    """Return the direct store references of `path`.

    Explicit {host, guest} mounts and non-store paths have no reference
    graph and are returned as-is (wrapped in a list).  Results for store
    paths are memoized in `cache`.
    """
    if isinstance(path, dict):
        return [path]
    assert isinstance(path, str)
    if not path.startswith(store_dir):
        return [path]
    if path in cache:
        return cache[path]
    # Matches the exportName scheme in closure.nix.
    name = f"references-{path.removeprefix(store_dir)}"
    assert os.path.exists(name)
    with open(name, "r") as f:
        refs = [p.strip() for p in f.readlines() if p.startswith(store_dir)]
    # Bugfix: the cache was checked above but never populated, so the
    # memoization was dead code and graph files were re-read every time.
    cache[path] = refs
    return refs
def host_path(mount: str | dict) -> str:
if isinstance(mount, dict):
return mount["host"]
assert isinstance(mount, str), mount
return mount
# Append each path's references to its pattern, then deduplicate by host
# path (the dict comprehension keeps the LAST entry for a given host path).
for pattern in config:
    closure = []
    for path in config[pattern]["paths"]:
        closure.append(path)
        closure.extend(read_edges(path))
    config[pattern]["paths"] = list({host_path(m): m for m in closure}.values())

# Write the enriched config to the derivation output.
with open(os.environ["out"], "w") as f:
    json.dump(config, f)

View File

@ -3,6 +3,7 @@
cmake,
cudaPackages,
lib,
saxpy,
}:
let
inherit (cudaPackages)
@ -15,7 +16,6 @@ let
cudatoolkit
flags
libcublas
setupCudaHook
;
inherit (lib) getDev getLib getOutput;
fs = lib.fileset;
@ -58,10 +58,19 @@ backendStdenv.mkDerivation {
(lib.cmakeFeature "CMAKE_CUDA_ARCHITECTURES" flags.cmakeCudaArchitecturesString)
];
passthru.gpuCheck = saxpy.overrideAttrs (_: {
requiredSystemFeatures = [ "cuda" ];
doInstallCheck = true;
postInstallCheck = ''
$out/bin/${saxpy.meta.mainProgram or (lib.getName saxpy)}
'';
});
meta = rec {
description = "Simple (Single-precision AX Plus Y) FindCUDAToolkit.cmake example for testing cross-compilation";
license = lib.licenses.mit;
maintainers = lib.teams.cuda.members;
mainProgram = "saxpy";
platforms = lib.platforms.unix;
badPlatforms = lib.optionals (flags.isJetsonBuild && cudaOlder "11.4") platforms;
};

View File

@ -0,0 +1,29 @@
# Builds a writePython3Bin test runner whose `passthru.gpuCheck` derivation
# executes the script with `requiredSystemFeatures = [ feature ]`, so it is
# only scheduled on builders that expose the GPU (e.g. via
# nix-required-mounts).  Registered as cudaPackages.writeGpuTestPython.
{
  lib,
  writers,
  runCommand,
}:
{
  feature ? "cuda",
  name ? feature,
  libraries ? [ ],
}:
content:
let
  tester = writers.writePython3Bin "tester-${name}" { inherit libraries; } content;
  # Recursive let binding: gpuCheck puts the final tester' on its own PATH.
  tester' = tester.overrideAttrs (oldAttrs: {
    passthru.gpuCheck =
      runCommand "test-${name}"
        {
          nativeBuildInputs = [ tester' ];
          requiredSystemFeatures = [ feature ];
        }
        ''
          set -e
          ${tester.meta.mainProgram or (lib.getName tester')}
          touch $out
        '';
  });
in
tester'

View File

@ -1,6 +1,7 @@
{
lib,
buildPythonPackage,
cudaPackages,
fetchFromGitHub,
substituteAll,
pythonOlder,
@ -8,6 +9,7 @@
setuptools,
pytestCheckHook,
versioneer,
pynvml,
}:
buildPythonPackage rec {
@ -50,6 +52,13 @@ buildPythonPackage rec {
# OSError: /run/opengl-driver/lib/libnvidia-ml.so.1: cannot open shared object file: No such file or directory
doCheck = false;
passthru.tests.tester-nvmlInit = cudaPackages.writeGpuTestPython { libraries = [ pynvml ]; } ''
import pynvml
from pynvml.smi import nvidia_smi # noqa: F401
print(f"{pynvml.nvmlInit()=}")
'';
meta = with lib; {
description = "Python bindings for the NVIDIA Management Library";
homepage = "https://github.com/gpuopenanalytics/pynvml";

View File

@ -8,6 +8,7 @@
pythonAtLeast,
pythonOlder,
addOpenGLRunpath,
callPackage,
cudaPackages,
future,
numpy,
@ -15,6 +16,7 @@
pyyaml,
requests,
setuptools,
torch-bin,
typing-extensions,
sympy,
jinja2,
@ -119,6 +121,8 @@ buildPythonPackage {
pythonImportsCheck = [ "torch" ];
passthru.gpuChecks.cudaAvailable = callPackage ./test-cuda.nix { torch = torch-bin; };
meta = {
description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
homepage = "https://pytorch.org/";

View File

@ -24,6 +24,10 @@
mpi,
buildDocs ? false,
# tests.cudaAvailable:
callPackage,
torchWithCuda,
# Native build inputs
cmake,
symlinkJoin,
@ -639,11 +643,12 @@ buildPythonPackage rec {
rocmSupport
rocmPackages
;
cudaCapabilities = if cudaSupport then supportedCudaCapabilities else [ ];
# At least for 1.10.2 `torch.fft` is unavailable unless BLAS provider is MKL. This attribute allows for easy detection of its availability.
blasProvider = blas.provider;
# To help debug when a package is broken due to CUDA support
inherit brokenConditions;
cudaCapabilities = if cudaSupport then supportedCudaCapabilities else [ ];
tests = callPackage ./tests.nix { };
};
meta = {

View File

@ -0,0 +1,40 @@
# GPU-gated torch smoke tests, instantiated via callPackage from tests.nix.
{
  lib,
  torchWithCuda,
  torchWithRocm,
  callPackage,
}:
let
  accelAvailable =
    {
      feature,
      versionAttr,
      torch,
      cudaPackages,
    }:
    # Fix: the helper is registered as `writeGpuTestPython` in
    # cuda-packages.nix (and called under that name by blender and pynvml);
    # `writeGpuPythonTest` is a missing attribute and fails evaluation.
    cudaPackages.writeGpuTestPython
      {
        inherit feature;
        libraries = [ torch ];
        name = "${feature}Available";
      }
      ''
        import torch
        message = f"{torch.cuda.is_available()=} and {torch.version.${versionAttr}=}"
        assert torch.cuda.is_available() and torch.version.${versionAttr}, message
        print(message)
      '';
in
{
  tester-cudaAvailable = callPackage accelAvailable {
    feature = "cuda";
    versionAttr = "cuda";
    torch = torchWithCuda;
  };
  tester-rocmAvailable = callPackage accelAvailable {
    feature = "rocm";
    versionAttr = "hip";
    torch = torchWithRocm;
  };
}

View File

@ -0,0 +1,3 @@
# Entry point for torch's passthru.tests; see ./gpu-checks.nix.
{ callPackage }:
callPackage ./gpu-checks.nix { }

View File

@ -77,6 +77,8 @@ let
saxpy = final.callPackage ../development/cuda-modules/saxpy { };
nccl = final.callPackage ../development/cuda-modules/nccl { };
nccl-tests = final.callPackage ../development/cuda-modules/nccl-tests { };
writeGpuTestPython = final.callPackage ../development/cuda-modules/write-gpu-python-test.nix { };
});
mkVersionedPackageName =