nixos/nvidia: apply nixfmt-rfc-style

Kiskae 2024-05-22 13:46:52 +02:00
parent a4cbb24e12
commit fbdcdde04a
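
The change below is purely mechanical: the module is re-formatted with nixfmt-rfc-style, with no change to its options or behavior. As a minimal sketch of what the formatter does, taken from the first hunk of this diff (surrounding code omitted):

    # before: if/then/else spread over several lines
    nvidia_x11 =
      if nvidiaEnabled || cfg.datacenter.enable
      then cfg.package
      else null;

    # after: short conditionals are collapsed onto one line; in the same diff,
    # multi-element lists (e.g. blacklistedKernelModules) are instead expanded
    # to one element per line
    nvidia_x11 = if nvidiaEnabled || cfg.datacenter.enable then cfg.package else null;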


@@ -3,12 +3,10 @@
   lib,
   pkgs,
   ...
-}: let
+}:
+let
   nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
-  nvidia_x11 =
-    if nvidiaEnabled || cfg.datacenter.enable
-    then cfg.package
-    else null;
+  nvidia_x11 = if nvidiaEnabled || cfg.datacenter.enable then cfg.package else null;
   cfg = config.hardware.nvidia;
@@ -20,7 +18,8 @@
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
   settingsFormat = pkgs.formats.keyValue { };
-in {
+in
+{
   options = {
     hardware.nvidia = {
       datacenter.enable = lib.mkEnableOption ''
@@ -211,7 +210,9 @@ in {
         (lib.mkEnableOption ''
           nvidia-settings, NVIDIA's GUI configuration tool
         '')
-        // {default = true;};
+        // {
+          default = true;
+        };
       nvidiaPersistenced = lib.mkEnableOption ''
         nvidia-persistenced a update for NVIDIA GPU headless mode, i.e.
@@ -226,7 +227,8 @@ in {
       '';
       package = lib.mkOption {
-        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        default =
+          config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
         defaultText = lib.literalExpression ''
           config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
         '';
@@ -242,17 +244,13 @@
     };
   };
-  config = let
-    igpuDriver =
-      if pCfg.intelBusId != ""
-      then "modesetting"
-      else "amdgpu";
-    igpuBusId =
-      if pCfg.intelBusId != ""
-      then pCfg.intelBusId
-      else pCfg.amdgpuBusId;
-  in
-    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
+  config =
+    let
+      igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
+      igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
+    in
+    lib.mkIf (nvidia_x11 != null) (
+      lib.mkMerge [
         # Common
         ({
           assertions = [
@ -262,7 +260,10 @@ in {
} }
]; ];
boot = { boot = {
blacklistedKernelModules = ["nouveau" "nvidiafb"]; blacklistedKernelModules = [
"nouveau"
"nvidiafb"
];
# Don't add `nvidia-uvm` to `kernelModules`, because we want # Don't add `nvidia-uvm` to `kernelModules`, because we want
# `nvidia-uvm` be loaded only after `udev` rules for `nvidia` kernel # `nvidia-uvm` be loaded only after `udev` rules for `nvidia` kernel
@@ -274,11 +275,8 @@ in {
             softdep nvidia post: nvidia-uvm
           '';
           };
-          systemd.tmpfiles.rules =
-            lib.optional config.virtualisation.docker.enableNvidia
-            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
-          services.udev.extraRules =
-            ''
+          systemd.tmpfiles.rules = lib.optional config.virtualisation.docker.enableNvidia "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
+          services.udev.extraRules = ''
             # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
             KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
             KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
@@ -287,16 +285,10 @@ in {
             KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
           '';
           hardware.opengl = {
-            extraPackages = [
-              nvidia_x11.out
-            ];
-            extraPackages32 = [
-              nvidia_x11.lib32
-            ];
+            extraPackages = [ nvidia_x11.out ];
+            extraPackages32 = [ nvidia_x11.lib32 ];
           };
-          environment.systemPackages = [
-            nvidia_x11.bin
-          ];
+          environment.systemPackages = [ nvidia_x11.bin ];
         })
         # X11
         (lib.mkIf nvidiaEnabled {
@@ -312,7 +304,8 @@ in {
             }
             {
-              assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
+              assertion =
+                primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
               message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
             }
@@ -322,7 +315,8 @@ in {
             }
             {
-              assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
+              assertion =
+                (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
               message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
             }
@@ -359,7 +353,8 @@ in {
             {
               assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
               message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
-            }];
+            }
+          ];
           # If Optimus/PRIME is enabled, we:
           # - Specify the configured NVIDIA GPU bus ID in the Device section for the
@@ -398,9 +393,8 @@ in {
           deviceSection =
             ''
              Option "SidebandSocketPath" "/run/nvidia-xdriver/"
-            '' +
-            lib.optionalString primeEnabled
-            ''
+            ''
+            + lib.optionalString primeEnabled ''
              BusID "${pCfg.nvidiaBusId}"
            ''
            + lib.optionalString pCfg.allowExternalGpu ''
@@ -431,17 +425,16 @@
             Option "AllowNVIDIAGPUScreens"
           '';
-          services.xserver.displayManager.setupCommands = let
-            gpuProviderName =
-              if igpuDriver == "amdgpu"
-              then
-                # find the name of the provider if amdgpu
-                "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
-              else igpuDriver;
-            providerCmdParams =
-              if syncCfg.enable
-              then "\"${gpuProviderName}\" NVIDIA-0"
-              else "NVIDIA-G0 \"${gpuProviderName}\"";
-          in
+          services.xserver.displayManager.setupCommands =
+            let
+              gpuProviderName =
+                if igpuDriver == "amdgpu" then
+                  # find the name of the provider if amdgpu
+                  "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+                else
+                  igpuDriver;
+              providerCmdParams =
+                if syncCfg.enable then "\"${gpuProviderName}\" NVIDIA-0" else "NVIDIA-G0 \"${gpuProviderName}\"";
+            in
             lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
               # Added by nvidia configuration module for Optimus/PRIME.
@@ -450,35 +443,35 @@
           '';
           environment.etc = {
-            "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+            "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {
+              source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
+            };
             # 'nvidia_x11' installs it's files to /run/opengl-driver/...
             "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
           };
           hardware.opengl = {
-            extraPackages = [
-              pkgs.nvidia-vaapi-driver
-            ];
-            extraPackages32 = [
-              pkgs.pkgsi686Linux.nvidia-vaapi-driver
-            ];
+            extraPackages = [ pkgs.nvidia-vaapi-driver ];
+            extraPackages32 = [ pkgs.pkgsi686Linux.nvidia-vaapi-driver ];
           };
           environment.systemPackages =
             lib.optional cfg.nvidiaSettings nvidia_x11.settings
             ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
-            ++ lib.optional offloadCfg.enableOffloadCmd
-            (pkgs.writeShellScriptBin "nvidia-offload" ''
-              export __NV_PRIME_RENDER_OFFLOAD=1
-              export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
-              export __GLX_VENDOR_LIBRARY_NAME=nvidia
-              export __VK_LAYER_NV_optimus=NVIDIA_only
-              exec "$@"
-            '');
+            ++ lib.optional offloadCfg.enableOffloadCmd (
+              pkgs.writeShellScriptBin "nvidia-offload" ''
+                export __NV_PRIME_RENDER_OFFLOAD=1
+                export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+                export __GLX_VENDOR_LIBRARY_NAME=nvidia
+                export __VK_LAYER_NV_optimus=NVIDIA_only
+                exec "$@"
+              ''
+            );
           systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
-          systemd.services = let
+          systemd.services =
+            let
               nvidiaService = state: {
                 description = "NVIDIA system ${state} actions";
                 path = [ pkgs.kbd ];
@@ -494,12 +487,16 @@ in {
             (lib.mkIf cfg.powerManagement.enable {
               nvidia-suspend = nvidiaService "suspend";
               nvidia-hibernate = nvidiaService "hibernate";
-              nvidia-resume =
-                (nvidiaService "resume")
-                // {
-                  before = [ ];
-                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
-                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
-                };
+              nvidia-resume = (nvidiaService "resume") // {
+                before = [ ];
+                after = [
+                  "systemd-suspend.service"
+                  "systemd-hibernate.service"
+                ];
+                requiredBy = [
+                  "systemd-suspend.service"
+                  "systemd-hibernate.service"
+                ];
+              };
             })
             (lib.mkIf cfg.nvidiaPersistenced {
@@ -541,24 +538,26 @@
             in
             lib.optional (isOpen || isNewUnfree) nvidia_x11.firmware;
-          systemd.tmpfiles.rules = [
-            # Remove the following log message:
-            # (WW) NVIDIA: Failed to bind sideband socket to
-            # (WW) NVIDIA: '/var/run/nvidia-xdriver-b4f69129' Permission denied
-            #
-            # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
-            "d /run/nvidia-xdriver 0770 root users"
-          ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+          systemd.tmpfiles.rules =
+            [
+              # Remove the following log message:
+              # (WW) NVIDIA: Failed to bind sideband socket to
+              # (WW) NVIDIA: '/var/run/nvidia-xdriver-b4f69129' Permission denied
+              #
+              # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+              "d /run/nvidia-xdriver 0770 root users"
+            ]
+            ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+              "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
           boot = {
-            extraModulePackages =
-              if cfg.open
-              then [nvidia_x11.open]
-              else [nvidia_x11.bin];
+            extraModulePackages = if cfg.open then [ nvidia_x11.open ] else [ nvidia_x11.bin ];
             # nvidia-uvm is required by CUDA applications.
-            kernelModules =
-              lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
+            kernelModules = lib.optionals config.services.xserver.enable [
+              "nvidia"
+              "nvidia_modeset"
+              "nvidia_drm"
+            ];
             # If requested enable modesetting via kernel parameter.
             kernelParams =
@@ -572,8 +571,7 @@ in {
             options nvidia "NVreg_DynamicPowerManagement=0x02"
           '';
           };
-          services.udev.extraRules =
-            lib.optionalString cfg.powerManagement.finegrained (
+          services.udev.extraRules = lib.optionalString cfg.powerManagement.finegrained (
             lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
               # Remove NVIDIA USB xHCI Host Controller devices, if present
               ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
@@ -597,9 +595,7 @@ in {
         })
         # Data Center
         (lib.mkIf (cfg.datacenter.enable) {
-          boot.extraModulePackages = [
-            nvidia_x11.bin
-          ];
+          boot.extraModulePackages = [ nvidia_x11.bin ];
           systemd = {
             tmpfiles.rules =
@@ -617,7 +613,8 @@ in {
             serviceConfig = {
               Type = "forking";
               TimeoutStartSec = 240;
-              ExecStart = let
+              ExecStart =
+                let
                   nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
                 in
                 "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
@ -645,5 +642,6 @@ in {
lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced; ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
}) })
]); ]
);
} }