format tree

zowoq 2024-07-25 10:13:27 +10:00
parent dc07750547
commit 722c1dc068
40 changed files with 756 additions and 576 deletions

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
imports = [
./flake.nix
./nix.nix

View File

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.srvos;
@ -18,10 +23,14 @@ in
services.telegraf.extraConfig.inputs.file =
let
inputsWithDate = lib.filterAttrs (_: input: input ? lastModified) cfg.flake.inputs;
flakeAttrs = input: (lib.mapAttrsToList (n: v: ''${n}="${v}"'')
(lib.filterAttrs (_: v: (builtins.typeOf v) == "string") input));
lastModified = name: input: ''
flake_input_last_modified{input="${name}",${lib.concatStringsSep "," (flakeAttrs input)}} ${toString input.lastModified}'';
flakeAttrs =
input:
(lib.mapAttrsToList (n: v: ''${n}="${v}"'') (
lib.filterAttrs (_: v: (builtins.typeOf v) == "string") input
));
lastModified =
name: input:
''flake_input_last_modified{input="${name}",${lib.concatStringsSep "," (flakeAttrs input)}} ${toString input.lastModified}'';
# avoid adding store path references on flakes which we do not need at runtime.
promText = builtins.unsafeDiscardStringContext ''
@ -33,9 +42,7 @@ in
[
{
data_format = "prometheus";
files = [
(pkgs.writeText "flake-inputs.prom" promText)
];
files = [ (pkgs.writeText "flake-inputs.prom" promText) ];
}
];
};

View File

@ -6,11 +6,12 @@
nix.settings.connect-timeout = 5;
# Enable flakes
nix.settings.experimental-features = [
"nix-command"
"flakes"
] ++ lib.optional (lib.versionOlder (lib.versions.majorMinor config.nix.package.version) "2.22")
"repl-flake";
nix.settings.experimental-features =
[
"nix-command"
"flakes"
]
++ lib.optional (lib.versionOlder (lib.versions.majorMinor config.nix.package.version) "2.22") "repl-flake";
# The default at 10 is rarely enough.
nix.settings.log-lines = lib.mkDefault 25;

View File

@ -8,7 +8,7 @@
UseDns no
# unbind gnupg sockets if they exist
StreamLocalBindUnlink yes
# Use key exchange algorithms recommended by `nixpkgs#ssh-audit`
KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,sntrup761x25519-sha512@openssh.com
'';

View File

@ -1,5 +1 @@
{
imports = [
../common
];
}
{ imports = [ ../common ]; }

View File

@ -1,15 +1,17 @@
{ lib, config, ... }:
{
# Enable flakes
nix.settings.experimental-features = [
# Enable the use of the fetchClosure built-in function in the Nix language.
"fetch-closure"
nix.settings.experimental-features =
[
# Enable the use of the fetchClosure built-in function in the Nix language.
"fetch-closure"
# Allow derivation builders to call Nix, and thus build derivations recursively.
"recursive-nix"
] ++ lib.optional (lib.versionAtLeast (lib.versions.majorMinor config.nix.package.version) "2.19")
# Allow the use of the impure-env setting.
"configurable-impure-env";
# Allow derivation builders to call Nix, and thus build derivations recursively.
"recursive-nix"
]
++ lib.optional (lib.versionAtLeast (lib.versions.majorMinor config.nix.package.version) "2.19")
# Allow the use of the impure-env setting.
"configurable-impure-env";
# support for running containers in builds
nix.settings.system-features = lib.mkDefault [ "uid-range" ];

View File

@ -1,4 +1,9 @@
{ lib, inputs, pkgs, ... }:
{
lib,
inputs,
pkgs,
...
}:
{
services.telegraf = {
enable = true;

View File

@ -2,11 +2,11 @@
{
# various terminfo packages
environment.systemPackages = [
pkgs.ncurses # macOS often ships a quite old version
pkgs.wezterm.terminfo # this one does not need compilation
# avoid compiling desktop stuff when cross-compiling NixOS
] ++ lib.optionals (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) [
pkgs.termite.terminfo
];
environment.systemPackages =
[
pkgs.ncurses # macOS often ships a quite old version
pkgs.wezterm.terminfo # this one does not need compilation
# avoid compiling desktop stuff when cross-compiling NixOS
]
++ lib.optionals (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) [ pkgs.termite.terminfo ];
}

View File

@ -1,5 +1 @@
{
imports = [
../common
];
}
{ imports = [ ../common ]; }

View File

@ -1,8 +1,16 @@
# This file provides backward compatibility to nix < 2.4 clients
{ system ? builtins.currentSystem, src ? ./. }:
{
system ? builtins.currentSystem,
src ? ./.,
}:
let
lock = builtins.fromJSON (builtins.readFile ./dev/private/flake.lock);
inherit (lock.nodes.flake-compat.locked) owner repo rev narHash;
inherit (lock.nodes.flake-compat.locked)
owner
repo
rev
narHash
;
flake-compat = fetchTarball {
url = "https://github.com/${owner}/${repo}/archive/${rev}.tar.gz";

View File

@ -1,20 +1,24 @@
{ prefix, self, pkgs }:
{
prefix,
self,
pkgs,
}:
let
lib = pkgs.lib;
system = pkgs.system;
nixosTest = import "${pkgs.path}/nixos/lib/testing-python.nix" {
inherit pkgs system;
};
nixosTest = import "${pkgs.path}/nixos/lib/testing-python.nix" { inherit pkgs system; };
moduleTests = {
"${prefix}-server" = nixosTest.makeTest {
name = "${prefix}-server";
nodes.machine = { ... }: {
imports = [ self.nixosModules.server ];
networking.hostName = "machine";
};
nodes.machine =
{ ... }:
{
imports = [ self.nixosModules.server ];
networking.hostName = "machine";
};
testScript = ''
machine.wait_for_unit("sshd.service")
# TODO: what else to test for?
@ -22,14 +26,12 @@ let
};
};
configurations = import ./test-configurations.nix {
inherit self pkgs;
};
configurations = import ./test-configurations.nix { inherit self pkgs; };
# Add all the nixos configurations to the checks
nixosChecks =
lib.mapAttrs'
(name: value: { name = "${prefix}-${name}"; value = value.config.system.build.toplevel; })
(lib.filterAttrs (_name: value: value != null) configurations);
nixosChecks = lib.mapAttrs' (name: value: {
name = "${prefix}-${name}";
value = value.config.system.build.toplevel;
}) (lib.filterAttrs (_name: value: value != null) configurations);
in
nixosChecks // moduleTests

View File

@ -6,21 +6,25 @@ let
lib = pkgs.lib;
system = pkgs.system;
nixosSystem = args:
import "${toString pkgs.path}/nixos/lib/eval-config.nix" ({ inherit lib system; } // args);
nixosSystem =
args: import "${toString pkgs.path}/nixos/lib/eval-config.nix" ({ inherit lib system; } // args);
# some example configuration to make it eval
dummy = { config, ... }: {
networking.hostName = "example-common";
system.stateVersion = config.system.nixos.version;
users.users.root.initialPassword = "fnord23";
boot.loader.grub.devices = lib.mkForce [ "/dev/sda" ];
fileSystems."/".device = lib.mkDefault "/dev/sda";
dummy =
{ config, ... }:
{
networking.hostName = "example-common";
system.stateVersion = config.system.nixos.version;
users.users.root.initialPassword = "fnord23";
boot.loader.grub.devices = lib.mkForce [ "/dev/sda" ];
fileSystems."/".device = lib.mkDefault "/dev/sda";
# Don't reinstantiate nixpkgs for every nixos eval.
# Also important to have nixpkgs config which allows for some required insecure packages
nixpkgs = { inherit pkgs; };
};
# Don't reinstantiate nixpkgs for every nixos eval.
# Also important to have nixpkgs config which allows for some required insecure packages
nixpkgs = {
inherit pkgs;
};
};
in
{
# General
@ -60,44 +64,38 @@ in
modules = [
dummy
self.nixosModules.hardware-hetzner-cloud
{
systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef";
}
{ systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef"; }
];
};
example-hardware-hetzner-cloud-arm =
if (system == "aarch64-linux") then
nixosSystem
{
modules = [
dummy
self.nixosModules.hardware-hetzner-cloud-arm
{
systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef";
}
];
} else null;
nixosSystem {
modules = [
dummy
self.nixosModules.hardware-hetzner-cloud-arm
{ systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef"; }
];
}
else
null;
example-hardware-hetzner-online-amd = nixosSystem {
modules = [
dummy
self.nixosModules.hardware-hetzner-online-amd
{
systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef";
}
{ systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef"; }
];
};
example-hardware-hetzner-online-intel =
if (system == "x86_64-linux") then
nixosSystem
{
modules = [
dummy
self.nixosModules.hardware-hetzner-online-intel
{
systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef";
}
];
} else null;
nixosSystem {
modules = [
dummy
self.nixosModules.hardware-hetzner-online-intel
{ systemd.network.networks."10-uplink".networkConfig.Address = "::cafe:babe:feed:face:dead:beef"; }
];
}
else
null;
example-hardware-vultr-bare-metal = nixosSystem {
modules = [
dummy

View File

@ -1,18 +1,18 @@
{
description = "Server-optimized NixOS configuration";
nixConfig.extra-substituters = [
"https://nix-community.cachix.org"
];
nixConfig.extra-substituters = [ "https://nix-community.cachix.org" ];
nixConfig.extra-trusted-public-keys = [
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
];
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small";
outputs = publicInputs @ { self, nixpkgs, ... }:
outputs =
publicInputs@{ self, nixpkgs, ... }:
let
loadPrivateFlake = path:
loadPrivateFlake =
path:
let
flakeHash = nixpkgs.lib.fileContents "${toString path}.narHash";
flakePath = "path:${toString path}?narHash=${flakeHash}";
@ -36,7 +36,15 @@
"x86_64-linux"
];
perSystem = { config, lib, pkgs, self', system, ... }:
perSystem =
{
config,
lib,
pkgs,
self',
system,
...
}:
let
defaultPlatform = pkgs.stdenv.hostPlatform.system == "x86_64-linux";
inherit (pkgs.stdenv.hostPlatform) isLinux;
@ -47,37 +55,41 @@
devShells = lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells;
packages = lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages;
in
devShells // { inherit (self') formatter; } // packages //
(lib.optionalAttrs isLinux (import ./dev/checks.nix {
inherit self pkgs;
prefix = "nixos";
}))
// (lib.optionalAttrs isLinux (import ./dev/checks.nix {
inherit self;
pkgs = import inputs.nixos-stable {
inherit system;
};
prefix = "nixos-stable";
}));
devShells
// {
inherit (self') formatter;
}
// packages
// (lib.optionalAttrs isLinux (
import ./dev/checks.nix {
inherit self pkgs;
prefix = "nixos";
}
))
// (lib.optionalAttrs isLinux (
import ./dev/checks.nix {
inherit self;
pkgs = import inputs.nixos-stable { inherit system; };
prefix = "nixos-stable";
}
));
devShells = lib.optionalAttrs defaultPlatform {
mkdocs = pkgs.mkShellNoCC {
packages = [
inputs.mkdocs-numtide.packages.${system}.default
];
};
mkdocs = pkgs.mkShellNoCC { packages = [ inputs.mkdocs-numtide.packages.${system}.default ]; };
};
packages = {
update-dev-private-narHash = pkgs.writeScriptBin "update-dev-private-narHash" ''
nix flake lock ./dev/private
nix hash path ./dev/private > ./dev/private.narHash
'';
} // lib.optionalAttrs defaultPlatform {
docs = inputs.mkdocs-numtide.lib.${system}.mkDocs {
name = "srvos";
src = self;
packages =
{
update-dev-private-narHash = pkgs.writeScriptBin "update-dev-private-narHash" ''
nix flake lock ./dev/private
nix hash path ./dev/private > ./dev/private.narHash
'';
}
// lib.optionalAttrs defaultPlatform {
docs = inputs.mkdocs-numtide.lib.${system}.mkDocs {
name = "srvos";
src = self;
};
};
};
pre-commit = {
check.enable = defaultPlatform;
settings.hooks.dev-private-narHash = {

View File

@ -26,9 +26,7 @@
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
# - For systemd in initrd we now have systemd-repart, but many images still set boot.growPartition
boot.initrd.systemd.enable = lib.mkDefault (
!config.boot.swraid.enable &&
!config.boot.isContainer &&
!config.boot.growPartition
!config.boot.swraid.enable && !config.boot.isContainer && !config.boot.growPartition
);
# Work around for https://github.com/NixOS/nixpkgs/issues/124215

View File

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.srvos;
@ -44,10 +49,14 @@ in
services.telegraf.extraConfig.inputs.file =
let
inputsWithDate = lib.filterAttrs (_: input: input ? lastModified) cfg.flake.inputs;
flakeAttrs = input: (lib.mapAttrsToList (n: v: ''${n}="${v}"'')
(lib.filterAttrs (_: v: (builtins.typeOf v) == "string") input));
lastModified = name: input: ''
flake_input_last_modified{input="${name}",${lib.concatStringsSep "," (flakeAttrs input)}} ${toString input.lastModified}'';
flakeAttrs =
input:
(lib.mapAttrsToList (n: v: ''${n}="${v}"'') (
lib.filterAttrs (_: v: (builtins.typeOf v) == "string") input
));
lastModified =
name: input:
''flake_input_last_modified{input="${name}",${lib.concatStringsSep "," (flakeAttrs input)}} ${toString input.lastModified}'';
# avoid adding store path references on flakes which we do not need at runtime.
promText = builtins.unsafeDiscardStringContext ''
@ -59,9 +68,7 @@ in
[
{
data_format = "prometheus";
files = [
(pkgs.writeText "flake-inputs.prom" promText)
];
files = [ (pkgs.writeText "flake-inputs.prom" promText) ];
}
];
};

View File

@ -4,11 +4,12 @@
nix.settings.connect-timeout = 5;
# Enable flakes
nix.settings.experimental-features = [
"nix-command"
"flakes"
] ++ lib.optional (lib.versionOlder (lib.versions.majorMinor config.nix.package.version) "2.22")
"repl-flake";
nix.settings.experimental-features =
[
"nix-command"
"flakes"
]
++ lib.optional (lib.versionOlder (lib.versions.majorMinor config.nix.package.version) "2.22") "repl-flake";
# The default at 10 is rarely enough.
nix.settings.log-lines = lib.mkDefault 25;

View File

@ -23,12 +23,12 @@
# However, those match blocks cannot be put after other `extraConfig` lines
# with the current sshd config module, even though that ordering is what the
# sshd config parser mandates.
authorizedKeysFiles = lib.mkIf
(!config.services.gitea.enable
&& !config.services.gitlab.enable
&& !config.services.gitolite.enable
&& !config.services.gerrit.enable
&& !config.services.forgejo.enable)
(lib.mkForce [ "/etc/ssh/authorized_keys.d/%u" ]);
authorizedKeysFiles = lib.mkIf (
!config.services.gitea.enable
&& !config.services.gitlab.enable
&& !config.services.gitolite.enable
&& !config.services.gerrit.enable
&& !config.services.forgejo.enable
) (lib.mkForce [ "/etc/ssh/authorized_keys.d/%u" ]);
};
}

View File

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
# Based on https://unix.stackexchange.com/questions/16578/resizable-serial-console-window
resize = pkgs.writeShellScriptBin "resize" ''
@ -34,10 +39,10 @@ in
srvos.boot.consoles = lib.mkOption {
type = lib.types.listOf lib.types.str;
default =
[ "ttyS0,115200" ] ++
(lib.optional (pkgs.stdenv.hostPlatform.isAarch) "ttyAMA0,115200") ++
(lib.optional (pkgs.stdenv.hostPlatform.isRiscV64) "ttySIF0,115200") ++
[ "tty0" ];
[ "ttyS0,115200" ]
++ (lib.optional (pkgs.stdenv.hostPlatform.isAarch) "ttyAMA0,115200")
++ (lib.optional (pkgs.stdenv.hostPlatform.isRiscV64) "ttySIF0,115200")
++ [ "tty0" ];
example = [ "ttyS2,115200" ];
description = lib.mdDoc ''
The Linux kernel console option allows you to configure various devices as

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
imports = [
../common
../mixins/mdns.nix

View File

@ -1,7 +1,5 @@
{
imports = [
./.
];
imports = [ ./. ];
config = {
# arm uses EFI, so we need systemd-boot

View File

@ -1,4 +1,10 @@
{ config, modulesPath, lib, pkgs, ... }:
{
config,
modulesPath,
lib,
pkgs,
...
}:
{
imports = [
../../mixins/cloud-init.nix
@ -9,7 +15,10 @@
boot.growPartition = true;
boot.loader.grub.device = "/dev/sda";
fileSystems."/" = lib.mkDefault { device = "/dev/sda1"; fsType = "ext4"; };
fileSystems."/" = lib.mkDefault {
device = "/dev/sda1";
fsType = "ext4";
};
networking.useNetworkd = true;
networking.useDHCP = false;

View File

@ -1,8 +1,8 @@
{ lib, config, ... }: {
{ lib, config, ... }:
{
imports = [ ./. ];
boot.kernelModules = [ "kvm-amd" ];
hardware.cpu.amd.updateMicrocode =
lib.mkDefault config.hardware.enableRedistributableFirmware;
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1,3 +1 @@
{
imports = [ ./. ];
}
{ imports = [ ./. ]; }

View File

@ -1,46 +1,52 @@
{ lib, config, options, modulesPath, ... }:
{
imports = [
"${modulesPath}/installer/scan/not-detected.nix"
];
lib,
config,
options,
modulesPath,
...
}:
{
imports = [ "${modulesPath}/installer/scan/not-detected.nix" ];
config = {
assertions = [
{
assertion = config.systemd.network.networks."10-uplink".networkConfig ? Address;
message = ''
The machine IPv6 address must be set to
`systemd.network.networks."10-uplink".networkConfig.Address`
'';
}
];
config =
{
assertions = [
{
assertion = config.systemd.network.networks."10-uplink".networkConfig ? Address;
message = ''
The machine IPv6 address must be set to
`systemd.network.networks."10-uplink".networkConfig.Address`
'';
}
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
# SATA SSDs/HDDs
"sd_mod"
# NVME
"nvme"
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
# SATA SSDs/HDDs
"sd_mod"
# NVME
"nvme"
];
networking.useNetworkd = true;
networking.useDHCP = false;
networking.useNetworkd = true;
networking.useDHCP = false;
systemd.network.networks."10-uplink" = {
matchConfig.Name = lib.mkDefault "en* eth0";
networkConfig.DHCP = "ipv4";
# hetzner requires static ipv6 addresses
networkConfig.Gateway = "fe80::1";
networkConfig.IPv6AcceptRA = "no";
};
systemd.network.networks."10-uplink" = {
matchConfig.Name = lib.mkDefault "en* eth0";
networkConfig.DHCP = "ipv4";
# hetzner requires static ipv6 addresses
networkConfig.Gateway = "fe80::1";
networkConfig.IPv6AcceptRA = "no";
};
# Network configuration i.e. when we unlock machines with openssh in the initrd
boot.initrd.systemd.network.networks."10-uplink" = config.systemd.network.networks."10-uplink";
# Network configuration i.e. when we unlock machines with openssh in the initrd
boot.initrd.systemd.network.networks."10-uplink" = config.systemd.network.networks."10-uplink";
} // (lib.optionalAttrs ((options.srvos.boot or { }) ? consoles) {
}
// (lib.optionalAttrs ((options.srvos.boot or { }) ? consoles) {
# To make hetzner kvm console work. It uses VGA rather than serial. Serial leads to nowhere.
srvos.boot.consoles = lib.mkDefault [ ];
});
# To make hetzner kvm console work. It uses VGA rather than serial. Serial leads to nowhere.
srvos.boot.consoles = lib.mkDefault [ ];
});
}

View File

@ -1,7 +1,5 @@
{
imports = [
./intel.nix
];
imports = [ ./intel.nix ];
# It looks like the Intel i9-13900 draws too much power for a short moment when running parallel load.
# Changing from "performance" to "powersave" governor helps to avoid this.
powerManagement.cpuFreqGovernor = "powersave";

View File

@ -1,8 +1,8 @@
{ lib, config, ... }: {
{ lib, config, ... }:
{
imports = [ ./. ];
boot.kernelModules = [ "kvm-intel" ];
hardware.cpu.intel.updateMicrocode =
lib.mkDefault config.hardware.enableRedistributableFirmware;
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1,16 +1,17 @@
{ options, lib, ... }: {
imports = [
../../mixins/cloud-init.nix
];
{ options, lib, ... }:
{
imports = [ ../../mixins/cloud-init.nix ];
config = {
config =
{
services.cloud-init.settings.datasource_list = [ "Vultr" ];
services.cloud-init.settings.datasource.Vultr = { };
services.cloud-init.settings.datasource_list = [ "Vultr" ];
services.cloud-init.settings.datasource.Vultr = { };
} // (lib.optionalAttrs ((options.srvos.boot or { }) ? consoles) {
# tty1 is used by all of the servers so we don't want a serial console
srvos.boot.consoles = lib.mkDefault [ ];
});
}
// (lib.optionalAttrs ((options.srvos.boot or { }) ? consoles) {
# tty1 is used by all of the servers so we don't want a serial console
srvos.boot.consoles = lib.mkDefault [ ];
});
}

View File

@ -1,18 +1,26 @@
{ lib, config, ... }:
{
services.cloud-init = {
enable = lib.mkDefault true;
network.enable = lib.mkDefault true;
services.cloud-init =
{
enable = lib.mkDefault true;
network.enable = lib.mkDefault true;
# Never flush the host's SSH keys. See #148. Since we build the images
# using NixOS, that kind of issue shouldn't happen to us.
settings.ssh_deletekeys = lib.mkDefault false;
# Never flush the host's SSH keys. See #148. Since we build the images
# using NixOS, that kind of issue shouldn't happen to us.
settings.ssh_deletekeys = lib.mkDefault false;
## Automatically enable the filesystems that are used
} // (lib.genAttrs ([ "btrfs" "ext4" "xfs" ])
(fsName: {
enable = lib.mkDefault (lib.any (fs: fs.fsType == fsName) (lib.attrValues config.fileSystems));
}));
## Automatically enable the filesystems that are used
}
// (lib.genAttrs
([
"btrfs"
"ext4"
"xfs"
])
(fsName: {
enable = lib.mkDefault (lib.any (fs: fs.fsType == fsName) (lib.attrValues config.fileSystems));
})
);
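# For illustration only (hypothetical host config, not part of this module): with
#   fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; };
# the genAttrs expression above merges in
#   services.cloud-init.ext4.enable = lib.mkDefault true;
# while the btrfs and xfs attributes evaluate to lib.mkDefault false.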
networking.useNetworkd = lib.mkDefault true;
networking.useDHCP = lib.mkDefault false;

View File

@ -1,6 +1,10 @@
{ config, lib, ... }: {
{ config, lib, ... }:
{
networking.firewall.allowedTCPPorts = [ 443 80 ];
networking.firewall.allowedTCPPorts = [
443
80
];
services.nginx = {
enable = true;
@ -21,8 +25,12 @@
let
isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
cloudflare = [ "1.1.1.1" "2606:4700:4700::1111" ];
resolvers = if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
cloudflare = [
"1.1.1.1"
"2606:4700:4700::1111"
];
resolvers =
if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
in
map escapeIPv6 resolvers;
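# Worked example (values are illustrative): with networking.nameservers = [ ] the resolvers fall
# back to the Cloudflare list above, and escapeIPv6 rewrites "2606:4700:4700::1111" as
# "[2606:4700:4700::1111]" so nginx's resolver directive accepts the IPv6 address.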

View File

@ -1,20 +1,21 @@
{ lib, config, ... }:
{
# Enable flakes
nix.settings.experimental-features = [
# support for running containers in builds
"auto-allocate-uids"
"cgroups"
nix.settings.experimental-features =
[
# support for running containers in builds
"auto-allocate-uids"
"cgroups"
# Enable the use of the fetchClosure built-in function in the Nix language.
"fetch-closure"
# Enable the use of the fetchClosure built-in function in the Nix language.
"fetch-closure"
# Allow derivation builders to call Nix, and thus build derivations recursively.
"recursive-nix"
] ++ lib.optional (lib.versionAtLeast (lib.versions.majorMinor config.nix.package.version) "2.19")
# Allow the use of the impure-env setting.
"configurable-impure-env";
# Allow derivation builders to call Nix, and thus build derivations recursively.
"recursive-nix"
]
++ lib.optional (lib.versionAtLeast (lib.versions.majorMinor config.nix.package.version) "2.19")
# Allow the use of the impure-env setting.
"configurable-impure-env";
# no longer need to pre-allocate build users for everything
nix.settings.auto-allocate-uids = true;

View File

@ -1,10 +1,17 @@
{ pkgs, lib, config, ... }:
{
pkgs,
lib,
config,
...
}:
# To use this module you also need to allow port 9273 either on the internet or on a vpn interface
# i.e. networking.firewall.interfaces."vpn0".allowedTCPPorts = [ 9273 ];
# Example prometheus alert rules:
# - https://github.com/Mic92/dotfiles/blob/master/nixos/eva/modules/prometheus/alert-rules.nix
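# A minimal sketch (hypothetical interface and host names, not part of this commit) of how the
# exporter exposed below on :9273 might be opened up and scraped:
#   networking.firewall.interfaces."vpn0".allowedTCPPorts = [ 9273 ];
#   services.prometheus.scrapeConfigs = [
#     {
#       job_name = "telegraf";
#       static_configs = [ { targets = [ "my-host.example.org:9273" ]; } ];
#     }
#   ];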
let
isVM = lib.any (mod: mod == "xen-blkfront" || mod == "virtio_console") config.boot.initrd.kernelModules;
isVM = lib.any (
mod: mod == "xen-blkfront" || mod == "virtio_console"
) config.boot.initrd.kernelModules;
# potentially wrong if the nvme is not used at boot...
hasNvme = lib.any (m: m == "nvme") config.boot.initrd.availableKernelModules;
@ -15,8 +22,8 @@ let
${pkgs.jq}/bin/jq -r 'map(.addr_info) | flatten(1) | map(select(.dadfailed == true)) | map(.local) | @text "ipv6_dad_failures count=\(length)i"'
'';
zfsChecks = lib.optional (supportsFs "zfs")
(pkgs.writeScript "zpool-health" ''
zfsChecks = lib.optional (supportsFs "zfs") (
pkgs.writeScript "zpool-health" ''
#!${pkgs.gawk}/bin/awk -f
BEGIN {
while ("${pkgs.zfs}/bin/zpool status" | getline) {
@ -27,12 +34,19 @@ let
}
}
}
'');
''
);
nfsChecks =
let
collectHosts = shares: fs:
if builtins.elem fs.fsType [ "nfs" "nfs3" "nfs4" ]
collectHosts =
shares: fs:
if
builtins.elem fs.fsType [
"nfs"
"nfs3"
"nfs4"
]
then
shares
// (
@ -46,33 +60,32 @@ let
${host} = (shares.${host} or [ ]) ++ [ path ];
}
)
else shares;
else
shares;
nfsHosts = lib.foldl collectHosts { } (builtins.attrValues config.fileSystems);
in
lib.mapAttrsToList
(
host: args:
(pkgs.writeScript "nfs-health" ''
#!${pkgs.gawk}/bin/awk -f
BEGIN {
for (i = 2; i < ARGC; i++) {
mounts[ARGV[i]] = 1
}
while ("${pkgs.nfs-utils}/bin/showmount -e " ARGV[1] | getline) {
if (NR == 1) { continue }
if (mounts[$1] == 1) {
printf "nfs_export,host=%s,path=%s present=1\n", ARGV[1], $1
}
delete mounts[$1]
}
for (mount in mounts) {
printf "nfs_export,host=%s,path=%s present=0\n", ARGV[1], $1
}
lib.mapAttrsToList (
host: args:
(pkgs.writeScript "nfs-health" ''
#!${pkgs.gawk}/bin/awk -f
BEGIN {
for (i = 2; i < ARGC; i++) {
mounts[ARGV[i]] = 1
}
while ("${pkgs.nfs-utils}/bin/showmount -e " ARGV[1] | getline) {
if (NR == 1) { continue }
if (mounts[$1] == 1) {
printf "nfs_export,host=%s,path=%s present=1\n", ARGV[1], $1
}
'')
+ " ${host} ${builtins.concatStringsSep " " args}"
)
nfsHosts;
delete mounts[$1]
}
for (mount in mounts) {
printf "nfs_export,host=%s,path=%s present=0\n", ARGV[1], $1
}
}
'')
+ " ${host} ${builtins.concatStringsSep " " args}"
) nfsHosts;
in
{
@ -84,17 +97,15 @@ in
extraConfig = {
agent.interval = "60s";
inputs = {
prometheus = lib.mkIf config.services.promtail.enable [{
urls = [ "http://localhost:9080/metrics" ]; # default promtail port
metric_version = 2;
}];
kernel_vmstat = { };
nginx.urls = lib.mkIf config.services.nginx.statusPage [
"http://localhost/nginx_status"
prometheus = lib.mkIf config.services.promtail.enable [
{
urls = [ "http://localhost:9080/metrics" ]; # default promtail port
metric_version = 2;
}
];
smart = lib.mkIf (!isVM) {
path_smartctl = "/run/wrappers/bin/smartctl-telegraf";
};
kernel_vmstat = { };
nginx.urls = lib.mkIf config.services.nginx.statusPage [ "http://localhost/nginx_status" ];
smart = lib.mkIf (!isVM) { path_smartctl = "/run/wrappers/bin/smartctl-telegraf"; };
system = { };
mem = { };
file =
@ -113,27 +124,37 @@ in
exec = [
{
## Commands array
commands =
[ ipv6DadCheck ]
++ zfsChecks
++ nfsChecks;
commands = [ ipv6DadCheck ] ++ zfsChecks ++ nfsChecks;
data_format = "influx";
}
];
systemd_units = { };
swap = { };
disk.tagdrop = {
fstype = [ "tmpfs" "ramfs" "devtmpfs" "devfs" "iso9660" "overlay" "aufs" "squashfs" "efivarfs" ];
device = [ "rpc_pipefs" "lxcfs" "nsfs" "borgfs" ];
fstype = [
"tmpfs"
"ramfs"
"devtmpfs"
"devfs"
"iso9660"
"overlay"
"aufs"
"squashfs"
"efivarfs"
];
device = [
"rpc_pipefs"
"lxcfs"
"nsfs"
"borgfs"
];
};
diskio = { };
internal = { };
zfs = {
poolMetrics = true;
};
} // lib.optionalAttrs config.boot.swraid.enable {
mdstat = { };
};
} // lib.optionalAttrs config.boot.swraid.enable { mdstat = { }; };
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
@ -148,7 +169,5 @@ in
};
# create dummy file to avoid telegraf errors
systemd.tmpfiles.rules = [
"f /var/log/telegraf/dummy 0444 root root - -"
];
systemd.tmpfiles.rules = [ "f /var/log/telegraf/dummy 0444 root root - -" ];
}

View File

@ -2,13 +2,15 @@
{
# various terminfo packages
environment.systemPackages = [
pkgs.wezterm.terminfo # this one does not need compilation
# avoid compiling desktop stuff when cross-compiling NixOS
] ++ lib.optionals (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) [
pkgs.termite.terminfo
# Too unstable
# pkgs.kitty.terminfo
pkgs.foot.terminfo
];
environment.systemPackages =
[
pkgs.wezterm.terminfo # this one does not need compilation
# avoid compiling desktop stuff when cross-compiling NixOS
]
++ lib.optionals (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) [
pkgs.termite.terminfo
# Too unstable
# pkgs.kitty.terminfo
pkgs.foot.terminfo
];
}

View File

@ -1,7 +1,5 @@
{ pkgs
, config
, ...
}: {
{ pkgs, config, ... }:
{
programs.bcc.enable = !pkgs.stdenv.hostPlatform.isRiscV;
programs.sysdig.enable = !pkgs.stdenv.isAarch64 && !pkgs.stdenv.hostPlatform.isRiscV;

View File

@ -1,7 +1,8 @@
{ config
, pkgs
, lib
, ...
{
config,
pkgs,
lib,
...
}@args:
with lib;
@ -12,12 +13,17 @@ in
{
options.services.srvos-github-runners = mkOption {
default = { };
type = with types; attrsOf (submodule {
options = import ./options.nix (args // {
# services.github-runners.${name}.name doesn't have a default; it falls back to ${name} below.
includeNameDefault = false;
type =
with types;
attrsOf (submodule {
options = import ./options.nix (
args
// {
# services.github-runners.${name}.name doesn't have a default; it falls back to ${name} below.
includeNameDefault = false;
}
);
});
});
example = {
runner1 = {
enable = true;
@ -40,33 +46,32 @@ in
config = {
assertions =
(mapAttrsToList
(_name: c:
{
assertion = !(c.tokenFile == null && c.githubApp == null);
message = "Missing token file or github app private key file. Specify path either for token in `tokenFile` either for github app private key File in `githubApp.privateKeyFile`";
})
cfg) ++
(mapAttrsToList
(name: c:
{
assertion = !(c.githubApp != null && c.tokenFile != null);
message = "${name}:Cannot set both tokenFile and github app private key file. Specify path either for token in `tokenFile` either for github app private key File in `githubApp.privateKeyFile`";
})
cfg);
(mapAttrsToList (_name: c: {
assertion = !(c.tokenFile == null && c.githubApp == null);
message = "Missing token file or github app private key file. Specify path either for token in `tokenFile` either for github app private key File in `githubApp.privateKeyFile`";
}) cfg)
++ (mapAttrsToList (name: c: {
assertion = !(c.githubApp != null && c.tokenFile != null);
message = "${name}:Cannot set both tokenFile and github app private key file. Specify path either for token in `tokenFile` either for github app private key File in `githubApp.privateKeyFile`";
}) cfg);
systemd.services = flip mapAttrs' cfg (n: v:
systemd.services = flip mapAttrs' cfg (
n: v:
let
svcName = "github-runner-${n}";
in
nameValuePair svcName
(import ./service.nix (args // {
inherit svcName;
cfg = v // {
name = if v.name != null then v.name else n;
};
systemdDir = "github-runner/${n}";
}))
nameValuePair svcName (
import ./service.nix (
args
// {
inherit svcName;
cfg = v // {
name = if v.name != null then v.name else n;
};
systemdDir = "github-runner/${n}";
}
)
)
);
};
}

View File

@ -1,8 +1,9 @@
{ config
, lib
, pkgs
, includeNameDefault
, ...
{
config,
lib,
pkgs,
includeNameDefault,
...
}:
with lib;
@ -62,24 +63,26 @@ with lib;
description = lib.mdDoc ''
Authenticate runners using GitHub App
'';
type = lib.types.nullOr (types.submodule {
options = {
id = mkOption {
type = types.str;
description = lib.mdDoc "GitHub App ID";
type = lib.types.nullOr (
types.submodule {
options = {
id = mkOption {
type = types.str;
description = lib.mdDoc "GitHub App ID";
};
login = mkOption {
type = types.str;
description = lib.mdDoc "GitHub login used to register the application";
};
privateKeyFile = mkOption {
type = types.path;
description = lib.mdDoc ''
The full path to a file containing the GitHub App private key.
'';
};
};
login = mkOption {
type = types.str;
description = lib.mdDoc "GitHub login used to register the application";
};
privateKeyFile = mkOption {
type = types.path;
description = lib.mdDoc ''
The full path to a file containing the GitHub App private key.
'';
};
};
});
}
);
};
name =
@ -87,21 +90,24 @@ with lib;
# Same pattern as for `networking.hostName`
baseType = types.strMatching "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
in
mkOption
{
type = if includeNameDefault then baseType else types.nullOr baseType;
description = lib.mdDoc ''
Name of the runner to configure. Defaults to the hostname.
mkOption {
type = if includeNameDefault then baseType else types.nullOr baseType;
description = lib.mdDoc ''
Name of the runner to configure. Defaults to the hostname.
Changing this option triggers a new runner registration.
'';
example = "nixos";
} // (if includeNameDefault then {
default = config.networking.hostName;
defaultText = literalExpression "config.networking.hostName";
} else {
default = null;
});
Changing this option triggers a new runner registration.
'';
example = "nixos";
}
// (
if includeNameDefault then
{
default = config.networking.hostName;
defaultText = literalExpression "config.networking.hostName";
}
else
{ default = null; }
);
runnerGroup = mkOption {
type = types.nullOr types.str;
@ -201,7 +207,12 @@ with lib;
};
nodeRuntimes = mkOption {
type = with types; nonEmptyListOf (enum [ "node16" "node20" ]);
type =
with types;
nonEmptyListOf (enum [
"node16"
"node20"
]);
default = [ "node20" ];
description = mdDoc ''
List of Node.js runtimes the runner should support.

View File

@ -1,21 +1,22 @@
{ config
, lib
, pkgs
{
config,
lib,
pkgs,
, cfg ? config.profiles.srvos-github-runner
, svcName
cfg ? config.profiles.srvos-github-runner,
svcName,
, systemdDir ? "${svcName}/${cfg.name}"
systemdDir ? "${svcName}/${cfg.name}",
# %t: Runtime directory root (usually /run); see systemd.unit(5)
, runtimeDir ? "%t/${systemdDir}"
runtimeDir ? "%t/${systemdDir}",
# %S: State directory root (usually /var/lib); see systemd.unit(5)
, stateDir ? "%S/${systemdDir}"
stateDir ? "%S/${systemdDir}",
# %L: Log directory root (usually /var/log); see systemd.unit(5)
, logsDir ? "%L/${systemdDir}"
logsDir ? "%L/${systemdDir}",
# Name of file stored in service state directory
, currentConfigTokenFilename ? ".current-token"
currentConfigTokenFilename ? ".current-token",
, ...
...
}:
with lib;
@ -34,15 +35,16 @@ in
RUNNER_ROOT = stateDir;
} // cfg.extraEnvironment;
path = (with pkgs; [
bash
coreutils
git
gnutar
gzip
]) ++ [
config.nix.package
] ++ cfg.extraPackages;
path =
(with pkgs; [
bash
coreutils
git
gnutar
gzip
])
++ [ config.nix.package ]
++ cfg.extraPackages;
serviceConfig =
let
@ -54,15 +56,17 @@ in
# to contain more than one directory. This causes systemd to set the respective
# environment variables with the path of all of the given directories, separated
# by a colon.
writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
set -euo pipefail
writeScript =
name: lines:
pkgs.writeShellScript "${svcName}-${name}.sh" ''
set -euo pipefail
STATE_DIRECTORY="$1"
RUNTIME_DIRECTORY="$2"
LOGS_DIRECTORY="$3"
STATE_DIRECTORY="$1"
RUNTIME_DIRECTORY="$2"
LOGS_DIRECTORY="$3"
${lines}
'';
${lines}
'';
in
{
ExecStart = "${package}/bin/Runner.Listener run --startuptype service";
@ -76,7 +80,14 @@ in
# - Set up the directory structure by creating the necessary symlinks.
ExecStartPre =
let
runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" ] cfg;
runnerRegistrationConfig = getAttrs [
"name"
"tokenFile"
"url"
"runnerGroup"
"extraLabels"
"ephemeral"
] cfg;
newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
newConfigTokenPath = "$STATE_DIRECTORY/.new-token";
@ -89,19 +100,29 @@ in
app_token = pkgs.writeShellApplication {
name = "fetch_access_token";
runtimeInputs = with pkgs;[ jq openssl curl ];
runtimeInputs = with pkgs; [
jq
openssl
curl
];
text = ./app_token.sh;
};
token = pkgs.writeShellApplication {
name = "fetch_runner_token";
runtimeInputs = with pkgs;[ jq curl ];
runtimeInputs = with pkgs; [
jq
curl
];
text = ./token.sh;
};
remove_existing_runner = pkgs.writeShellApplication {
name = "remove_existing_runner";
runtimeInputs = with pkgs;[ jq curl ];
runtimeInputs = with pkgs; [
jq
curl
];
text = ./remove_existing_runner.sh;
};
@ -213,12 +234,25 @@ in
ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$RUNTIME_DIRECTORY/"
'';
in
map (x: "${x} ${escapeShellArgs [ stateDir runtimeDir logsDir ]}") (builtins.filter (x: x != "") [
(optionalString (!isNull cfg.githubApp) "+${unconfigureRunnerGitHubApp}") # runs as root
(optionalString (isNull cfg.githubApp) "+${unconfigureRunner}") # runs as root
configureRunner
setupRuntimeDir
]);
map
(
x:
"${x} ${
escapeShellArgs [
stateDir
runtimeDir
logsDir
]
}"
)
(
builtins.filter (x: x != "") [
(optionalString (!isNull cfg.githubApp) "+${unconfigureRunnerGitHubApp}") # runs as root
(optionalString (isNull cfg.githubApp) "+${unconfigureRunner}") # runs as root
configureRunner
setupRuntimeDir
]
);
ExecStopPost =
let
@ -226,10 +260,16 @@ in
RUNNER_ALLOW_RUNASROOT=1 ${package}/bin/config.sh remove --token "$(cat ${currentConfigTokenPath})" || true
'';
in
map (x: "${x} ${escapeShellArgs [ stateDir runtimeDir logsDir ]}") [
(optionalString (!isNull cfg.githubApp) "-+${unregisterScript}")
];
map (
x:
"${x} ${
escapeShellArgs [
stateDir
runtimeDir
logsDir
]
}"
) [ (optionalString (!isNull cfg.githubApp) "-+${unregisterScript}") ];
# If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner)
# to trigger a fresh registration.
@ -295,7 +335,12 @@ in
"~setdomainname"
"~sethostname"
];
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
"AF_NETLINK"
];
# Needs network access
PrivateNetwork = false;
@ -316,7 +361,7 @@ in
# Note that this has some interactions with the User setting; so you may
# want to consult the systemd docs if using both.
DynamicUser = true;
} // (
lib.optionalAttrs (cfg.user != null) { User = cfg.user; }
) // cfg.serviceOverrides;
}
// (lib.optionalAttrs (cfg.user != null) { User = cfg.user; })
// cfg.serviceOverrides;
}

View File

@ -1,4 +1,9 @@
{ lib, config, pkgs, ... }:
{
lib,
config,
pkgs,
...
}:
let
cfg = config.roles.github-actions-runner;
queued-build-hook = builtins.fetchTarball {
@ -44,30 +49,31 @@ in
default = true;
};
githubApp = lib.mkOption {
default = null;
description = lib.mdDoc ''
Authenticate runners using GitHub App
'';
type = lib.types.nullOr (lib.types.submodule {
options = {
id = lib.mkOption {
type = lib.types.str;
description = lib.mdDoc "GitHub App ID";
type = lib.types.nullOr (
lib.types.submodule {
options = {
id = lib.mkOption {
type = lib.types.str;
description = lib.mdDoc "GitHub App ID";
};
login = lib.mkOption {
type = lib.types.str;
description = lib.mdDoc "GitHub login used to register the application";
};
privateKeyFile = lib.mkOption {
type = lib.types.path;
description = lib.mdDoc ''
The full path to a file containing the GitHub App private key.
'';
};
};
login = lib.mkOption {
type = lib.types.str;
description = lib.mdDoc "GitHub login used to register the application";
};
privateKeyFile = lib.mkOption {
type = lib.types.path;
description = lib.mdDoc ''
The full path to a file containing the GitHub App private key.
'';
};
};
});
}
);
};
name = lib.mkOption {
@ -155,7 +161,12 @@ in
};
nodeRuntimes = lib.mkOption {
type = with lib.types; nonEmptyListOf (enum [ "node16" "node20" ]);
type =
with lib.types;
nonEmptyListOf (enum [
"node16"
"node20"
]);
default = [ "node20" ];
description = lib.mdDoc ''
List of Node.js runtimes the runner should support.
@ -165,8 +176,8 @@ in
config = lib.mkIf (cfg.url != null) {
users.groups.github-runner = lib.mkIf (cfg.extraReadWritePaths != [ ]) { };
services.srvos-github-runners = builtins.listToAttrs (map
(n: rec {
services.srvos-github-runners = builtins.listToAttrs (
map (n: rec {
name = "${cfg.name}-${toString n}";
value = {
inherit name;
@ -177,13 +188,15 @@ in
githubApp = cfg.githubApp;
ephemeral = cfg.ephemeral;
nodeRuntimes = cfg.nodeRuntimes;
serviceOverrides = {
DeviceAllow = [ "/dev/kvm" ];
PrivateDevices = false;
} // (lib.optionalAttrs (cfg.extraReadWritePaths != [ ]) {
ReadWritePaths = cfg.extraReadWritePaths;
Group = [ "github-runner" ];
});
serviceOverrides =
{
DeviceAllow = [ "/dev/kvm" ];
PrivateDevices = false;
}
// (lib.optionalAttrs (cfg.extraReadWritePaths != [ ]) {
ReadWritePaths = cfg.extraReadWritePaths;
Group = [ "github-runner" ];
});
extraPackages = [
pkgs.cachix
pkgs.glibc.bin
@ -194,8 +207,8 @@ in
] ++ cfg.extraPackages;
extraLabels = cfg.extraLabels;
};
})
(lib.range 1 cfg.count));
}) (lib.range 1 cfg.count)
);
# Required to run unmodified binaries fetched via dotnet in a dev environment.
programs.nix-ld.enable = true;
@ -208,13 +221,15 @@ in
jobs = 4;
};
queued-build-hook = lib.mkIf (cfg.binary-cache.script != null)
({
queued-build-hook = lib.mkIf (cfg.binary-cache.script != null) (
{
enable = true;
postBuildScriptContent = cfg.binary-cache.script;
credentials = cfg.binary-cache.credentials;
} // (lib.optionalAttrs (cfg.binary-cache.enqueueScript != "") {
}
// (lib.optionalAttrs (cfg.binary-cache.enqueueScript != "") {
enqueueScriptContent = cfg.binary-cache.enqueueScript;
}));
})
);
};
}

View File

@ -1,10 +1,14 @@
{ lib, config, pkgs, ... }:
{
lib,
config,
pkgs,
...
}:
let
cfg = config.roles.nix-remote-builder;
in
{
options.roles.nix-remote-builder = {
schedulerPublicKeys = lib.mkOption {
description = "SSH public keys of the central build scheduler";
@ -23,15 +27,18 @@ in
# Allow more open files for non-root users to run NixOS VM tests.
security.pam.loginLimits = [
{ domain = "*"; item = "nofile"; type = "-"; value = "20480"; }
{
domain = "*";
item = "nofile";
type = "-";
value = "20480";
}
];
# Give restricted SSH access to the build scheduler
users.users.nix-remote-builder.openssh.authorizedKeys.keys = map
(key:
''restrict,command="nix-daemon --stdio" ${key}''
)
cfg.schedulerPublicKeys;
users.users.nix-remote-builder.openssh.authorizedKeys.keys = map (
key: ''restrict,command="nix-daemon --stdio" ${key}''
) cfg.schedulerPublicKeys;
users.users.nix-remote-builder.isNormalUser = true;
users.users.nix-remote-builder.group = "nogroup";
nix.settings.trusted-users = [ "nix-remote-builder" ];

View File

@ -1,106 +1,114 @@
{ lib, pkgs, config, ... }:
{
lib,
pkgs,
config,
...
}:
let
filterEnabled = lib.filterAttrs (_: v: v.enable);
rules.groups = lib.mapAttrsToList
(name: group: {
inherit name;
rules =
(lib.mapAttrsToList
(name: rule: {
alert = rule.name;
expr = rule.expr;
for = rule.for;
labels = rule.labels;
annotations = rule.annotations;
})
(filterEnabled group.alertRules)) ++
(lib.mapAttrsToList
(name: rule: {
record = rule.name;
expr = rule.expr;
labels = rule.labels;
annotations = rule.annotations;
})
(filterEnabled group.recordingRules));
})
config.srvos.prometheus.ruleGroups;
rules.groups = lib.mapAttrsToList (name: group: {
inherit name;
rules =
(lib.mapAttrsToList (name: rule: {
alert = rule.name;
expr = rule.expr;
for = rule.for;
labels = rule.labels;
annotations = rule.annotations;
}) (filterEnabled group.alertRules))
++ (lib.mapAttrsToList (name: rule: {
record = rule.name;
expr = rule.expr;
labels = rule.labels;
annotations = rule.annotations;
}) (filterEnabled group.recordingRules));
}) config.srvos.prometheus.ruleGroups;
in
{
imports = [
./default-alerts.nix
];
imports = [ ./default-alerts.nix ];
options = {
# XXX maybe we move this upstream eventually to nixpkgs. Expect this interface to be replaced with the upstream equivalent.
srvos.prometheus.ruleGroups = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
enable = lib.mkEnableOption (lib.mdDoc "Enable rule group") // {
default = true;
};
alertRules = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
enable = lib.mkEnableOption (lib.mdDoc "Enable alert rule") // {
default = true;
};
expr = lib.mkOption {
type = lib.types.str;
};
for = lib.mkOption {
type = lib.types.str;
default = "2m";
};
labels = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
annotations = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
}));
default = { };
};
recordingRules = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
enable = lib.mkEnableOption (lib.mdDoc "Enable recording rule") // {
default = true;
};
expr = lib.mkOption {
type = lib.types.str;
};
for = lib.mkOption {
type = lib.types.str;
default = "2m";
};
labels = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
annotations = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
enable = lib.mkEnableOption (lib.mdDoc "Enable rule group") // {
default = true;
};
}));
default = { };
};
};
}));
alertRules = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
enable = lib.mkEnableOption (lib.mdDoc "Enable alert rule") // {
default = true;
};
expr = lib.mkOption { type = lib.types.str; };
for = lib.mkOption {
type = lib.types.str;
default = "2m";
};
labels = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
annotations = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
};
}
)
);
default = { };
};
recordingRules = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
};
enable = lib.mkEnableOption (lib.mdDoc "Enable recording rule") // {
default = true;
};
expr = lib.mkOption { type = lib.types.str; };
for = lib.mkOption {
type = lib.types.str;
default = "2m";
};
labels = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
annotations = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
};
}
)
);
default = { };
};
};
}
)
);
example = {
prometheusAlerts = {
alertRules = {

View File

@ -3,9 +3,7 @@
{ pkgs, lib, ... }:
{
imports = [
../common
];
imports = [ ../common ];
environment = {
# List packages installed in system profile.
@ -33,11 +31,13 @@
# No need for fonts on a server
fonts.fontconfig.enable = lib.mkDefault false;
programs.vim = {
defaultEditor = lib.mkDefault true;
} // lib.optionalAttrs (lib.versionAtLeast (lib.versions.majorMinor lib.version) "24.11") {
enable = lib.mkDefault true;
};
programs.vim =
{
defaultEditor = lib.mkDefault true;
}
// lib.optionalAttrs (lib.versionAtLeast (lib.versions.majorMinor lib.version) "24.11") {
enable = lib.mkDefault true;
};
# Make sure firewall is enabled
networking.firewall.enable = true;
@ -46,7 +46,10 @@
networking.hostName = lib.mkDefault "";
# If the user is in @wheel they are trusted by default.
nix.settings.trusted-users = [ "root" "@wheel" ];
nix.settings.trusted-users = [
"root"
"@wheel"
];
security.sudo.wheelNeedsPassword = false;