Merge master into staging-next

Commit 85d3d95c92 by Frederik Rietdijk, 2019-04-24 18:36:52 +02:00
689 changed files with 13215 additions and 6897 deletions

.github/CODEOWNERS

@ -122,6 +122,14 @@
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice
# Hardened profile & related modules
/nixos/modules/profiles/hardened.nix @joachifm
/nixos/modules/security/hidepid.nix @joachifm
/nixos/modules/security/lock-kernel-modules.nix @joachifm
/nixos/modules/security/misc.nix @joachifm
/nixos/tests/hardened.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened-config.nix @joachifm
# Dhall
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch


@ -366,7 +366,7 @@ automatically select the right version of GHC and other build tools to build,
test and execute apps in an existing project downloaded from somewhere on the
Internet. Pass the `--nix` flag to any `stack` command to do so, e.g.
```shell
git clone --recursive http://github.com/yesodweb/wai
git clone --recursive https://github.com/yesodweb/wai
cd wai
stack --nix build
```


@ -445,7 +445,7 @@ buildPythonPackage rec {
};
meta = with lib; {
homepage = "http://github.com/pytoolz/toolz/";
homepage = "https://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
@ -510,7 +510,7 @@ Each interpreter has the following attributes:
### Building packages and applications
Python libraries and applications that use `setuptools` or
`distutils` are typically build with respectively the `buildPythonPackage` and
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions. These two functions also support installing a `wheel`.
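For orientation, a minimal sketch of such an expression (the package name, version and hash below are placeholders, not taken from this commit):

```nix
{ lib, buildPythonPackage, fetchPypi }:

buildPythonPackage rec {
  pname = "example";    # placeholder project name
  version = "1.0.0";
  # Fetch the sdist from PyPI; replace the placeholder hash with the real one.
  src = fetchPypi {
    inherit pname version;
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
  meta = with lib; {
    description = "Example library built with buildPythonPackage";
    license = licenses.mit;
  };
}
```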
All Python packages reside in `pkgs/top-level/python-packages.nix` and all


@ -250,6 +250,6 @@ override to the `pkgs/misc/vim-plugins/default.nix` in the same directory.
- [vim-pi](https://bitbucket.org/vimcommunity/vim-pi) is a plugin repository
from the VAM plugin manager, meant to be used by others as well, and used by
- [vim2nix](http://github.com/MarcWeber/vim-addon-vim2nix) which generates the
- [vim2nix](https://github.com/MarcWeber/vim-addon-vim2nix) which generates the
.nix code


@ -72,16 +72,22 @@ rec {
release = null;
};
kernelArch =
if final.isAarch32 then "arm"
else if final.isAarch64 then "arm64"
else if final.isx86_32 then "x86"
else if final.isx86_64 then "ia64"
else final.parsed.cpu.name;
qemuArch =
if final.isArm then "arm"
else if final.isx86_64 then "x86_64"
else if final.isx86 then "i386"
else {
"powerpc" = "ppc";
"powerpcle" = "ppc";
"powerpc64" = "ppc64";
"powerpc64le" = "ppc64";
"mips64" = "mips";
"mipsel64" = "mipsel";
"powerpc64le" = "ppc64le";
}.${final.parsed.cpu.name} or final.parsed.cpu.name;
emulator = pkgs: let
@ -103,7 +109,7 @@ rec {
in
if final.parsed.kernel.name == pkgs.stdenv.hostPlatform.parsed.kernel.name &&
pkgs.stdenv.hostPlatform.isCompatible final
then "${pkgs.runtimeShell} -c"
then "${pkgs.runtimeShell} -c '\"$@\"' --"
else if final.isWindows
then "${wine}/bin/${wine-name}"
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux


@ -131,11 +131,6 @@ rec {
config = "powerpcle-none-eabi";
libc = "newlib";
};
alpha-embedded = {
config = "alpha-elf";
libc = "newlib";
};
i686-embedded = {
config = "i686-elf";


@ -127,22 +127,42 @@ rec {
(b == i386 && isCompatible a i486)
(b == i486 && isCompatible a i586)
(b == i586 && isCompatible a i686)
# NOTE: Not true in some cases. Like in WSL mode.
# XXX: Not true in some cases. Like in WSL mode.
(b == i686 && isCompatible a x86_64)
# ARM
# ARMv4
(b == arm && isCompatible a armv5tel)
(b == armv5tel && isCompatible a armv6m)
(b == armv6m && isCompatible a armv6l)
(b == armv6l && isCompatible a armv7a)
(b == armv7a && isCompatible a armv7r)
(b == armv7r && isCompatible a armv7m)
(b == armv7m && isCompatible a armv7l)
(b == armv7l && isCompatible a armv8a)
(b == armv8a && isCompatible a armv8r)
(b == armv8r && isCompatible a armv8m)
# NOTE: not always true! Some arm64 cpus dont support arm32 mode.
(b == armv8m && isCompatible a aarch64)
# ARMv5
(b == armv5tel && isCompatible a armv6l)
# ARMv6
(b == armv6l && isCompatible a armv6m)
(b == armv6m && isCompatible a armv7l)
# ARMv7
(b == armv7l && isCompatible a armv7a)
(b == armv7l && isCompatible a armv7r)
(b == armv7l && isCompatible a armv7m)
(b == armv7a && isCompatible a armv8a)
(b == armv7r && isCompatible a armv8a)
(b == armv7m && isCompatible a armv8a)
(b == armv7a && isCompatible a armv8r)
(b == armv7r && isCompatible a armv8r)
(b == armv7m && isCompatible a armv8r)
(b == armv7a && isCompatible a armv8m)
(b == armv7r && isCompatible a armv8m)
(b == armv7m && isCompatible a armv8m)
# ARMv8
(b == armv8r && isCompatible a armv8a)
(b == armv8m && isCompatible a armv8a)
# XXX: not always true! Some arm64 cpus don't support arm32 mode.
(b == aarch64 && a == armv8a)
(b == armv8a && isCompatible a aarch64)
(b == aarch64 && a == aarch64_be)
(b == aarch64_be && isCompatible a aarch64)


@ -365,6 +365,11 @@
github = "ankhers";
name = "Justin Wood";
};
anton-dessiatov = {
email = "anton.dessiatov@gmail.com";
github = "anton-dessiatov";
name = "Anton Desyatov";
};
Anton-Latukha = {
email = "anton.latuka+nixpkgs@gmail.com";
github = "Anton-Latukha";
@ -1313,6 +1318,11 @@
github = "dtzWill";
name = "Will Dietz";
};
dxf = {
email = "dingxiangfei2009@gmail.com";
github = "dingxiangfei2009";
name = "Ding Xiang Fei";
};
dysinger = {
email = "tim@dysinger.net";
github = "dysinger";
@ -1682,6 +1692,11 @@
github = "fps";
name = "Florian Paul Schmidt";
};
fredeb = {
email = "im@fredeb.dev";
github = "fredeeb";
name = "Frede Emil";
};
freepotion = {
email = "freepotion@protonmail.com";
github = "freepotion";
@ -2172,6 +2187,11 @@
github = "jbgi";
name = "Jean-Baptiste Giraudeau";
};
jchw = {
email = "johnwchadwick@gmail.com";
github = "jchv";
name = "John Chadwick";
};
jcumming = {
email = "jack@mudshark.org";
name = "Jack Cummings";
@ -4031,6 +4051,11 @@
github = "renatoGarcia";
name = "Renato Garcia";
};
rencire = {
email = "546296+rencire@users.noreply.github.com";
github = "rencire";
name = "Eric Ren";
};
renzo = {
email = "renzocarbonara@gmail.com";
github = "k0001";
@ -4823,6 +4848,15 @@
github = "the-kenny";
name = "Moritz Ulrich";
};
thesola10 = {
email = "thesola10@bobile.fr";
github = "thesola10";
keys = [{
longkeyid = "rsa4096/0x89245619BEBB95BA";
fingerprint = "1D05 13A6 1AC4 0D8D C6D6 5F2C 8924 5619 BEBB 95BA";
}];
name = "Karim Vergnes";
};
theuni = {
email = "ct@flyingcircus.io";
github = "ctheune";
@ -5421,6 +5455,11 @@
github = "zohl";
name = "Al Zohali";
};
zookatron = {
email = "tim@zookatron.com";
github = "zookatron";
name = "Tim Zook";
};
zoomulator = {
email = "zoomulator@gmail.com";
github = "zoomulator";


@ -68,7 +68,7 @@ in
# Create the tarball
system.build.tarball = import ../../../lib/make-system-tarball.nix {
inherit (pkgs) stdenv perl xz pathsFromGraph;
inherit (pkgs) stdenv closureInfo pixz;
inherit (config.tarball) contents storeContents;
};


@ -188,6 +188,7 @@
./services/audio/snapserver.nix
./services/audio/squeezelite.nix
./services/audio/ympd.nix
./services/backup/automysqlbackup.nix
./services/backup/bacula.nix
./services/backup/borgbackup.nix
./services/backup/duplicati.nix
@ -672,6 +673,7 @@
./services/networking/syncthing-relay.nix
./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix
./services/networking/tedicross.nix
./services/networking/tinc.nix
./services/networking/tinydns.nix
./services/networking/tftpd.nix
@ -705,6 +707,7 @@
./services/search/hound.nix
./services/search/kibana.nix
./services/search/solr.nix
./services/security/bitwarden_rs/default.nix
./services/security/certmgr.nix
./services/security/cfssl.nix
./services/security/clamav.nix


@ -12,14 +12,24 @@ with lib;
type = types.bool;
default = true;
description = ''
Whether to allow creation of user namespaces. A recurring problem
with user namespaces is the presence of code paths where the kernel's
permission checking logic fails to account for namespacing, instead
permitting a namespaced process to act outside the namespace with the
same privileges as it would have inside it. This is particularly
Whether to allow creation of user namespaces.
</para>
<para>
The motivation for disabling user namespaces is the potential
presence of code paths where the kernel's permission checking
logic fails to account for namespacing, instead permitting a
namespaced process to act outside the namespace with the same
privileges as it would have inside it. This is particularly
damaging in the common case of running as root within the namespace.
When user namespace creation is disallowed, attempting to create
a user namespace fails with "no space left on device" (ENOSPC).
</para>
<para>
When user namespace creation is disallowed, attempting to create a
user namespace fails with "no space left on device" (ENOSPC).
root may re-enable user namespace creation at runtime.
</para>
<para>
'';
};
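A minimal usage sketch, assuming the option defined here is exposed as `security.allowUserNamespaces`:

```nix
{
  # Disallow creation of user namespaces; unprivileged attempts such as
  # `unshare --user` will then fail with ENOSPC ("no space left on device").
  security.allowUserNamespaces = false;
}
```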


@ -7,7 +7,7 @@ let
in
{
options.services.oxidized = {
enable = mkEnableOption "the oxidized configuation backup service.";
enable = mkEnableOption "the oxidized configuration backup service";
user = mkOption {
type = types.str;


@ -179,11 +179,11 @@ in {
} // optionalAttrs (cfg.config != "") { RABBITMQ_ADVANCED_CONFIG_FILE = advanced_config_file; };
serviceConfig = {
PermissionsStartOnly = true; # preStart must be run as root
ExecStart = "${cfg.package}/sbin/rabbitmq-server";
ExecStop = "${cfg.package}/sbin/rabbitmqctl shutdown";
User = "rabbitmq";
Group = "rabbitmq";
LogsDirectory = "rabbitmq";
WorkingDirectory = cfg.dataDir;
Type = "notify";
NotifyAccess = "all";
@ -197,11 +197,8 @@ in {
preStart = ''
${optionalString (cfg.cookie != "") ''
echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
chown rabbitmq:rabbitmq ${cfg.dataDir}/.erlang.cookie
chmod 600 ${cfg.dataDir}/.erlang.cookie
''}
mkdir -p /var/log/rabbitmq
chown rabbitmq:rabbitmq /var/log/rabbitmq
'';
};
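For context on this recurring change: systemd's `LogsDirectory=`, `StateDirectory=` and `RuntimeDirectory=` directives have systemd create the named directory under /var/log, /var/lib and /run respectively, owned by the unit's User/Group, before the service starts, which is what the removed mkdir/chown preStart snippets (and `PermissionsStartOnly`) did by hand. A sketch with a hypothetical service name:

```nix
systemd.services.example.serviceConfig = {
  User = "example";
  Group = "example";
  StateDirectory = "example";    # /var/lib/example, created and chowned by systemd
  LogsDirectory = "example";     # /var/log/example
  RuntimeDirectory = "example";  # /run/example, removed again when the unit stops
};
```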


@ -14,15 +14,10 @@ let
description = "${name} liquidsoap stream";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.wget ];
preStart =
''
mkdir -p /var/log/liquidsoap
chown liquidsoap -R /var/log/liquidsoap
'';
serviceConfig = {
PermissionsStartOnly="true";
ExecStart = "${pkgs.liquidsoap}/bin/liquidsoap ${stream}";
User = "liquidsoap";
LogsDirectory = "liquidsoap";
};
};
};


@ -158,18 +158,18 @@ in {
};
};
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.playlistDirectory}' - ${cfg.user} ${cfg.group} - -"
];
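The entries above use the `tmpfiles.d(5)` column format, where `-` means "use the default / leave unset":

```nix
# Columns: Type  Path  Mode  User  Group  Age  Argument
# "d" creates the directory at boot if missing and fixes its ownership.
systemd.tmpfiles.rules = [
  "d '/var/lib/example' 0700 example example - -"   # hypothetical service directory
];
```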
systemd.services.mpd = {
after = [ "network.target" "sound.target" ];
description = "Music Player Daemon";
wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
preStart = ''
mkdir -p "${cfg.dataDir}" && chown -R ${cfg.user}:${cfg.group} "${cfg.dataDir}"
mkdir -p "${cfg.playlistDirectory}" && chown -R ${cfg.user}:${cfg.group} "${cfg.playlistDirectory}"
'';
serviceConfig = {
User = "${cfg.user}";
PermissionsStartOnly = true;
ExecStart = "${pkgs.mpd}/bin/mpd --no-daemon ${mpdConf}";
Type = "notify";
LimitRTPRIO = 50;


@ -0,0 +1,115 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) concatMapStringsSep concatStringsSep isInt isList literalExample;
inherit (lib) mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkOption optional types;
cfg = config.services.automysqlbackup;
pkg = pkgs.automysqlbackup;
user = "automysqlbackup";
group = "automysqlbackup";
toStr = val:
if isList val then "( ${concatMapStringsSep " " (val: "'${val}'") val} )"
else if isInt val then toString val
else if true == val then "'yes'"
else if false == val then "'no'"
else "'${toString val}'";
configFile = pkgs.writeText "automysqlbackup.conf" ''
#version=${pkg.version}
# DON'T REMOVE THE PREVIOUS VERSION LINE!
#
${concatStringsSep "\n" (mapAttrsToList (name: value: "CONFIG_${name}=${toStr value}") cfg.config)}
'';
in
{
# interface
options = {
services.automysqlbackup = {
enable = mkEnableOption "AutoMySQLBackup";
calendar = mkOption {
type = types.str;
default = "01:15:00";
description = ''
Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
'';
};
config = mkOption {
type = with types; attrsOf (either (either str (either int bool)) (listOf str));
default = {};
description = ''
automysqlbackup configuration. Refer to
<filename>''${pkgs.automysqlbackup}/etc/automysqlbackup.conf</filename>
for details on supported values.
'';
example = literalExample ''
{
db_names = [ "nextcloud" "matomo" ];
table_exclude = [ "nextcloud.oc_users" "nextcloud.oc_whats_new" ];
mailcontent = "log";
mail_address = "admin@example.org";
}
'';
};
};
};
# implementation
config = mkIf cfg.enable {
assertions = [
{ assertion = !config.services.mysqlBackup.enable;
message = "Please choose one of services.mysqlBackup or services.automysqlbackup.";
}
];
services.automysqlbackup.config = mapAttrs (name: mkDefault) {
mysql_dump_username = user;
mysql_dump_host = "localhost";
backup_dir = "/var/backup/mysql";
db_exclude = [ "information_schema" "performance_schema" ];
mailcontent = "stdout";
mysql_dump_single_transaction = true;
};
systemd.timers.automysqlbackup = {
description = "automysqlbackup timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.calendar;
AccuracySec = "5m";
};
};
systemd.services.automysqlbackup = {
description = "automysqlbackup service";
serviceConfig = {
User = user;
Group = group;
ExecStart = "${pkg}/bin/automysqlbackup ${configFile}";
};
};
environment.systemPackages = [ pkg ];
users.users.${user}.group = group;
users.groups.${group} = { };
systemd.tmpfiles.rules = [
"d '${cfg.config.backup_dir}' 0750 ${user} ${group} - -"
];
services.mysql.ensureUsers = optional (config.services.mysql.enable && cfg.config.mysql_dump_host == "localhost") {
name = user;
ensurePermissions = { "*.*" = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES"; };
};
};
}
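A minimal usage sketch for the module above (database name and mail address are placeholders):

```nix
{
  services.automysqlbackup = {
    enable = true;
    calendar = "02:30:00";                # run the backup nightly at 02:30
    config = {
      db_names = [ "exampledb" ];         # placeholder database name
      mailcontent = "log";
      mail_address = "root@example.org";  # placeholder address
    };
  };
}
```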


@ -117,14 +117,12 @@ in
enable = true;
serviceConfig = {
User = cfg.user;
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.location}
chown -R ${cfg.user} ${cfg.location}
'';
script = backupScript;
};
tmpfiles.rules = [
"d ${cfg.location} 0700 ${cfg.user} - - -"
];
};
};


@ -14,11 +14,6 @@ let
requires = [ "postgresql.service" ];
preStart = ''
mkdir -m 0700 -p ${cfg.location}
chown postgres ${cfg.location}
'';
script = ''
umask 0077 # ensure backup is only readable by postgres user
@ -32,7 +27,6 @@ let
serviceConfig = {
Type = "oneshot";
PermissionsStartOnly = "true";
User = "postgres";
};
@ -107,6 +101,11 @@ in {
message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases";
}];
}
(mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.location}' 0700 postgres - - -"
];
})
(mkIf (cfg.enable && cfg.backupAll) {
systemd.services.postgresqlBackup =
postgresqlBackupService "all" "${config.services.postgresql.package}/bin/pg_dumpall";


@ -62,7 +62,7 @@ in
'';
};
enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
enable = mkEnableOption "Kubernetes addon manager";
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";


@ -28,7 +28,7 @@ in
type = str;
};
enable = mkEnableOption "Kubernetes controller manager.";
enable = mkEnableOption "Kubernetes controller manager";
extraOpts = mkOption {
description = "Kubernetes controller manager extra command line options.";


@ -23,7 +23,7 @@ in
{
###### interface
options.services.kubernetes.flannel = {
enable = mkEnableOption "enable flannel networking";
enable = mkEnableOption "flannel networking";
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
};


@ -39,7 +39,7 @@ in
###### interface
options.services.kubernetes.pki = with lib.types; {
enable = mkEnableOption "Whether to enable easyCert issuer service.";
enable = mkEnableOption "easyCert issuer service";
certs = mkOption {
description = "List of certificate specs to feed to cert generator.";


@ -17,7 +17,7 @@ in
type = str;
};
enable = mkEnableOption "Whether to enable Kubernetes proxy.";
enable = mkEnableOption "Kubernetes proxy";
extraOpts = mkOption {
description = "Kubernetes proxy extra command line options.";


@ -16,7 +16,7 @@ in
type = str;
};
enable = mkEnableOption "Whether to enable Kubernetes scheduler.";
enable = mkEnableOption "Kubernetes scheduler";
extraOpts = mkOption {
description = "Kubernetes scheduler extra command line options.";


@ -1,8 +1,6 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.clickhouse;
confDir = "/etc/clickhouse-server";
stateDir = "/var/lib/clickhouse";
in
with lib;
{
@ -43,20 +41,13 @@ with lib;
after = [ "network.target" ];
preStart = ''
mkdir -p ${stateDir}
chown clickhouse:clickhouse ${confDir} ${stateDir}
'';
script = ''
cd "${confDir}"
exec ${pkgs.clickhouse}/bin/clickhouse-server
'';
serviceConfig = {
User = "clickhouse";
Group = "clickhouse";
PermissionsStartOnly = true;
ConfigurationDirectory = "clickhouse-server";
StateDirectory = "clickhouse";
LogsDirectory = "clickhouse";
ExecStart = "${pkgs.clickhouse}/bin/clickhouse-server --config-file=${pkgs.clickhouse}/etc/clickhouse-server/config.xml";
};
};


@ -158,27 +158,21 @@ in {
services.couchdb.configFile = mkDefault
(if useVersion2 then "/var/lib/couchdb/local.ini" else "/var/lib/couchdb/couchdb.ini");
systemd.tmpfiles.rules = [
"d '${dirOf cfg.uriFile}' - ${cfg.user} ${cfg.group} - -"
"d '${dirOf cfg.logFile}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.databaseDir}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.viewIndexDir}' - ${cfg.user} ${cfg.group} - -"
];
systemd.services.couchdb = {
description = "CouchDB Server";
wantedBy = [ "multi-user.target" ];
preStart =
''
mkdir -p `dirname ${cfg.uriFile}`;
mkdir -p `dirname ${cfg.logFile}`;
mkdir -p ${cfg.databaseDir};
mkdir -p ${cfg.viewIndexDir};
touch ${cfg.configFile}
touch -a ${cfg.logFile}
if [ "$(id -u)" = 0 ]; then
chown ${cfg.user}:${cfg.group} `dirname ${cfg.uriFile}`;
(test -f ${cfg.uriFile} && chown ${cfg.user}:${cfg.group} ${cfg.uriFile}) || true
chown ${cfg.user}:${cfg.group} ${cfg.databaseDir}
chown ${cfg.user}:${cfg.group} ${cfg.viewIndexDir}
chown ${cfg.user}:${cfg.group} ${cfg.configFile}
chown ${cfg.user}:${cfg.group} ${cfg.logFile}
fi
'';
environment = mkIf useVersion2 {
@ -191,7 +185,6 @@ in {
};
serviceConfig = {
PermissionsStartOnly = true;
User = cfg.user;
Group = cfg.group;
ExecStart = executable;


@ -157,20 +157,19 @@ in
config = mkIf config.services.influxdb.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group} - -"
];
systemd.services.influxdb = {
description = "InfluxDB Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/influxd -config "${configFile}"'';
User = "${cfg.user}";
Group = "${cfg.group}";
PermissionsStartOnly = true;
User = cfg.user;
Group = cfg.group;
};
preStart = ''
mkdir -m 0770 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}; fi
'';
postStart =
let
scheme = if configOptions.http.https-enabled then "-k https" else "http";


@ -78,11 +78,6 @@ in
after = [ "network.target" ];
serviceConfig = {
PermissionsStartOnly = true;
ExecStartPre = optionals cfg.enableUnixSocket [
"${pkgs.coreutils}/bin/install -d -o ${cfg.user} /run/memcached/"
"${pkgs.coreutils}/bin/chown -R ${cfg.user} /run/memcached/"
];
ExecStart =
let
networking = if cfg.enableUnixSocket
@ -91,12 +86,13 @@ in
in "${memcached}/bin/memcached ${networking} -m ${toString cfg.maxMemory} -c ${toString cfg.maxConnections} ${concatStringsSep " " cfg.extraOptions}";
User = cfg.user;
RuntimeDirectory = "memcached";
};
};
};
imports = [
(mkRemovedOptionModule ["services" "memcached" "socket"] ''
This option was replaced by a fixed unix socket path at /run/memcached/memcached.sock enabled using services.memached.enableUnixSocket.
This option was replaced by a fixed unix socket path at /run/memcached/memcached.sock enabled using services.memcached.enableUnixSocket.
'')
];


@ -98,7 +98,7 @@ in
type = types.path;
default = "/var/log/stanchion";
description = ''
Log directory for Stanchino.
Log directory for Stanchion.
'';
};
@ -152,6 +152,11 @@ in
users.groups.stanchion.gid = config.ids.gids.stanchion;
systemd.tmpfiles.rules = [
"d '${cfg.logDir}' - stanchion stanchion --"
"d '${cfg.dataDir}' 0700 stanchion stanchion --"
];
systemd.services.stanchion = {
description = "Stanchion Server";
@ -168,25 +173,12 @@ in
environment.STANCHION_LOG_DIR = "${cfg.logDir}";
environment.STANCHION_ETC_DIR = "/etc/stanchion";
preStart = ''
if ! test -e ${cfg.logDir}; then
mkdir -m 0755 -p ${cfg.logDir}
chown -R stanchion:stanchion ${cfg.logDir}
fi
if ! test -e ${cfg.dataDir}; then
mkdir -m 0700 -p ${cfg.dataDir}
chown -R stanchion:stanchion ${cfg.dataDir}
fi
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/stanchion console";
ExecStop = "${cfg.package}/bin/stanchion stop";
StandardInput = "tty";
User = "stanchion";
Group = "stanchion";
PermissionsStartOnly = true;
# Give Stanchion a decent amount of time to clean up.
TimeoutStopSec = 120;
LimitNOFILE = 65536;


@ -18,7 +18,7 @@ in
services.gnome3.gnome-settings-daemon = {
enable = mkEnableOption "GNOME Settings Daemon.";
enable = mkEnableOption "GNOME Settings Daemon";
# There are many forks of gnome-settings-daemon
package = mkOption {


@ -13,7 +13,7 @@ in {
options = {
hardware.bluetooth = {
enable = mkEnableOption "support for Bluetooth.";
enable = mkEnableOption "support for Bluetooth";
powerOnBoot = mkOption {
type = types.bool;


@ -12,7 +12,7 @@ in {
options = {
services.vdr = {
enable = mkEnableOption "enable VDR. Please put config into ${libDir}.";
enable = mkEnableOption "VDR. Please put config into ${libDir}";
package = mkOption {
type = types.package;
@ -34,7 +34,7 @@ in {
description = "Additional command line arguments to pass to VDR.";
};
enableLirc = mkEnableOption "enable LIRC";
enableLirc = mkEnableOption "LIRC";
};
};


@ -11,7 +11,7 @@ in
options = {
services.mailcatcher = {
enable = mkEnableOption "Enable MailCatcher.";
enable = mkEnableOption "MailCatcher";
http.ip = mkOption {
type = types.str;


@ -212,6 +212,10 @@ with lib;
};
};
systemd.tmpfiles.rules = [
"d /var/spool/nullmailer - ${cfg.user} - - -"
];
systemd.services.nullmailer = {
description = "nullmailer";
wantedBy = [ "multi-user.target" ];
@ -220,13 +224,11 @@ with lib;
preStart = ''
mkdir -p /var/spool/nullmailer/{queue,tmp}
rm -f /var/spool/nullmailer/trigger && mkfifo -m 660 /var/spool/nullmailer/trigger
chown ${cfg.user} /var/spool/nullmailer/*
'';
serviceConfig = {
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly=true;
ExecStart = "${pkgs.nullmailer}/bin/nullmailer-send";
Restart = "always";
};


@ -7,7 +7,7 @@ let
in {
options.services.offlineimap = {
enable = mkEnableOption "Offlineimap, a software to dispose your mailbox(es) as a local Maildir(s).";
enable = mkEnableOption "OfflineIMAP, a software to dispose your mailbox(es) as a local Maildir(s)";
install = mkOption {
type = types.bool;


@ -94,6 +94,10 @@ in {
services.rss2email.config.to = cfg.to;
systemd.tmpfiles.rules = [
"d /var/rss2email 0700 rss2email rss2email - -"
];
systemd.services.rss2email = let
conf = pkgs.writeText "rss2email.cfg" (lib.generators.toINI {} ({
DEFAULT = cfg.config;
@ -105,22 +109,16 @@ in {
in
{
preStart = ''
mkdir -p /var/rss2email
chmod 700 /var/rss2email
cp ${conf} /var/rss2email/conf.cfg
if [ ! -f /var/rss2email/db.json ]; then
echo '{"version":2,"feeds":[]}' > /var/rss2email/db.json
fi
chown -R rss2email:rss2email /var/rss2email
'';
path = [ pkgs.system-sendmail ];
serviceConfig = {
ExecStart =
"${pkgs.rss2email}/bin/r2e -c /var/rss2email/conf.cfg -d /var/rss2email/db.json run";
User = "rss2email";
PermissionsStartOnly = "true";
};
};


@ -12,7 +12,7 @@ in
options = {
services.beanstalkd = {
enable = mkEnableOption "Enable the Beanstalk work queue.";
enable = mkEnableOption "the Beanstalk work queue";
listen = {
port = mkOption {


@ -142,6 +142,10 @@ in {
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 etcd - - -"
];
systemd.services.etcd = {
description = "etcd key-value store";
wantedBy = [ "multi-user.target" ];
@ -176,14 +180,8 @@ in {
Type = "notify";
ExecStart = "${pkgs.etcd.bin}/bin/etcd";
User = "etcd";
PermissionsStartOnly = true;
LimitNOFILE = 40000;
};
preStart = ''
mkdir -m 0700 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown etcd ${cfg.dataDir}; fi
'';
};
environment.systemPackages = [ pkgs.etcdctl ];


@ -38,24 +38,19 @@ in
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
];
systemd.services.jackett = {
description = "Jackett";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
test -d ${cfg.dataDir} || {
echo "Creating jackett data directory in ${cfg.dataDir}"
mkdir -p ${cfg.dataDir}
}
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
chmod 0700 ${cfg.dataDir}
'';
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = "true";
ExecStart = "${pkgs.jackett}/bin/Jackett --NoUpdates --DataFolder '${cfg.dataDir}'";
Restart = "on-failure";
};


@ -17,20 +17,15 @@ in
description = "Lidarr";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
[ ! -d /var/lib/lidarr ] && mkdir -p /var/lib/lidarr
chown -R lidarr:lidarr /var/lib/lidarr
'';
serviceConfig = {
Type = "simple";
User = "lidarr";
Group = "lidarr";
PermissionsStartOnly = "true";
ExecStart = "${pkgs.lidarr}/bin/Lidarr";
Restart = "on-failure";
StateDirectory = "/var/lib/lidarr/";
StateDirectory = "lidarr";
StateDirectoryMode = "0770";
};
};


@ -554,7 +554,10 @@ in {
};
trusted_third_party_id_servers = mkOption {
type = types.listOf types.str;
default = ["matrix.org"];
default = [
"matrix.org"
"vector.im"
];
description = ''
The list of identity servers trusted to verify third party identifiers by this server.
'';


@ -95,6 +95,9 @@ in {
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.workDir}' 0700 - - - -"
];
systemd.services.mesos-master = {
description = "Mesos Master";
wantedBy = [ "multi-user.target" ];
@ -114,11 +117,7 @@ in {
${toString cfg.extraCmdLineOptions}
'';
Restart = "on-failure";
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.workDir}
'';
};
};


@ -184,6 +184,9 @@ in {
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.workDir}' 0701 - - - -"
];
systemd.services.mesos-slave = {
description = "Mesos Slave";
wantedBy = [ "multi-user.target" ];
@ -210,11 +213,7 @@ in {
--executor_environment_variables=${lib.escapeShellArg (builtins.toJSON cfg.executorEnvironmentVariables)} \
${toString cfg.extraCmdLineOptions}
'';
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0701 -p ${cfg.workDir}
'';
};
};


@ -38,24 +38,19 @@ in
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
];
systemd.services.radarr = {
description = "Radarr";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
test -d ${cfg.dataDir} || {
echo "Creating radarr data directory in ${cfg.dataDir}"
mkdir -p ${cfg.dataDir}
}
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
chmod 0700 ${cfg.dataDir}
'';
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = "true";
ExecStart = "${pkgs.radarr}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
Restart = "on-failure";
};


@ -39,24 +39,19 @@ in
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
];
systemd.services.sonarr = {
description = "Sonarr";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
test -d ${cfg.dataDir} || {
echo "Creating sonarr data directory in ${cfg.dataDir}"
mkdir -p ${cfg.dataDir}
}
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
chmod 0700 ${cfg.dataDir}
'';
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = "true";
ExecStart = "${pkgs.sonarr}/bin/NzbDrone -nobrowser -data='${cfg.dataDir}'";
Restart = "on-failure";
};


@ -6,7 +6,7 @@ let
in {
options = {
services.sssd = {
enable = mkEnableOption "the System Security Services Daemon.";
enable = mkEnableOption "the System Security Services Daemon";
config = mkOption {
type = types.lines;


@ -119,6 +119,10 @@ in {
config = mkIf cfg.enable {
environment.systemPackages = [cfg.package];
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 zookeeper - - -"
];
systemd.services.zookeeper = {
description = "Zookeeper Daemon";
wantedBy = [ "multi-user.target" ];
@ -135,11 +139,8 @@ in {
${configDir}/zoo.cfg
'';
User = "zookeeper";
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown zookeeper ${cfg.dataDir}; fi
echo "${toString cfg.id}" > ${cfg.dataDir}/myid
'';
};


@ -79,6 +79,10 @@ in {
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' - ${cfg.user} - - -"
];
systemd.services.collectd = {
description = "Collectd Monitoring Agent";
after = [ "network.target" ];
@ -87,16 +91,9 @@ in {
serviceConfig = {
ExecStart = "${cfg.package}/sbin/collectd -C ${conf} -f";
User = cfg.user;
PermissionsStartOnly = true;
Restart = "on-failure";
RestartSec = 3;
};
preStart = ''
mkdir -p "${cfg.dataDir}"
chmod 755 "${cfg.dataDir}"
chown -R ${cfg.user} "${cfg.dataDir}"
'';
};
users.users = optional (cfg.user == "collectd") {


@ -123,7 +123,7 @@ in {
graphite carbon.
For more information visit
<link xlink:href="http://graphite-api.readthedocs.org/en/latest/"/>
<link xlink:href="https://graphite-api.readthedocs.org/en/latest/"/>
'';
default = false;
type = types.bool;


@ -8,7 +8,7 @@
# spawn-fcgi -s /run/munin/fastcgi-graph.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph
# spawn-fcgi -s /run/munin/fastcgi-html.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-html
# https://paste.sh/vofcctHP#-KbDSXVeWoifYncZmLfZzgum
# nginx http://munin.readthedocs.org/en/latest/example/webserver/nginx.html
# nginx https://munin.readthedocs.org/en/latest/example/webserver/nginx.html
with lib;


@ -22,9 +22,6 @@ let
workingDir = stateDirBase + stateDir;
workingDir2 = stateDirBase + cfg2.stateDir;
# Get a submodule without any embedded metadata:
_filter = x: filterAttrs (k: v: k != "_module") x;
# a wrapper that verifies that the configuration is valid
promtoolCheck = what: name: file: pkgs.runCommand "${name}-${what}-checked"
{ buildInputs = [ cfg.package ]; } ''
@ -50,11 +47,11 @@ let
# This becomes the main config file for Prometheus 1
promConfig = {
global = cfg.globalConfig;
global = filterValidPrometheus cfg.globalConfig;
rule_files = map (promtoolCheck "check-rules" "rules") (cfg.ruleFiles ++ [
(pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
]);
scrape_configs = filterEmpty cfg.scrapeConfigs;
scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
};
generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
@ -77,11 +74,11 @@ let
# This becomes the main config file for Prometheus 2
promConfig2 = {
global = cfg2.globalConfig;
global = filterValidPrometheus cfg2.globalConfig;
rule_files = map (prom2toolCheck "check rules" "rules") (cfg2.ruleFiles ++ [
(pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg2.rules))
]);
scrape_configs = filterEmpty cfg2.scrapeConfigs;
scrape_configs = filterValidPrometheus cfg2.scrapeConfigs;
alerting = optionalAttrs (cfg2.alertmanagerURL != []) {
alertmanagers = [{
static_configs = [{
@ -108,7 +105,7 @@ let
] ++
optional (cfg2.webExternalUrl != null) "--web.external-url=${cfg2.webExternalUrl}";
filterEmpty = filterAttrsListRecursive (_n: v: !(v == null || v == [] || v == {}));
filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
filterAttrsListRecursive = pred: x:
if isAttrs x then
listToAttrs (
@ -123,41 +120,37 @@ let
map (filterAttrsListRecursive pred) x
else x;
mkDefOpt = type : defaultStr : description : mkOpt type (description + ''
Defaults to <literal>${defaultStr}</literal> in prometheus
when set to <literal>null</literal>.
'');
mkOpt = type : description : mkOption {
type = types.nullOr type;
default = null;
inherit description;
};
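Illustration of the helpers just defined: following the definitions above, a call such as the one below expands into the plain `mkOption` form it replaces throughout this file.

```nix
# mkDefOpt types.str "1m" "How frequently to scrape targets by default."
# is, up to whitespace, equivalent to:
mkOption {
  type = types.nullOr types.str;
  default = null;
  description = ''
    How frequently to scrape targets by default.
    Defaults to <literal>1m</literal> in prometheus
    when set to <literal>null</literal>.
  '';
}
```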
promTypes.globalConfig = types.submodule {
options = {
scrape_interval = mkOption {
type = types.str;
default = "1m";
description = ''
How frequently to scrape targets by default.
'';
};
scrape_interval = mkDefOpt types.str "1m" ''
How frequently to scrape targets by default.
'';
scrape_timeout = mkOption {
type = types.str;
default = "10s";
description = ''
How long until a scrape request times out.
'';
};
scrape_timeout = mkDefOpt types.str "10s" ''
How long until a scrape request times out.
'';
evaluation_interval = mkOption {
type = types.str;
default = "1m";
description = ''
How frequently to evaluate rules by default.
'';
};
evaluation_interval = mkDefOpt types.str "1m" ''
How frequently to evaluate rules by default.
'';
external_labels = mkOption {
type = types.attrsOf types.str;
description = ''
The labels to add to any time series or alerts when
communicating with external systems (federation, remote
storage, Alertmanager).
'';
default = {};
};
external_labels = mkOpt (types.attrsOf types.str) ''
The labels to add to any time series or alerts when
communicating with external systems (federation, remote
storage, Alertmanager).
'';
};
};
@ -169,145 +162,127 @@ let
The job name assigned to scraped metrics by default.
'';
};
scrape_interval = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
How frequently to scrape targets from this job. Defaults to the
globally configured default.
'';
};
scrape_timeout = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Per-target timeout when scraping this job. Defaults to the
globally configured default.
'';
};
metrics_path = mkOption {
type = types.str;
default = "/metrics";
description = ''
The HTTP resource path on which to fetch metrics from targets.
'';
};
honor_labels = mkOption {
type = types.bool;
default = false;
description = ''
Controls how Prometheus handles conflicts between labels
that are already present in scraped data and labels that
Prometheus would attach server-side ("job" and "instance"
labels, manually configured target labels, and labels
generated by service discovery implementations).
scrape_interval = mkOpt types.str ''
How frequently to scrape targets from this job. Defaults to the
globally configured default.
'';
If honor_labels is set to "true", label conflicts are
resolved by keeping label values from the scraped data and
ignoring the conflicting server-side labels.
scrape_timeout = mkOpt types.str ''
Per-target timeout when scraping this job. Defaults to the
globally configured default.
'';
If honor_labels is set to "false", label conflicts are
resolved by renaming conflicting labels in the scraped data
to "exported_&lt;original-label&gt;" (for example
"exported_instance", "exported_job") and then attaching
server-side labels. This is useful for use cases such as
federation, where all labels specified in the target should
be preserved.
'';
};
scheme = mkOption {
type = types.enum ["http" "https"];
default = "http";
description = ''
The URL scheme with which to fetch metrics from targets.
'';
};
params = mkOption {
type = types.attrsOf (types.listOf types.str);
default = {};
description = ''
Optional HTTP URL parameters.
'';
};
basic_auth = mkOption {
type = types.nullOr (types.submodule {
options = {
username = mkOption {
type = types.str;
description = ''
HTTP username
'';
};
password = mkOption {
type = types.str;
description = ''
HTTP password
'';
};
metrics_path = mkDefOpt types.str "/metrics" ''
The HTTP resource path on which to fetch metrics from targets.
'';
honor_labels = mkDefOpt types.bool "false" ''
Controls how Prometheus handles conflicts between labels
that are already present in scraped data and labels that
Prometheus would attach server-side ("job" and "instance"
labels, manually configured target labels, and labels
generated by service discovery implementations).
If honor_labels is set to "true", label conflicts are
resolved by keeping label values from the scraped data and
ignoring the conflicting server-side labels.
If honor_labels is set to "false", label conflicts are
resolved by renaming conflicting labels in the scraped data
to "exported_&lt;original-label&gt;" (for example
"exported_instance", "exported_job") and then attaching
server-side labels. This is useful for use cases such as
federation, where all labels specified in the target should
be preserved.
'';
honor_timestamps = mkDefOpt types.bool "true" ''
honor_timestamps controls whether Prometheus respects the timestamps present
in scraped data.
If honor_timestamps is set to <literal>true</literal>, the timestamps of the metrics exposed
by the target will be used.
If honor_timestamps is set to <literal>false</literal>, the timestamps of the metrics exposed
by the target will be ignored.
'';
scheme = mkDefOpt (types.enum ["http" "https"]) "http" ''
The URL scheme with which to fetch metrics from targets.
'';
params = mkOpt (types.attrsOf (types.listOf types.str)) ''
Optional HTTP URL parameters.
'';
basic_auth = mkOpt (types.submodule {
options = {
username = mkOption {
type = types.str;
description = ''
HTTP username
'';
};
});
default = null;
apply = x: mapNullable _filter x;
description = ''
Optional http login credentials for metrics scraping.
'';
};
tls_config = mkOption {
type = types.nullOr promTypes.tls_config;
default = null;
apply = x: mapNullable _filter x;
description = ''
Configures the scrape request's TLS settings.
'';
};
dns_sd_configs = mkOption {
type = types.listOf promTypes.dns_sd_config;
default = [];
apply = x: map _filter x;
description = ''
List of DNS service discovery configurations.
'';
};
consul_sd_configs = mkOption {
type = types.listOf promTypes.consul_sd_config;
default = [];
apply = x: map _filter x;
description = ''
List of Consul service discovery configurations.
'';
};
file_sd_configs = mkOption {
type = types.listOf promTypes.file_sd_config;
default = [];
apply = x: map _filter x;
description = ''
List of file service discovery configurations.
'';
};
static_configs = mkOption {
type = types.listOf promTypes.static_config;
default = [];
apply = x: map _filter x;
description = ''
List of labeled target groups for this job.
'';
};
ec2_sd_configs = mkOption {
type = types.listOf promTypes.ec2_sd_config;
default = [];
apply = x: map _filter x;
description = ''
List of EC2 service discovery configurations.
'';
};
relabel_configs = mkOption {
type = types.listOf promTypes.relabel_config;
default = [];
apply = x: map _filter x;
description = ''
List of relabel configurations.
'';
};
password = mkOption {
type = types.str;
description = ''
HTTP password
'';
};
};
}) ''
Optional http login credentials for metrics scraping.
'';
bearer_token = mkOpt types.str ''
Sets the `Authorization` header on every scrape request with
the configured bearer token. It is mutually exclusive with
<option>bearer_token_file</option>.
'';
bearer_token_file = mkOpt types.str ''
Sets the `Authorization` header on every scrape request with
the bearer token read from the configured file. It is mutually
exclusive with <option>bearer_token</option>.
'';
tls_config = mkOpt promTypes.tls_config ''
Configures the scrape request's TLS settings.
'';
proxy_url = mkOpt types.str ''
Optional proxy URL.
'';
ec2_sd_configs = mkOpt (types.listOf promTypes.ec2_sd_config) ''
List of EC2 service discovery configurations.
'';
dns_sd_configs = mkOpt (types.listOf promTypes.dns_sd_config) ''
List of DNS service discovery configurations.
'';
consul_sd_configs = mkOpt (types.listOf promTypes.consul_sd_config) ''
List of Consul service discovery configurations.
'';
file_sd_configs = mkOpt (types.listOf promTypes.file_sd_config) ''
List of file service discovery configurations.
'';
static_configs = mkOpt (types.listOf promTypes.static_config) ''
List of labeled target groups for this job.
'';
relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
List of relabel configurations.
'';
sample_limit = mkDefOpt types.int "0" ''
Per-scrape limit on number of scraped samples that will be accepted.
If more than this number of samples are present after metric relabelling
the entire scrape will be treated as failed. 0 means no limit.
'';
};
};
@ -337,66 +312,41 @@ let
The AWS Region.
'';
};
endpoint = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Custom endpoint to be used.
'';
};
access_key = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
The AWS API key id. If blank, the environment variable
<literal>AWS_ACCESS_KEY_ID</literal> is used.
'';
};
secret_key = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
The AWS API key secret. If blank, the environment variable
<literal>AWS_SECRET_ACCESS_KEY</literal> is used.
'';
};
profile = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Named AWS profile used to connect to the API.
'';
};
role_arn = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
AWS Role ARN, an alternative to using AWS API keys.
'';
};
refresh_interval = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Refresh interval to re-read the instance list.
'';
};
port = mkOption {
type = types.int;
default = 80;
description = ''
The port to scrape metrics from. If using the public IP
address, this must instead be specified in the relabeling
rule.
'';
};
filters = mkOption {
type = types.nullOr (types.listOf promTypes.filter);
default = null;
description = ''
Filters can be used optionally to filter the instance list by other criteria.
'';
};
endpoint = mkOpt types.str ''
Custom endpoint to be used.
'';
access_key = mkOpt types.str ''
The AWS API key id. If blank, the environment variable
<literal>AWS_ACCESS_KEY_ID</literal> is used.
'';
secret_key = mkOpt types.str ''
The AWS API key secret. If blank, the environment variable
<literal>AWS_SECRET_ACCESS_KEY</literal> is used.
'';
profile = mkOpt types.str ''
Named AWS profile used to connect to the API.
'';
role_arn = mkOpt types.str ''
AWS Role ARN, an alternative to using AWS API keys.
'';
refresh_interval = mkDefOpt types.str "60s" ''
Refresh interval to re-read the instance list.
'';
port = mkDefOpt types.int "80" ''
The port to scrape metrics from. If using the public IP
address, this must instead be specified in the relabeling
rule.
'';
filters = mkOpt (types.listOf promTypes.filter) ''
Filters can be used optionally to filter the instance list by other criteria.
'';
};
};
@ -409,6 +359,7 @@ let
for the available filters.
'';
};
value = mkOption {
type = types.listOf types.str;
default = [];
@ -427,56 +378,63 @@ let
A list of DNS SRV record names to be queried.
'';
};
refresh_interval = mkOption {
type = types.str;
default = "30s";
description = ''
The time after which the provided names are refreshed.
'';
};
refresh_interval = mkDefOpt types.str "30s" ''
The time after which the provided names are refreshed.
'';
};
};
promTypes.consul_sd_config = types.submodule {
options = {
server = mkOption {
type = types.str;
description = "Consul server to query.";
};
token = mkOption {
type = types.nullOr types.str;
description = "Consul token";
};
datacenter = mkOption {
type = types.nullOr types.str;
description = "Consul datacenter";
};
scheme = mkOption {
type = types.nullOr types.str;
description = "Consul scheme";
};
username = mkOption {
type = types.nullOr types.str;
description = "Consul username";
};
password = mkOption {
type = types.nullOr types.str;
description = "Consul password";
};
server = mkDefOpt types.str "localhost:8500" ''
Consul server to query.
'';
services = mkOption {
type = types.listOf types.str;
description = ''
A list of services for which targets are retrieved.
'';
};
tag_separator = mkOption {
type = types.str;
default = ",";
description = ''
The string by which Consul tags are joined into the tag label.
'';
};
token = mkOpt types.str "Consul token";
datacenter = mkOpt types.str "Consul datacenter";
scheme = mkDefOpt types.str "http" "Consul scheme";
username = mkOpt types.str "Consul username";
password = mkOpt types.str "Consul password";
tls_config = mkOpt promTypes.tls_config ''
Configures the Consul request's TLS settings.
'';
services = mkOpt (types.listOf types.str) ''
A list of services for which targets are retrieved.
'';
tags = mkOpt (types.listOf types.str) ''
An optional list of tags used to filter nodes for a given
service. Services must contain all tags in the list.
'';
node_meta = mkOpt (types.attrsOf types.str) ''
Node metadata used to filter nodes for a given service.
'';
tag_separator = mkDefOpt types.str "," ''
The string by which Consul tags are joined into the tag label.
'';
allow_stale = mkOpt types.bool ''
Allow stale Consul results
(see <link xlink:href="https://www.consul.io/api/index.html#consistency-modes"/>).
Will reduce load on Consul.
'';
refresh_interval = mkDefOpt types.str "30s" ''
The time after which the provided names are refreshed.
On a large setup it might be a good idea to increase this value
because the catalog will change all the time.
'';
};
};
@ -488,108 +446,74 @@ let
Patterns for files from which target groups are extracted. Refer
to the Prometheus documentation for permitted filename patterns
and formats.
'';
};
'';
};
refresh_interval = mkOption {
type = types.str;
default = "30s";
description = ''
Refresh interval to re-read the files.
'';
};
refresh_interval = mkDefOpt types.str "5m" ''
Refresh interval to re-read the files.
'';
};
};
promTypes.relabel_config = types.submodule {
options = {
source_labels = mkOption {
type = with types; nullOr (listOf str);
default = null;
description = ''
The source labels select values from existing labels. Their content
is concatenated using the configured separator and matched against
the configured regular expression.
'';
};
separator = mkOption {
type = types.str;
default = ";";
description = ''
Separator placed between concatenated source label values.
'';
};
target_label = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Label to which the resulting value is written in a replace action.
It is mandatory for replace actions.
'';
};
regex = mkOption {
type = types.str;
default = "(.*)";
description = ''
Regular expression against which the extracted value is matched.
'';
};
replacement = mkOption {
type = types.str;
default = "$1";
description = ''
Replacement value against which a regex replace is performed if the
regular expression matches.
'';
};
action = mkOption {
type = types.enum ["replace" "keep" "drop"];
default = "replace";
description = ''
Action to perform based on regex matching.
'';
};
source_labels = mkOpt (types.listOf types.str) ''
The source labels select values from existing labels. Their content
is concatenated using the configured separator and matched against
the configured regular expression.
'';
separator = mkDefOpt types.str ";" ''
Separator placed between concatenated source label values.
'';
target_label = mkOpt types.str ''
Label to which the resulting value is written in a replace action.
It is mandatory for replace actions.
'';
regex = mkDefOpt types.str "(.*)" ''
Regular expression against which the extracted value is matched.
'';
modulus = mkOpt types.int ''
Modulus to take of the hash of the source label values.
'';
replacement = mkDefOpt types.str "$1" ''
Replacement value against which a regex replace is performed if the
regular expression matches.
'';
action = mkDefOpt (types.enum ["replace" "keep" "drop"]) "replace" ''
Action to perform based on regex matching.
'';
};
};
promTypes.tls_config = types.submodule {
options = {
ca_file = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
CA certificate to validate API server certificate with.
'';
};
cert_file = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Certificate file for client cert authentication to the server.
'';
};
key_file = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Key file for client cert authentication to the server.
'';
};
server_name = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
ServerName extension to indicate the name of the server.
http://tools.ietf.org/html/rfc4366#section-3.1
'';
};
insecure_skip_verify = mkOption {
type = types.bool;
default = false;
description = ''
Disable validation of the server certificate.
'';
};
ca_file = mkOpt types.str ''
CA certificate to validate API server certificate with.
'';
cert_file = mkOpt types.str ''
Certificate file for client cert authentication to the server.
'';
key_file = mkOpt types.str ''
Key file for client cert authentication to the server.
'';
server_name = mkOpt types.str ''
ServerName extension to indicate the name of the server.
http://tools.ietf.org/html/rfc4366#section-3.1
'';
insecure_skip_verify = mkOpt types.bool ''
Disable validation of the server certificate.
'';
};
};
@ -662,7 +586,6 @@ in {
globalConfig = mkOption {
type = promTypes.globalConfig;
default = {};
apply = _filter;
description = ''
Parameters that are valid in all configuration contexts. They
also serve as defaults for other configuration sections
@ -688,7 +611,6 @@ in {
scrapeConfigs = mkOption {
type = types.listOf promTypes.scrape_config;
default = [];
apply = x: map _filter x;
description = ''
A list of scrape configurations.
'';
@ -786,7 +708,6 @@ in {
globalConfig = mkOption {
type = promTypes.globalConfig;
default = {};
apply = _filter;
description = ''
Parameters that are valid in all configuration contexts. They
also serve as defaults for other configuration sections
@ -812,7 +733,6 @@ in {
scrapeConfigs = mkOption {
type = types.listOf promTypes.scrape_config;
default = [];
apply = x: map _filter x;
description = ''
A list of scrape configurations.
'';
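To illustrate how the option types above compose, a hedged sketch of a single scrape configuration (job name and target address are placeholders):

```nix
{
  services.prometheus = {
    enable = true;
    scrapeConfigs = [
      {
        job_name = "node";                      # placeholder job name
        scrape_interval = "30s";                # overrides the global default
        static_configs = [
          { targets = [ "localhost:9100" ]; }   # placeholder node_exporter target
        ];
        relabel_configs = [
          {
            source_labels = [ "__address__" ];
            target_label = "instance";
          }
        ];
      }
    ];
  };
}
```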


@ -226,18 +226,19 @@ in {
ipfs.gid = config.ids.gids.ipfs;
};
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
] ++ optionals cfg.autoMount [
"d '${cfg.ipfsMountDir}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.ipnsMountDir}' - ${cfg.user} ${cfg.group} - -"
];
systemd.services.ipfs-init = recursiveUpdate commonEnv {
description = "IPFS Initializer";
after = [ "local-fs.target" ];
before = [ "ipfs.service" "ipfs-offline.service" "ipfs-norouting.service" ];
preStart = ''
install -m 0755 -o ${cfg.user} -g ${cfg.group} -d ${cfg.dataDir}
'' + optionalString cfg.autoMount ''
install -m 0755 -o ${cfg.user} -g ${cfg.group} -d ${cfg.ipfsMountDir}
install -m 0755 -o ${cfg.user} -g ${cfg.group} -d ${cfg.ipnsMountDir}
'';
script = ''
if [[ ! -f ${cfg.dataDir}/config ]]; then
ipfs init ${optionalString cfg.emptyRepo "-e"} \
@ -253,7 +254,6 @@ in {
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
PermissionsStartOnly = true;
};
};


@ -470,7 +470,7 @@ in
'';
};
trust.hidden = mkEnableOption "Router concealment.";
trust.hidden = mkEnableOption "Router concealment";
websocket = mkEndpointOpt "websockets" "127.0.0.1" 7666;
@ -478,7 +478,7 @@ in
exploratory.outbound = i2cpOpts "exploratory";
ntcp2.enable = mkEnableTrueOption "NTCP2.";
ntcp2.published = mkEnableOption "NTCP2 publication.";
ntcp2.published = mkEnableOption "NTCP2 publication";
ntcp2.port = mkOption {
type = types.int;
default = 0;

View File

@ -20,7 +20,7 @@ in
services.miredo = {
enable = mkEnableOption "the Miredo IPv6 tunneling service.";
enable = mkEnableOption "the Miredo IPv6 tunneling service";
package = mkOption {
type = types.package;


@ -51,7 +51,7 @@ in
services.monero = {
enable = mkEnableOption "Monero node daemon.";
enable = mkEnableOption "Monero node daemon";
mining.enable = mkOption {
type = types.bool;


@ -44,7 +44,7 @@ in
options = {
services.mosquitto = {
enable = mkEnableOption "Enable the MQTT Mosquitto broker.";
enable = mkEnableOption "the MQTT Mosquitto broker";
host = mkOption {
default = "127.0.0.1";
@ -65,7 +65,7 @@ in
};
ssl = {
enable = mkEnableOption "Enable SSL listener.";
enable = mkEnableOption "SSL listener";
cafile = mkOption {
type = types.nullOr types.path;


@ -116,7 +116,6 @@ in {
Group = "mxisd";
ExecStart = "${cfg.package}/bin/mxisd --spring.config.location=${cfg.dataDir}/ --spring.profiles.active=systemd --java.security.egd=file:/dev/./urandom";
WorkingDirectory = cfg.dataDir;
PermissionsStartOnly = true;
SuccessExitStatus = 143;
Restart = "on-failure";
};


@ -1,3 +1,4 @@
{ config, lib, pkgs, ... }:
with lib;
@ -43,7 +44,7 @@ in
services.namecoind = {
enable = mkEnableOption "namecoind, Namecoin client.";
enable = mkEnableOption "namecoind, Namecoin client";
wallet = mkOption {
type = types.path;


@ -3,7 +3,7 @@
in {
options.services.nullidentdmod = with types; {
enable = mkEnableOption "Enable the nullidentdmod identd daemon";
enable = mkEnableOption "the nullidentdmod identd daemon";
userid = mkOption {
type = nullOr str;


@ -285,12 +285,12 @@ in
uid = config.ids.uids.smokeping;
description = "smokeping daemon user";
home = smokepingHome;
createHome = true;
};
systemd.services.smokeping = {
wantedBy = [ "multi-user.target"];
serviceConfig = {
User = cfg.user;
PermissionsStartOnly = true;
Restart = "on-failure";
};
preStart = ''
@ -300,7 +300,6 @@ in
cp ${cgiHome} ${smokepingHome}/smokeping.fcgi
${cfg.package}/bin/smokeping --check --config=${configPath}
${cfg.package}/bin/smokeping --static --config=${configPath}
chown -R ${cfg.user} ${smokepingHome}
'';
script = ''${cfg.package}/bin/smokeping --config=${configPath} --nodaemon'';
};


@ -151,7 +151,6 @@ in {
RestartForceExitStatus="3 4";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
ExecStart = ''
${cfg.package}/bin/syncthing \
-no-browser \


@ -0,0 +1,100 @@
{ config, pkgs, lib, ... }:
with lib;
let
dataDir = "/var/lib/tedicross";
cfg = config.services.tedicross;
configJSON = pkgs.writeText "tedicross-settings.json" (builtins.toJSON cfg.config);
configYAML = pkgs.runCommand "tedicross-settings.yaml" { preferLocalBuild = true; } ''
${pkgs.remarshal}/bin/json2yaml -i ${configJSON} -o $out
'';
in {
options = {
services.tedicross = {
enable = mkEnableOption "the TediCross Telegram-Discord bridge service";
config = mkOption {
type = types.attrs;
# from https://github.com/TediCross/TediCross/blob/master/example.settings.yaml
example = literalExample ''
{
telegram = {
useFirstNameInsteadOfUsername = false;
colonAfterSenderName = false;
skipOldMessages = true;
sendEmojiWithStickers = true;
};
discord = {
useNickname = false;
skipOldMessages = true;
displayTelegramReplies = "embed";
replyLength = 100;
};
bridges = [
{
name = "Default bridge";
direction = "both";
telegram = {
chatId = -123456789;
relayJoinMessages = true;
relayLeaveMessages = true;
sendUsernames = true;
ignoreCommands = true;
};
discord = {
serverId = "DISCORD_SERVER_ID";
channelId = "DISCORD_CHANNEL_ID";
relayJoinMessages = true;
relayLeaveMessages = true;
sendUsernames = true;
crossDeleteOnTelegram = true;
};
}
];
debug = false;
}
'';
description = ''
<filename>settings.yaml</filename> configuration as a Nix attribute set.
Secret tokens should be specified using <option>environmentFile</option>
instead of this world-readable file.
'';
};
environmentFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
File containing environment variables to be passed to the TediCross service,
in which secret tokens can be specified securely using the
<literal>TELEGRAM_BOT_TOKEN</literal> and <literal>DISCORD_BOT_TOKEN</literal>
keys.
'';
};
};
};
config = mkIf cfg.enable {
# from https://github.com/TediCross/TediCross/blob/master/guides/autostart/Linux.md
systemd.services.tedicross = {
description = "TediCross Telegram-Discord bridge service";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${pkgs.nodePackages.tedicross}/bin/tedicross --config='${configYAML}' --data-dir='${dataDir}'";
Restart = "always";
DynamicUser = true;
StateDirectory = baseNameOf dataDir;
EnvironmentFile = cfg.environmentFile;
};
};
};
meta.maintainers = with maintainers; [ pacien ];
}

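For reference, a minimal usage sketch of the new TediCross module, combining the `config` attribute set with an `environmentFile` for the secret tokens. The path and the token contents are hypothetical:

```nix
{
  services.tedicross = {
    enable = true;
    config = {
      telegram.useFirstNameInsteadOfUsername = false;
      discord.useNickname = false;
      bridges = [ /* as in the example option value above */ ];
      debug = false;
    };
    # Hypothetical path; the file is expected to contain lines such as
    #   TELEGRAM_BOT_TOKEN=...
    #   DISCORD_BOT_TOKEN=...
    environmentFile = "/run/keys/tedicross.env";
  };
}
```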
View File

@ -5,7 +5,7 @@ with lib;
{
options = {
services.toxvpn = {
enable = mkEnableOption "enable toxvpn running on startup";
enable = mkEnableOption "toxvpn running on startup";
localip = mkOption {
type = types.string;

View File

@ -26,19 +26,28 @@ let
type = with types; nullOr str;
default = null;
description = ''
Base64 private key generated by wg genkey.
Base64 private key generated by <command>wg genkey</command>.
Warning: Consider using privateKeyFile instead if you do not
want to store the key in the world-readable Nix store.
'';
};
generatePrivateKeyFile = mkOption {
default = false;
type = types.bool;
description = ''
Automatically generate a private key with
<command>wg genkey</command>, at the privateKeyFile location.
'';
};
privateKeyFile = mkOption {
example = "/private/wireguard_key";
type = with types; nullOr str;
default = null;
description = ''
Private key file as generated by wg genkey.
Private key file as generated by <command>wg genkey</command>.
'';
};
@ -124,8 +133,8 @@ let
example = "rVXs/Ni9tu3oDBLS4hOyAUAa1qTWVA3loR8eL20os3I=";
type = with types; nullOr str;
description = ''
Base64 preshared key generated by wg genpsk. Optional,
and may be omitted. This option adds an additional layer of
Base64 preshared key generated by <command>wg genpsk</command>.
Optional, and may be omitted. This option adds an additional layer of
symmetric-key cryptography to be mixed into the already existing
public-key cryptography, for post-quantum resistance.
@ -139,8 +148,8 @@ let
example = "/private/wireguard_psk";
type = with types; nullOr str;
description = ''
File pointing to preshared key as generated by wg pensk. Optional,
and may be omitted. This option adds an additional layer of
File pointing to preshared key as generated by <command>wg genpsk</command>.
Optional, and may be omitted. This option adds an additional layer of
symmetric-key cryptography to be mixed into the already existing
public-key cryptography, for post-quantum resistance.
'';
@ -182,9 +191,48 @@ let
};
generateUnit = name: values:
generatePathUnit = name: values:
assert (values.privateKey == null);
assert (values.privateKeyFile != null);
nameValuePair "wireguard-${name}"
{
description = "WireGuard Tunnel - ${name} - Private Key";
requiredBy = [ "wireguard-${name}.service" ];
before = [ "wireguard-${name}.service" ];
pathConfig.PathExists = values.privateKeyFile;
};
generateKeyServiceUnit = name: values:
assert values.generatePrivateKeyFile;
nameValuePair "wireguard-${name}-key"
{
description = "WireGuard Tunnel - ${name} - Key Generator";
wantedBy = [ "wireguard-${name}.service" ];
requiredBy = [ "wireguard-${name}.service" ];
before = [ "wireguard-${name}.service" ];
path = with pkgs; [ wireguard ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
mkdir --mode 0644 -p "${dirOf values.privateKeyFile}"
if [ ! -f "${values.privateKeyFile}" ]; then
touch "${values.privateKeyFile}"
chmod 0600 "${values.privateKeyFile}"
wg genkey > "${values.privateKeyFile}"
chmod 0400 "${values.privateKeyFile}"
fi
'';
};
generateSetupServiceUnit = name: values:
# exactly one way to specify the private key must be set
assert (values.privateKey != null) != (values.privateKeyFile != null);
#assert (values.privateKey != null) != (values.privateKeyFile != null);
let privKey = if values.privateKeyFile != null then values.privateKeyFile else pkgs.writeText "wg-key" values.privateKey;
in
nameValuePair "wireguard-${name}"
@ -279,10 +327,27 @@ in
config = mkIf (cfg.interfaces != {}) {
assertions = (attrValues (
mapAttrs (name: value: {
assertion = (value.privateKey != null) != (value.privateKeyFile != null);
message = "Either networking.wireguard.interfaces.${name}.privateKey or networking.wireguard.interfaces.${name}.privateKeyFile must be set.";
}) cfg.interfaces))
++ (attrValues (
mapAttrs (name: value: {
assertion = value.generatePrivateKeyFile -> (value.privateKey == null);
message = "networking.wireguard.interfaces.${name}.generatePrivateKey must not be set if networking.wireguard.interfaces.${name}.privateKey is set.";
}) cfg.interfaces));
boot.extraModulePackages = [ kernel.wireguard ];
environment.systemPackages = [ pkgs.wireguard-tools ];
systemd.services = mapAttrs' generateUnit cfg.interfaces;
systemd.services = (mapAttrs' generateSetupServiceUnit cfg.interfaces)
// (mapAttrs' generateKeyServiceUnit
(filterAttrs (name: value: value.generatePrivateKeyFile) cfg.interfaces));
systemd.paths = mapAttrs' generatePathUnit
(filterAttrs (name: value: value.privateKeyFile != null) cfg.interfaces);
};

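A short sketch of how the new `generatePrivateKeyFile` option is meant to be used (interface name and key path are examples only; the `wireguard-generated` test added later in this merge exercises the same setup):

```nix
{
  networking.wireguard.interfaces.wg0 = {
    ips = [ "10.0.0.1/24" ];
    listenPort = 51820;
    # Exactly one of privateKey or privateKeyFile must be set (enforced by the
    # new assertion). With generatePrivateKeyFile = true the key is created on
    # first start by the wireguard-wg0-key oneshot unit defined above.
    privateKeyFile = "/etc/wireguard/wg0.key";
    generatePrivateKeyFile = true;
  };
}
```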
View File

@ -31,7 +31,7 @@ let
in {
options.services.kibana = {
enable = mkEnableOption "enable kibana service";
enable = mkEnableOption "kibana service";
listenAddress = mkOption {
description = "Kibana listening host";

View File

@ -11,7 +11,7 @@ in
{
options = {
services.solr = {
enable = mkEnableOption "Enables the solr service.";
enable = mkEnableOption "Solr";
# default to the 8.x series not forcing major version upgrade of those on the 7.x series
package = mkOption {

View File

@ -0,0 +1,17 @@
#!/usr/bin/env bash
# Based on: https://github.com/dani-garcia/bitwarden_rs/wiki/Backing-up-your-vault
if ! mkdir -p "$BACKUP_FOLDER"; then
echo "Could not create backup folder '$BACKUP_FOLDER'" >&2
exit 1
fi
if [[ ! -f "$DATA_FOLDER"/db.sqlite3 ]]; then
echo "Could not find SQLite database file '$DATA_FOLDER/db.sqlite3'" >&2
exit 1
fi
sqlite3 "$DATA_FOLDER"/db.sqlite3 ".backup '$BACKUP_FOLDER/db.sqlite3'"
cp "$DATA_FOLDER"/rsa_key.{der,pem,pub.der} "$BACKUP_FOLDER"
cp -r "$DATA_FOLDER"/attachments "$BACKUP_FOLDER"
cp -r "$DATA_FOLDER"/icon_cache "$BACKUP_FOLDER"

View File

@ -0,0 +1,126 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.bitwarden_rs;
user = config.users.users.bitwarden_rs.name;
group = config.users.groups.bitwarden_rs.name;
# Convert name from camel case (e.g. disable2FARemember) to upper case snake case (e.g. DISABLE_2FA_REMEMBER).
nameToEnvVar = name:
let
parts = builtins.split "([A-Z0-9]+)" name;
partsToEnvVar = parts: foldl' (key: x: let last = stringLength key - 1; in
if isList x then key + optionalString (key != "" && substring last 1 key != "_") "_" + head x
else if key != "" && elem (substring 0 1 x) lowerChars then # to handle e.g. [ "disable" [ "2FAR" ] "emember" ]
substring 0 last key + optionalString (substring (last - 1) 1 key != "_") "_" + substring last 1 key + toUpper x
else key + toUpper x) "" parts;
in if builtins.match "[A-Z0-9_]+" name != null then name else partsToEnvVar parts;
configFile = pkgs.writeText "bitwarden_rs.env" (concatMapStrings (s: s + "\n") (
(concatLists (mapAttrsToList (name: value:
if value != null then [ "${nameToEnvVar name}=${if isBool value then boolToString value else toString value}" ] else []
) cfg.config))));
in {
options.services.bitwarden_rs = with types; {
enable = mkEnableOption "bitwarden_rs";
backupDir = mkOption {
type = nullOr str;
default = null;
description = ''
The directory under which bitwarden_rs will back up its persistent data.
'';
};
config = mkOption {
type = attrsOf (nullOr (either (either bool int) str));
default = {};
example = literalExample ''
{
domain = https://bw.domain.tld:8443;
signupsAllowed = true;
rocketPort = 8222;
rocketLog = "critical";
}
'';
description = ''
The configuration of bitwarden_rs is done through environment variables,
therefore the names are converted from camel case (e.g. disable2FARemember)
to upper case snake case (e.g. DISABLE_2FA_REMEMBER).
In this conversion digits (0-9) are handled just like upper case characters,
so foo2 would be converted to FOO_2.
Names already in this format remain unchanged, so FOO2 remains FOO2 if passed as such,
even though foo2 would have been converted to FOO_2.
This allows working around any potential future conflicting naming conventions.
Based on the attributes passed to this config option, an environment file will be generated
that is passed to bitwarden_rs's systemd service.
The available configuration options can be found in
<link xlink:href="https://github.com/dani-garcia/bitwarden_rs/blob/1.8.0/.env.template">the environment template file</link>.
'';
apply = config: optionalAttrs config.webVaultEnabled {
webVaultFolder = "${pkgs.bitwarden_rs-vault}/share/bitwarden_rs/vault";
} // config;
};
};
config = mkIf cfg.enable {
services.bitwarden_rs.config = {
dataFolder = "/var/lib/bitwarden_rs";
webVaultEnabled = mkDefault true;
};
users.users.bitwarden_rs = { inherit group; };
users.groups.bitwarden_rs = { };
systemd.services.bitwarden_rs = {
after = [ "network.target" ];
path = with pkgs; [ openssl ];
serviceConfig = {
User = user;
Group = group;
EnvironmentFile = configFile;
ExecStart = "${pkgs.bitwarden_rs}/bin/bitwarden_rs";
LimitNOFILE = "1048576";
LimitNPROC = "64";
PrivateTmp = "true";
PrivateDevices = "true";
ProtectHome = "true";
ProtectSystem = "strict";
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
StateDirectory = "bitwarden_rs";
};
wantedBy = [ "multi-user.target" ];
};
systemd.services.backup-bitwarden_rs = mkIf (cfg.backupDir != null) {
description = "Backup bitwarden_rs";
environment = {
DATA_FOLDER = "/var/lib/bitwarden_rs";
BACKUP_FOLDER = cfg.backupDir;
};
path = with pkgs; [ sqlite ];
serviceConfig = {
SyslogIdentifier = "backup-bitwarden_rs";
User = mkDefault user;
Group = mkDefault group;
ExecStart = "${pkgs.bash}/bin/bash ${./backup.sh}";
};
wantedBy = [ "multi-user.target" ];
};
systemd.timers.backup-bitwarden_rs = mkIf (cfg.backupDir != null) {
description = "Backup bitwarden_rs on time";
timerConfig = {
OnCalendar = mkDefault "23:00";
Persistent = "true";
Unit = "backup-bitwarden_rs.service";
};
wantedBy = [ "multi-user.target" ];
};
};
}

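To make the camel-case conversion described above concrete, a hedged example of a `config` attribute set and the environment variable names the module would derive from it (values are illustrative only):

```nix
{
  services.bitwarden_rs = {
    enable = true;
    config = {
      domain = "https://bw.example.com";   # -> DOMAIN
      signupsAllowed = false;              # -> SIGNUPS_ALLOWED
      rocketPort = 8222;                   # -> ROCKET_PORT
      disable2FARemember = true;           # -> DISABLE_2FA_REMEMBER
      SMTP_HOST = "mail.example.com";      # already upper snake case, left unchanged
    };
  };
}
```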
View File

@ -49,21 +49,16 @@ in
path = [ pkgs.munge pkgs.coreutils ];
preStart = ''
chmod 0400 ${cfg.password}
mkdir -p /var/lib/munge -m 0711
chown -R munge:munge /var/lib/munge
mkdir -p /run/munge -m 0755
chown -R munge:munge /run/munge
'';
serviceConfig = {
ExecStartPre = "+${pkgs.coreutils}/bin/chmod 0400 ${cfg.password}";
ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
PIDFile = "/run/munge/munged.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PermissionsStartOnly = "true";
User = "munge";
Group = "munge";
StateDirectory = "munge";
StateDirectoryMode = "0711";
RuntimeDirectory = "munge";
};
};

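The munge change, like several other hunks in this merge, replaces ad-hoc `mkdir`/`chown` in `preStart` plus `PermissionsStartOnly` with systemd's own directory management. A hedged sketch of the pattern for a hypothetical service:

```nix
{
  systemd.services.mydaemon = {            # hypothetical unit
    serviceConfig = {
      User = "mydaemon";
      Group = "mydaemon";
      # systemd creates /var/lib/mydaemon and /run/mydaemon owned by the unit's
      # user, replacing the manual mkdir/chown that used to live in preStart.
      StateDirectory = "mydaemon";
      StateDirectoryMode = "0711";
      RuntimeDirectory = "mydaemon";
      # The "+" prefix runs just this one command with full privileges; it is
      # the replacement for the now-removed PermissionsStartOnly.
      ExecStartPre = "+${pkgs.coreutils}/bin/chmod 0400 /etc/mydaemon/secret";
      ExecStart = "${pkgs.mydaemon}/bin/mydaemon";   # hypothetical package
    };
  };
}
```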
View File

@ -119,6 +119,10 @@ in
};
users.groups.vault.gid = config.ids.gids.vault;
systemd.tmpfiles.rules = optional (cfg.storagePath != null) [
"d '${cfg.storagePath}' 0700 vault vault - -"
];
systemd.services.vault = {
description = "Vault server daemon";
@ -128,14 +132,9 @@ in
restartIfChanged = false; # do not restart on "nixos-rebuild switch". It would seal the storage and disrupt the clients.
preStart = optionalString (cfg.storagePath != null) ''
install -d -m0700 -o vault -g vault "${cfg.storagePath}"
'';
serviceConfig = {
User = "vault";
Group = "vault";
PermissionsStartOnly = true;
ExecStart = "${cfg.package}/bin/vault server -config ${configFile}";
PrivateDevices = true;
PrivateTmp = true;

View File

@ -39,6 +39,10 @@ in {
###### implementation
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.stateDir}' - peerflix - - -"
];
systemd.services.peerflix = {
description = "Peerflix Daemon";
wantedBy = [ "multi-user.target" ];
@ -47,13 +51,11 @@ in {
preStart = ''
mkdir -p "${cfg.stateDir}"/{torrents,.config/peerflix-server}
if [ "$(id -u)" = 0 ]; then chown -R peerflix "${cfg.stateDir}"; fi
ln -fs "${configFile}" "${cfg.stateDir}/.config/peerflix-server/config.json"
'';
serviceConfig = {
ExecStart = "${pkgs.nodePackages.peerflix-server}/bin/peerflix-server";
PermissionsStartOnly = true;
User = "peerflix";
};
};

View File

@ -899,10 +899,6 @@ in
description = "CodiMD Service";
wantedBy = [ "multi-user.target" ];
after = [ "networking.target" ];
preStart = ''
mkdir -p ${cfg.workDir}
chown -R codimd: ${cfg.workDir}
'';
serviceConfig = {
WorkingDirectory = cfg.workDir;
ExecStart = "${pkgs.codimd}/bin/codimd";
@ -912,7 +908,6 @@ in
];
Restart = "always";
User = "codimd";
PermissionsStartOnly = true;
PrivateTmp = true;
};
};

View File

@ -83,6 +83,8 @@ in
users.users."${cfg.user}" = {
isSystemUser = true;
group = cfg.group;
home = cfg.home;
createHome = true;
};
users.groups."${cfg.group}" = {};
@ -104,8 +106,6 @@ in
preStart = ''
mkdir -p ${cfg.home}/nexus3/etc
chown -R ${cfg.user}:${cfg.group} ${cfg.home}
if [ ! -f ${cfg.home}/nexus3/etc/nexus.properties ]; then
echo "# Jetty section" > ${cfg.home}/nexus3/etc/nexus.properties
echo "application-port=${toString cfg.listenPort}" >> ${cfg.home}/nexus3/etc/nexus.properties
@ -124,7 +124,6 @@ in
User = cfg.user;
Group = cfg.group;
PrivateTmp = true;
PermissionsStartOnly = true;
LimitNOFILE = 102642;
};
};

View File

@ -72,19 +72,16 @@ in
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.configDir}' - minio minio - -"
"d '${cfg.dataDir}' - minio minio - -"
];
systemd.services.minio = {
description = "Minio Object Storage";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
# Make sure directories exist with correct owner
mkdir -p ${cfg.configDir}
chown -R minio:minio ${cfg.configDir}
mkdir -p ${cfg.dataDir}
chown minio:minio ${cfg.dataDir}
'';
serviceConfig = {
PermissionsStartOnly = true;
ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --config-dir=${cfg.configDir} ${cfg.dataDir}";
Type = "simple";
User = "minio";

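The minio hunk, like the peerflix, traefik and vault ones, moves directory creation into systemd-tmpfiles. Each rule follows the tmpfiles.d(5) field order `type path mode user group age argument`, with `-` meaning "use the default". A hedged example for a hypothetical daemon:

```nix
{
  systemd.tmpfiles.rules = [
    # type  path                mode  user      group     age  argument
    "d '/var/lib/mydaemon' 0750 mydaemon mydaemon - -"
  ];
}
```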
View File

@ -84,18 +84,16 @@ in {
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 traefik traefik - -"
];
systemd.services.traefik = {
description = "Traefik web server";
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
PermissionsStartOnly = true;
ExecStart = ''${cfg.package.bin}/bin/traefik --configfile=${configFile}'';
ExecStartPre = [
''${pkgs.coreutils}/bin/mkdir -p "${cfg.dataDir}"''
''${pkgs.coreutils}/bin/chmod 700 "${cfg.dataDir}"''
''${pkgs.coreutils}/bin/chown -R traefik:traefik "${cfg.dataDir}"''
];
Type = "simple";
User = "traefik";
Group = cfg.group;

View File

@ -75,7 +75,7 @@ in {
debug = mkEnableOption "gnome-session debug messages";
flashback = {
enableMetacity = mkEnableOption "Enable the standard GNOME Flashback session with Metacity.";
enableMetacity = mkEnableOption "the standard GNOME Flashback session with Metacity";
customSessions = mkOption {
type = types.listOf (types.submodule {

View File

@ -13,6 +13,12 @@ in {
config = mkIf (xcfg.enable && cfg.enable) {
environment.systemPackages = [ pkgs.maxx ];
# there is hardcoded path in binaries
system.activationScripts.setup-maxx = ''
mkdir -p /opt
ln -sfn ${pkgs.maxx}/opt/MaXX /opt
'';
services.xserver.desktopManager.session = [
{ name = "MaXX";
start = ''

View File

@ -20,8 +20,8 @@ in
Whether to enable the dummy "startx" pseudo-display manager,
which allows users to start X manually via the "startx" command
from a vt shell. The X server runs under the user's id, not as root.
The user must provide a ~/.xinintrc file containing session startup
commands, see startx(1). This is not autmatically generated
The user must provide a ~/.xinitrc file containing session startup
commands, see startx(1). This is not automatically generated
from the desktopManager and windowManager settings.
'';
};

View File

@ -33,7 +33,7 @@ in
description = "Authentication to use when connecting to xpra";
};
pulseaudio = mkEnableOption "pulseaudio audio streaming.";
pulseaudio = mkEnableOption "pulseaudio audio streaming";
extraOptions = mkOption {
description = "Extra xpra options";

View File

@ -555,7 +555,7 @@ echo /sbin/modprobe > /proc/sys/kernel/modprobe
# Start stage 2. `switch_root' deletes all files in the ramfs on the
# current root. Note that $stage2Init might be an absolute symlink,
# in which case "-e" won't work because we're not in the chroot yet.
if ! test -e "$targetRoot/$stage2Init" -o ! -L "$targetRoot/$stage2Init"; then
if [ ! -e "$targetRoot/$stage2Init" ] && [ ! -L "$targetRoot/$stage2Init" ] ; then
echo "stage 2 init script ($targetRoot/$stage2Init) not found"
fail
fi

View File

@ -32,7 +32,7 @@ let
fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
# A utility for enumerating the shared-library dependencies of a program
findLibs = pkgs.writeShellScriptBin "find-libs" ''
findLibs = pkgs.buildPackages.writeShellScriptBin "find-libs" ''
set -euo pipefail
declare -A seen

View File

@ -19,7 +19,7 @@ in
# One could also do regular btrfs balances, but that shouldn't be necessary
# during normal usage and as long as the filesystems aren't filled near capacity
services.btrfs.autoScrub = {
enable = mkEnableOption "Enable regular btrfs scrub";
enable = mkEnableOption "regular btrfs scrub";
fileSystems = mkOption {
type = types.listOf types.path;

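A minimal example of turning the scrub job on for the root file system; the description change above only affects the rendered option text, not usage:

```nix
{
  services.btrfs.autoScrub = {
    enable = true;
    fileSystems = [ "/" ];   # btrfs mount points to scrub
  };
}
```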
View File

@ -274,5 +274,22 @@ let self = {
"18.09".sa-east-1.hvm-ebs = "ami-0e4a8a47fd6db6112";
"18.09".ap-south-1.hvm-ebs = "ami-0880a678d3f555313";
latest = self."18.09";
# 19.03.172286.8ea36d73256
"19.03".eu-west-1.hvm-ebs = "ami-0fe40176548ff0940";
"19.03".eu-west-2.hvm-ebs = "ami-03a40fd3a02fe95ba";
"19.03".eu-west-3.hvm-ebs = "ami-0436f9da0f20a638e";
"19.03".eu-central-1.hvm-ebs = "ami-0022b8ea9efde5de4";
"19.03".us-east-1.hvm-ebs = "ami-0efc58fb70ae9a217";
"19.03".us-east-2.hvm-ebs = "ami-0abf711b1b34da1af";
"19.03".us-west-1.hvm-ebs = "ami-07d126e8838c40ec5";
"19.03".us-west-2.hvm-ebs = "ami-03f8a737546e47fb0";
"19.03".ca-central-1.hvm-ebs = "ami-03f9fd0ef2e035ede";
"19.03".ap-southeast-1.hvm-ebs = "ami-0cff66114c652c262";
"19.03".ap-southeast-2.hvm-ebs = "ami-054c73a7f8d773ea9";
"19.03".ap-northeast-1.hvm-ebs = "ami-00db62688900456a4";
"19.03".ap-northeast-2.hvm-ebs = "ami-0485cdd1a5fdd2117";
"19.03".sa-east-1.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
"19.03".ap-south-1.hvm-ebs = "ami-0303deb1b5890f878";
latest = self."19.03";
}; in self

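The attribute set keeps `latest` pointing at the newest release, so consumers can look entries up either by release number or via `latest`. A hedged sketch, assuming the file is imported as `amis` (the actual import path is not shown in this hunk):

```nix
let
  amis = import ./ec2-amis.nix;               # hypothetical relative path
in {
  pinned = amis."19.03".eu-west-1.hvm-ebs;    # "ami-0fe40176548ff0940"
  newest = amis.latest.us-east-1.hvm-ebs;     # same as amis."19.03".us-east-1.hvm-ebs
}
```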
View File

@ -100,6 +100,11 @@ in {
boot.growPartition = true;
boot.loader.grub.device = "/dev/sda";
swapDevices = [{
device = "/var/swap";
size = 2048;
}];
virtualisation.virtualbox.guest.enable = true;
};

View File

@ -23,6 +23,7 @@ in
{
acme = handleTestOn ["x86_64-linux"] ./acme.nix {};
atd = handleTest ./atd.nix {};
automysqlbackup = handleTest ./automysqlbackup.nix {};
avahi = handleTest ./avahi.nix {};
bcachefs = handleTestOn ["x86_64-linux"] ./bcachefs.nix {}; # linux-4.18.2018.10.12 is unsupported on aarch64
beanstalkd = handleTest ./beanstalkd.nix {};
@ -143,6 +144,7 @@ in
misc = handleTest ./misc.nix {};
mongodb = handleTest ./mongodb.nix {};
morty = handleTest ./morty.nix {};
mosquitto = handleTest ./mosquitto.nix {};
mpd = handleTest ./mpd.nix {};
mumble = handleTest ./mumble.nix {};
munin = handleTest ./munin.nix {};
@ -237,6 +239,7 @@ in
vault = handleTest ./vault.nix {};
virtualbox = handleTestOn ["x86_64-linux"] ./virtualbox.nix {};
wireguard = handleTest ./wireguard {};
wireguard-generated = handleTest ./wireguard/generated.nix {};
wordpress = handleTest ./wordpress.nix {};
xautolock = handleTest ./xautolock.nix {};
xdg-desktop-portal = handleTest ./xdg-desktop-portal.nix {};

View File

@ -0,0 +1,34 @@
import ./make-test.nix ({ pkgs, lib, ... }:
{
name = "automysqlbackup";
meta.maintainers = [ lib.maintainers.aanderse ];
machine =
{ pkgs, ... }:
{
services.mysql.enable = true;
services.mysql.package = pkgs.mysql;
services.mysql.initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
services.automysqlbackup.enable = true;
};
testScript = ''
startAll;
# Need to have mysql started so that it can be populated with data.
$machine->waitForUnit("mysql.service");
# Wait for testdb to be fully populated (5 rows).
$machine->waitUntilSucceeds("mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5");
# Do a backup and wait for it to start
$machine->startJob("automysqlbackup.service");
$machine->waitForJob("automysqlbackup.service");
# wait for backup file and check that data appears in backup
$machine->waitForFile("/var/backup/mysql/daily/testdb");
$machine->succeed("${pkgs.gzip}/bin/zcat /var/backup/mysql/daily/testdb/daily_testdb_*.sql.gz | grep hello");
'';
})

69
nixos/tests/mosquitto.nix Normal file
View File

@ -0,0 +1,69 @@
import ./make-test.nix ({ pkgs, ... }:
let
port = 1888;
username = "mqtt";
password = "VERY_secret";
topic = "test/foo";
cmd = bin: pkgs.lib.concatStringsSep " " [
"${pkgs.mosquitto}/bin/mosquitto_${bin}"
"-V mqttv311"
"-h server"
"-p ${toString port}"
"-u ${username}"
"-P '${password}'"
"-t ${topic}"
];
in rec {
name = "mosquitto";
meta = with pkgs.stdenv.lib; {
maintainers = with maintainers; [ peterhoeg ];
};
nodes = let
client = { pkgs, ... }: {
environment.systemPackages = with pkgs; [ mosquitto ];
};
in {
server = { pkgs, ... }: {
networking.firewall.allowedTCPPorts = [ port ];
services.mosquitto = {
inherit port;
enable = true;
host = "0.0.0.0";
checkPasswords = true;
users."${username}" = {
inherit password;
acl = [
"topic readwrite ${topic}"
];
};
};
};
client1 = client;
client2 = client;
};
testScript = let
file = "/tmp/msg";
payload = "wootWOOT";
in ''
startAll;
$server->waitForUnit("mosquitto.service");
$server->fail("test -f ${file}");
$server->execute("(${cmd "sub"} -C 1 | tee ${file} &)");
$client1->fail("test -f ${file}");
$client1->execute("(${cmd "sub"} -C 1 | tee ${file} &)");
$client2->succeed("${cmd "pub"} -m ${payload}");
$server->succeed("grep -q ${payload} ${file}");
$client1->succeed("grep -q ${payload} ${file}");
'';
})

View File

@ -1,18 +1,19 @@
# verifies:
# 1. nginx generates config file with shared http context definitions above
# generated virtual hosts config.
# 2. whether the ETag header is properly generated whenever we're serving
# files in Nix store paths
import ./make-test.nix ({ pkgs, ...} : {
import ./make-test.nix ({ pkgs, ... }: {
name = "nginx";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ mbbx6spp ];
};
nodes = {
webserver =
{ ... }:
{ services.nginx.enable = true;
services.nginx.commonHttpConfig = ''
nodes = let
commonConfig = { pkgs, ... }: {
services.nginx.enable = true;
services.nginx.commonHttpConfig = ''
log_format ceeformat '@cee: {"status":"$status",'
'"request_time":$request_time,'
'"upstream_response_time":$upstream_response_time,'
@ -24,20 +25,61 @@ import ./make-test.nix ({ pkgs, ...} : {
'"request":"$request",'
'"http_referer":"$http_referer",'
'"upstream_addr":"$upstream_addr"}';
'';
services.nginx.virtualHosts."0.my.test" = {
extraConfig = ''
access_log syslog:server=unix:/dev/log,facility=user,tag=mytag,severity=info ceeformat;
location /favicon.ico { allow all; access_log off; log_not_found off; }
'';
services.nginx.virtualHosts."0.my.test" = {
extraConfig = ''
access_log syslog:server=unix:/dev/log,facility=user,tag=mytag,severity=info ceeformat;
location /favicon.ico { allow all; access_log off; log_not_found off; }
'';
};
};
services.nginx.virtualHosts.localhost = {
root = pkgs.runCommand "testdir" {} ''
mkdir "$out"
echo hello world > "$out/index.html"
'';
};
};
in {
webserver = commonConfig;
newwebserver = { pkgs, lib, ... }: {
imports = [ commonConfig ];
services.nginx.virtualHosts.localhost = {
root = lib.mkForce (pkgs.runCommand "testdir2" {} ''
mkdir "$out"
echo hello world > "$out/index.html"
'');
};
};
};
testScript = ''
startAll;
testScript = { nodes, ... }: let
newServerSystem = nodes.newwebserver.config.system.build.toplevel;
switch = "${newServerSystem}/bin/switch-to-configuration test";
in ''
my $url = 'http://localhost/index.html';
sub checkEtag {
my $etag = $webserver->succeed(
'curl -v '.$url.' 2>&1 | sed -n -e "s/^< [Ee][Tt][Aa][Gg]: *//p"'
);
$etag =~ s/\r?\n$//;
my $httpCode = $webserver->succeed(
'curl -w "%{http_code}" -X HEAD -H \'If-None-Match: '.$etag.'\' '.$url
);
chomp $httpCode;
die "HTTP code is not 304" unless $httpCode == 304;
return $etag;
}
$webserver->waitForUnit("nginx");
$webserver->waitForOpenPort("80");
subtest "check ETag if serving Nix store paths", sub {
my $oldEtag = checkEtag;
$webserver->succeed('${switch}');
my $newEtag = checkEtag;
die "Old ETag $oldEtag is the same as $newEtag" if $oldEtag eq $newEtag;
};
'';
})

View File

@ -0,0 +1,57 @@
import ../make-test.nix ({ pkgs, ...} : {
name = "wireguard-generated";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ ma27 grahamc ];
};
nodes = {
peer1 = {
networking.firewall.allowedUDPPorts = [ 12345 ];
networking.wireguard.interfaces.wg0 = {
ips = [ "10.10.10.1/24" ];
listenPort = 12345;
privateKeyFile = "/etc/wireguard/private";
generatePrivateKeyFile = true;
};
};
peer2 = {
networking.firewall.allowedUDPPorts = [ 12345 ];
networking.wireguard.interfaces.wg0 = {
ips = [ "10.10.10.2/24" ];
listenPort = 12345;
privateKeyFile = "/etc/wireguard/private";
generatePrivateKeyFile = true;
};
};
};
testScript = ''
startAll;
$peer1->waitForUnit("wireguard-wg0.service");
$peer2->waitForUnit("wireguard-wg0.service");
my ($retcode, $peer1pubkey) = $peer1->execute("wg pubkey < /etc/wireguard/private");
$peer1pubkey =~ s/\s+$//;
if ($retcode != 0) {
die "Could not read public key from peer1";
}
my ($retcode, $peer2pubkey) = $peer2->execute("wg pubkey < /etc/wireguard/private");
$peer2pubkey =~ s/\s+$//;
if ($retcode != 0) {
die "Could not read public key from peer2";
}
$peer1->succeed("wg set wg0 peer $peer2pubkey allowed-ips 10.10.10.2/32 endpoint 192.168.1.2:12345 persistent-keepalive 1");
$peer1->succeed("ip route replace 10.10.10.2/32 dev wg0 table main");
$peer2->succeed("wg set wg0 peer $peer1pubkey allowed-ips 10.10.10.1/32 endpoint 192.168.1.1:12345 persistent-keepalive 1");
$peer2->succeed("ip route replace 10.10.10.1/32 dev wg0 table main");
$peer1->succeed("ping -c1 10.10.10.2");
$peer2->succeed("ping -c1 10.10.10.1");
'';
})

View File

@ -1,8 +1,9 @@
{ stdenv, buildGoPackage, fetchFromGitHub, libobjc, IOKit }:
{ stdenv, buildGoPackage, fetchFromGitHub, libobjc, IOKit, fetchpatch }:
buildGoPackage rec {
name = "go-ethereum-${version}";
version = "1.8.26";
pname = "go-ethereum";
version = "1.8.27";
goPackagePath = "github.com/ethereum/go-ethereum";
# Fix for usb-related segmentation faults on darwin
@ -12,11 +13,22 @@ buildGoPackage rec {
# Fixes Cgo related build failures (see https://github.com/NixOS/nixpkgs/issues/25959 )
hardeningDisable = [ "fortify" ];
# Apply ethereum/go-ethereum#19183 to fix the aarch64 build failure.
#
# TODO Remove this patch when upstream (https://github.com/ethereum/go-ethereum)
# fixes this problem in a future release.
patches = [
(fetchpatch {
url = "https://github.com/ethereum/go-ethereum/commit/39bd2609.patch";
sha256 = "1a362hzvcjk505hicv25kziy3c6s5an4j7rk4jibcxwgvygb3mz5";
})
];
src = fetchFromGitHub {
owner = "ethereum";
repo = "go-ethereum";
repo = pname;
rev = "v${version}";
sha256 = "0i7shrwix5j8l5i0ap5pzhninwyk2kvm1pax27pnnjlpam8577i4";
sha256 = "1640y7lqy7bvjjgx6wp0cnbw632ls5fj4ixclr819lfz4p5dfhx1";
};
meta = with stdenv.lib; {

View File

@ -1,6 +1,6 @@
let
version = "2.3.2";
sha256 = "1063n7lkcfkywi0a06pxkw0wkq3qyq4lr53fv584mlbnh2hj8gpm";
cargoSha256 = "1pj5hzy7k1l9bbw1qpz80vvk89qz4qz4rnnkcvn2rkbmq382gxwy";
version = "2.5.0";
sha256 = "1dsckybjg2cvrvcs1bya03xymcm0whfxcb1v0vljn5pghyazgvhx";
cargoSha256 = "0z7dmzpqg0qnkga7r4ykwrvz8ds1k9ik7cx58h2vnmhrhrddvizr";
in
import ./parity.nix { inherit version sha256 cargoSha256; }

View File

@ -1,6 +1,6 @@
let
version = "2.2.9";
sha256 = "0n9zk25ni4asfdqc4xh0gqp2446vxacqz7qcrmsngf8swvayvi16";
cargoSha256 = "10lg0vzikzlj927hpn59x1dz9dvhcaqsl8nz14vj2iz42vfkcm7p";
version = "2.4.5";
sha256 = "02ajwjw6cz86x6zybvw5l0pgv7r370hickjv9ja141w7bhl70q3v";
cargoSha256 = "1n218c43gf200xlb3q03bd6w4kas0jsqx6ciw9s6h7h18wwibvf1";
in
import ./parity.nix { inherit version sha256 cargoSha256; }

View File

@ -22,7 +22,7 @@ rustPlatform.buildRustPackage rec {
meta = with stdenv.lib; {
description = "Polkadot Node Implementation";
homepage = http://polkadot.network;
homepage = https://polkadot.network;
license = licenses.gpl3;
maintainers = [ maintainers.akru ];
platforms = platforms.linux;

View File

@ -55,7 +55,7 @@ stdenv.mkDerivation {
meta = with stdenv.lib; {
description = "ReplayGain for AAC files";
homepage = https://github.com/mulx/aacgain;
homepage = https://aacgain.altosdesign.com;
license = licenses.gpl2;
platforms = platforms.linux;
maintainers = [ maintainers.robbinch ];

View File

@ -1,5 +1,5 @@
{ stdenv, fetchurl
, pkgconfig, intltool, gnome3
, pkgconfig, intltool
, glib, dbus, gtk3, libappindicator-gtk3, gst_all_1
, librsvg, wrapGAppsHook
, pulseaudioSupport ? true, libpulseaudio ? null }:
@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
};
# https://bugs.launchpad.net/audio-recorder/+bug/1784622
NIX_CFLAGS_COMPILE = "-I${gnome3.glib.dev}/include/gio-unix-2.0";
NIX_CFLAGS_COMPILE = "-I${glib.dev}/include/gio-unix-2.0";
nativeBuildInputs = [ pkgconfig intltool wrapGAppsHook ];

View File

@ -2,11 +2,11 @@
pythonPackages.buildPythonApplication rec {
pname = "Mopidy-Iris";
version = "3.33.0";
version = "3.36.0";
src = pythonPackages.fetchPypi {
inherit pname version;
sha256 = "0g00rjkmsnza4gjjdm0cwrpw3gqvmjj58157dvrh7f8k7j0gdvdm";
sha256 = "1qxb3rfjxmwihcm0nrarrgp9x7zr3kjipzn5igj0d57gpi2bdwgv";
};
propagatedBuildInputs = [

View File

@ -1,5 +1,5 @@
{ fetchurl, stdenv, squashfsTools, xorg, alsaLib, makeWrapper, openssl, freetype
, glib, pango, cairo, atk, gdk_pixbuf, gtk2, cups, nspr, nss, libpng
, glib, pango, cairo, atk, gdk_pixbuf, gtk2, cups, nspr, nss, libpng, libnotify
, libgcrypt, systemd, fontconfig, dbus, expat, ffmpeg_3, curl, zlib, gnome3
, at-spi2-atk
}:
@ -36,6 +36,7 @@ let
glib
gtk2
libgcrypt
libnotify
libpng
nss
pango

View File

@ -39,12 +39,11 @@
let
drvName = "android-studio-${channel}-${version}";
archiveFormat = if builtins.elem channel [ "dev" "canary" ] then "tar.gz" else "zip";
androidStudio = stdenv.mkDerivation {
name = drvName;
src = fetchurl {
url = "https://dl.google.com/dl/android/studio/ide-zips/${version}/android-studio-ide-${build}-linux.${archiveFormat}";
url = "https://dl.google.com/dl/android/studio/ide-zips/${version}/android-studio-ide-${build}-linux.tar.gz";
sha256 = sha256Hash;
};

View File

@ -8,19 +8,15 @@ let
inherit (gnome2) GConf gnome_vfs;
};
stableVersion = {
version = "3.3.2.0"; # "Android Studio 3.3.2"
build = "182.5314842";
sha256Hash = "0smh3d3v8n0isxg7fkls20622gp52f58i2b6wa4a0g8wnvmd6mw2";
};
betaVersion = {
version = "3.4.0.17"; # "Android Studio 3.4 RC 3"
build = "183.5400832";
sha256Hash = "1v4apc73jdhavhzj8j46mzh15rw08w1hd9y9ykarj3b5q7i2vyq1";
version = "3.4.0.18"; # "Android Studio 3.4.0"
build = "183.5452501";
sha256Hash = "0i8wz9v6nxzr27a07cv2330i84v94pcl13gjwvpglp55hyzd8axd";
};
betaVersion = stableVersion;
latestVersion = { # canary & dev
version = "3.5.0.10"; # "Android Studio 3.5 Canary 11"
build = "191.5455988";
sha256Hash = "1g24a8fwnrfzdf093wdmqly3mzjddk5ndgi51qj98amn7kclsdpf";
version = "3.5.0.11"; # "Android Studio 3.5 Canary 12"
build = "191.5471097";
sha256Hash = "1dz9iy8f12fzqp8wv9c5v01d33djy97aha8rxxp18vi6myak42ca";
};
in rec {
# Attributes are named by their corresponding release channels

View File

@ -3,14 +3,14 @@
let
versions = {
atom = {
version = "1.34.0";
sha256 = "16hrjymrc43izg7frcrk7cwjwwrclcxzcwb5iw2llzjc6iadzlkb";
version = "1.36.0";
sha256 = "1ljg39h5xjigk2njvxyinb1gd3sbja21v47c7va6vl9hjr5xb3fr";
};
atom-beta = {
version = "1.35.0";
version = "1.37.0";
beta = 0;
sha256 = "0gm5k573dq1hhnyw3719f5k1c6rsz872mhzg8q53n89y0g2r5xmw";
sha256 = "0aq8r5vfgq7r31qajjgcg4n5a57a2m8fvq6fzy9vq5gawkvmaxxx";
};
};

Some files were not shown because too many files have changed in this diff.