Mirror of https://github.com/ilyakooo0/nixpkgs.git (synced 2024-12-28 06:14:26 +03:00)
Merge branch 'master' into staging-next

Comments on conflicts:
- llvm: d6f401e1 vs. 469ecc70 - docs for 6 and 7 say the default is to build all targets, so we should be fine
- some pypi hashes: they were equivalent, just base16 vs. base32
Commit 5effa4e0f9
@ -11,7 +11,10 @@
|
||||
<xi:include href="functions/overrides.xml" />
|
||||
<xi:include href="functions/generators.xml" />
|
||||
<xi:include href="functions/debug.xml" />
|
||||
<xi:include href="functions/fetchers.xml" />
|
||||
<xi:include href="functions/trivial-builders.xml" />
|
||||
<xi:include href="functions/fhs-environments.xml" />
|
||||
<xi:include href="functions/shell.xml" />
|
||||
<xi:include href="functions/dockertools.xml" />
|
||||
<xi:include href="functions/prefer-remote-fetch.xml" />
|
||||
</chapter>
|
||||
|
@ -24,7 +24,7 @@
|
||||
|
||||
<para>
|
||||
This function is analogous to the <command>docker build</command> command,
|
||||
in that can used to build a Docker-compatible repository tarball containing
|
||||
in that it can be used to build a Docker-compatible repository tarball containing
|
||||
a single image with one or multiple layers. As such, the result is suitable
|
||||
for being loaded in Docker with <command>docker load</command>.
|
||||
</para>
|
||||
@ -190,11 +190,11 @@ buildImage {
|
||||
By default <function>buildImage</function> will use a static date of one
|
||||
second past the UNIX Epoch. This allows <function>buildImage</function> to
|
||||
produce binary reproducible images. When listing images with
|
||||
<command>docker list images</command>, the newly created images will be
|
||||
<command>docker images</command>, the newly created images will be
|
||||
listed like this:
|
||||
</para>
|
||||
<screen><![CDATA[
|
||||
$ docker image list
|
||||
$ docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
hello latest 08c791c7846e 48 years ago 25.2MB
|
||||
]]></screen>
|
||||
@ -217,7 +217,7 @@ pkgs.dockerTools.buildImage {
|
||||
and now the Docker CLI will display a reasonable date and sort the images
|
||||
as expected:
|
||||
<screen><![CDATA[
|
||||
$ docker image list
|
||||
$ docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
hello latest de2bf4786de6 About a minute ago 25.2MB
|
||||
]]></screen>
|
||||
@ -402,7 +402,7 @@ pkgs.dockerTools.buildLayeredImage {
|
||||
|
||||
<para>
|
||||
This function is analogous to the <command>docker pull</command> command, in
|
||||
that can be used to pull a Docker image from a Docker registry. By default
|
||||
that it can be used to pull a Docker image from a Docker registry. By default
|
||||
<link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull
|
||||
images.
|
||||
</para>
|
||||
@ -484,7 +484,7 @@ sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
|
||||
|
||||
<para>
|
||||
This function is analogous to the <command>docker export</command> command,
|
||||
in that can used to flatten a Docker image that contains multiple layers. It
|
||||
in that it can be used to flatten a Docker image that contains multiple layers. It
|
||||
is in fact the result of the merge of all the layers of the image. As such,
|
||||
the result is suitable for being imported in Docker with <command>docker
|
||||
import</command>.
|
||||
@ -557,7 +557,7 @@ buildImage {
|
||||
|
||||
<para>
|
||||
Creating base files like <literal>/etc/passwd</literal> or
|
||||
<literal>/etc/login.defs</literal> are necessary for shadow-utils to
|
||||
<literal>/etc/login.defs</literal> is necessary for shadow-utils to
|
||||
manipulate users and groups.
|
||||
</para>
|
||||
</section>
|
||||
|
doc/functions/fetchers.xml (new file, 206 lines)
@ -0,0 +1,206 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xml:id="sec-pkgs-fetchers">
|
||||
<title>Fetcher functions</title>
|
||||
|
||||
<para>
|
||||
When using Nix, you will frequently need to download source code
|
||||
and other files from the internet. Nixpkgs comes with a few helper
|
||||
functions that allow you to fetch fixed-output derivations in a
|
||||
structured way.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The two fetcher primitives are <function>fetchurl</function> and
|
||||
<function>fetchzip</function>. Both of these have two required
|
||||
arguments, a URL and a hash. The hash is typically
|
||||
<literal>sha256</literal>, although many more hash algorithms are
|
||||
supported. Nixpkgs contributors are currently recommended to use
|
||||
<literal>sha256</literal>. This hash will be used by Nix to
|
||||
identify your source. A typical usage of fetchurl is provided
|
||||
below.
|
||||
</para>
|
||||
|
||||
<programlisting><![CDATA[
|
||||
{ stdenv, fetchurl }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "hello";
|
||||
src = fetchurl {
|
||||
url = "http://www.example.org/hello.tar.gz";
|
||||
sha256 = "1111111111111111111111111111111111111111111111111111";
|
||||
};
|
||||
}
|
||||
]]></programlisting>
|
||||
|
||||
<para>
|
||||
The main difference between <function>fetchurl</function> and
|
||||
<function>fetchzip</function> is in how they store the contents.
|
||||
<function>fetchurl</function> will store the unaltered contents of
|
||||
the URL within the Nix store. <function>fetchzip</function> on the
|
||||
other hand will decompress the archive for you, making files and
|
||||
directories directly accessible in the future.
|
||||
<function>fetchzip</function> can only be used with archives.
|
||||
Despite the name, <function>fetchzip</function> is not limited to
|
||||
.zip files and can also be used with any tarball.
|
||||
</para>
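<para>
A minimal sketch of <function>fetchzip</function>, assuming a hypothetical
URL and a placeholder hash; note that the hash covers the unpacked
directory rather than the archive itself.
</para>
<programlisting><![CDATA[
{ stdenv, fetchzip }:

stdenv.mkDerivation {
  name = "hello";
  # Hypothetical URL; sha256 is the hash of the unpacked tree, not the tarball.
  src = fetchzip {
    url = "http://www.example.org/hello.tar.gz";
    sha256 = "1111111111111111111111111111111111111111111111111111";
  };
}
]]></programlisting>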
|
||||
|
||||
<para>
|
||||
<function>fetchpatch</function> works very similarly to
|
||||
<function>fetchurl</function> with the same arguments expected. It
|
||||
expects patch files as a source and performs normalization on
|
||||
them before computing the checksum. For example, it will remove
|
||||
comments or other unstable parts that are sometimes added by
|
||||
version control systems and can change over time.
|
||||
</para>
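<para>
As a sketch, <function>fetchpatch</function> is typically used inside a
package's <literal>patches</literal> list; the commit URL and hash below
are placeholders.
</para>
<programlisting><![CDATA[
patches = [
  (fetchpatch {
    # Hypothetical upstream commit; the hash is computed over the normalized patch.
    url = "https://github.com/example/project/commit/0123456789abcdef.patch";
    sha256 = "1111111111111111111111111111111111111111111111111111";
  })
];
]]></programlisting>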
|
||||
|
||||
<para>
|
||||
Other fetcher functions allow you to add source code directly from
|
||||
a VCS such as Subversion or Git. These are mostly straightforward
|
||||
names based on the name of the command used with the VCS system.
|
||||
Because they give you a working repository, they act most like
|
||||
<function>fetchzip</function>.
|
||||
</para>
|
||||
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchsvn</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Used with Subversion. Expects <literal>url</literal> to a
|
||||
Subversion directory, <literal>rev</literal>, and
|
||||
<literal>sha256</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchgit</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Used with Git. Expects <literal>url</literal> to a Git repo,
|
||||
<literal>rev</literal>, and <literal>sha256</literal>.
|
||||
<literal>rev</literal> in this case can be the full Git commit
|
||||
id (SHA1 hash) or a tag name like
|
||||
<literal>refs/tags/v1.0</literal>. An example is shown after this list.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchfossil</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Used with Fossil. Expects <literal>url</literal> to a Fossil
|
||||
archive, <literal>rev</literal>, and <literal>sha256</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchcvs</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Used with CVS. Expects <literal>cvsRoot</literal>,
|
||||
<literal>tag</literal>, and <literal>sha256</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchhg</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Used with Mercurial. Expects <literal>url</literal>,
|
||||
<literal>rev</literal>, and <literal>sha256</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
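<para>
As referenced above, here is a sketch of a <function>fetchgit</function>
call; the repository URL, revision and hash are placeholders.
</para>
<programlisting><![CDATA[
src = fetchgit {
  url = "https://example.org/project.git";
  rev = "refs/tags/v1.0";
  sha256 = "1111111111111111111111111111111111111111111111111111";
};
]]></programlisting>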
|
||||
|
||||
<para>
|
||||
A number of fetcher functions wrap part of
|
||||
<function>fetchurl</function> and <function>fetchzip</function>.
|
||||
They are mainly convenience functions intended for commonly used
|
||||
destinations of source code in Nixpkgs. These wrapper fetchers are
|
||||
listed below.
|
||||
</para>
|
||||
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchFromGitHub</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
<function>fetchFromGitHub</function> expects four arguments.
|
||||
<literal>owner</literal> is a string corresponding to the
|
||||
GitHub user or organization that controls this repository.
|
||||
<literal>repo</literal> corresponds to the name of the
|
||||
software repository. These are located at the top of every
|
||||
GitHub HTML page as
|
||||
<literal>owner</literal>/<literal>repo</literal>.
|
||||
<literal>rev</literal> corresponds to the Git commit hash or
|
||||
tag (e.g. <literal>v1.0</literal>) that will be downloaded from
|
||||
Git. Finally, <literal>sha256</literal> corresponds to the
|
||||
hash of the extracted directory. Again, other hash algorithms
|
||||
are also available but <literal>sha256</literal> is currently
|
||||
preferred. A usage example is given after this list.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchFromGitLab</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is used with GitLab repositories. The arguments expected
|
||||
are very similar to fetchFromGitHub above.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchFromBitbucket</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is used with Bitbucket repositories. The arguments expected
|
||||
are very similar to fetchFromGitHub above.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchFromSavannah</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is used with Savannah repositories. The arguments expected
|
||||
are very similar to fetchFromGitHub above.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>fetchFromRepoOrCz</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is used with repo.or.cz repositories. The arguments
|
||||
expected are very similar to fetchFromGitHub above.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
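<para>
A typical <function>fetchFromGitHub</function> call looks like the sketch
below; the owner, repo, rev and hash values are placeholders.
</para>
<programlisting><![CDATA[
src = fetchFromGitHub {
  owner = "example-owner";
  repo = "example-repo";
  rev = "v1.0";
  sha256 = "1111111111111111111111111111111111111111111111111111";
};
]]></programlisting>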
|
||||
|
||||
|
||||
</section>
|
doc/functions/prefer-remote-fetch.xml (new file, 27 lines)
@ -0,0 +1,27 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/xinclude"
|
||||
xml:id="sec-prefer-remote-fetch">
|
||||
<title>prefer-remote-fetch overlay</title>
|
||||
|
||||
<para>
|
||||
<function>prefer-remote-fetch</function> is an overlay that downloads sources
|
||||
on remote builders. This is useful when the evaluating machine has a slow
|
||||
upload while the builder can fetch faster directly from the source.
|
||||
To use it, put the following snippet as a new overlay:
|
||||
<programlisting>
|
||||
self: super:
|
||||
(super.prefer-remote-fetch self super)
|
||||
</programlisting>
|
||||
|
||||
A full configuration example that sets up the overlay for your own account
|
||||
could look like this:
|
||||
|
||||
<programlisting>
|
||||
$ mkdir ~/.config/nixpkgs/overlays/
|
||||
$ cat > ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix <<EOF
|
||||
self: super: super.prefer-remote-fetch self super
|
||||
EOF
|
||||
</programlisting>
|
||||
</para>
|
||||
</section>
|
doc/functions/trivial-builders.xml (new file, 124 lines)
@ -0,0 +1,124 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xml:id="sec-trivial-builders">
|
||||
<title>Trivial builders</title>
|
||||
|
||||
<para>
|
||||
Nixpkgs provides a couple of functions that help with building
|
||||
derivations. The most important one,
|
||||
<function>stdenv.mkDerivation</function>, has already been
|
||||
documented above. The following functions wrap
|
||||
<function>stdenv.mkDerivation</function>, making it easier to use
|
||||
in certain cases.
|
||||
</para>
|
||||
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>runCommand</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This takes three arguments, <literal>name</literal>,
|
||||
<literal>env</literal>, and <literal>buildCommand</literal>.
|
||||
<literal>name</literal> is just the name that Nix will append
|
||||
to the store path in the same way that
|
||||
<literal>stdenv.mkDerivation</literal> uses its
|
||||
<literal>name</literal> attribute. <literal>env</literal> is an
|
||||
attribute set specifying environment variables that will be set
|
||||
for this derivation. These attributes are then passed to the
|
||||
wrapped <literal>stdenv.mkDerivation</literal>.
|
||||
<literal>buildCommand</literal> specifies the commands that
|
||||
will be run to create this derivation. Note that you will need
|
||||
to create <literal>$out</literal> for Nix to register the
|
||||
command as successful.
|
||||
</para>
|
||||
<para>
|
||||
An example of using <literal>runCommand</literal> is provided
|
||||
below.
|
||||
</para>
|
||||
<programlisting>
|
||||
(import <nixpkgs> {}).runCommand "my-example" {} ''
|
||||
echo My example command is running
|
||||
|
||||
mkdir $out
|
||||
|
||||
echo I can write data to the Nix store > $out/message
|
||||
|
||||
echo I can also run basic commands like:
|
||||
|
||||
echo ls
|
||||
ls
|
||||
|
||||
echo whoami
|
||||
whoami
|
||||
|
||||
echo date
|
||||
date
|
||||
''
|
||||
</programlisting>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>runCommandCC</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This works just like <literal>runCommand</literal>. The only
|
||||
difference is that it also provides a C compiler in
|
||||
<literal>buildCommand</literal>’s environment. To minimize your
|
||||
dependencies, you should only use this if you are sure you will
|
||||
need a C compiler as part of running your command.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>writeTextFile</literal>, <literal>writeText</literal>,
|
||||
<literal>writeTextDir</literal>, <literal>writeScript</literal>,
|
||||
<literal>writeScriptBin</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
These functions write <literal>text</literal> to the Nix store.
|
||||
This is useful for creating scripts from Nix expressions.
|
||||
<literal>writeTextFile</literal> takes an attribute set and
|
||||
expects two arguments, <literal>name</literal> and
|
||||
<literal>text</literal>. <literal>name</literal> corresponds to
|
||||
the name used in the Nix store path. <literal>text</literal>
|
||||
will be the contents of the file. You can also set
|
||||
<literal>executable</literal> to true to make this file have
|
||||
the executable bit set.
|
||||
</para>
|
||||
<para>
|
||||
Several other functions wrap <literal>writeTextFile</literal>,
|
||||
including <literal>writeText</literal>,
|
||||
<literal>writeTextDir</literal>,
|
||||
<literal>writeScript</literal>, and
|
||||
<literal>writeScriptBin</literal>. These are convenience
|
||||
functions over <literal>writeTextFile</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>symlinkJoin</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This can be used to put many derivations into the same directory
|
||||
structure. It works by creating a new derivation and adding
|
||||
symlinks to each of the paths listed. It expects two arguments,
|
||||
<literal>name</literal>, and <literal>paths</literal>.
|
||||
<literal>name</literal> is the name used in the Nix store path
|
||||
for the created derivation. <literal>paths</literal> is a list of
|
||||
paths that will be symlinked. These paths can be Nix store
|
||||
derivations or any other subdirectory contained within them. A combined usage example of <literal>writeScriptBin</literal> and <literal>symlinkJoin</literal> is given after this list.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
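<para>
The following sketch combines two of the builders above:
<literal>writeScriptBin</literal> creates a small executable script and
<literal>symlinkJoin</literal> merges it with another package into a single
directory tree. The script contents and the <literal>hello</literal>
package are only illustrative.
</para>
<programlisting><![CDATA[
with import <nixpkgs> {};

let
  # A tiny script installed as $out/bin/greet.
  greet = writeScriptBin "greet" ''
    #!${runtimeShell}
    echo "hello from the Nix store"
  '';
in
# One directory tree containing symlinks to both packages.
symlinkJoin {
  name = "my-tools";
  paths = [ greet hello ];
}
]]></programlisting>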
|
||||
|
||||
</section>
|
doc/stdenv.xml (141 lines changed)
@ -1279,7 +1279,9 @@ makeFlags = [ "PREFIX=$(out)" ];
|
||||
<command>make</command>. You must use this instead of
|
||||
<varname>makeFlags</varname> if the arguments contain spaces, e.g.
|
||||
<programlisting>
|
||||
makeFlagsArray=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
|
||||
preBuild = ''
|
||||
makeFlagsArray+=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
|
||||
'';
|
||||
</programlisting>
|
||||
Note that shell arrays cannot be passed through environment variables,
|
||||
so you cannot set <varname>makeFlagsArray</varname> in a derivation
|
||||
@ -2202,10 +2204,130 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are some packages that provide a setup hook. Since the mechanism is
|
||||
modular, this probably isn't an exhaustive list. Then again, since the
|
||||
mechanism is only to be used as a last resort, it might be.
|
||||
<variablelist>
|
||||
First, let’s cover some setup hooks that are part of Nixpkgs
|
||||
default stdenv. This means that they are run for every package
|
||||
built using <function>stdenv.mkDerivation</function>. Some of
|
||||
these are platform specific, so they may run on Linux but not
|
||||
Darwin or vice-versa.
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>move-docs.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook moves any installed documentation to the
|
||||
<literal>/share</literal> subdirectory. This includes
|
||||
the man, doc and info directories. This is needed for legacy
|
||||
programs that do not know how to use the
|
||||
<literal>share</literal> subdirectory.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>compress-man-pages.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook compresses any man pages that have been
|
||||
installed. The compression is done using the gzip program. This
|
||||
helps to reduce the installed size of packages.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>strip.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This runs the strip command on installed binaries and
|
||||
libraries. This removes unnecessary information like debug
|
||||
symbols when they are not needed. This also helps to reduce the
|
||||
installed size of packages.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>patch-shebangs.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook patches installed scripts to use the full path
|
||||
to the shebang interpreter. A shebang interpreter is the first
|
||||
commented line of a script telling the operating system which
|
||||
program will run the script (e.g. <literal>#!/bin/bash</literal>). In
|
||||
Nix, we want an exact path to that interpreter to be used. This
|
||||
often replaces <literal>/bin/sh</literal> with a path in the
|
||||
Nix store.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>audit-tmpdir.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This verifies that no references are left from the installed
|
||||
binaries to the directory used to build those binaries. This
|
||||
ensures that the binaries do not need things outside the Nix
|
||||
store. This is currently supported in Linux only.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>multiple-outputs.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook adds configure flags that tell packages to
|
||||
install files into any one of the proper outputs listed in
|
||||
<literal>outputs</literal>. This behavior can be turned off by setting
|
||||
<literal>setOutputFlags</literal> to false in the derivation
|
||||
environment. See <xref linkend="chap-multiple-output"/> for
|
||||
more information.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>move-sbin.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook moves any binaries installed in the sbin
|
||||
subdirectory into bin. In addition, a link is provided from
|
||||
sbin to bin for compatibility.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>move-lib64.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook moves any libraries installed in the lib64
|
||||
subdirectory into lib. In addition, a link is provided from
|
||||
lib64 to lib for compatibility.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<literal>set-source-date-epoch-to-latest.sh</literal>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This sets <literal>SOURCE_DATE_EPOCH</literal> to the
|
||||
modification time of the most recent file.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
Bintools Wrapper
|
||||
@ -2312,6 +2434,15 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are some more packages that provide a setup hook. Since the
list of hooks is extensible, this is not an exhaustive list. Then again,
since the mechanism is only to be used as a last resort, it might cover most
uses.
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
Perl
|
||||
|
@ -1,57 +1,21 @@
|
||||
{ lib
|
||||
# we pass the kernel version here to keep a nice syntax `whenOlder "4.13"`
|
||||
# kernelVersion, e.g., config.boot.kernelPackages.version
|
||||
, version
|
||||
, mkValuePreprocess ? null
|
||||
}:
|
||||
{ lib, version }:
|
||||
|
||||
with lib;
|
||||
rec {
|
||||
# Common patterns
|
||||
when = cond: opt: if cond then opt else null;
|
||||
whenAtLeast = ver: when (versionAtLeast version ver);
|
||||
whenOlder = ver: when (versionOlder version ver);
|
||||
whenBetween = verLow: verHigh: when (versionAtLeast version verLow && versionOlder version verHigh);
|
||||
# Common patterns/legacy
|
||||
whenAtLeast = ver: mkIf (versionAtLeast version ver);
|
||||
whenOlder = ver: mkIf (versionOlder version ver);
|
||||
# range is (inclusive, exclusive)
|
||||
whenBetween = verLow: verHigh: mkIf (versionAtLeast version verLow && versionOlder version verHigh);
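  # Usage sketch (hypothetical option name): in a structured kernel config
  # attrset one can write, e.g.,
  #   NF_TABLES = whenAtLeast "4.17" yes;
  # which wraps the value in mkIf so it only applies on new enough kernels.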
|
||||
|
||||
|
||||
# Keeping these around in case we decide to change this horrible implementation :)
|
||||
option = x: if x == null then null else "?${x}";
|
||||
yes = "y";
|
||||
no = "n";
|
||||
module = "m";
|
||||
option = x:
|
||||
x // { optional = true; };
|
||||
|
||||
mkValue = val:
|
||||
let
|
||||
isNumber = c: elem c ["0" "1" "2" "3" "4" "5" "6" "7" "8" "9"];
|
||||
in
|
||||
if val == "" then "\"\""
|
||||
else if val == yes || val == module || val == no then val
|
||||
else if all isNumber (stringToCharacters val) then val
|
||||
else if substring 0 2 val == "0x" then val
|
||||
else val; # FIXME: fix quoting one day
|
||||
yes = { tristate = "y"; };
|
||||
no = { tristate = "n"; };
|
||||
module = { tristate = "m"; };
|
||||
freeform = x: { freeform = x; };
|
||||
|
||||
|
||||
# generate nix intermediate kernel config file of the form
|
||||
#
|
||||
# VIRTIO_MMIO m
|
||||
# VIRTIO_BLK y
|
||||
# VIRTIO_CONSOLE n
|
||||
# NET_9P_VIRTIO? y
|
||||
#
|
||||
# Use mkValuePreprocess to preprocess option values, aka mark 'modules' as
|
||||
# 'yes' or vice-versa
|
||||
# Borrowed from copumpkin https://github.com/NixOS/nixpkgs/pull/12158
|
||||
# returns a string, expr should be an attribute set
|
||||
generateNixKConf = exprs: mkValuePreprocess:
|
||||
let
|
||||
mkConfigLine = key: rawval:
|
||||
let
|
||||
val = if builtins.isFunction mkValuePreprocess then mkValuePreprocess rawval else rawval;
|
||||
in
|
||||
if val == null
|
||||
then ""
|
||||
else if hasPrefix "?" val
|
||||
then "${key}? ${mkValue (removePrefix "?" val)}\n"
|
||||
else "${key} ${mkValue val}\n";
|
||||
mkConf = cfg: concatStrings (mapAttrsToList mkConfigLine cfg);
|
||||
in mkConf exprs;
|
||||
}
|
||||
|
@ -214,23 +214,25 @@ rec {
|
||||
qux = [ "module.hidden=baz,value=bar" "module.hidden=fli,value=gne" ];
|
||||
}
|
||||
*/
|
||||
byName = attr: f: modules: foldl' (acc: module:
|
||||
foldl' (inner: name:
|
||||
inner // { ${name} = (acc.${name} or []) ++ (f module module.${attr}.${name}); }
|
||||
) acc (attrNames module.${attr})
|
||||
) {} modules;
|
||||
byName = attr: f: modules:
|
||||
foldl' (acc: module:
|
||||
acc // (mapAttrs (n: v:
|
||||
(acc.${n} or []) ++ f module v
|
||||
) module.${attr}
|
||||
)
|
||||
) {} modules;
|
||||
# an attrset 'name' => list of submodules that declare ‘name’.
|
||||
declsByName = byName "options"
|
||||
(module: option: [{ inherit (module) file; options = option; }])
|
||||
options;
|
||||
declsByName = byName "options" (module: option:
|
||||
[{ inherit (module) file; options = option; }]
|
||||
) options;
|
||||
# an attrset 'name' => list of submodules that define ‘name’.
|
||||
defnsByName = byName "config" (module: value:
|
||||
map (config: { inherit (module) file; inherit config; }) (pushDownProperties value)
|
||||
map (config: { inherit (module) file; inherit config; }) (pushDownProperties value)
|
||||
) configs;
|
||||
# extract the definitions for each loc
|
||||
defnsByName' = byName "config"
|
||||
(module: value: [{ inherit (module) file; inherit value; }])
|
||||
configs;
|
||||
defnsByName' = byName "config" (module: value:
|
||||
[{ inherit (module) file; inherit value; }]
|
||||
) configs;
|
||||
in
|
||||
(flip mapAttrs declsByName (name: decls:
|
||||
# We're descending into attribute ‘name’.
|
||||
@ -362,7 +364,6 @@ rec {
|
||||
values = defs''';
|
||||
inherit (defs'') highestPrio;
|
||||
};
|
||||
|
||||
defsFinal = defsFinal'.values;
|
||||
|
||||
# Type-check the remaining definitions, and merge them.
|
||||
@ -475,22 +476,8 @@ rec {
|
||||
optionSet to options of type submodule. FIXME: remove
|
||||
eventually. */
|
||||
fixupOptionType = loc: opt:
|
||||
let
|
||||
options = opt.options or
|
||||
(throw "Option `${showOption loc'}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
|
||||
f = tp:
|
||||
let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
|
||||
in
|
||||
if tp.name == "option set" || tp.name == "submodule" then
|
||||
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
|
||||
else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
|
||||
else if optionSetIn "loaOf" then types.loaOf (types.submodule options)
|
||||
else if optionSetIn "listOf" then types.listOf (types.submodule options)
|
||||
else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
|
||||
else tp;
|
||||
in
|
||||
if opt.type.getSubModules or null == null
|
||||
then opt // { type = f (opt.type or types.unspecified); }
|
||||
if opt.type.getSubModules or null == null
|
||||
then opt // { type = opt.type or types.unspecified; }
|
||||
else opt // { type = opt.type.substSubModules opt.options; options = []; };
|
||||
|
||||
|
||||
|
@ -48,8 +48,6 @@ rec {
|
||||
visible ? null,
|
||||
# Whether the option can be set only once
|
||||
readOnly ? null,
|
||||
# Obsolete, used by types.optionSet.
|
||||
options ? null
|
||||
} @ attrs:
|
||||
attrs // { _type = "option"; };
|
||||
|
||||
|
@ -58,6 +58,7 @@ rec {
|
||||
"netbsd" = "NetBSD";
|
||||
"freebsd" = "FreeBSD";
|
||||
"openbsd" = "OpenBSD";
|
||||
"wasm" = "Wasm";
|
||||
}.${final.parsed.kernel.name} or null;
|
||||
|
||||
# uname -p
|
||||
|
@ -284,8 +284,7 @@ rec {
|
||||
(mergeDefinitions (loc ++ [name]) elemType defs).optionalValue
|
||||
)
|
||||
# Push down position info.
|
||||
(map (def: listToAttrs (mapAttrsToList (n: def':
|
||||
{ name = n; value = { inherit (def) file; value = def'; }; }) def.value)) defs)));
|
||||
(map (def: mapAttrs (n: v: { inherit (def) file; value = v; }) def.value) defs)));
|
||||
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name>"]);
|
||||
getSubModules = elemType.getSubModules;
|
||||
substSubModules = m: attrsOf (elemType.substSubModules m);
|
||||
@ -470,10 +469,7 @@ rec {
|
||||
# Obsolete alternative to configOf. It takes its option
|
||||
# declarations from the ‘options’ attribute of containing option
|
||||
# declaration.
|
||||
optionSet = mkOptionType {
|
||||
name = builtins.trace "types.optionSet is deprecated; use types.submodule instead" "optionSet";
|
||||
description = "option set";
|
||||
};
|
||||
optionSet = builtins.throw "types.optionSet is deprecated; use types.submodule instead" "optionSet";
|
||||
|
||||
# Augment the given type with an additional type check function.
|
||||
addCheck = elemType: check: elemType // { check = x: elemType.check x && check x; };
|
||||
|
@ -1,21 +1,41 @@
|
||||
/* List of NixOS maintainers.
|
||||
|
||||
handle = {
|
||||
name = "Real name";
|
||||
# Required
|
||||
name = "Your name";
|
||||
email = "address@example.org";
|
||||
|
||||
# Optional
|
||||
github = "GithubUsername";
|
||||
keys = [{
|
||||
longkeyid = "rsa2048/0x0123456789ABCDEF";
|
||||
fingerprint = "AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333";
|
||||
}];
|
||||
};
|
||||
|
||||
where `name` is your real name, `email` is your maintainer email
|
||||
address and `github` is your GitHub handle (as it appears in the
|
||||
URL of your profile page, `https://github.com/<userhandle>`).
|
||||
address
|
||||
The only required fields are `name` and `email`.
|
||||
where
|
||||
|
||||
- `handle` is the handle you are going to use in nixpkgs expressions,
|
||||
- `name` is your, preferably real, name,
|
||||
- `email` is your maintainer email address, and
|
||||
- `github` is your GitHub handle (as it appears in the URL of your profile page, `https://github.com/<userhandle>`),
|
||||
- `keys` is a list of your PGP/GPG key IDs and fingerprints.
|
||||
|
||||
`handle == github` is strongly preferred whenever `github` is an acceptable attribute name and is short and convenient.
|
||||
|
||||
Add PGP/GPG keys only if you actually use them to sign commits and/or mail.
|
||||
|
||||
To get the required PGP/GPG values for a key run
|
||||
```shell
|
||||
gpg --keyid-format 0xlong --fingerprint <email> | head -n 2
|
||||
```
|
||||
|
||||
!!! Note that PGP/GPG values stored here are for informational purposes only, don't use this file as a source of truth.
|
||||
|
||||
More fields may be added in the future.
|
||||
|
||||
Please keep the list alphabetically sorted.
|
||||
See `../maintainers/scripts/check-maintainer-github-handles.sh`
|
||||
for an example on how to work with this data.
|
||||
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
|
||||
*/
|
||||
{
|
||||
"1000101" = {
|
||||
@ -872,6 +892,11 @@
|
||||
github = "cko";
|
||||
name = "Christine Koppelt";
|
||||
};
|
||||
clacke = {
|
||||
email = "claes.wallin@greatsinodevelopment.com";
|
||||
github = "clacke";
|
||||
name = "Claes Wallin";
|
||||
};
|
||||
cleverca22 = {
|
||||
email = "cleverca22@gmail.com";
|
||||
github = "cleverca22";
|
||||
@ -1407,6 +1432,10 @@
|
||||
email = "justin.humm@posteo.de";
|
||||
github = "erictapen";
|
||||
name = "Justin Humm";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0x438871E000AA178E";
|
||||
fingerprint = "984E 4BAD 9127 4D0E AE47 FF03 4388 71E0 00AA 178E";
|
||||
}];
|
||||
};
|
||||
erikryb = {
|
||||
email = "erik.rybakken@math.ntnu.no";
|
||||
@ -1436,6 +1465,10 @@
|
||||
email = "elis@hirwing.se";
|
||||
github = "etu";
|
||||
name = "Elis Hirwing";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0xD57EFA625C9A925F";
|
||||
fingerprint = "67FE 98F2 8C44 CF22 1828 E12F D57E FA62 5C9A 925F";
|
||||
}];
|
||||
};
|
||||
evck = {
|
||||
email = "eric@evenchick.com";
|
||||
@ -1740,6 +1773,11 @@
|
||||
github = "dguibert";
|
||||
name = "David Guibert";
|
||||
};
|
||||
groodt = {
|
||||
email = "groodt@gmail.com";
|
||||
github = "groodt";
|
||||
name = "Greg Roodt";
|
||||
};
|
||||
guibou = {
|
||||
email = "guillaum.bouchard@gmail.com";
|
||||
github = "guibou";
|
||||
@ -1929,6 +1967,11 @@
|
||||
github = "infinisil";
|
||||
name = "Silvan Mosberger";
|
||||
};
|
||||
ingenieroariel = {
|
||||
email = "ariel@nunez.co";
|
||||
github = "ingenieroariel";
|
||||
name = "Ariel Nunez";
|
||||
};
|
||||
ironpinguin = {
|
||||
email = "michele@catalano.de";
|
||||
github = "ironpinguin";
|
||||
@ -1943,6 +1986,15 @@
|
||||
email = "tkatchev@gmail.com";
|
||||
name = "Ivan Tkatchev";
|
||||
};
|
||||
ivegotasthma = {
|
||||
email = "ivegotasthma@protonmail.com";
|
||||
github = "ivegotasthma";
|
||||
name = "John Doe";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/09AC52AEA87817A4";
|
||||
fingerprint = "4008 2A5B 56A4 79B9 83CB 95FD 09AC 52AE A878 17A4";
|
||||
}];
|
||||
};
|
||||
ixmatus = {
|
||||
email = "parnell@digitalmentat.com";
|
||||
github = "ixmatus";
|
||||
@ -2848,6 +2900,11 @@
|
||||
email = "joerg@thalheim.io";
|
||||
github = "mic92";
|
||||
name = "Jörg Thalheim";
|
||||
keys = [{
|
||||
# compare with https://keybase.io/Mic92
|
||||
longkeyid = "rsa4096/0x003F2096411B5F92";
|
||||
fingerprint = "3DEE 1C55 6E1C 3DC5 54F5 875A 003F 2096 411B 5F92";
|
||||
}];
|
||||
};
|
||||
michaelpj = {
|
||||
email = "michaelpj@gmail.com";
|
||||
@ -3255,6 +3312,11 @@
|
||||
github = "nyarly";
|
||||
name = "Judson Lester";
|
||||
};
|
||||
nzhang-zh = {
|
||||
email = "n.zhang.hp.au@gmail.com";
|
||||
github = "nzhang-zh";
|
||||
name = "Ning Zhang";
|
||||
};
|
||||
obadz = {
|
||||
email = "obadz-nixos@obadz.com";
|
||||
github = "obadz";
|
||||
@ -3333,6 +3395,10 @@
|
||||
email = "oxij@oxij.org";
|
||||
github = "oxij";
|
||||
name = "Jan Malakhovski";
|
||||
keys = [{
|
||||
longkeyid = "rsa2048/0x0E6CA66E5C557AA8";
|
||||
fingerprint = "514B B966 B46E 3565 0508 86E8 0E6C A66E 5C55 7AA8";
|
||||
}];
|
||||
};
|
||||
oyren = {
|
||||
email = "m.scheuren@oyra.eu";
|
||||
@ -3557,6 +3623,14 @@
|
||||
email = "dev.primeos@gmail.com";
|
||||
github = "primeos";
|
||||
name = "Michael Weiss";
|
||||
keys = [
|
||||
{ longkeyid = "ed25519/0x130826A6C2A389FD"; # Git only
|
||||
fingerprint = "86A7 4A55 07D0 58D1 322E 37FD 1308 26A6 C2A3 89FD";
|
||||
}
|
||||
{ longkeyid = "rsa3072/0xBCA9943DD1DF4C04"; # Email, etc.
|
||||
fingerprint = "AF85 991C C950 49A2 4205 1933 BCA9 943D D1DF 4C04";
|
||||
}
|
||||
];
|
||||
};
|
||||
Profpatsch = {
|
||||
email = "mail@profpatsch.de";
|
||||
@ -3597,6 +3671,11 @@
|
||||
github = "PsyanticY";
|
||||
name = "Psyanticy";
|
||||
};
|
||||
ptival = {
|
||||
email = "valentin.robert.42@gmail.com";
|
||||
github = "Ptival";
|
||||
name = "Valentin Robert";
|
||||
};
|
||||
puffnfresh = {
|
||||
email = "brian@brianmckenna.org";
|
||||
github = "puffnfresh";
|
||||
@ -3625,6 +3704,10 @@
|
||||
email = "hi@alyssa.is";
|
||||
github = "alyssais";
|
||||
name = "Alyssa Ross";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/736CCDF9EF51BD97";
|
||||
fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97";
|
||||
}];
|
||||
};
|
||||
ragge = {
|
||||
email = "r.dahlen@gmail.com";
|
||||
@ -4140,6 +4223,10 @@
|
||||
email = "sebastien.maret@icloud.com";
|
||||
github = "smaret";
|
||||
name = "Sébastien Maret";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0x86E30E5A0F5FC59C";
|
||||
fingerprint = "4242 834C D401 86EF 8281 4093 86E3 0E5A 0F5F C59C";
|
||||
}];
|
||||
};
|
||||
smironov = {
|
||||
email = "grrwlf@gmail.com";
|
||||
@ -4326,6 +4413,15 @@
|
||||
github = "t184256";
|
||||
name = "Alexander Sosedkin";
|
||||
};
|
||||
tadeokondrak = {
|
||||
email = "me@tadeo.ca";
|
||||
github = "tadeokondrak";
|
||||
name = "Tadeo Kondrak";
|
||||
keys = [{
|
||||
longkeyid = "ed25519/0xFBE607FCC49516D3";
|
||||
fingerprint = "0F2B C0C7 E77C 5B42 AC5B 4C18 FBE6 07FC C495 16D3";
|
||||
}];
|
||||
};
|
||||
tadfisher = {
|
||||
email = "tadfisher@gmail.com";
|
||||
github = "tadfisher";
|
||||
@ -4831,11 +4927,6 @@
|
||||
github = "wjlroe";
|
||||
name = "William Roe";
|
||||
};
|
||||
wkennington = {
|
||||
email = "william@wkennington.com";
|
||||
github = "wkennington";
|
||||
name = "William A. Kennington III";
|
||||
};
|
||||
wmertens = {
|
||||
email = "Wout.Mertens@gmail.com";
|
||||
github = "wmertens";
|
||||
|
@ -1,5 +1,5 @@
|
||||
#! /usr/bin/env nix-shell
|
||||
#! nix-shell -i perl -p perl perlPackages.NetAmazonS3 perlPackages.FileSlurp nixUnstable nixUnstable.perl-bindings
|
||||
#! nix-shell -i perl -p perl perlPackages.NetAmazonS3 perlPackages.FileSlurp perlPackages.JSON perlPackages.LWPProtocolHttps nixUnstable nixUnstable.perl-bindings
|
||||
|
||||
# This command uploads tarballs to tarballs.nixos.org, the
|
||||
# content-addressed cache used by fetchurl as a fallback for when
|
||||
@ -101,8 +101,8 @@ sub uploadFile {
|
||||
my ($name, $dest) = @_;
|
||||
#print STDERR "linking $name to $dest...\n";
|
||||
$bucket->add_key($name, "", {
|
||||
'x-amz-website-redirect-location' => "/" . $dest,
|
||||
'x-amz-acl' => "public-read"
|
||||
'x-amz-website-redirect-location' => "/" . $dest,
|
||||
'x-amz-acl' => "public-read"
|
||||
})
|
||||
or die "failed to create redirect from $name to $dest\n";
|
||||
$cache{$name} = 1;
|
||||
@ -116,8 +116,8 @@ sub uploadFile {
|
||||
# Upload the file as sha512/<hash-in-base-16>.
|
||||
print STDERR "uploading $fn to $mainKey...\n";
|
||||
$bucket->add_key_filename($mainKey, $fn, {
|
||||
'x-amz-meta-original-name' => $name,
|
||||
'x-amz-acl' => "public-read"
|
||||
'x-amz-meta-original-name' => $name,
|
||||
'x-amz-acl' => "public-read"
|
||||
})
|
||||
or die "failed to upload $fn to $mainKey\n";
|
||||
$cache{$mainKey} = 1;
|
||||
|
@ -4,4 +4,8 @@ if [[ -z "$VERBOSE" ]]; then
|
||||
echo "You may set VERBOSE=1 to see debug output or to any other non-empty string to make this script completely silent"
|
||||
fi
|
||||
unset HOME NIXPKGS_CONFIG # Force empty config
|
||||
|
||||
# With the default heap size (380MB), nix-instantiate fails:
|
||||
# Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS
|
||||
export GC_INITIAL_HEAP_SIZE=${GC_INITIAL_HEAP_SIZE:-2000000000} # 2GB
|
||||
nix-instantiate --strict --eval-only --xml --show-trace "$(dirname "$0")"/eval-release.nix 2>&1 > /dev/null
|
@ -31,7 +31,7 @@ let
|
||||
if !canEval x then []
|
||||
else if isDerivation x then optional (canEval x.drvPath) x
|
||||
else if isList x then concatLists (map derivationsIn' x)
|
||||
else if isAttrs x then concatLists (mapAttrsToList (n: v: derivationsIn' v) x)
|
||||
else if isAttrs x then concatLists (mapAttrsToList (n: v: addErrorContext "while finding tarballs in '${n}':" (derivationsIn' v)) x)
|
||||
else [ ];
|
||||
|
||||
keyDrv = drv: if canEval drv.drvPath then { key = drv.drvPath; value = drv; } else { };
|
||||
|
@ -23,7 +23,7 @@ $ diskutil list
|
||||
[..]
|
||||
$ diskutil unmountDisk diskN
|
||||
Unmount of all volumes on diskN was successful
|
||||
$ sudo dd bs=1000000 if=nix.iso of=/dev/rdiskN
|
||||
$ sudo dd if=nix.iso of=/dev/rdiskN
|
||||
</programlisting>
|
||||
Using the 'raw' <command>rdiskN</command> device instead of
|
||||
<command>diskN</command> completes in minutes instead of hours. After
|
||||
|
@ -331,17 +331,29 @@
|
||||
<para>
|
||||
The <literal>pam_unix</literal> account module is now loaded with its
|
||||
control field set to <literal>required</literal> instead of
|
||||
<literal>sufficient</literal>, so that later pam account modules that
|
||||
<literal>sufficient</literal>, so that later PAM account modules that
|
||||
might do more extensive checks are being executed.
|
||||
Previously, the whole account module verification was exited prematurely
|
||||
in case a nss module provided the account name to
|
||||
<literal>pam_unix</literal>.
|
||||
The LDAP and SSSD NixOS modules already add their NSS modules when
|
||||
enabled. In case your setup breaks due to some later pam account module
|
||||
enabled. In case your setup breaks due to some later PAM account module
|
||||
previously shadowed, or failing NSS lookups, please file a bug. You can
|
||||
get back the old behaviour by manually setting
|
||||
<literal><![CDATA[security.pam.services.<name?>.text]]></literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>pam_unix</literal> password module is now loaded with its
|
||||
control field set to <literal>sufficient</literal> instead of
|
||||
<literal>required</literal>, so that passwords managed only
|
||||
by later PAM password modules can still be changed.
|
||||
Previously, for example, changing an LDAP account's password through PAM
|
||||
was not possible: the whole password module verification
|
||||
was exited prematurely by <literal>pam_unix</literal>,
|
||||
preventing <literal>pam_ldap</literal> from managing the password as it should.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
@ -350,6 +362,22 @@
|
||||
See the <literal>fish</literal> <link xlink:href="https://github.com/fish-shell/fish-shell/releases/tag/3.0.0">release notes</link> for more information.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The ibus-table input method has had a change in config format, which
|
||||
causes all previous settings to be lost. See
|
||||
<link xlink:href="https://github.com/mike-fabian/ibus-table/commit/f9195f877c5212fef0dfa446acb328c45ba5852b">this commit message</link>
|
||||
for details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Support for NixOS module system type <literal>types.optionSet</literal> and
|
||||
<literal>lib.mkOption</literal> argument <literal>options</literal> is removed.
|
||||
Use <literal>types.submodule</literal> instead.
|
||||
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
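<para>
A rough migration sketch, using a hypothetical option name: where an option
previously declared <literal>options</literal> with
<literal>types.optionSet</literal>, it would now be declared along these
lines:
</para>
<programlisting>
services.example.instances = mkOption {
  type = types.attrsOf (types.submodule {
    options.port = mkOption {
      type = types.int;
      default = 8080;
    };
  });
  default = {};
};
</programlisting>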
|
||||
</section>
|
||||
|
||||
@ -402,12 +430,47 @@
|
||||
of maintainers.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The httpd service now saves log files with a .log file extension by default for
|
||||
easier integration with the logrotate service.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The owncloud server packages and httpd subservice module were removed
|
||||
from nixpkgs due to the lack of maintainers.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
It is now possible to use ZRAM devices as general purpose ephemeral block devices,
|
||||
not only as swap. Using more than 1 device as ZRAM swap is no longer recommended,
|
||||
but is still possible by setting <literal>zramSwap.swapDevices</literal> explicitly.
|
||||
</para>
|
||||
<para>
|
||||
Default algorithm for ZRAM swap was changed to <literal>zstd</literal>.
|
||||
</para>
|
||||
<para>
|
||||
Changes to ZRAM algorithm are applied during <literal>nixos-rebuild switch</literal>,
|
||||
so make sure you have enough swap space on disk to survive ZRAM device rebuild. Alternatively,
|
||||
use <literal>nixos-rebuild boot; reboot</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Flat volumes are now disabled by default in <literal>hardware.pulseaudio</literal>.
|
||||
This has been done to prevent applications that are unaware of this feature from setting
|
||||
their volumes to 100% on startup, causing harm to your audio hardware and potentially your ears.
|
||||
</para>
|
||||
<note>
|
||||
<para>
|
||||
With this change, application-specific volumes are relative to the master volume, which can be
|
||||
adjusted independently, whereas before they were absolute; meaning that in effect, it scaled the
|
||||
device-volume with the volume of the loudest application.
|
||||
</para>
|
||||
</note>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
</section>
|
||||
|
@ -27,6 +27,9 @@
|
||||
, # The root file system type.
|
||||
fsType ? "ext4"
|
||||
|
||||
, # Filesystem label
|
||||
label ? "nixos"
|
||||
|
||||
, # The initial NixOS configuration file to be copied to
|
||||
# /etc/nixos/configuration.nix.
|
||||
configFile ? null
|
||||
@ -134,9 +137,9 @@ let format' = format; in let
|
||||
# Get start & length of the root partition in sectors to $START and $SECTORS.
|
||||
eval $(partx $diskImage -o START,SECTORS --nr ${rootPartition} --pairs)
|
||||
|
||||
mkfs.${fsType} -F -L nixos $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
|
||||
mkfs.${fsType} -F -L ${label} $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
|
||||
'' else ''
|
||||
mkfs.${fsType} -F -L nixos $diskImage
|
||||
mkfs.${fsType} -F -L ${label} $diskImage
|
||||
''}
|
||||
|
||||
root="$PWD/root"
|
||||
|
@ -38,6 +38,8 @@ let
|
||||
bind_timelimit ${toString cfg.bind.timeLimit}
|
||||
${optionalString (cfg.bind.distinguishedName != "")
|
||||
"binddn ${cfg.bind.distinguishedName}" }
|
||||
${optionalString (cfg.daemon.rootpwmoddn != "")
|
||||
"rootpwmoddn ${cfg.daemon.rootpwmoddn}" }
|
||||
${optionalString (cfg.daemon.extraConfig != "") cfg.daemon.extraConfig }
|
||||
'';
|
||||
};
|
||||
@ -126,6 +128,26 @@ in
|
||||
the end of the nslcd configuration file (nslcd.conf).
|
||||
'' ;
|
||||
} ;
|
||||
|
||||
rootpwmoddn = mkOption {
|
||||
default = "";
|
||||
example = "cn=admin,dc=example,dc=com";
|
||||
type = types.str;
|
||||
description = ''
|
||||
The distinguished name to use to bind to the LDAP server
|
||||
when the root user tries to modify a user's password.
|
||||
'';
|
||||
};
|
||||
|
||||
rootpwmodpw = mkOption {
|
||||
default = "";
|
||||
example = "/run/keys/nslcd.rootpwmodpw";
|
||||
type = types.str;
|
||||
description = ''
|
||||
The path to a file containing the credentials with which
|
||||
to bind to the LDAP server if the root user tries to change a user's password
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
bind = {
|
||||
@ -203,9 +225,11 @@ in
|
||||
system.activationScripts = mkIf insertLdapPassword {
|
||||
ldap = stringAfter [ "etc" "groups" "users" ] ''
|
||||
if test -f "${cfg.bind.password}" ; then
|
||||
echo "bindpw "$(cat ${cfg.bind.password})"" | cat ${ldapConfig.source} - > /etc/ldap.conf.bindpw
|
||||
mv -fT /etc/ldap.conf.bindpw /etc/ldap.conf
|
||||
chmod 600 /etc/ldap.conf
|
||||
umask 0077
|
||||
conf="$(mktemp)"
|
||||
printf 'bindpw %s\n' "$(cat ${cfg.bind.password})" |
|
||||
cat ${ldapConfig.source} - >"$conf"
|
||||
mv -fT "$conf" /etc/ldap.conf
|
||||
fi
|
||||
'';
|
||||
};
|
||||
@ -232,21 +256,31 @@ in
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
preStart = ''
|
||||
mkdir -p /run/nslcd
|
||||
rm -f /run/nslcd/nslcd.pid;
|
||||
chown nslcd.nslcd /run/nslcd
|
||||
${optionalString (cfg.bind.distinguishedName != "") ''
|
||||
if test -s "${cfg.bind.password}" ; then
|
||||
ln -sfT "${cfg.bind.password}" /run/nslcd/bindpw
|
||||
fi
|
||||
''}
|
||||
umask 0077
|
||||
conf="$(mktemp)"
|
||||
{
|
||||
cat ${nslcdConfig.source}
|
||||
test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.password}' ||
|
||||
printf 'bindpw %s\n' "$(cat '${cfg.bind.password}')"
|
||||
test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpw}' ||
|
||||
printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpw}')"
|
||||
} >"$conf"
|
||||
mv -fT "$conf" /etc/nslcd.conf
|
||||
'';
|
||||
|
||||
# NOTE: because one cannot pass a custom config path to `nslcd`
|
||||
# (which is only able to use `/etc/nslcd.conf`)
|
||||
# changes in `nslcdConfig` won't change `serviceConfig`,
|
||||
# and thus won't restart `nslcd`.
|
||||
# Therefore `restartTriggers` is used on `/etc/nslcd.conf`.
|
||||
restartTriggers = [ nslcdConfig.source ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${nss_pam_ldapd}/sbin/nslcd";
|
||||
Type = "forking";
|
||||
PIDFile = "/run/nslcd/nslcd.pid";
|
||||
Restart = "always";
|
||||
RuntimeDirectory = [ "nslcd" ];
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -180,7 +180,7 @@ in {
|
||||
type = types.attrsOf types.unspecified;
|
||||
default = {};
|
||||
description = ''Config of the pulse daemon. See <literal>man pulse-daemon.conf</literal>.'';
|
||||
example = literalExample ''{ flat-volumes = "no"; }'';
|
||||
example = literalExample ''{ realtime-scheduling = "yes"; }'';
|
||||
};
|
||||
};
|
||||
|
||||
@ -242,6 +242,9 @@ in {
|
||||
source = writeText "libao.conf" "default_driver=pulse"; }
|
||||
];
|
||||
|
||||
# Disable flat volumes to enable relative ones
|
||||
hardware.pulseaudio.daemon.config.flat-volumes = mkDefault "no";
|
||||
|
||||
# Allow PulseAudio to get realtime priority using rtkit.
|
||||
security.rtkit.enable = true;
|
||||
|
||||
|
@ -6,10 +6,27 @@ let
|
||||
|
||||
cfg = config.zramSwap;
|
||||
|
||||
devices = map (nr: "zram${toString nr}") (range 0 (cfg.numDevices - 1));
|
||||
# don't set swapDevices as mkDefault, so we can detect user had read our warning
|
||||
# (see below) and made an action (or not)
|
||||
devicesCount = if cfg.swapDevices != null then cfg.swapDevices else cfg.numDevices;
|
||||
|
||||
devices = map (nr: "zram${toString nr}") (range 0 (devicesCount - 1));
|
||||
|
||||
modprobe = "${pkgs.kmod}/bin/modprobe";
|
||||
|
||||
warnings =
|
||||
assert cfg.swapDevices != null -> cfg.numDevices >= cfg.swapDevices;
|
||||
flatten [
|
||||
(optional (cfg.numDevices > 1 && cfg.swapDevices == null) ''
|
||||
Using several small zram devices as swap is no better than using one large.
|
||||
Set either zramSwap.numDevices = 1 or explicitly set zramSwap.swapDevices.
|
||||
|
||||
Previously multiple zram devices were used to enable multithreaded
|
||||
compression. Linux supports multithreaded compression for 1 device
|
||||
since 3.15. See https://lkml.org/lkml/2014/2/28/404 for details.
|
||||
'')
|
||||
];
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
@ -24,9 +41,11 @@ in
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Enable in-memory compressed swap space provided by the zram kernel
|
||||
module.
|
||||
See https://www.kernel.org/doc/Documentation/blockdev/zram.txt
|
||||
Enable in-memory compressed devices and swap space provided by the zram
|
||||
kernel module.
|
||||
See <link xlink:href="https://www.kernel.org/doc/Documentation/blockdev/zram.txt">
|
||||
https://www.kernel.org/doc/Documentation/blockdev/zram.txt
|
||||
</link>.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -34,7 +53,19 @@ in
|
||||
default = 1;
|
||||
type = types.int;
|
||||
description = ''
|
||||
Number of zram swap devices to create.
|
||||
Number of zram devices to create. See also
|
||||
<literal>zramSwap.swapDevices</literal>
|
||||
'';
|
||||
};
|
||||
|
||||
swapDevices = mkOption {
|
||||
default = null;
|
||||
example = 1;
|
||||
type = with types; nullOr int;
|
||||
description = ''
|
||||
Number of zram devices to be used as swap. Must be
|
||||
<literal><= zramSwap.numDevices</literal>.
|
||||
Defaults to <literal>zramSwap.numDevices</literal>; the recommended value is 1.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -44,7 +75,8 @@ in
|
||||
description = ''
|
||||
Maximum amount of memory that can be used by the zram swap devices
|
||||
(as a percentage of your total memory). Defaults to 1/2 of your total
|
||||
RAM.
|
||||
RAM. Run <literal>zramctl</literal> to check how well memory is
|
||||
compressed.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -58,12 +90,26 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
algorithm = mkOption {
|
||||
default = "zstd";
|
||||
example = "lzo";
|
||||
type = with types; either (enum [ "lzo" "lz4" "zstd" ]) str;
|
||||
description = ''
|
||||
Compression algorithm. <literal>lzo</literal> has good compression,
|
||||
but is slow. <literal>lz4</literal> has bad compression, but is fast.
|
||||
<literal>zstd</literal> offers both good compression and speed.
|
||||
You can check what other algorithms are supported by your zram device with
|
||||
<programlisting>cat /sys/class/block/zram*/comp_algorithm</programlisting>
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
inherit warnings;
|
||||
|
||||
system.requiredKernelConfig = with config.lib.kernelConfig; [
|
||||
(isModule "ZRAM")
|
||||
];
|
||||
@ -85,25 +131,25 @@ in
|
||||
createZramInitService = dev:
|
||||
nameValuePair "zram-init-${dev}" {
|
||||
description = "Init swap on zram-based device ${dev}";
|
||||
bindsTo = [ "dev-${dev}.swap" ];
|
||||
after = [ "dev-${dev}.device" "zram-reloader.service" ];
|
||||
requires = [ "dev-${dev}.device" "zram-reloader.service" ];
|
||||
before = [ "dev-${dev}.swap" ];
|
||||
requiredBy = [ "dev-${dev}.swap" ];
|
||||
unitConfig.DefaultDependencies = false; # needed to prevent a cycle
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/class/block/${dev}/reset'";
|
||||
};
|
||||
script = ''
|
||||
set -u
|
||||
set -o pipefail
|
||||
|
||||
# Calculate memory to use for zram
|
||||
totalmem=$(${pkgs.gnugrep}/bin/grep 'MemTotal: ' /proc/meminfo | ${pkgs.gawk}/bin/awk '{print $2}')
|
||||
mem=$(((totalmem * ${toString cfg.memoryPercent} / 100 / ${toString cfg.numDevices}) * 1024))
|
||||
set -euo pipefail
|
||||
|
||||
echo $mem > /sys/class/block/${dev}/disksize
|
||||
# Calculate memory to use for zram
|
||||
mem=$(${pkgs.gawk}/bin/awk '/MemTotal: / {
|
||||
print int($2*${toString cfg.memoryPercent}/100.0/${toString devicesCount}*1024)
|
||||
}' /proc/meminfo)
|
||||
|
||||
${pkgs.utillinux}/sbin/zramctl --size $mem --algorithm ${cfg.algorithm} /dev/${dev}
|
||||
${pkgs.utillinux}/sbin/mkswap /dev/${dev}
|
||||
'';
|
||||
restartIfChanged = false;
|
||||
@ -111,6 +157,9 @@ in
|
||||
in listToAttrs ((map createZramInitService devices) ++ [(nameValuePair "zram-reloader"
|
||||
{
|
||||
description = "Reload zram kernel module when number of devices changes";
|
||||
wants = [ "systemd-udevd.service" ];
|
||||
after = [ "systemd-udevd.service" ];
|
||||
unitConfig.DefaultDependencies = false; # needed to prevent a cycle
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
@ -118,7 +167,11 @@ in
|
||||
ExecStart = "${modprobe} zram";
|
||||
ExecStop = "${modprobe} -r zram";
|
||||
};
|
||||
restartTriggers = [ cfg.numDevices ];
|
||||
restartTriggers = [
|
||||
cfg.numDevices
|
||||
cfg.algorithm
|
||||
cfg.memoryPercent
|
||||
];
|
||||
restartIfChanged = true;
|
||||
})]);
|
||||
|
||||
|
@ -124,10 +124,14 @@ in
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = lib.singleton {
|
||||
assertion = cfg.driSupport32Bit -> pkgs.stdenv.isx86_64;
|
||||
message = "Option driSupport32Bit only makes sense on a 64-bit system.";
|
||||
};
|
||||
assertions = [
|
||||
{ assertion = cfg.driSupport32Bit -> pkgs.stdenv.isx86_64;
|
||||
message = "Option driSupport32Bit only makes sense on a 64-bit system.";
|
||||
}
|
||||
{ assertion = cfg.driSupport32Bit -> (config.boot.kernelPackages.kernel.features.ia32Emulation or false);
|
||||
message = "Option driSupport32Bit requires a kernel that supports 32bit emulation";
|
||||
}
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"L+ /run/opengl-driver - - - - ${package}"
|
||||
|
@ -449,7 +449,11 @@ EOF
|
||||
if (-e $slave) {
|
||||
my $dmName = read_file("/sys/class/block/$deviceName/dm/name");
|
||||
chomp $dmName;
|
||||
$fileSystems .= " boot.initrd.luks.devices.\"$dmName\".device = \"${\(findStableDevPath $slave)}\";\n\n";
|
||||
# Ensure to add an entry only once
|
||||
my $luksDevice = " boot.initrd.luks.devices.\"$dmName\".device";
|
||||
if ($fileSystems !~ /^\Q$luksDevice\E/m) {
|
||||
$fileSystems .= "$luksDevice = \"${\(findStableDevPath $slave)}\";\n\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -631,9 +635,10 @@ $bootLoaderConfig
|
||||
# services.xserver.desktopManager.plasma5.enable = true;
|
||||
|
||||
# Define a user account. Don't forget to set a password with ‘passwd’.
|
||||
# users.users.guest = {
|
||||
# users.users.jane = {
|
||||
# isNormalUser = true;
|
||||
# uid = 1000;
|
||||
# extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
|
||||
# };
|
||||
|
||||
# This value determines the NixOS release with which your system is to be
|
||||
|
@ -314,13 +314,13 @@ else
|
||||
# echo 1>&2 "Warning: This value is not an option."
|
||||
|
||||
result=$(evalCfg "")
|
||||
if names=$(attrNames "$result" 2> /dev/null); then
|
||||
if [ ! -z "$result" ]; then
|
||||
names=$(attrNames "$result" 2> /dev/null)
|
||||
echo 1>&2 "This attribute set contains:"
|
||||
escapeQuotes () { eval echo "$1"; }
|
||||
nixMap escapeQuotes "$names"
|
||||
else
|
||||
echo 1>&2 "An error occurred while looking for attribute names."
|
||||
echo $result
|
||||
echo 1>&2 "An error occurred while looking for attribute names. Are you sure that '$option' exists?"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
@ -681,6 +681,7 @@
|
||||
./services/security/hologram-server.nix
|
||||
./services/security/hologram-agent.nix
|
||||
./services/security/munge.nix
|
||||
./services/security/nginx-sso.nix
|
||||
./services/security/oauth2_proxy.nix
|
||||
./services/security/oauth2_proxy_nginx.nix
|
||||
./services/security/physlock.nix
|
||||
|
@ -13,5 +13,5 @@ with lib;
documentation.enable = mkDefault false;
services.nixosManual.enable = mkDefault false;
documentation.nixos.enable = mkDefault false;
}
|
@ -167,16 +167,16 @@ in
|
||||
The set of system-wide known SSH hosts.
|
||||
'';
|
||||
example = literalExample ''
|
||||
[
|
||||
{
|
||||
{
|
||||
myhost = {
|
||||
hostNames = [ "myhost" "myhost.mydomain.com" "10.10.1.4" ];
|
||||
publicKeyFile = ./pubkeys/myhost_ssh_host_dsa_key.pub;
|
||||
}
|
||||
{
|
||||
};
|
||||
myhost2 = {
|
||||
hostNames = [ "myhost2" ];
|
||||
publicKeyFile = ./pubkeys/myhost2_ssh_host_dsa_key.pub;
|
||||
}
|
||||
]
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -48,6 +48,23 @@ in
|
||||
https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters/pattern.md
|
||||
'';
|
||||
};
|
||||
styles = mkOption {
|
||||
default = {};
|
||||
type = types.attrsOf types.string;
|
||||
|
||||
example = literalExample ''
|
||||
{
|
||||
"alias" = "fg=magenta,bold";
|
||||
}
|
||||
'';
|
||||
|
||||
description = ''
|
||||
Specifies custom styles to be highlighted by zsh-syntax-highlighting.
|
||||
|
||||
Please refer to the docs for more information about the usage:
|
||||
https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters/main.md
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@ -73,6 +90,11 @@ in
|
||||
pattern: design:
|
||||
"ZSH_HIGHLIGHT_PATTERNS+=('${pattern}' '${design}')"
|
||||
) cfg.patterns)
|
||||
++ optionals (length(attrNames cfg.styles) > 0)
|
||||
(mapAttrsToList (
|
||||
styles: design:
|
||||
"ZSH_HIGHLIGHT_STYLES[${styles}]='${design}'"
|
||||
) cfg.styles)
|
||||
);
|
||||
};
|
||||
}
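A short usage sketch for the new styles option; the programs.zsh.syntaxHighlighting prefix is assumed, since only the submodule body is visible in this hunk, and the second entry is hypothetical:

  {
    programs.zsh.syntaxHighlighting = {
      enable = true;
      styles = {
        "alias" = "fg=magenta,bold";      # same value as the option example above
        "unknown-token" = "fg=red,bold";  # hypothetical extra entry
      };
    };
  }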
|
||||
|
@ -69,6 +69,9 @@ with lib;
|
||||
(mkRemovedOptionModule [ "security" "setuidOwners" ] "Use security.wrappers instead")
|
||||
(mkRemovedOptionModule [ "security" "setuidPrograms" ] "Use security.wrappers instead")
|
||||
|
||||
# PAM
|
||||
(mkRenamedOptionModule [ "security" "pam" "enableU2F" ] [ "security" "pam" "u2f" "enable" ])
|
||||
|
||||
(mkRemovedOptionModule [ "services" "rmilter" "bindInetSockets" ] "Use services.rmilter.bindSocket.* instead")
|
||||
(mkRemovedOptionModule [ "services" "rmilter" "bindUnixSockets" ] "Use services.rmilter.bindSocket.* instead")
|
||||
|
||||
|
@ -37,12 +37,14 @@ let
|
||||
};
|
||||
|
||||
u2fAuth = mkOption {
|
||||
default = config.security.pam.enableU2F;
|
||||
default = config.security.pam.u2f.enable;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
If set, users listed in
|
||||
<filename>~/.config/Yubico/u2f_keys</filename> are able to log in
|
||||
with the associated U2F key.
|
||||
<filename>$XDG_CONFIG_HOME/Yubico/u2f_keys</filename> (or
|
||||
<filename>$HOME/.config/Yubico/u2f_keys</filename> if XDG variable is
|
||||
not set) are able to log in with the associated U2F key. Path can be
|
||||
changed using <option>security.pam.u2f.authFile</option> option.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -320,8 +322,8 @@ let
|
||||
"auth sufficient ${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so file=~/.ssh/authorized_keys:~/.ssh/authorized_keys2:/etc/ssh/authorized_keys.d/%u"}
|
||||
${optionalString cfg.fprintAuth
|
||||
"auth sufficient ${pkgs.fprintd}/lib/security/pam_fprintd.so"}
|
||||
${optionalString cfg.u2fAuth
|
||||
"auth sufficient ${pkgs.pam_u2f}/lib/security/pam_u2f.so"}
|
||||
${let u2f = config.security.pam.u2f; in optionalString cfg.u2fAuth
|
||||
"auth ${u2f.control} ${pkgs.pam_u2f}/lib/security/pam_u2f.so ${optionalString u2f.debug "debug"} ${optionalString (u2f.authFile != null) "authfile=${u2f.authFile}"} ${optionalString u2f.interactive "interactive"} ${optionalString u2f.cue "cue"}"}
|
||||
${optionalString cfg.usbAuth
|
||||
"auth sufficient ${pkgs.pam_usb}/lib/security/pam_usb.so"}
|
||||
${let oath = config.security.pam.oath; in optionalString cfg.oathAuth
|
||||
@ -368,7 +370,7 @@ let
|
||||
auth required pam_deny.so
|
||||
|
||||
# Password management.
|
||||
password requisite pam_unix.so nullok sha512
|
||||
password sufficient pam_unix.so nullok sha512
|
||||
${optionalString config.security.pam.enableEcryptfs
|
||||
"password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
||||
${optionalString cfg.pamMount
|
||||
@ -527,11 +529,96 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
security.pam.enableU2F = mkOption {
|
||||
default = false;
|
||||
description = ''
|
||||
Enable the U2F PAM module.
|
||||
'';
|
||||
security.pam.u2f = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Enables U2F PAM (<literal>pam-u2f</literal>) module.
|
||||
|
||||
If set, users listed in
|
||||
<filename>$XDG_CONFIG_HOME/Yubico/u2f_keys</filename> (or
|
||||
<filename>$HOME/.config/Yubico/u2f_keys</filename> if XDG variable is
|
||||
not set) are able to log in with the associated U2F key. The path can
|
||||
be changed using <option>security.pam.u2f.authFile</option> option.
|
||||
|
||||
File format is:
|
||||
<literal>username:first_keyHandle,first_public_key: second_keyHandle,second_public_key</literal>
|
||||
This file can be generated using <command>pamu2fcfg</command> command.
|
||||
|
||||
More information can be found <link
|
||||
xlink:href="https://developers.yubico.com/pam-u2f/">here</link>.
|
||||
'';
|
||||
};
|
||||
|
||||
authFile = mkOption {
|
||||
default = null;
|
||||
type = with types; nullOr path;
|
||||
description = ''
|
||||
By default <literal>pam-u2f</literal> module reads the keys from
|
||||
<filename>$XDG_CONFIG_HOME/Yubico/u2f_keys</filename> (or
|
||||
<filename>$HOME/.config/Yubico/u2f_keys</filename> if XDG variable is
|
||||
not set).
|
||||
|
||||
If you want to change auth file locations or centralize database (for
|
||||
example use <filename>/etc/u2f-mappings</filename>) you can set this
|
||||
option.
|
||||
|
||||
File format is:
|
||||
<literal>username:first_keyHandle,first_public_key: second_keyHandle,second_public_key</literal>
|
||||
This file can be generated using <command>pamu2fcfg</command> command.
|
||||
|
||||
More information can be found <link
|
||||
xlink:href="https://developers.yubico.com/pam-u2f/">here</link>.
|
||||
'';
|
||||
};
|
||||
|
||||
control = mkOption {
|
||||
default = "sufficient";
|
||||
type = types.enum [ "required" "requisite" "sufficient" "optional" ];
|
||||
description = ''
|
||||
This option sets pam "control".
|
||||
If you want to have multi factor authentication, use "required".
|
||||
If you want to use U2F device instead of regular password, use "sufficient".
|
||||
|
||||
Read
|
||||
<citerefentry>
|
||||
<refentrytitle>pam.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry>
|
||||
for better understanding of this option.
|
||||
'';
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Debug output to stderr.
|
||||
'';
|
||||
};
|
||||
|
||||
interactive = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Set to prompt a message and wait before testing the presence of a U2F device.
|
||||
Recommended if your device doesn’t have a tactile trigger.
|
||||
'';
|
||||
};
|
||||
|
||||
cue = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
By default the <literal>pam-u2f</literal> module does not inform the
user that a U2F device is expected; it just waits without a prompt.
|
||||
|
||||
If you set this option to <literal>true</literal>,
|
||||
<literal>cue</literal> option is added to <literal>pam-u2f</literal>
|
||||
module and reminder message will be displayed.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
security.pam.enableEcryptfs = mkOption {
|
||||
@ -563,7 +650,7 @@ in
|
||||
++ optionals config.krb5.enable [pam_krb5 pam_ccreds]
|
||||
++ optionals config.security.pam.enableOTPW [ pkgs.otpw ]
|
||||
++ optionals config.security.pam.oath.enable [ pkgs.oathToolkit ]
|
||||
++ optionals config.security.pam.enableU2F [ pkgs.pam_u2f ];
|
||||
++ optionals config.security.pam.u2f.enable [ pkgs.pam_u2f ];
|
||||
|
||||
boot.supportedFilesystems = optionals config.security.pam.enableEcryptfs [ "ecryptfs" ];
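Putting the new options together, a configuration that uses a U2F key as a second factor could look roughly like this (a sketch; the central authfile path is hypothetical and the key file is generated with pamu2fcfg as described above):

  {
    security.pam.u2f = {
      enable = true;
      control = "required";            # second factor in addition to the password
      cue = true;                      # print a reminder to touch the device
      authFile = "/etc/u2f-mappings";  # hypothetical central mapping file
    };
  }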
|
||||
|
||||
|
@ -6,11 +6,11 @@ let
|
||||
|
||||
cfg = config.services.postgresqlBackup;
|
||||
|
||||
postgresqlBackupService = db :
|
||||
postgresqlBackupService = db: dumpCmd:
|
||||
{
|
||||
enable = true;
|
||||
|
||||
description = "Backup of database ${db}";
|
||||
description = "Backup of ${db} database(s)";
|
||||
|
||||
requires = [ "postgresql.service" ];
|
||||
|
||||
@ -26,7 +26,7 @@ let
|
||||
${pkgs.coreutils}/bin/mv ${cfg.location}/${db}.sql.gz ${cfg.location}/${db}.prev.sql.gz
|
||||
fi
|
||||
|
||||
${config.services.postgresql.package}/bin/pg_dump ${cfg.pgdumpOptions} ${db} | \
|
||||
${dumpCmd} | \
|
||||
${pkgs.gzip}/bin/gzip -c > ${cfg.location}/${db}.sql.gz
|
||||
'';
|
||||
|
||||
@ -42,9 +42,7 @@ let
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.postgresqlBackup = {
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
description = ''
|
||||
@ -61,6 +59,19 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
backupAll = mkOption {
|
||||
default = cfg.databases == [];
|
||||
defaultText = "services.postgresqlBackup.databases == []";
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Backup all databases using pg_dumpall.
|
||||
This option is mutually exclusive with
|
||||
<literal>services.postgresqlBackup.databases</literal>.
|
||||
The resulting backup dump will have the name all.sql.gz.
|
||||
This option is the default if no databases are specified.
|
||||
'';
|
||||
};
|
||||
|
||||
databases = mkOption {
|
||||
default = [];
|
||||
description = ''
|
||||
@ -79,18 +90,36 @@ in {
|
||||
type = types.string;
|
||||
default = "-Cbo";
|
||||
description = ''
|
||||
Command line options for pg_dump.
|
||||
Command line options for pg_dump. This option is not used
|
||||
if <literal>config.services.postgresqlBackup.backupAll</literal> is enabled.
|
||||
Note that config.services.postgresqlBackup.backupAll is also active,
|
||||
when no databases were specified.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf config.services.postgresqlBackup.enable {
|
||||
|
||||
systemd.services = listToAttrs (map (db : {
|
||||
config = mkMerge [
|
||||
{
|
||||
assertions = [{
|
||||
assertion = cfg.backupAll -> cfg.databases == [];
|
||||
message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases";
|
||||
}];
|
||||
}
|
||||
(mkIf (cfg.enable && cfg.backupAll) {
|
||||
systemd.services.postgresqlBackup =
|
||||
postgresqlBackupService "all" "${config.services.postgresql.package}/bin/pg_dumpall";
|
||||
})
|
||||
(mkIf (cfg.enable && !cfg.backupAll) {
|
||||
systemd.services = listToAttrs (map (db:
|
||||
let
|
||||
cmd = "${config.services.postgresql.package}/bin/pg_dump ${cfg.pgdumpOptions} ${db}";
|
||||
in {
|
||||
name = "postgresqlBackup-${db}";
|
||||
value = postgresqlBackupService db; } ) cfg.databases);
|
||||
};
|
||||
value = postgresqlBackupService db cmd;
|
||||
}) cfg.databases);
|
||||
})
|
||||
];
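The two modes guarded by the new assertion would be selected roughly as follows (a sketch; the database name is a placeholder):

  {
    services.postgresqlBackup.enable = true;

    # Either dump selected databases (one pg_dump unit per entry) ...
    services.postgresqlBackup.databases = [ "mydb" ];

    # ... or leave the list empty so backupAll defaults to true and a single
    # pg_dumpall unit writes all.sql.gz instead.
    # services.postgresqlBackup.backupAll = true;
  }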
|
||||
|
||||
}
|
||||
|
@ -1,6 +1,11 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
# Type for a valid systemd unit option. Needed for correctly passing "timerConfig" to "systemd.timers"
|
||||
unitOption = (import ../../system/boot/systemd-unit-options.nix { inherit config lib; }).unitOption;
|
||||
in
|
||||
{
|
||||
options.services.restic.backups = mkOption {
|
||||
description = ''
|
||||
@ -47,7 +52,7 @@ with lib;
|
||||
};
|
||||
|
||||
timerConfig = mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
type = types.attrsOf unitOption;
|
||||
default = {
|
||||
OnCalendar = "daily";
|
||||
};
|
||||
|
@ -249,6 +249,7 @@ in
|
||||
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
restartTriggers = [ config.environment.etc."my.cnf".source ];
|
||||
|
||||
unitConfig.RequiresMountsFor = "${cfg.dataDir}";
|
||||
|
||||
@ -274,7 +275,8 @@ in
|
||||
serviceConfig = {
|
||||
Type = if hasNotify then "notify" else "simple";
|
||||
RuntimeDirectory = "mysqld";
|
||||
ExecStart = "${mysql}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions}";
|
||||
# The last two environment variables are used for starting Galera clusters
|
||||
ExecStart = "${mysql}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION";
|
||||
};
|
||||
|
||||
postStart = ''
|
||||
@ -362,7 +364,7 @@ in
|
||||
${optionalString (cfg.ensureDatabases != []) ''
|
||||
(
|
||||
${concatMapStrings (database: ''
|
||||
echo "CREATE DATABASE IF NOT EXISTS ${database};"
|
||||
echo "CREATE DATABASE IF NOT EXISTS \`${database}\`;"
|
||||
'') cfg.ensureDatabases}
|
||||
) | ${mysql}/bin/mysql -u root -N
|
||||
''}
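The added backticks matter for database names that are not plain identifiers; a hedged example (the hyphenated name is hypothetical):

  {
    # Quoted as `my-app` in the generated CREATE DATABASE statement.
    services.mysql.ensureDatabases = [ "my-app" ];
  }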
|
||||
|
@ -15,6 +15,19 @@ let
|
||||
mkName = p: "pki/fwupd/${baseNameOf (toString p)}";
|
||||
mkEtcFile = p: nameValuePair (mkName p) { source = p; };
|
||||
in listToAttrs (map mkEtcFile cfg.extraTrustedKeys);
|
||||
|
||||
# We cannot include the file in $out and rely on filesInstalledToEtc
|
||||
# to install it because it would create a cyclic dependency between
|
||||
# the outputs. We also need to enable the remote,
|
||||
# which should not be done by default.
|
||||
testRemote = if cfg.enableTestRemote then {
|
||||
"fwupd/remotes.d/fwupd-tests.conf" = {
|
||||
source = pkgs.runCommand "fwupd-tests-enabled.conf" {} ''
|
||||
sed "s,^Enabled=false,Enabled=true," \
|
||||
"${pkgs.fwupd.installedTests}/etc/fwupd/remotes.d/fwupd-tests.conf" > "$out"
|
||||
'';
|
||||
};
|
||||
} else {};
|
||||
in {
|
||||
|
||||
###### interface
|
||||
@ -40,7 +53,7 @@ in {
|
||||
|
||||
blacklistPlugins = mkOption {
|
||||
type = types.listOf types.string;
|
||||
default = [];
|
||||
default = [ "test" ];
|
||||
example = [ "udev" ];
|
||||
description = ''
|
||||
Allow blacklisting specific plugins
|
||||
@ -55,6 +68,15 @@ in {
|
||||
Installing a public key allows firmware signed with a matching private key to be recognized as trusted, which may require less authentication to install than for untrusted files. By default trusted firmware can be upgraded (but not downgraded) without the user or administrator password. Only very few keys are installed by default.
|
||||
'';
|
||||
};
|
||||
|
||||
enableTestRemote = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable test remote. This is used by
|
||||
<link xlink:href="https://github.com/hughsie/fwupd/blob/master/data/installed-tests/README.md">installed tests</link>.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@ -78,7 +100,7 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
} // originalEtc // extraTrustedKeys;
|
||||
} // originalEtc // extraTrustedKeys // testRemote;
|
||||
|
||||
services.dbus.packages = [ pkgs.fwupd ];
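A minimal sketch of a configuration exercising the new options (it mirrors the installed-tests setup used further down in nixos/tests/fwupd.nix):

  {
    services.fwupd.enable = true;
    services.fwupd.enableTestRemote = true;  # enables the fwupd-tests.conf remote shown above
    services.fwupd.blacklistPlugins = [];    # override the new [ "test" ] default so the test plugin runs
  }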
|
||||
|
||||
|
@ -145,7 +145,7 @@ in
|
||||
PLEX_MEDIA_SERVER_HOME="${cfg.package}/usr/lib/plexmediaserver";
|
||||
PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6";
|
||||
PLEX_MEDIA_SERVER_TMPDIR="/tmp";
|
||||
LD_LIBRARY_PATH="${cfg.package}/usr/lib/plexmediaserver";
|
||||
LD_LIBRARY_PATH="/run/opengl-driver/lib:${cfg.package}/usr/lib/plexmediaserver";
|
||||
LC_ALL="en_US.UTF-8";
|
||||
LANG="en_US.UTF-8";
|
||||
};
|
||||
|
@ -292,6 +292,7 @@ in
|
||||
# execute redmine required commands prior to starting the application
|
||||
# NOTE: su required in case using mysql socket authentication
|
||||
/run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake db:migrate'
|
||||
/run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake redmine:plugins:migrate'
|
||||
/run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake redmine:load_default_data'
|
||||
|
||||
|
||||
|
@ -88,6 +88,8 @@ in {
|
||||
ExecStart = "${cfg.package}/sbin/collectd -C ${conf} -f";
|
||||
User = cfg.user;
|
||||
PermissionsStartOnly = true;
|
||||
Restart = "on-failure";
|
||||
RestartSec = 3;
|
||||
};
|
||||
|
||||
preStart = ''
|
||||
|
@ -10,9 +10,14 @@ let
|
||||
ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
|
||||
'';
|
||||
|
||||
plugins = [
|
||||
"${pkgs.netdata}/libexec/netdata/plugins.d"
|
||||
"${wrappedPlugins}/libexec/netdata/plugins.d"
|
||||
] ++ cfg.extraPluginPaths;
|
||||
|
||||
localConfig = {
|
||||
global = {
|
||||
"plugins directory" = "${pkgs.netdata}/libexec/netdata/plugins.d ${wrappedPlugins}/libexec/netdata/plugins.d";
|
||||
"plugins directory" = concatStringsSep " " plugins;
|
||||
};
|
||||
web = {
|
||||
"web files owner" = "root";
|
||||
@ -78,6 +83,24 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
extraPluginPaths = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = [ ];
|
||||
example = literalExample ''
|
||||
[ "/path/to/plugins.d" ]
|
||||
'';
|
||||
description = ''
|
||||
Extra paths to add to the netdata global "plugins directory"
|
||||
option. Useful for when you want to include your own
|
||||
collection scripts.
|
||||
</para><para>
|
||||
Details about writing a custom netdata plugin are available at:
|
||||
<link xlink:href="https://docs.netdata.cloud/collectors/plugins.d/"/>
|
||||
</para><para>
|
||||
Cannot be combined with configText.
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.attrsOf types.attrs;
|
||||
default = {};
|
||||
|
@ -325,7 +325,8 @@ let
|
||||
promTypes.relabel_config = types.submodule {
|
||||
options = {
|
||||
source_labels = mkOption {
|
||||
type = types.listOf types.str;
|
||||
type = with types; nullOr (listOf str);
|
||||
default = null;
|
||||
description = ''
|
||||
The source labels select values from existing labels. Their content
|
||||
is concatenated using the configured separator and matched against
|
||||
|
@ -18,12 +18,34 @@ in
|
||||
socketPath = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/run/dovecot/stats";
|
||||
example = "/var/run/dovecot2/stats";
|
||||
example = "/var/run/dovecot2/old-stats";
|
||||
description = ''
|
||||
Path under which the stats socket is placed.
|
||||
The user/group under which the exporter runs,
|
||||
should be able to access the socket in order
|
||||
to scrape the metrics successfully.
|
||||
|
||||
Please keep in mind that the stats module has changed in
|
||||
<link xlink:href="https://wiki2.dovecot.org/Upgrading/2.3">Dovecot 2.3+</link> which
|
||||
is not <link xlink:href="https://github.com/kumina/dovecot_exporter/issues/8">compatible with this exporter</link>.
|
||||
|
||||
The following extra config has to be passed to Dovecot to ensure that recent versions
|
||||
work with this exporter:
|
||||
<programlisting>
|
||||
{
|
||||
<xref linkend="opt-services.prometheus.exporters.dovecot.enable" /> = true;
|
||||
<xref linkend="opt-services.prometheus.exporters.dovecot.socketPath" /> = "/var/run/dovecot2/old-stats";
|
||||
<xref linkend="opt-services.dovecot2.extraConfig" /> = '''
|
||||
mail_plugins = $mail_plugins old_stats
|
||||
service old-stats {
|
||||
unix_listener old-stats {
|
||||
user = nobody
|
||||
group = nobody
|
||||
}
|
||||
}
|
||||
''';
|
||||
}
|
||||
</programlisting>
|
||||
'';
|
||||
};
|
||||
scopes = mkOption {
|
||||
|
@ -13,7 +13,7 @@ let
|
||||
overrides = ${cfg.privateConfig}
|
||||
|
||||
[server:main]
|
||||
use = egg:Paste#http
|
||||
use = egg:gunicorn
|
||||
host = ${cfg.listen.address}
|
||||
port = ${toString cfg.listen.port}
|
||||
|
||||
@ -30,6 +30,8 @@ let
|
||||
audiences = ${removeSuffix "/" cfg.publicUrl}
|
||||
'';
|
||||
|
||||
user = "syncserver";
|
||||
group = "syncserver";
|
||||
in
|
||||
|
||||
{
|
||||
@ -126,15 +128,14 @@ in
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
systemd.services.syncserver = let
|
||||
syncServerEnv = pkgs.python.withPackages(ps: with ps; [ syncserver pasteScript requests ]);
|
||||
user = "syncserver";
|
||||
group = "syncserver";
|
||||
in {
|
||||
systemd.services.syncserver = {
|
||||
after = [ "network.target" ];
|
||||
description = "Firefox Sync Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.coreutils syncServerEnv ];
|
||||
path = [
|
||||
pkgs.coreutils
|
||||
(pkgs.python.withPackages (ps: [ pkgs.syncserver ps.gunicorn ]))
|
||||
];
|
||||
|
||||
serviceConfig = {
|
||||
User = user;
|
||||
@ -166,14 +167,17 @@ in
|
||||
chown ${user}:${group} ${defaultDbLocation}
|
||||
fi
|
||||
'';
|
||||
serviceConfig.ExecStart = "${syncServerEnv}/bin/paster serve ${syncServerIni}";
|
||||
|
||||
script = ''
|
||||
gunicorn --paste ${syncServerIni}
|
||||
'';
|
||||
};
|
||||
|
||||
users.users.syncserver = {
|
||||
group = "syncserver";
|
||||
users.users.${user} = {
|
||||
inherit group;
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
users.groups.syncserver = {};
|
||||
users.groups.${group} = {};
|
||||
};
|
||||
}
|
||||
|
@ -142,7 +142,6 @@ in
|
||||
description = "Collection of named nylon instances";
|
||||
type = with types; loaOf (submodule nylonOpts);
|
||||
internal = true;
|
||||
options = [ nylonOpts ];
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -513,6 +513,7 @@ in
|
||||
RuntimeDirectory = [ "prosody" ];
|
||||
PIDFile = "/run/prosody/prosody.pid";
|
||||
ExecStart = "${cfg.package}/bin/prosodyctl start";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -11,7 +11,7 @@ let
|
||||
|
||||
userOptions = {
|
||||
|
||||
openssh.authorizedKeys = {
|
||||
options.openssh.authorizedKeys = {
|
||||
keys = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
@ -320,7 +320,7 @@ in
|
||||
};
|
||||
|
||||
users.users = mkOption {
|
||||
options = [ userOptions ];
|
||||
type = with types; loaOf (submodule userOptions);
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -184,4 +184,5 @@ in
|
||||
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ erictapen ];
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
{ config, lib, pkgs, utils, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
@ -193,7 +193,7 @@ in {
|
||||
# FIXME: start a separate wpa_supplicant instance per interface.
|
||||
systemd.services.wpa_supplicant = let
|
||||
ifaces = cfg.interfaces;
|
||||
deviceUnit = interface: [ "sys-subsystem-net-devices-${interface}.device" ];
|
||||
deviceUnit = interface: [ "sys-subsystem-net-devices-${utils.escapeSystemdPath interface}.device" ];
|
||||
in {
|
||||
description = "WPA Supplicant";
|
||||
|
||||
|
@ -26,6 +26,12 @@ let
|
||||
substituteInPlace $out/sesman.ini \
|
||||
--replace LogFile=xrdp-sesman.log LogFile=/dev/null \
|
||||
--replace EnableSyslog=1 EnableSyslog=0
|
||||
|
||||
# Ensure that clipboard works for non-ASCII characters
|
||||
sed -i -e '/.*SessionVariables.*/ a\
|
||||
LANG=${config.i18n.defaultLocale}\
|
||||
LOCALE_ARCHIVE=${config.i18n.glibcLocales}/lib/locale/locale-archive
|
||||
' $out/sesman.ini
|
||||
'';
|
||||
in
|
||||
{
|
||||
|
@ -30,13 +30,20 @@ let
|
||||
|
||||
preStart = ''
|
||||
${concatStringsSep " \\\n" (["mkdir -p"] ++ map escapeShellArg specPaths)}
|
||||
${pkgs.certmgr}/bin/certmgr -f ${certmgrYaml} check
|
||||
${cfg.package}/bin/certmgr -f ${certmgrYaml} check
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.services.certmgr = {
|
||||
enable = mkEnableOption "certmgr";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.certmgr;
|
||||
defaultText = "pkgs.certmgr";
|
||||
description = "Which certmgr package to use in the service.";
|
||||
};
|
||||
|
||||
defaultRemote = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1:8888";
|
||||
@ -187,7 +194,7 @@ in
|
||||
serviceConfig = {
|
||||
Restart = "always";
|
||||
RestartSec = "10s";
|
||||
ExecStart = "${pkgs.certmgr}/bin/certmgr -f ${certmgrYaml}";
|
||||
ExecStart = "${cfg.package}/bin/certmgr -f ${certmgrYaml}";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -50,7 +50,7 @@ in
|
||||
path = [ pkgs.munge pkgs.coreutils ];
|
||||
|
||||
preStart = ''
|
||||
chmod 0700 ${cfg.password}
|
||||
chmod 0400 ${cfg.password}
|
||||
mkdir -p /var/lib/munge -m 0711
|
||||
chown -R munge:munge /var/lib/munge
|
||||
mkdir -p /run/munge -m 0755
|
||||
|
nixos/modules/services/security/nginx-sso.nix (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.nginx.sso;
|
||||
pkg = getBin pkgs.nginx-sso;
|
||||
configYml = pkgs.writeText "nginx-sso.yml" (builtins.toJSON cfg.configuration);
|
||||
in {
|
||||
options.services.nginx.sso = {
|
||||
enable = mkEnableOption "nginx-sso service";
|
||||
|
||||
configuration = mkOption {
|
||||
type = types.attrsOf types.unspecified;
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
listen = { addr = "127.0.0.1"; port = 8080; };
|
||||
|
||||
providers.token.tokens = {
|
||||
myuser = "MyToken";
|
||||
};
|
||||
|
||||
acl = {
|
||||
rule_sets = [
|
||||
{
|
||||
rules = [ { field = "x-application"; equals = "MyApp"; } ];
|
||||
allow = [ "myuser" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
nginx-sso configuration
|
||||
(<link xlink:href="https://github.com/Luzifer/nginx-sso/wiki/Main-Configuration">documentation</link>)
|
||||
as a Nix attribute set.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.nginx-sso = {
|
||||
description = "Nginx SSO Backend";
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkg}/bin/nginx-sso \
|
||||
--config ${configYml} \
|
||||
--frontend-dir ${pkg}/share/frontend
|
||||
'';
|
||||
Restart = "always";
|
||||
DynamicUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
@ -5,6 +5,9 @@ with lib;
|
||||
let
|
||||
cfg = config.services.sks;
|
||||
sksPkg = cfg.package;
|
||||
dbConfig = pkgs.writeText "DB_CONFIG" ''
|
||||
${cfg.extraDbConfig}
|
||||
'';
|
||||
|
||||
in {
|
||||
meta.maintainers = with maintainers; [ primeos calbrecht jcumming ];
|
||||
@ -39,6 +42,20 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
extraDbConfig = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Set contents of the files "KDB/DB_CONFIG" and "PTree/DB_CONFIG" within
|
||||
the ''${dataDir} directory. This is used to configure options for the
|
||||
database for the sks key server.
|
||||
|
||||
Documentation of the available options can be found in the file named
|
||||
"sampleConfig/DB_CONFIG" in the following repository:
|
||||
https://bitbucket.org/skskeyserver/sks-keyserver/src
|
||||
'';
|
||||
};
|
||||
|
||||
hkpAddress = mkOption {
|
||||
default = [ "127.0.0.1" "::1" ];
|
||||
type = types.listOf types.str;
|
||||
@ -99,6 +116,17 @@ in {
|
||||
${lib.optionalString (cfg.webroot != null)
|
||||
"ln -sfT \"${cfg.webroot}\" web"}
|
||||
mkdir -p dump
|
||||
# Check that both database configs are symlinks before overwriting them
|
||||
if [ -e KDB/DB_CONFIG ] && [ ! -L KDB/DB_CONFIG ]; then
|
||||
echo "KDB/DB_CONFIG exists but is not a symlink." >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ -e PTree/DB_CONFIG ] && [ ! -L PTree/DB_CONFIG ]; then
|
||||
echo "PTree/DB_CONFIG exists but is not a symlink." >&2
|
||||
exit 1
|
||||
fi
|
||||
ln -sf ${dbConfig} KDB/DB_CONFIG
|
||||
ln -sf ${dbConfig} PTree/DB_CONFIG
|
||||
${sksPkg}/bin/sks build dump/*.gpg -n 10 -cache 100 || true #*/
|
||||
${sksPkg}/bin/sks cleandb || true
|
||||
${sksPkg}/bin/sks pbuild -cache 20 -ptree_cache 70 || true
|
||||
|
@ -4,6 +4,7 @@ with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.sshguard;
|
||||
|
||||
in {
|
||||
|
||||
###### interface
|
||||
@ -77,65 +78,65 @@ in {
|
||||
Systemd services sshguard should receive logs of.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
environment.systemPackages = [ pkgs.sshguard pkgs.iptables pkgs.ipset ];
|
||||
|
||||
environment.etc."sshguard.conf".text = let
|
||||
list_services = ( name: "-t ${name} ");
|
||||
in ''
|
||||
BACKEND="${pkgs.sshguard}/libexec/sshg-fw-ipset"
|
||||
LOGREADER="LANG=C ${pkgs.systemd}/bin/journalctl -afb -p info -n1 ${toString (map list_services cfg.services)} -o cat"
|
||||
args = lib.concatStringsSep " " ([
|
||||
"-afb"
|
||||
"-p info"
|
||||
"-o cat"
|
||||
"-n1"
|
||||
] ++ (map (name: "-t ${escapeShellArg name}") cfg.services));
|
||||
in ''
|
||||
BACKEND="${pkgs.sshguard}/libexec/sshg-fw-ipset"
|
||||
LOGREADER="LANG=C ${pkgs.systemd}/bin/journalctl ${args}"
|
||||
'';
|
||||
|
||||
systemd.services.sshguard = {
|
||||
description = "SSHGuard brute-force attacks protection system";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
partOf = optional config.networking.firewall.enable "firewall.service";
|
||||
|
||||
path = with pkgs; [ iptables ipset iproute systemd ];
|
||||
|
||||
postStart = ''
|
||||
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard4 hash:ip family inet
|
||||
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard6 hash:ip family inet6
|
||||
${pkgs.iptables}/bin/iptables -I INPUT -m set --match-set sshguard4 src -j DROP
|
||||
${pkgs.iptables}/bin/ip6tables -I INPUT -m set --match-set sshguard6 src -j DROP
|
||||
'';
|
||||
|
||||
systemd.services.sshguard =
|
||||
{ description = "SSHGuard brute-force attacks protection system";
|
||||
preStop = ''
|
||||
${pkgs.iptables}/bin/iptables -D INPUT -m set --match-set sshguard4 src -j DROP
|
||||
${pkgs.iptables}/bin/ip6tables -D INPUT -m set --match-set sshguard6 src -j DROP
|
||||
'';
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
partOf = optional config.networking.firewall.enable "firewall.service";
|
||||
unitConfig.Documentation = "man:sshguard(8)";
|
||||
|
||||
path = [ pkgs.iptables pkgs.ipset pkgs.iproute pkgs.systemd ];
|
||||
|
||||
postStart = ''
|
||||
mkdir -p /var/lib/sshguard
|
||||
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard4 hash:ip family inet
|
||||
${pkgs.ipset}/bin/ipset -quiet create -exist sshguard6 hash:ip family inet6
|
||||
${pkgs.iptables}/bin/iptables -I INPUT -m set --match-set sshguard4 src -j DROP
|
||||
${pkgs.iptables}/bin/ip6tables -I INPUT -m set --match-set sshguard6 src -j DROP
|
||||
'';
|
||||
|
||||
preStop = ''
|
||||
${pkgs.iptables}/bin/iptables -D INPUT -m set --match-set sshguard4 src -j DROP
|
||||
${pkgs.iptables}/bin/ip6tables -D INPUT -m set --match-set sshguard6 src -j DROP
|
||||
'';
|
||||
|
||||
unitConfig.Documentation = "man:sshguard(8)";
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = let
|
||||
list_whitelist = ( name: "-w ${name} ");
|
||||
in ''
|
||||
${pkgs.sshguard}/bin/sshguard -a ${toString cfg.attack_threshold} ${optionalString (cfg.blacklist_threshold != null) "-b ${toString cfg.blacklist_threshold}:${cfg.blacklist_file} "}-i /run/sshguard/sshguard.pid -p ${toString cfg.blocktime} -s ${toString cfg.detection_time} ${toString (map list_whitelist cfg.whitelist)}
|
||||
'';
|
||||
PIDFile = "/run/sshguard/sshguard.pid";
|
||||
Restart = "always";
|
||||
|
||||
ReadOnlyDirectories = "/";
|
||||
ReadWriteDirectories = "/run/sshguard /var/lib/sshguard";
|
||||
RuntimeDirectory = "sshguard";
|
||||
StateDirectory = "sshguard";
|
||||
CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
|
||||
};
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = let
|
||||
args = lib.concatStringsSep " " ([
|
||||
"-a ${toString cfg.attack_threshold}"
|
||||
"-p ${toString cfg.blocktime}"
|
||||
"-s ${toString cfg.detection_time}"
|
||||
(optionalString (cfg.blacklist_threshold != null) "-b ${toString cfg.blacklist_threshold}:${cfg.blacklist_file}")
|
||||
] ++ (map (name: "-w ${escapeShellArg name}") cfg.whitelist));
|
||||
in "${pkgs.sshguard}/bin/sshguard ${args}";
|
||||
Restart = "always";
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "tmpfs";
|
||||
RuntimeDirectory = "sshguard";
|
||||
StateDirectory = "sshguard";
|
||||
CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -143,6 +143,9 @@ in
|
||||
${getLib pkgs.lz4}/lib/liblz4*.so* mr,
|
||||
${getLib pkgs.libkrb5}/lib/lib*.so* mr,
|
||||
${getLib pkgs.keyutils}/lib/libkeyutils*.so* mr,
|
||||
${getLib pkgs.utillinuxMinimal.out}/lib/libblkid.so.* mr,
|
||||
${getLib pkgs.utillinuxMinimal.out}/lib/libmount.so.* mr,
|
||||
${getLib pkgs.utillinuxMinimal.out}/lib/libuuid.so.* mr,
|
||||
|
||||
@{PROC}/sys/kernel/random/uuid r,
|
||||
@{PROC}/sys/vm/overcommit_memory r,
|
||||
|
@ -151,7 +151,7 @@ let
|
||||
|
||||
|
||||
loggingConf = (if mainCfg.logFormat != "none" then ''
|
||||
ErrorLog ${mainCfg.logDir}/error_log
|
||||
ErrorLog ${mainCfg.logDir}/error.log
|
||||
|
||||
LogLevel notice
|
||||
|
||||
@ -160,7 +160,7 @@ let
|
||||
LogFormat "%{Referer}i -> %U" referer
|
||||
LogFormat "%{User-agent}i" agent
|
||||
|
||||
CustomLog ${mainCfg.logDir}/access_log ${mainCfg.logFormat}
|
||||
CustomLog ${mainCfg.logDir}/access.log ${mainCfg.logFormat}
|
||||
'' else ''
|
||||
ErrorLog /dev/null
|
||||
'');
|
||||
@ -187,8 +187,8 @@ let
|
||||
SSLRandomSeed startup builtin
|
||||
SSLRandomSeed connect builtin
|
||||
|
||||
SSLProtocol All -SSLv2 -SSLv3
|
||||
SSLCipherSuite HIGH:!aNULL:!MD5:!EXP
|
||||
SSLProtocol ${mainCfg.sslProtocols}
|
||||
SSLCipherSuite ${mainCfg.sslCiphers}
|
||||
SSLHonorCipherOrder on
|
||||
'';
|
||||
|
||||
@ -261,8 +261,8 @@ let
|
||||
'' else ""}
|
||||
|
||||
${if !isMainServer && mainCfg.logPerVirtualHost then ''
|
||||
ErrorLog ${mainCfg.logDir}/error_log-${cfg.hostName}
|
||||
CustomLog ${mainCfg.logDir}/access_log-${cfg.hostName} ${cfg.logFormat}
|
||||
ErrorLog ${mainCfg.logDir}/error-${cfg.hostName}.log
|
||||
CustomLog ${mainCfg.logDir}/access-${cfg.hostName}.log ${cfg.logFormat}
|
||||
'' else ""}
|
||||
|
||||
${optionalString (robotsTxt != "") ''
|
||||
@ -630,6 +630,19 @@ in
|
||||
description =
|
||||
"Maximum number of httpd requests answered per httpd child (prefork), 0 means unlimited";
|
||||
};
|
||||
|
||||
sslCiphers = mkOption {
|
||||
type = types.str;
|
||||
default = "HIGH:!aNULL:!MD5:!EXP";
|
||||
description = "Cipher Suite available for negotiation in SSL proxy handshake.";
|
||||
};
|
||||
|
||||
sslProtocols = mkOption {
|
||||
type = types.str;
|
||||
default = "All -SSLv2 -SSLv3";
|
||||
example = "All -SSLv2 -SSLv3 -TLSv1";
|
||||
description = "Allowed SSL/TLS protocol versions.";
|
||||
};
|
||||
}
|
||||
|
||||
# Include the options shared between the main server and virtual hosts.
|
||||
|
@ -8,21 +8,31 @@ let
|
||||
|
||||
stateDir = "/run/phpfpm";
|
||||
|
||||
poolConfigs = cfg.poolConfigs // mapAttrs mkPool cfg.pools;
|
||||
poolConfigs =
|
||||
(mapAttrs mapPoolConfig cfg.poolConfigs) //
|
||||
(mapAttrs mapPool cfg.pools);
|
||||
|
||||
mkPool = n: p: ''
|
||||
listen = ${p.listen}
|
||||
${p.extraConfig}
|
||||
'';
|
||||
mapPoolConfig = n: p: {
|
||||
phpPackage = cfg.phpPackage;
|
||||
config = p;
|
||||
};
|
||||
|
||||
fpmCfgFile = pool: poolConfig: pkgs.writeText "phpfpm-${pool}.conf" ''
|
||||
mapPool = n: p: {
|
||||
phpPackage = p.phpPackage;
|
||||
config = ''
|
||||
listen = ${p.listen}
|
||||
${p.extraConfig}
|
||||
'';
|
||||
};
|
||||
|
||||
fpmCfgFile = pool: conf: pkgs.writeText "phpfpm-${pool}.conf" ''
|
||||
[global]
|
||||
error_log = syslog
|
||||
daemonize = no
|
||||
${cfg.extraConfig}
|
||||
|
||||
[${pool}]
|
||||
${poolConfig}
|
||||
${conf}
|
||||
'';
|
||||
|
||||
phpIni = pkgs.runCommand "php.ini" {
|
||||
@ -97,13 +107,14 @@ in {
|
||||
|
||||
pools = mkOption {
|
||||
type = types.attrsOf (types.submodule (import ./pool-options.nix {
|
||||
inherit lib;
|
||||
inherit lib config;
|
||||
}));
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
mypool = {
|
||||
listen = "/path/to/unix/socket";
|
||||
phpPackage = pkgs.php;
|
||||
extraConfig = '''
|
||||
user = nobody
|
||||
pm = dynamic
|
||||
@ -144,7 +155,7 @@ in {
|
||||
mkdir -p ${stateDir}
|
||||
'';
|
||||
serviceConfig = let
|
||||
cfgFile = fpmCfgFile pool poolConfig;
|
||||
cfgFile = fpmCfgFile pool poolConfig.config;
|
||||
in {
|
||||
Slice = "phpfpm.slice";
|
||||
PrivateDevices = true;
|
||||
@ -153,7 +164,7 @@ in {
|
||||
# XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
|
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||
Type = "notify";
|
||||
ExecStart = "${cfg.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
|
||||
ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
|
||||
};
|
||||
}
|
||||
|
@ -1,4 +1,8 @@
|
||||
{ lib }:
|
||||
{ lib, config }:
|
||||
|
||||
let
|
||||
fpmCfg = config.services.phpfpm;
|
||||
in
|
||||
|
||||
with lib; {
|
||||
|
||||
@ -12,6 +16,15 @@ with lib; {
|
||||
'';
|
||||
};
|
||||
|
||||
phpPackage = mkOption {
|
||||
type = types.package;
|
||||
default = fpmCfg.phpPackage;
|
||||
defaultText = "config.services.phpfpm.phpPackage";
|
||||
description = ''
|
||||
The PHP package to use for running this PHP-FPM pool.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
example = ''
|
||||
|
@ -36,6 +36,8 @@ let
|
||||
${pkgs.glib.dev}/bin/glib-compile-schemas $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas/
|
||||
'';
|
||||
|
||||
flashbackEnabled = cfg.flashback.enableMetacity || length cfg.flashback.customSessions > 0;
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
@ -71,6 +73,36 @@ in {
|
||||
};
|
||||
|
||||
debug = mkEnableOption "gnome-session debug messages";
|
||||
|
||||
flashback = {
|
||||
enableMetacity = mkEnableOption "Enable the standard GNOME Flashback session with Metacity.";
|
||||
|
||||
customSessions = mkOption {
|
||||
type = types.listOf (types.submodule {
|
||||
options = {
|
||||
wmName = mkOption {
|
||||
type = types.str;
|
||||
description = "The filename-compatible name of the window manager to use.";
|
||||
example = "xmonad";
|
||||
};
|
||||
|
||||
wmLabel = mkOption {
|
||||
type = types.str;
|
||||
description = "The pretty name of the window manager to use.";
|
||||
example = "XMonad";
|
||||
};
|
||||
|
||||
wmCommand = mkOption {
|
||||
type = types.str;
|
||||
description = "The executable of the window manager to use.";
|
||||
example = "\${pkgs.haskellPackages.xmonad}/bin/xmonad";
|
||||
};
|
||||
};
|
||||
});
|
||||
default = [];
|
||||
description = "Other GNOME Flashback sessions to enable.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
environment.gnome3.excludePackages = mkOption {
|
||||
@ -113,7 +145,9 @@ in {
|
||||
services.telepathy.enable = mkDefault true;
|
||||
networking.networkmanager.enable = mkDefault true;
|
||||
services.upower.enable = config.powerManagement.enable;
|
||||
services.dbus.packages = mkIf config.services.printing.enable [ pkgs.system-config-printer ];
|
||||
services.dbus.packages =
|
||||
optional config.services.printing.enable pkgs.system-config-printer ++
|
||||
optional flashbackEnabled pkgs.gnome3.gnome-screensaver;
|
||||
services.colord.enable = mkDefault true;
|
||||
services.packagekit.enable = mkDefault true;
|
||||
hardware.bluetooth.enable = mkDefault true;
|
||||
@ -127,7 +161,15 @@ in {
|
||||
|
||||
fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
|
||||
|
||||
services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ];
|
||||
services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ]
|
||||
++ map
|
||||
(wm: pkgs.gnome3.gnome-flashback.mkSessionForWm {
|
||||
inherit (wm) wmName wmLabel wmCommand;
|
||||
}) (optional cfg.flashback.enableMetacity {
|
||||
wmName = "metacity";
|
||||
wmLabel = "Metacity";
|
||||
wmCommand = "${pkgs.gnome3.metacity}/bin/metacity";
|
||||
} ++ cfg.flashback.customSessions);
|
||||
|
||||
environment.extraInit = ''
|
||||
${concatMapStrings (p: ''
|
||||
@ -177,6 +219,9 @@ in {
|
||||
"/share/nautilus-python/extensions"
|
||||
];
|
||||
|
||||
security.pam.services.gnome-screensaver = mkIf flashbackEnabled {
|
||||
enableGnomeKeyring = true;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
nixos/modules/system/boot/kernel_config.nix (new file, 137 lines)
@ -0,0 +1,137 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
findWinner = candidates: winner:
|
||||
any (x: x == winner) candidates;
|
||||
|
||||
# winners is an ordered list where first item wins over 2nd etc
|
||||
mergeAnswer = winners: locs: defs:
|
||||
let
|
||||
values = map (x: x.value) defs;
|
||||
freeformAnswer = intersectLists values winners;
|
||||
inter = intersectLists values winners;
|
||||
winner = head winners;
|
||||
in
|
||||
if defs == [] then abort "This case should never happen."
|
||||
else if winner == [] then abort "Give a valid list of winners"
|
||||
else if inter == [] then mergeOneOption locs defs
|
||||
else if findWinner values winner then
|
||||
winner
|
||||
else
|
||||
mergeAnswer (tail winners) locs defs;
|
||||
|
||||
mergeFalseByDefault = locs: defs:
|
||||
if defs == [] then abort "This case should never happen."
|
||||
else if any (x: x == false) defs then false
|
||||
else true;
|
||||
|
||||
kernelItem = types.submodule {
|
||||
options = {
|
||||
tristate = mkOption {
|
||||
type = types.enum [ "y" "m" "n" null ] // {
|
||||
merge = mergeAnswer [ "y" "m" "n" ];
|
||||
};
|
||||
default = null;
|
||||
internal = true;
|
||||
visible = true;
|
||||
description = ''
|
||||
Use this field for tristate kernel options expecting a "y" or "m" or "n".
|
||||
'';
|
||||
};
|
||||
|
||||
freeform = mkOption {
|
||||
type = types.nullOr types.str // {
|
||||
merge = mergeEqualOption;
|
||||
};
|
||||
default = null;
|
||||
example = ''MMC_BLOCK_MINORS.freeform = "32";'';
|
||||
description = ''
|
||||
Freeform description of a kernel configuration item value.
|
||||
'';
|
||||
};
|
||||
|
||||
optional = mkOption {
|
||||
type = types.bool // { merge = mergeFalseByDefault; };
|
||||
default = false;
|
||||
description = ''
|
||||
Whether the option should generate a failure when unused.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mkValue = with lib; val:
|
||||
let
|
||||
isNumber = c: elem c ["0" "1" "2" "3" "4" "5" "6" "7" "8" "9"];
|
||||
|
||||
in
|
||||
if (val == "") then "\"\""
|
||||
else if val == "y" || val == "m" || val == "n" then val
|
||||
else if all isNumber (stringToCharacters val) then val
|
||||
else if substring 0 2 val == "0x" then val
|
||||
else val; # FIXME: fix quoting one day
|
||||
|
||||
|
||||
# generate nix intermediate kernel config file of the form
|
||||
#
|
||||
# VIRTIO_MMIO m
|
||||
# VIRTIO_BLK y
|
||||
# VIRTIO_CONSOLE n
|
||||
# NET_9P_VIRTIO? y
|
||||
#
|
||||
# Borrowed from copumpkin https://github.com/NixOS/nixpkgs/pull/12158
|
||||
# returns a string, expr should be an attribute set
|
||||
# Use mkValuePreprocess to preprocess option values, aka mark 'modules' as 'yes' or vice-versa
|
||||
# use the identity if you don't want to override the configured values
|
||||
generateNixKConf = exprs:
|
||||
let
|
||||
mkConfigLine = key: item:
|
||||
let
|
||||
val = if item.freeform != null then item.freeform else item.tristate;
|
||||
in
|
||||
if val == null
|
||||
then ""
|
||||
else if (item.optional)
|
||||
then "${key}? ${mkValue val}\n"
|
||||
else "${key} ${mkValue val}\n";
|
||||
|
||||
mkConf = cfg: concatStrings (mapAttrsToList mkConfigLine cfg);
|
||||
in mkConf exprs;
|
||||
|
||||
in
|
||||
{
|
||||
|
||||
options = {
|
||||
|
||||
intermediateNixConfig = mkOption {
|
||||
readOnly = true;
|
||||
type = types.lines;
|
||||
example = ''
|
||||
USB? y
|
||||
DEBUG n
|
||||
'';
|
||||
description = ''
|
||||
The result of converting the structured kernel configuration in settings
|
||||
to an intermediate string that can be parsed by generate-config.pl to
|
||||
answer the kernel `make defconfig`.
|
||||
'';
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = types.attrsOf kernelItem;
|
||||
example = literalExample '' with lib.kernel; {
|
||||
"9P_NET" = yes;
|
||||
USB = optional yes;
|
||||
MMC_BLOCK_MINORS = freeform "32";
|
||||
}'';
|
||||
description = ''
|
||||
Structured kernel configuration.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
intermediateNixConfig = generateNixKConf config.settings;
|
||||
};
|
||||
}
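As a worked example of the mapping implemented by generateNixKConf, the settings example above would render to the following intermediate lines (my reading of mkConfigLine, not output captured from the module):

  with lib.kernel; { "9P_NET" = yes; USB = optional yes; MMC_BLOCK_MINORS = freeform "32"; }
  # becomes, line by line:
  #   9P_NET y
  #   USB? y               (optional = true adds the trailing "?")
  #   MMC_BLOCK_MINORS 32  (freeform values pass through mkValue unchanged)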
|
@ -525,16 +525,18 @@ in
|
||||
};
|
||||
|
||||
fileSystems = mkOption {
|
||||
options.neededForBoot = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
If set, this file system will be mounted in the initial
|
||||
ramdisk. By default, this applies to the root file system
|
||||
and to the file system containing
|
||||
<filename>/nix/store</filename>.
|
||||
'';
|
||||
};
|
||||
type = with lib.types; loaOf (submodule {
|
||||
options.neededForBoot = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
If set, this file system will be mounted in the initial
|
||||
ramdisk. By default, this applies to the root file system
|
||||
and to the file system containing
|
||||
<filename>/nix/store</filename>.
|
||||
'';
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -210,6 +210,15 @@ in rec {
|
||||
'';
|
||||
};
|
||||
|
||||
startLimitIntervalSec = mkOption {
|
||||
type = types.int;
|
||||
description = ''
|
||||
Configure unit start rate limiting. Units which are started
|
||||
more than the configured burst count within this interval are
|
||||
not permitted to start any more.
|
||||
'';
|
||||
};
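A small sketch of the new per-unit option (the service name is hypothetical):

  {
    systemd.services.my-sync = {
      startLimitIntervalSec = 300;           # emitted as StartLimitIntervalSec= in the [Unit] section
      serviceConfig.Restart = "on-failure";
    };
  }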
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
@ -193,7 +193,7 @@ let
|
||||
let mkScriptName = s: "unit-script-" + (replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape s) );
|
||||
in pkgs.writeTextFile { name = mkScriptName name; executable = true; inherit text; };
|
||||
|
||||
unitConfig = { config, ... }: {
|
||||
unitConfig = { config, options, ... }: {
|
||||
config = {
|
||||
unitConfig =
|
||||
optionalAttrs (config.requires != [])
|
||||
@ -219,7 +219,9 @@ let
|
||||
// optionalAttrs (config.documentation != []) {
|
||||
Documentation = toString config.documentation; }
|
||||
// optionalAttrs (config.onFailure != []) {
|
||||
OnFailure = toString config.onFailure;
|
||||
OnFailure = toString config.onFailure; }
|
||||
// optionalAttrs (options.startLimitIntervalSec.isDefined) {
|
||||
StartLimitIntervalSec = toString config.startLimitIntervalSec;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -12,28 +12,28 @@ let
|
||||
|
||||
encryptedFSOptions = {
|
||||
|
||||
encrypted = {
|
||||
options.encrypted = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = "The block device is backed by an encrypted one, adds this device as a initrd luks entry.";
|
||||
};
|
||||
|
||||
blkDev = mkOption {
|
||||
options.blkDev = mkOption {
|
||||
default = null;
|
||||
example = "/dev/sda1";
|
||||
type = types.nullOr types.str;
|
||||
description = "Location of the backing encrypted device.";
|
||||
};
|
||||
|
||||
label = mkOption {
|
||||
options.label = mkOption {
|
||||
default = null;
|
||||
example = "rootfs";
|
||||
type = types.nullOr types.str;
|
||||
description = "Label of the unlocked encrypted device. Set <literal>fileSystems.<name?>.device</literal> to <literal>/dev/mapper/<label></literal> to mount the unlocked device.";
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
options.keyFile = mkOption {
|
||||
default = null;
|
||||
example = "/mnt-root/root/.swapkey";
|
||||
type = types.nullOr types.str;
|
||||
@ -47,10 +47,10 @@ in
|
||||
|
||||
options = {
|
||||
fileSystems = mkOption {
|
||||
options = [encryptedFSOptions];
|
||||
type = with lib.types; loaOf (submodule encryptedFSOptions);
|
||||
};
|
||||
swapDevices = mkOption {
|
||||
options = [encryptedFSOptions];
|
||||
type = with lib.types; listOf (submodule encryptedFSOptions);
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -535,6 +535,7 @@ in
|
||||
|
||||
systemd.timers.zfs-scrub = {
|
||||
wantedBy = [ "timers.target" ];
|
||||
after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
|
||||
timerConfig = {
|
||||
OnCalendar = cfgScrub.interval;
|
||||
Persistent = "yes";
|
||||
|
@ -92,23 +92,24 @@ let
|
||||
exit($mainRes & 127 ? 255 : $mainRes << 8);
|
||||
'';
|
||||
|
||||
opts = { config, name, ... }: {
|
||||
options.runner = mkOption {
|
||||
internal = true;
|
||||
description = ''
|
||||
A script that runs the service outside of systemd,
|
||||
useful for testing or for using NixOS services outside
|
||||
of NixOS.
|
||||
'';
|
||||
};
|
||||
config.runner = makeScript name config;
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
systemd.services = mkOption {
|
||||
options =
|
||||
{ config, name, ... }:
|
||||
{ options.runner = mkOption {
|
||||
internal = true;
|
||||
description = ''
|
||||
A script that runs the service outside of systemd,
|
||||
useful for testing or for using NixOS services outside
|
||||
of NixOS.
|
||||
'';
|
||||
};
|
||||
config.runner = makeScript name config;
|
||||
};
|
||||
type = with types; attrsOf (submodule opts);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ let
|
||||
#! ${pkgs.runtimeShell} -e
|
||||
|
||||
# Initialise the container side of the veth pair.
|
||||
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ]; then
|
||||
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ] || [ -n "$HOST_BRIDGE" ]; then
|
||||
|
||||
ip link set host0 name eth0
|
||||
ip link set dev eth0 up
|
||||
@ -90,18 +90,20 @@ let
|
||||
|
||||
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ]; then
|
||||
extraFlags+=" --network-veth"
|
||||
if [ -n "$HOST_BRIDGE" ]; then
|
||||
extraFlags+=" --network-bridge=$HOST_BRIDGE"
|
||||
fi
|
||||
if [ -n "$HOST_PORT" ]; then
|
||||
OIFS=$IFS
|
||||
IFS=","
|
||||
for i in $HOST_PORT
|
||||
do
|
||||
extraFlags+=" --port=$i"
|
||||
done
|
||||
IFS=$OIFS
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$HOST_PORT" ]; then
|
||||
OIFS=$IFS
|
||||
IFS=","
|
||||
for i in $HOST_PORT
|
||||
do
|
||||
extraFlags+=" --port=$i"
|
||||
done
|
||||
IFS=$OIFS
|
||||
fi
|
||||
|
||||
if [ -n "$HOST_BRIDGE" ]; then
|
||||
extraFlags+=" --network-bridge=$HOST_BRIDGE"
|
||||
fi
|
||||
|
||||
extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
|
||||
|
@ -107,6 +107,7 @@ in
|
||||
initrdNetwork = handleTest ./initrd-network.nix {};
|
||||
installer = handleTest ./installer.nix {};
|
||||
ipv6 = handleTest ./ipv6.nix {};
|
||||
jackett = handleTest ./jackett.nix {};
|
||||
jenkins = handleTest ./jenkins.nix {};
|
||||
#kafka = handleTest ./kafka.nix {}; # broken since openjdk: 8u181 -> 8u192
|
||||
kerberos = handleTest ./kerberos/default.nix {};
|
||||
@ -120,6 +121,7 @@ in
|
||||
latestKernel.login = handleTest ./login.nix { latestKernel = true; };
|
||||
ldap = handleTest ./ldap.nix {};
|
||||
leaps = handleTest ./leaps.nix {};
|
||||
lidarr = handleTest ./lidarr.nix {};
|
||||
#lightdm = handleTest ./lightdm.nix {};
|
||||
login = handleTest ./login.nix {};
|
||||
#logstash = handleTest ./logstash.nix {};
|
||||
@ -151,6 +153,7 @@ in
|
||||
nfs4 = handleTest ./nfs.nix { version = 4; };
|
||||
nghttpx = handleTest ./nghttpx.nix {};
|
||||
nginx = handleTest ./nginx.nix {};
|
||||
nginx-sso = handleTest ./nginx-sso.nix {};
|
||||
nix-ssh-serve = handleTest ./nix-ssh-serve.nix {};
|
||||
novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
|
||||
nsd = handleTest ./nsd.nix {};
|
||||
@ -160,6 +163,7 @@ in
|
||||
osquery = handleTest ./osquery.nix {};
|
||||
ostree = handleTest ./ostree.nix {};
|
||||
pam-oath-login = handleTest ./pam-oath-login.nix {};
|
||||
pam-u2f = handleTest ./pam-u2f.nix {};
|
||||
peerflix = handleTest ./peerflix.nix {};
|
||||
pgjwt = handleTest ./pgjwt.nix {};
|
||||
pgmanage = handleTest ./pgmanage.nix {};
|
||||
@ -178,6 +182,7 @@ in
|
||||
quagga = handleTest ./quagga.nix {};
|
||||
quake3 = handleTest ./quake3.nix {};
|
||||
rabbitmq = handleTest ./rabbitmq.nix {};
|
||||
radarr = handleTest ./radarr.nix {};
|
||||
radicale = handleTest ./radicale.nix {};
|
||||
redmine = handleTest ./redmine.nix {};
|
||||
roundcube = handleTest ./roundcube.nix {};
|
||||
@ -194,12 +199,14 @@ in
|
||||
smokeping = handleTest ./smokeping.nix {};
|
||||
snapper = handleTest ./snapper.nix {};
|
||||
solr = handleTest ./solr.nix {};
|
||||
sonarr = handleTest ./sonarr.nix {};
|
||||
strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
|
||||
sudo = handleTest ./sudo.nix {};
|
||||
switchTest = handleTest ./switch-test.nix {};
|
||||
syncthing-relay = handleTest ./syncthing-relay.nix {};
|
||||
systemd = handleTest ./systemd.nix {};
|
||||
taskserver = handleTest ./taskserver.nix {};
|
||||
telegraf = handleTest ./telegraf.nix {};
|
||||
tomcat = handleTest ./tomcat.nix {};
|
||||
tor = handleTest ./tor.nix {};
|
||||
transmission = handleTest ./transmission.nix {};
|
||||
|
@ -23,7 +23,7 @@ in
|
||||
{
|
||||
name = "bittorrent";
|
||||
meta = with pkgs.stdenv.lib.maintainers; {
|
||||
maintainers = [ domenkozar eelco chaoflow rob wkennington bobvanderlinden ];
|
||||
maintainers = [ domenkozar eelco chaoflow rob bobvanderlinden ];
|
||||
};
|
||||
|
||||
nodes =
|
||||
|
@ -45,6 +45,19 @@ import ./make-test.nix ({ pkgs, ...} : {
|
||||
};
|
||||
};
|
||||
|
||||
containers.web-noip =
|
||||
{
|
||||
autoStart = true;
|
||||
privateNetwork = true;
|
||||
hostBridge = "br0";
|
||||
config =
|
||||
{ services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "foo@example.org";
|
||||
networking.firewall.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
virtualisation.pathsInNixDB = [ pkgs.stdenv ];
|
||||
};
|
||||
|
||||
@ -56,6 +69,10 @@ import ./make-test.nix ({ pkgs, ...} : {
|
||||
# Start the webserver container.
|
||||
$machine->succeed("nixos-container status webserver") =~ /up/ or die;
|
||||
|
||||
# Check if bridges exist inside containers
|
||||
$machine->succeed("nixos-container run webserver -- ip link show eth0");
|
||||
$machine->succeed("nixos-container run web-noip -- ip link show eth0");
|
||||
|
||||
"${containerIp}" =~ /([^\/]+)\/([0-9+])/;
|
||||
my $ip = $1;
|
||||
chomp $ip;
|
||||
|
@ -8,6 +8,8 @@ import ./make-test.nix ({ pkgs, ... }: {
|
||||
|
||||
machine = { pkgs, ... }: {
|
||||
services.fwupd.enable = true;
|
||||
services.fwupd.blacklistPlugins = []; # don't blacklist test plugin
|
||||
services.fwupd.enableTestRemote = true;
|
||||
environment.systemPackages = with pkgs; [ gnome-desktop-testing ];
|
||||
environment.variables.XDG_DATA_DIRS = [ "${pkgs.fwupd.installedTests}/share" ];
|
||||
virtualisation.memorySize = 768;
|
||||
|
@ -73,7 +73,7 @@ in {
$hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"on\"'");

# Toggle a binary sensor using hass-cli
$hass->succeed("${hassCli} entity get binary_sensor.mqtt_binary_sensor | grep -qF '\"state\": \"on\"'");
$hass->succeed("${hassCli} --output json entity get binary_sensor.mqtt_binary_sensor | grep -qF '\"state\": \"on\"'");
$hass->succeed("${hassCli} entity edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'");
$hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"off\"'");
@ -200,7 +200,7 @@ let
name = "installer-" + name;
meta = with pkgs.stdenv.lib.maintainers; {
# put global maintainers here, individuals go into makeInstallerTest fkt call
maintainers = [ wkennington ] ++ (meta.maintainers or []);
maintainers = (meta.maintainers or []);
};
nodes = {
18 nixos/tests/jackett.nix Normal file
@ -0,0 +1,18 @@
import ./make-test.nix ({ lib, ... }:

with lib;

rec {
name = "jackett";
meta.maintainers = with maintainers; [ etu ];

nodes.machine =
{ pkgs, ... }:
{ services.jackett.enable = true; };

testScript = ''
$machine->waitForUnit('jackett.service');
$machine->waitForOpenPort('9117');
$machine->succeed("curl --fail http://localhost:9117/");
'';
})
@ -1,41 +1,23 @@
import ./make-test.nix ({ pkgs, lib, ...} :

let
unlines = lib.concatStringsSep "\n";
unlinesAttrs = f: as: unlines (lib.mapAttrsToList f as);

dbDomain = "example.com";
dbSuffix = "dc=example,dc=com";
dbPath = "/var/db/openldap";
dbAdminDn = "cn=admin,${dbSuffix}";
dbAdminPwd = "test";
serverUri = "ldap:///";
dbAdminPwd = "admin-password";
# NOTE: slappasswd -h "{SSHA}" -s '${dbAdminPwd}'
dbAdminPwdHash = "{SSHA}i7FopSzkFQMrHzDMB1vrtkI0rBnwouP8";
ldapUser = "test-ldap-user";
ldapUserId = 10000;
ldapUserPwd = "test";
ldapUserPwd = "user-password";
# NOTE: slappasswd -h "{SSHA}" -s '${ldapUserPwd}'
ldapUserPwdHash = "{SSHA}v12XICMZNGT6r2KJ26rIkN8Vvvp4QX6i";
ldapGroup = "test-ldap-group";
ldapGroupId = 10000;
setupLdif = pkgs.writeText "test-ldap.ldif" ''
dn: ${dbSuffix}
dc: ${with lib; let dc = head (splitString "," dbSuffix); dcName = head (tail (splitString "=" dc)); in dcName}
o: ${dbSuffix}
objectclass: top
objectclass: dcObject
objectclass: organization

dn: cn=${ldapUser},${dbSuffix}
sn: ${ldapUser}
objectClass: person
objectClass: posixAccount
uid: ${ldapUser}
uidNumber: ${toString ldapUserId}
gidNumber: ${toString ldapGroupId}
homeDirectory: /home/${ldapUser}
loginShell: /bin/sh
userPassword: ${ldapUserPwd}

dn: cn=${ldapGroup},${dbSuffix}
objectClass: posixGroup
gidNumber: ${toString ldapGroupId}
memberUid: ${ldapUser}
'';
mkClient = useDaemon:
{ lib, ... }:
{
@ -43,13 +25,24 @@ let
virtualisation.vlans = [ 1 ];
security.pam.services.su.rootOK = lib.mkForce false;
users.ldap.enable = true;
users.ldap.daemon.enable = useDaemon;
users.ldap.daemon = {
enable = useDaemon;
rootpwmoddn = "cn=admin,${dbSuffix}";
rootpwmodpw = "/etc/nslcd.rootpwmodpw";
};
# NOTE: password stored in clear in Nix's store, but this is a test.
environment.etc."nslcd.rootpwmodpw".source = pkgs.writeText "rootpwmodpw" dbAdminPwd;
users.ldap.loginPam = true;
users.ldap.nsswitch = true;
users.ldap.server = "ldap://server";
users.ldap.base = "${dbSuffix}";
users.ldap.base = "ou=posix,${dbSuffix}";
users.ldap.bind = {
distinguishedName = "cn=admin,${dbSuffix}";
password = "/etc/ldap/bind.password";
};
# NOTE: password stored in clear in Nix's store, but this is a test.
environment.etc."ldap/bind.password".source = pkgs.writeText "password" dbAdminPwd;
};

in

{
@ -61,28 +54,237 @@ in
nodes = {

server =
{ pkgs, ... }:
{ pkgs, config, ... }:
let
inherit (config.services) openldap;

slapdConfig = pkgs.writeText "cn=config.ldif" (''
dn: cn=config
objectClass: olcGlobal
#olcPidFile: /run/slapd/slapd.pid
# List of arguments that were passed to the server
#olcArgsFile: /run/slapd/slapd.args
# Read slapd-config(5) for possible values
olcLogLevel: none
# The tool-threads parameter sets the actual amount of CPU's
# that is used for indexing.
olcToolThreads: 1

dn: olcDatabase={-1}frontend,cn=config
objectClass: olcDatabaseConfig
objectClass: olcFrontendConfig
# The maximum number of entries that is returned for a search operation
olcSizeLimit: 500
# Allow unlimited access to local connection from the local root user
olcAccess: to *
by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage
by * break
# Allow unauthenticated read access for schema and base DN autodiscovery
olcAccess: to dn.exact=""
by * read
olcAccess: to dn.base="cn=Subschema"
by * read

dn: olcDatabase=config,cn=config
objectClass: olcDatabaseConfig
olcRootDN: cn=admin,cn=config
#olcRootPW:
# NOTE: access to cn=config, system root can be manager
# with SASL mechanism (-Y EXTERNAL) over unix socket (-H ldapi://)
olcAccess: to *
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
by * break

dn: cn=schema,cn=config
objectClass: olcSchemaConfig

include: file://${pkgs.openldap}/etc/schema/core.ldif
include: file://${pkgs.openldap}/etc/schema/cosine.ldif
include: file://${pkgs.openldap}/etc/schema/nis.ldif
include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif

dn: cn=module{0},cn=config
objectClass: olcModuleList
# Where the dynamically loaded modules are stored
#olcModulePath: /usr/lib/ldap
olcModuleLoad: back_mdb

''
+ unlinesAttrs (olcSuffix: {conf, ...}:
"include: file://" + pkgs.writeText "config.ldif" conf
) slapdDatabases
);

slapdDatabases = {
"${dbSuffix}" = {
conf = ''
dn: olcBackend={1}mdb,cn=config
objectClass: olcBackendConfig

dn: olcDatabase={1}mdb,cn=config
olcSuffix: ${dbSuffix}
olcDbDirectory: ${openldap.dataDir}/${dbSuffix}
objectClass: olcDatabaseConfig
objectClass: olcMdbConfig
# NOTE: checkpoint the database periodically in case of system failure
# and to speed up slapd shutdown.
olcDbCheckpoint: 512 30
# Database max size is 1G
olcDbMaxSize: 1073741824
olcLastMod: TRUE
# NOTE: database superuser. Needed for syncrepl,
# and used to auth as admin through a TCP connection.
olcRootDN: cn=admin,${dbSuffix}
olcRootPW: ${dbAdminPwdHash}
#
olcDbIndex: objectClass eq
olcDbIndex: cn,uid eq
olcDbIndex: uidNumber,gidNumber eq
olcDbIndex: member,memberUid eq
#
olcAccess: to attrs=userPassword
by self write
by anonymous auth
by dn="cn=admin,${dbSuffix}" write
by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
by * none
olcAccess: to attrs=shadowLastChange
by self write
by dn="cn=admin,${dbSuffix}" write
by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
by * none
olcAccess: to dn.sub="ou=posix,${dbSuffix}"
by self read
by dn="cn=admin,${dbSuffix}" read
by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" read
olcAccess: to *
by self read
by * none
'';
data = ''
dn: ${dbSuffix}
objectClass: top
objectClass: dcObject
objectClass: organization
o: ${dbDomain}

dn: cn=admin,${dbSuffix}
objectClass: simpleSecurityObject
objectClass: organizationalRole
description: ${dbDomain} LDAP administrator
roleOccupant: ${dbSuffix}
userPassword: ${ldapUserPwdHash}

dn: ou=posix,${dbSuffix}
objectClass: top
objectClass: organizationalUnit

dn: ou=accounts,ou=posix,${dbSuffix}
objectClass: top
objectClass: organizationalUnit

dn: ou=groups,ou=posix,${dbSuffix}
objectClass: top
objectClass: organizationalUnit
''
+ lib.concatMapStrings posixAccount [
{ uid=ldapUser; uidNumber=ldapUserId; gidNumber=ldapGroupId; userPassword=ldapUserPwdHash; }
]
+ lib.concatMapStrings posixGroup [
{ gid=ldapGroup; gidNumber=ldapGroupId; members=[]; }
];
};
};

# NOTE: create a user account using the posixAccount objectClass.
posixAccount =
{ uid
, uidNumber ? null
, gidNumber ? null
, cn ? ""
, sn ? ""
, userPassword ? ""
, loginShell ? "/bin/sh"
}: ''

dn: uid=${uid},ou=accounts,ou=posix,${dbSuffix}
objectClass: person
objectClass: posixAccount
objectClass: shadowAccount
cn: ${cn}
gecos:
${if gidNumber == null then "#" else "gidNumber: ${toString gidNumber}"}
homeDirectory: /home/${uid}
loginShell: ${loginShell}
sn: ${sn}
${if uidNumber == null then "#" else "uidNumber: ${toString uidNumber}"}
${if userPassword == "" then "#" else "userPassword: ${userPassword}"}
'';

# NOTE: create a group using the posixGroup objectClass.
posixGroup =
{ gid
, gidNumber
, members
}: ''

dn: cn=${gid},ou=groups,ou=posix,${dbSuffix}
objectClass: top
objectClass: posixGroup
gidNumber: ${toString gidNumber}
${lib.concatMapStrings (member: "memberUid: ${member}\n") members}
'';
in
{
virtualisation.memorySize = 256;
virtualisation.vlans = [ 1 ];
networking.firewall.allowedTCPPorts = [ 389 ];
services.openldap.enable = true;
services.openldap.dataDir = dbPath;
services.openldap.dataDir = "/var/db/openldap";
services.openldap.configDir = "/var/db/slapd";
services.openldap.urlList = [
serverUri
"ldap:///"
"ldapi:///"
];
services.openldap.extraConfig = ''
include ${pkgs.openldap.out}/etc/schema/core.schema
include ${pkgs.openldap.out}/etc/schema/cosine.schema
include ${pkgs.openldap.out}/etc/schema/inetorgperson.schema
include ${pkgs.openldap.out}/etc/schema/nis.schema

database mdb
suffix ${dbSuffix}
rootdn ${dbAdminDn}
rootpw ${dbAdminPwd}
directory ${dbPath}
'';
systemd.services.openldap = {
preStart = ''
set -e
# NOTE: slapd's config is always re-initialized.
rm -rf "${openldap.configDir}"/cn=config \
"${openldap.configDir}"/cn=config.ldif
install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" "${openldap.configDir}"
# NOTE: olcDbDirectory must be created before adding the config.
'' +
unlinesAttrs (olcSuffix: {data, ...}: ''
# NOTE: database is always re-initialized.
rm -rf "${openldap.dataDir}/${olcSuffix}"
install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" \
"${openldap.dataDir}/${olcSuffix}"
'') slapdDatabases
+ ''
# NOTE: slapd is supposed to be stopped while in preStart,
# hence slap* commands can safely be used.
umask 0077
${pkgs.openldap}/bin/slapadd -n 0 \
-F "${openldap.configDir}" \
-l ${slapdConfig}
chown -R "${openldap.user}:${openldap.group}" "${openldap.configDir}"
# NOTE: slapadd(8): To populate the config database slapd-config(5),
# use -n 0 as it is always the first database.
# It must physically exist on the filesystem prior to this, however.
'' +
unlinesAttrs (olcSuffix: {data, ...}: ''
# NOTE: load database ${olcSuffix}
# (as root to avoid depending on sudo or chpst)
${pkgs.openldap}/bin/slapadd \
-F "${openldap.configDir}" \
-l ${pkgs.writeText "data.ldif" data}
'' + ''
# NOTE: redundant with default openldap's preStart, but do not harm.
chown -R "${openldap.user}:${openldap.group}" \
"${openldap.dataDir}/${olcSuffix}"
'') slapdDatabases;
};
};

client1 = mkClient true; # use nss_pam_ldapd
@ -91,15 +293,91 @@ in
};

testScript = ''
startAll;
$server->start;
$server->waitForUnit("default.target");

subtest "slapd", sub {
subtest "auth as database admin with SASL and check a POSIX account", sub {
$server->succeed(join ' ', 'test',
'"$(ldapsearch -LLL -H ldapi:// -Y EXTERNAL',
'-b \'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}\' ',
'-s base uidNumber |',
'sed -ne \'s/^uidNumber: \\(.*\\)/\\1/p\' ',
')" -eq ${toString ldapUserId}');
};
subtest "auth as database admin with password and check a POSIX account", sub {
$server->succeed(join ' ', 'test',
'"$(ldapsearch -LLL -H ldap://server',
'-D \'cn=admin,${dbSuffix}\' -w \'${dbAdminPwd}\' ',
'-b \'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}\' ',
'-s base uidNumber |',
'sed -ne \'s/^uidNumber: \\(.*\\)/\\1/p\' ',
')" -eq ${toString ldapUserId}');
};
};

$client1->start;
$client1->waitForUnit("default.target");

subtest "password", sub {
subtest "su with password to a POSIX account", sub {
$client1->succeed("${pkgs.expect}/bin/expect -c '" . join ';',
'spawn su "${ldapUser}"',
'expect "Password:"',
'send "${ldapUserPwd}\n"',
'expect "*"',
'send "whoami\n"',
'expect -ex "${ldapUser}" {exit}',
'exit 1' . "'");
};
subtest "change password of a POSIX account as root", sub {
$client1->succeed("chpasswd <<<'${ldapUser}:new-password'");
$client1->succeed("${pkgs.expect}/bin/expect -c '" . join ';',
'spawn su "${ldapUser}"',
'expect "Password:"',
'send "new-password\n"',
'expect "*"',
'send "whoami\n"',
'expect -ex "${ldapUser}" {exit}',
'exit 1' . "'");
$client1->succeed('chpasswd <<<\'${ldapUser}:${ldapUserPwd}\' ');
};
subtest "change password of a POSIX account from itself", sub {
$client1->succeed('chpasswd <<<\'${ldapUser}:${ldapUserPwd}\' ');
$client1->succeed("${pkgs.expect}/bin/expect -c '" . join ';',
'spawn su --login ${ldapUser} -c passwd',
'expect "Password: "',
'send "${ldapUserPwd}\n"',
'expect "(current) UNIX password: "',
'send "${ldapUserPwd}\n"',
'expect "New password: "',
'send "new-password\n"',
'expect "Retype new password: "',
'send "new-password\n"',
'expect "passwd: password updated successfully" {exit}',
'exit 1' . "'");
$client1->succeed("${pkgs.expect}/bin/expect -c '" . join ';',
'spawn su "${ldapUser}"',
'expect "Password:"',
'send "${ldapUserPwd}\n"',
'expect "su: Authentication failure" {exit}',
'exit 1' . "'");
$client1->succeed("${pkgs.expect}/bin/expect -c '" . join ';',
'spawn su "${ldapUser}"',
'expect "Password:"',
'send "new-password\n"',
'expect "*"',
'send "whoami\n"',
'expect -ex "${ldapUser}" {exit}',
'exit 1' . "'");
$client1->succeed('chpasswd <<<\'${ldapUser}:${ldapUserPwd}\' ');
};
};

$client2->start;
$client2->waitForUnit("default.target");

$server->succeed("ldapadd -D '${dbAdminDn}' -w ${dbAdminPwd} -H ${serverUri} -f '${setupLdif}'");

# NSS tests
subtest "nss", sub {
subtest "NSS", sub {
$client1->succeed("test \"\$(id -u '${ldapUser}')\" -eq ${toString ldapUserId}");
$client1->succeed("test \"\$(id -u -n '${ldapUser}')\" = '${ldapUser}'");
$client1->succeed("test \"\$(id -g '${ldapUser}')\" -eq ${toString ldapGroupId}");
@ -110,8 +388,7 @@ in
$client2->succeed("test \"\$(id -g -n '${ldapUser}')\" = '${ldapGroup}'");
};

# PAM tests
subtest "pam", sub {
subtest "PAM", sub {
$client1->succeed("echo ${ldapUserPwd} | su -l '${ldapUser}' -c true");
$client2->succeed("echo ${ldapUserPwd} | su -l '${ldapUser}' -c true");
};
18 nixos/tests/lidarr.nix Normal file
@ -0,0 +1,18 @@
import ./make-test.nix ({ lib, ... }:

with lib;

rec {
name = "lidarr";
meta.maintainers = with maintainers; [ etu ];

nodes.machine =
{ pkgs, ... }:
{ services.lidarr.enable = true; };

testScript = ''
$machine->waitForUnit('lidarr.service');
$machine->waitForOpenPort('8686');
$machine->succeed("curl --fail http://localhost:8686/");
'';
})
@ -8,7 +8,7 @@ import ./make-test.nix ({ pkgs, ...} : let
in {
name = "mongodb";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ bluescreen303 offline wkennington cstrahan rvl ];
maintainers = [ bluescreen303 offline cstrahan rvl ];
};

nodes = {
@ -24,7 +24,7 @@ import ./make-test.nix ({ pkgs, lib, withFirewall, withConntrackHelpers ? false,
name = "nat" + (if withFirewall then "WithFirewall" else "Standalone")
+ (lib.optionalString withConntrackHelpers "withConntrackHelpers");
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ eelco chaoflow rob wkennington ];
maintainers = [ eelco chaoflow rob ];
};

nodes =
@ -606,7 +606,4 @@ let

in mapAttrs (const (attrs: makeTest (attrs // {
name = "${attrs.name}-Networking-${if networkd then "Networkd" else "Scripted"}";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ wkennington ];
};
}))) testCases
@ -20,7 +20,7 @@ in
{
name = "nfs";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ eelco chaoflow wkennington ];
maintainers = [ eelco chaoflow ];
};

nodes =
44 nixos/tests/nginx-sso.nix Normal file
@ -0,0 +1,44 @@
import ./make-test.nix ({ pkgs, ... }: {
name = "nginx-sso";
meta = {
maintainers = with pkgs.stdenv.lib.maintainers; [ delroth ];
};

machine = {
services.nginx.sso = {
enable = true;
configuration = {
listen = { addr = "127.0.0.1"; port = 8080; };

providers.token.tokens = {
myuser = "MyToken";
};

acl = {
rule_sets = [
{
rules = [ { field = "x-application"; equals = "MyApp"; } ];
allow = [ "myuser" ];
}
];
};
};
};
};

testScript = ''
startAll;

$machine->waitForUnit("nginx-sso.service");
$machine->waitForOpenPort(8080);

# No valid user -> 401.
$machine->fail("curl -sSf http://localhost:8080/auth");

# Valid user but no matching ACL -> 403.
$machine->fail("curl -sSf -H 'Authorization: Token MyToken' http://localhost:8080/auth");

# Valid user and matching ACL -> 200.
$machine->succeed("curl -sSf -H 'Authorization: Token MyToken' -H 'X-Application: MyApp' http://localhost:8080/auth");
'';
})
23 nixos/tests/pam-u2f.nix Normal file
@ -0,0 +1,23 @@
import ./make-test.nix ({ ... }:

{
name = "pam-u2f";

machine =
{ ... }:
{
security.pam.u2f = {
control = "required";
cue = true;
debug = true;
enable = true;
interactive = true;
};
};

testScript =
''
$machine->waitForUnit('multi-user.target');
$machine->succeed('egrep "auth required .*/lib/security/pam_u2f.so.*debug.*interactive.*cue" /etc/pam.d/ -R');
'';
})
@ -12,7 +12,9 @@ import ./make-test.nix ({ pkgs, ...} : {
services.postgresql = let mypg = pkgs.postgresql_11; in {
enable = true;
package = mypg;
extraPlugins = [ (pkgs.postgis.override { postgresql = mypg; }) ];
extraPlugins = with mypg.pkgs; [
postgis
];
};
};
};
@ -22,5 +24,6 @@ import ./make-test.nix ({ pkgs, ...} : {
$master->waitForUnit("postgresql");
$master->sleep(10); # Hopefully this is long enough!!
$master->succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis;'");
$master->succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_topology;'");
'';
})
@ -7,7 +7,7 @@ with import ../lib/testing.nix { inherit system pkgs; };
with pkgs.lib;

let
postgresql-versions = pkgs.callPackages ../../pkgs/servers/sql/postgresql { };
postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs pkgs;
test-sql = pkgs.writeText "postgresql-test" ''
CREATE EXTENSION pgcrypto; -- just to check if lib loading works
CREATE TABLE sth (
@ -21,7 +21,7 @@ let
CREATE TABLE xmltest ( doc xml );
INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check if libxml2 enabled
'';
make-postgresql-test = postgresql-name: postgresql-package: makeTest {
make-postgresql-test = postgresql-name: postgresql-package: backup-all: makeTest {
name = postgresql-name;
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ zagy ];
@ -29,14 +29,17 @@ let

machine = {...}:
{
services.postgresql.package=postgresql-package;
services.postgresql.enable = true;
services.postgresql.package = postgresql-package;

services.postgresqlBackup.enable = true;
services.postgresqlBackup.databases = [ "postgres" ];
services.postgresqlBackup.databases = optional (!backup-all) "postgres";
};

testScript = ''
testScript = let
backupName = if backup-all then "all" else "postgres";
backupService = if backup-all then "postgresqlBackup" else "postgresqlBackup-postgres";
in ''
sub check_count {
my ($select, $nlines) = @_;
return 'test $(sudo -u postgres psql postgres -tAc "' . $select . '"|wc -l) -eq ' . $nlines;
@ -56,12 +59,20 @@ let
$machine->succeed(check_count("SELECT xpath(\'/test/text()\', doc) FROM xmltest;", 1));

# Check backup service
$machine->succeed("systemctl start postgresqlBackup-postgres.service");
$machine->succeed("zcat /var/backup/postgresql/postgres.sql.gz | grep '<test>ok</test>'");
$machine->succeed("stat -c '%a' /var/backup/postgresql/postgres.sql.gz | grep 600");
$machine->succeed("systemctl start ${backupService}.service");
$machine->succeed("zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'");
$machine->succeed("stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600");
$machine->shutdown;
'';

};
in
mapAttrs' (p-name: p-package: {name=p-name; value=make-postgresql-test p-name p-package;}) postgresql-versions
(mapAttrs' (name: package: { inherit name; value=make-postgresql-test name package false;}) postgresql-versions) // (
# just pick one version for the dump all test
let
first = head (attrNames postgresql-versions);
name = "${first}-backup-all";
in {
${name} = make-postgresql-test name postgresql-versions.${first} true;
}
)
18 nixos/tests/radarr.nix Normal file
@ -0,0 +1,18 @@
import ./make-test.nix ({ lib, ... }:

with lib;

rec {
name = "radarr";
meta.maintainers = with maintainers; [ etu ];

nodes.machine =
{ pkgs, ... }:
{ services.radarr.enable = true; };

testScript = ''
$machine->waitForUnit('radarr.service');
$machine->waitForOpenPort('7878');
$machine->succeed("curl --fail http://localhost:7878/");
'';
})
18 nixos/tests/sonarr.nix Normal file
@ -0,0 +1,18 @@
import ./make-test.nix ({ lib, ... }:

with lib;

rec {
name = "sonarr";
meta.maintainers = with maintainers; [ etu ];

nodes.machine =
{ pkgs, ... }:
{ services.sonarr.enable = true; };

testScript = ''
$machine->waitForUnit('sonarr.service');
$machine->waitForOpenPort('8989');
$machine->succeed("curl --fail http://localhost:8989/");
'';
})
30 nixos/tests/telegraf.nix Normal file
@ -0,0 +1,30 @@
import ./make-test.nix ({ pkgs, ...} : {
name = "telegraf";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ mic92 ];
};

machine = { ... }: {
services.telegraf.enable = true;
services.telegraf.extraConfig = {
agent.interval = "1s";
agent.flush_interval = "1s";
inputs.exec = {
commands = [
"${pkgs.runtimeShell} -c 'echo example,tag=a i=42i'"
];
timeout = "5s";
data_format = "influx";
};
outputs.file.files = ["/tmp/metrics.out"];
outputs.file.data_format = "influx";
};
};

testScript = ''
startAll;

$machine->waitForUnit("telegraf.service");
$machine->waitUntilSucceeds("grep -q example /tmp/metrics.out");
'';
})
@ -379,7 +379,7 @@ let
'';

meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ aszlig wkennington cdepillabout ];
maintainers = [ aszlig cdepillabout ];
};
};
@ -1,6 +1,6 @@
let
version = "2.2.5";
sha256 = "0q9vgwc0jlja73r4na7yil624iagq1607ac47wh8a7xgfjmjjai1";
cargoSha256 = "0ibdmyh1jvfq51vhwn4riyhilqwhf71hjd4vyj525smn95p75b14";
version = "2.3.0";
sha256 = "0v79nz19riaga6iwj6m59fq8adm5llrkq61xizriz30rw8rkk04z";
cargoSha256 = "01vdrfqh2nlghbgnbb7qmrazsjmynrb9542qrgchxq589wasb4j2";
in
import ./parity.nix { inherit version sha256 cargoSha256; }
@ -1,6 +1,6 @@
let
version = "2.1.10";
sha256 = "1l4yl8i24q8v4hzljzai37f587x8m3cz3byzifhvq3bjky7p8h80";
cargoSha256 = "04pni9cmz8nhlqznwafz9d81006808kh24aqnb8rjdcr84d11zis";
version = "2.2.7";
sha256 = "0bxq4z84vsb8hmbscr41xiw11m9xg6if231v76c2dmkbyqgpqy8p";
cargoSha256 = "1izwqg87qxhmmkd49m0k09i7r05sfcb18m5jbpvggjzp57ips09r";
in
import ./parity.nix { inherit version sha256 cargoSha256; }
@ -1,8 +1,8 @@
{ stdenv, fetchurl, alsaLib, bzip2, cairo, dpkg, freetype, gdk_pixbuf
, glib, gtk2, harfbuzz, jdk, lib, xorg
, libbsd, libjack2, libpng
, wrapGAppsHook, gtk2, gtk3, harfbuzz, jdk, lib, xorg
, libbsd, libjack2, libpng, ffmpeg
, libxkbcommon
, makeWrapper, pixman
, makeWrapper, pixman, autoPatchelfHook
, xdg_utils, zenity, zlib }:

stdenv.mkDerivation rec {
@ -14,22 +14,21 @@ stdenv.mkDerivation rec {
sha256 = "0n0fxh9gnmilwskjcayvjsjfcs3fz9hn00wh7b3gg0cv3qqhich8";
};

nativeBuildInputs = [ dpkg makeWrapper ];
nativeBuildInputs = [ dpkg makeWrapper autoPatchelfHook wrapGAppsHook ];

unpackCmd = "mkdir root ; dpkg-deb -x $curSrc root";

dontBuild = true;
dontPatchELF = true;
dontStrip = true;
dontWrapGApps = true; # we only want $gappsWrapperArgs here

libPath = with xorg; lib.makeLibraryPath [
alsaLib bzip2.out cairo freetype gdk_pixbuf glib gtk2 harfbuzz libX11 libXau
buildInputs = with xorg; [
alsaLib bzip2.out cairo freetype gdk_pixbuf gtk2 gtk3 harfbuzz libX11 libXau
libXcursor libXdmcp libXext libXfixes libXrender libbsd libjack2 libpng libxcb
libxkbfile pixman xcbutil xcbutilwm zlib
];

binPath = lib.makeBinPath [
xdg_utils zenity
xdg_utils zenity ffmpeg
];

installPhase = ''
@ -49,6 +48,16 @@ stdenv.mkDerivation rec {
rm -rf $out/libexec/lib/jre
ln -s ${jdk.home}/jre $out/libexec/lib/jre

mkdir -p $out/bin
ln -s $out/libexec/bitwig-studio $out/bin/bitwig-studio

cp -r usr/share $out/share
substitute usr/share/applications/bitwig-studio.desktop \
$out/share/applications/bitwig-studio.desktop \
--replace /usr/bin/bitwig-studio $out/bin/bitwig-studio
'';

postFixup = ''
# Bitwig’s `libx11-windowing-system.so` has several problems:
#
# • has some old version of libxkbcommon linked statically (ಠ_ಠ),
@ -67,22 +76,11 @@ stdenv.mkDerivation rec {
-not -name '*.so' \
-not -path '*/resources/*' | \
while IFS= read -r f ; do
patchelf \
--set-interpreter $(cat ${stdenv.cc}/nix-support/dynamic-linker) \
$f && \
wrapProgram $f \
--prefix PATH : "${binPath}" \
--prefix LD_LIBRARY_PATH : "${libPath}" \
"''${gappsWrapperArgs[@]}" \
--set LD_PRELOAD "${libxkbcommon.out}/lib/libxkbcommon.so" || true
done

mkdir -p $out/bin
ln -s $out/libexec/bitwig-studio $out/bin/bitwig-studio

cp -r usr/share $out/share
substitute usr/share/applications/bitwig-studio.desktop \
$out/share/applications/bitwig-studio.desktop \
--replace /usr/bin/bitwig-studio $out/bin/bitwig-studio
'';

meta = with stdenv.lib; {
@ -1,18 +1,16 @@
{ stdenv, fetchurl, bitwig-studio1,
xdg_utils, zenity, ffmpeg }:
xdg_utils, zenity, ffmpeg, pulseaudio }:

bitwig-studio1.overrideAttrs (oldAttrs: rec {
name = "bitwig-studio-${version}";
version = "2.3.5";
version = "2.4.3";

src = fetchurl {
url = "https://downloads.bitwig.com/stable/${version}/bitwig-studio-${version}.deb";
sha256 = "1v62z08hqla8fz5m7hl9ynf2hpr0j0arm0nb5lpd99qrv36ibrsc";
sha256 = "17754y4ni0zj9vjxl8ldivi33gdb0nk6sdlcmlpskgffrlx8di08";
};

buildInputs = bitwig-studio1.buildInputs ++ [ ffmpeg ];

binPath = stdenv.lib.makeBinPath [
ffmpeg xdg_utils zenity
runtimeDependencies = [
pulseaudio
];
})
@ -1,4 +1,4 @@
{ stdenv, fetchurl, fetchpatch, boost, cmake, chromaprint, gettext, gst_all_1, liblastfm
{ stdenv, fetchFromGitHub, fetchpatch, boost, cmake, chromaprint, gettext, gst_all_1, liblastfm
, qt4, taglib, fftw, glew, qjson, sqlite, libgpod, libplist, usbmuxd, libmtp
, libpulseaudio, gvfs, libcdio, libechonest, libspotify, pcre, projectm, protobuf
, qca2, pkgconfig, sparsehash, config, makeWrapper, gst_plugins }:
@ -11,14 +11,16 @@ let

version = "1.3.1";

src = fetchurl {
url = https://github.com/clementine-player/Clementine/archive/1.3.1.tar.gz;
sha256 = "0z7k73wyz54c3020lb6x2dgw0vz4ri7wcl3vs03qdj5pk8d971gq";
src = fetchFromGitHub {
owner = "clementine-player";
repo = "Clementine";
rev = version;
sha256 = "0i3jkfs8dbfkh47jq3cnx7pip47naqg7w66vmfszk4d8vj37j62j";
};

patches = [
./clementine-spotify-blob.patch
# Required so as to avoid adding libspotify as a build dependency (as it is
# Required so as to avoid adding libspotify as a build dependency (as it is
# unfree and thus would prevent us from having a free package).
./clementine-spotify-blob-remove-from-build.patch
(fetchpatch {
@ -1,5 +1,5 @@
{ stdenv, fetchurl, pkgconfig, wrapGAppsHook, intltool, libgpod, curl, flac,
gnome3, gtk3, gettext, perlPackages, flex, libid3tag,
gnome3, gtk3, gettext, perlPackages, flex, libid3tag, gdl,
libvorbis, gdk_pixbuf }:

stdenv.mkDerivation rec {
@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
buildInputs = [
curl gettext
flex libgpod libid3tag flac libvorbis gtk3 gdk_pixbuf
gnome3.gdl gnome3.defaultIconTheme gnome3.anjuta
gdl gnome3.defaultIconTheme gnome3.anjuta
] ++ (with perlPackages; [ perl XMLParser ]);

patchPhase = ''
@ -5,7 +5,7 @@

python3.pkgs.buildPythonApplication rec {
pname = "lollypop";
version = "0.9.909";
version = "0.9.915";

format = "other";
doCheck = false;
@ -14,10 +14,10 @@ python3.pkgs.buildPythonApplication rec {
url = "https://gitlab.gnome.org/World/lollypop";
rev = "refs/tags/${version}";
fetchSubmodules = true;
sha256 = "19d82dy0wprabg5kzcgs3ydmp9iz3h437n55cnlp20mbpya09k7n";
sha256 = "133qmqb015ghif4d4zh6sf8585fpfgbq00rv6qdj5xn13wziipwh";
};

nativeBuildInputs = with python3.pkgs; [
nativeBuildInputs = [
appstream-glib
desktop-file-utils
gobject-introspection
@ -13,7 +13,7 @@ pythonPackages.buildPythonApplication rec {
mopidy
pythonPackages.requests
pythonPackages.gmusicapi
pythonPackages.cachetools_1
pythonPackages.cachetools
];

doCheck = false;
@ -1,4 +1,4 @@
{ stdenv, lib, fetchFromGitHub, cmake, pkgconfig
{ stdenv, lib, fetchzip, cmake, pkgconfig
, alsaLib, freetype, libjack2, lame, libogg, libpulseaudio, libsndfile, libvorbis
, portaudio, portmidi, qtbase, qtdeclarative, qtscript, qtsvg, qttools
, qtwebengine, qtxmlpatterns
@ -6,13 +6,12 @@

stdenv.mkDerivation rec {
name = "musescore-${version}";
version = "3.0";
version = "3.0.1";

src = fetchFromGitHub {
owner = "musescore";
repo = "MuseScore";
rev = "v${version}";
sha256 = "0g8n8xpw5d6wh8bwbvy12sinl9i0ir009sr28i4izr28lr4x8v50";
src = fetchzip {
url = "https://download.musescore.com/releases/MuseScore-${version}/MuseScore-${version}.zip";
sha256 = "1l9djxq5hdfqiya2jwcag7qq4dhmb9qcv68y27dlza19imrnim80";
stripRoot = false;
};

patches = [
@ -3,13 +3,13 @@
stdenv.mkDerivation rec {

name = "ncpamixer-${version}";
version = "1.2";
version = "1.3";

src = fetchFromGitHub {
owner = "fulhax";
repo = "ncpamixer";
rev = version;
sha256 = "01kvd0pg5yraymlln5xdzqj1r6adxfvvza84wxn2481kcxfral54";
sha256 = "02v8vsx26w3wrzkg61457diaxv1hyzsh103p53j80la9vglamdsh";
};

buildInputs = [ ncurses libpulseaudio ];
@ -1,24 +1,30 @@
{ stdenv, fetchurl, SDL2, pkgconfig, flac, libsndfile }:
{ stdenv, fetchurl, zlib, pkgconfig, mpg123, libogg, libvorbis, portaudio, libsndfile, flac
, usePulseAudio ? false, libpulseaudio }:

let
version = "0.2.7025-beta20.1";
version = "0.4.1";
in stdenv.mkDerivation rec {
name = "openmpt123-${version}";

src = fetchurl {
url = "https://lib.openmpt.org/files/libopenmpt/src/libopenmpt-${version}.tar.gz";
sha256 = "0qp2nnz6pnl1d7yv9hcjyim7q6yax5881k1jxm8jfgjqagmz5k6p";
url = "https://lib.openmpt.org/files/libopenmpt/src/libopenmpt-${version}+release.autotools.tar.gz";
sha256 = "1k1m1adjh4s2q9lxgkf836k5243akxrzq1hsdjhrkg4idd3pxzp4";
};

enableParallelBuilding = true;
doCheck = true;

nativeBuildInputs = [ pkgconfig ];
buildInputs = [ SDL2 flac libsndfile ];
makeFlags = [ "NO_PULSEAUDIO=1 NO_LTDL=1 TEST=0 EXAMPLES=0" ]
++ stdenv.lib.optional (stdenv.isDarwin) "SHARED_SONAME=0";
installFlags = "PREFIX=\${out}";
buildInputs = [ zlib mpg123 libogg libvorbis portaudio libsndfile flac ]
++ stdenv.lib.optional usePulseAudio libpulseaudio;

configureFlags = stdenv.lib.optional (!usePulseAudio) [ "--without-pulseaudio" ];

meta = with stdenv.lib; {
description = "A cross-platform command-line based module file player";
homepage = https://lib.openmpt.org/libopenmpt/;
license = licenses.bsd3;
maintainers = [ stdenv.lib.maintainers.gnidorah ];
platforms = stdenv.lib.platforms.unix;
maintainers = with maintainers; [ gnidorah ];
platforms = platforms.linux;
};
}
@ -1,14 +1,16 @@
{ stdenv, python3Packages, fetchurl, gettext, chromaprint }:
{ stdenv, python3Packages, fetchFromGitHub, gettext, chromaprint }:

let
pythonPackages = python3Packages;
in pythonPackages.buildPythonApplication rec {
pname = "picard";
version = "2.1";
version = "2.1.2";

src = fetchurl {
url = "http://ftp.musicbrainz.org/pub/musicbrainz/picard/picard-${version}.tar.gz";
sha256 = "054a37q5828q59jzml4npkyczsp891d89kawgsif9kwpi0dxa06c";
src = fetchFromGitHub {
owner = "metabrainz";
repo = pname;
rev = "release-${version}";
sha256 = "1p2bvfzby0nk1vh04yfmsvjcldgkj6m6s1hcv9v13hc8q1cbdfk5";
};

buildInputs = [ gettext ];
@ -29,8 +31,6 @@ in pythonPackages.buildPythonApplication rec {
substituteInPlace setup.cfg --replace "‘" "'"
'';

doCheck = false;

meta = with stdenv.lib; {
homepage = http://musicbrainz.org/doc/MusicBrainz_Picard;
description = "The official MusicBrainz tagger";
@ -31,6 +31,7 @@
, zam-plugins
, rubberband
, mda_lv2
, lsp-plugins
, hicolor-icon-theme
}:

@ -38,6 +39,7 @@ let
lv2Plugins = [
calf # limiter, compressor exciter, bass enhancer and others
mda_lv2 # loudness
lsp-plugins # delay
];
ladspaPlugins = [
rubberband # pitch shifting
@ -45,13 +47,13 @@ let
];
in stdenv.mkDerivation rec {
pname = "pulseeffects";
version = "4.4.6";
version = "4.4.7";

src = fetchFromGitHub {
owner = "wwmm";
repo = "pulseeffects";
rev = "v${version}";
sha256 = "0zvcj2qliz2rlcz59ag4ljrs78qb7kpyaph16qvi07ij7c5bm333";
sha256 = "14sxwy3mayzn9k5hy58mjzhxaj4wqxvs257xaj03mwvm48k7c7ia";
};

nativeBuildInputs = [
@ -1,6 +1,6 @@
{ fetchurl, stdenv, squashfsTools, xorg, alsaLib, makeWrapper, openssl, freetype
, glib, pango, cairo, atk, gdk_pixbuf, gtk2, cups, nspr, nss, libpng
, libgcrypt, systemd, fontconfig, dbus, expat, ffmpeg_0_10, curl, zlib, gnome3
, libgcrypt, systemd, fontconfig, dbus, expat, ffmpeg, curl, zlib, gnome3
, at-spi2-atk
}:

@ -26,7 +26,7 @@ let
curl
dbus
expat
ffmpeg_0_10
ffmpeg
fontconfig
freetype
gdk_pixbuf
@ -118,6 +118,9 @@ stdenv.mkDerivation {
ln -s ${nspr.out}/lib/libnspr4.so $libdir/libnspr4.so
ln -s ${nspr.out}/lib/libplc4.so $libdir/libplc4.so

ln -s ${ffmpeg.out}/lib/libavcodec.so.56 $libdir/libavcodec-ffmpeg.so.56
ln -s ${ffmpeg.out}/lib/libavformat.so.56 $libdir/libavformat-ffmpeg.so.56

rpath="$out/share/spotify:$libdir"

patchelf \
Some files were not shown because too many files have changed in this diff.