Merge branch 'master' into postgresql-socket-in-run

This commit is contained in:
Danylo Hlynskyi 2019-03-25 01:06:59 +02:00 committed by GitHub
commit 40cc269561
654 changed files with 33356 additions and 25166 deletions

View File

@ -11,11 +11,10 @@
- [ ] macOS
- [ ] other Linux distributions
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"`
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nix-review --run "nix-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
- [ ] Ensured that relevant documentation is up to date
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).
---

View File

@ -1,4 +1,4 @@
Copyright (c) 2003-2018 Eelco Dolstra and the Nixpkgs/NixOS contributors
Copyright (c) 2003-2019 Eelco Dolstra and the Nixpkgs/NixOS contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the

View File

@ -3,12 +3,91 @@
xml:id="sec-language-go">
<title>Go</title>
<para>
The function <varname>buildGoPackage</varname> builds standard Go programs.
</para>
<section xml:id="ssec-go-modules">
<title>Go modules</title>
<example xml:id='ex-buildGoPackage'>
<title>buildGoPackage</title>
<para>
The function <varname>buildGoModule</varname> builds Go programs managed
with <link xlink:href="https://github.com/golang/go/wiki/Modules">Go
modules</link>. It uses a two-phase build:
<itemizedlist>
<listitem>
<para>
An intermediate fetcher derivation. This derivation will be used to fetch
all of the dependencies of the Go module.
</para>
</listitem>
<listitem>
<para>
A final derivation will use the output of the intermediate derivation to
build the binaries and produce the final output.
</para>
</listitem>
</itemizedlist>
</para>
<example xml:id='ex-buildGoModule'>
<title>buildGoModule</title>
<programlisting>
pet = buildGoModule rec {
name = "pet-${version}";
version = "0.3.4";
src = fetchFromGitHub {
owner = "knqyf263";
repo = "pet";
rev = "v${version}";
sha256 = "0m2fzpqxk7hrbxsgqplkg7h2p7gv6s1miymv3gvw0cz039skag0s";
};
modSha256 = "1879j77k96684wi554rkjxydrj8g3hpp0kvxz03sd8dmwr3lh83j"; <co xml:id='ex-buildGoModule-1' />
subPackages = [ "." ]; <co xml:id='ex-buildGoModule-2' />
meta = with lib; {
description = "Simple command-line snippet manager, written in Go";
homepage = https://github.com/knqyf263/pet;
license = licenses.mit;
maintainers = with maintainers; [ kalbasit ];
platforms = platforms.linux ++ platforms.darwin;
};
}
</programlisting>
</example>
<para>
<xref linkend='ex-buildGoModule'/> is an example expression using
<varname>buildGoModule</varname>. The following arguments are of special
significance to the function:
<calloutlist>
<callout arearefs='ex-buildGoModule-1'>
<para>
<varname>modSha256</varname> is the hash of the output of the
intermediate fetcher derivation.
</para>
</callout>
<callout arearefs='ex-buildGoModule-2'>
<para>
<varname>subPackages</varname> restricts the builder to building only the
listed child packages. If <varname>subPackages</varname> is not specified,
all child packages will be built.
</para>
</callout>
</calloutlist>
</para>
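<para>
  The value of <varname>modSha256</varname> is usually not known in advance.
  A common workflow (a sketch based on general Nix practice, not part of the
  example above) is to start from a placeholder hash and copy the real hash
  from the mismatch error that the first build reports:
</para>
<programlisting>
  # assumes lib is in scope; lib.fakeSha256 is an all-zero placeholder hash
  modSha256 = lib.fakeSha256;
  # run the build once; it fails with a "hash mismatch" error that prints the
  # actual hash, which you then paste in place of the placeholder
</programlisting>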
</section>
<section xml:id="ssec-go-legacy">
<title>Go legacy</title>
<para>
The function <varname>buildGoPackage</varname> builds legacy Go programs
that do not use Go modules.
</para>
<example xml:id='ex-buildGoPackage'>
<title>buildGoPackage</title>
<programlisting>
deis = buildGoPackage rec {
name = "deis-${version}";
@ -29,56 +108,56 @@ deis = buildGoPackage rec {
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' />
}
</programlisting>
</example>
</example>
<para>
<xref linkend='ex-buildGoPackage'/> is an example expression using
buildGoPackage, the following arguments are of special significance to the
function:
<calloutlist>
<callout arearefs='ex-buildGoPackage-1'>
<para>
<varname>goPackagePath</varname> specifies the package's canonical Go
import path.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-2'>
<para>
<varname>subPackages</varname> limits the builder from building child
packages that have not been listed. If <varname>subPackages</varname> is
not specified, all child packages will be built.
</para>
<para>
In this example only <literal>github.com/deis/deis/client</literal> will
be built.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goDeps</varname> is where the Go dependencies of a Go program are
listed as a list of package source identified by Go import path. It could
be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
<para>
<varname>buildFlags</varname> is a list of flags passed to the go build
command.
</para>
</callout>
</calloutlist>
</para>
<para>
<xref linkend='ex-buildGoPackage'/> is an example expression using
<varname>buildGoPackage</varname>. The following arguments are of special
significance to the function:
<calloutlist>
<callout arearefs='ex-buildGoPackage-1'>
<para>
<varname>goPackagePath</varname> specifies the package's canonical Go
import path.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-2'>
<para>
<varname>subPackages</varname> restricts the builder to building only the
listed child packages. If <varname>subPackages</varname> is not specified,
all child packages will be built.
</para>
<para>
In this example only <literal>github.com/deis/deis/client</literal> will
be built.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goDeps</varname> is where the Go dependencies of a Go program
are listed as a list of package sources identified by their Go import
paths. It can be kept in a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
<para>
<varname>buildFlags</varname> is a list of flags passed to the go build
command.
</para>
</callout>
</calloutlist>
</para>
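<para>
  For orientation, a minimal <varname>buildGoPackage</varname> expression
  combining the arguments described above might look like the following
  sketch (the package name, paths and hash below are placeholders for
  illustration, not an example from nixpkgs):
</para>
<programlisting>
example = buildGoPackage rec {
  name = "example-${version}";
  version = "0.1.0";                              # hypothetical project
  goPackagePath = "github.com/example/example";   # canonical Go import path
  src = fetchFromGitHub {
    owner = "example";
    repo = "example";
    rev = "v${version}";
    sha256 = lib.fakeSha256;                      # placeholder; replace with the real hash
  };
  goDeps = ./deps.nix;                            # Go dependencies, see below
  subPackages = [ "cmd/example" ];                # only build this child package
};
</programlisting>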
<para>
The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and
should be included in <varname>GOPATH</varname> for
<varname>buildPhase</varname>.
</para>
<para>
The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and
should be included in <varname>GOPATH</varname> for
<varname>buildPhase</varname>.
</para>
<example xml:id='ex-goDeps'>
<title>deps.nix</title>
<example xml:id='ex-goDeps'>
<title>deps.nix</title>
<programlisting>
[ <co xml:id='ex-goDeps-1' />
{
@ -101,60 +180,62 @@ deis = buildGoPackage rec {
}
]
</programlisting>
</example>
</example>
<para>
<calloutlist>
<callout arearefs='ex-goDeps-1'>
<para>
<varname>goDeps</varname> is a list of Go dependencies.
</para>
</callout>
<callout arearefs='ex-goDeps-2'>
<para>
<varname>goPackagePath</varname> specifies Go package import path.
</para>
</callout>
<callout arearefs='ex-goDeps-3'>
<para>
<varname>fetch type</varname> that needs to be used to get package source.
If <varname>git</varname> is used there should be <varname>url</varname>,
<varname>rev</varname> and <varname>sha256</varname> defined next to it.
</para>
</callout>
</calloutlist>
</para>
<para>
<calloutlist>
<callout arearefs='ex-goDeps-1'>
<para>
<varname>goDeps</varname> is a list of Go dependencies.
</para>
</callout>
<callout arearefs='ex-goDeps-2'>
<para>
<varname>goPackagePath</varname> specifies Go package import path.
</para>
</callout>
<callout arearefs='ex-goDeps-3'>
<para>
<varname>fetch type</varname> that needs to be used to get the package
source. If <varname>git</varname> is used, <varname>url</varname>,
<varname>rev</varname> and <varname>sha256</varname> should be defined
next to it.
</para>
</callout>
</calloutlist>
</para>
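<para>
  Putting these pieces together, a single <varname>goDeps</varname> entry in
  <varname>deps.nix</varname> has the following shape (a sketch with
  placeholder import path, revision and hash):
</para>
<programlisting>
[
  {
    goPackagePath = "github.com/example/dependency";  # hypothetical import path
    fetch = {
      type = "git";                                   # fetch type
      url = "https://github.com/example/dependency";
      rev = "0000000000000000000000000000000000000000";                # placeholder revision
      sha256 = "0000000000000000000000000000000000000000000000000000"; # placeholder hash
    };
  }
]
</programlisting>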
<para>
To extract dependency information from a Go package in automated way use
<link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>. It can
produce complete derivation and <varname>goDeps</varname> file for Go
programs.
</para>
<para>
To extract dependency information from a Go package in an automated way,
use <link xlink:href="https://github.com/kamilchm/go2nix">go2nix</link>. It
can produce a complete derivation and <varname>goDeps</varname> file for Go
programs.
</para>
<para>
<varname>buildGoPackage</varname> produces
<xref linkend='chap-multiple-output' xrefstyle="select: title" /> where
<varname>bin</varname> includes program binaries. You can test build a Go
binary as follows:
<para>
<varname>buildGoPackage</varname> produces
<xref linkend='chap-multiple-output' xrefstyle="select: title" /> where
<varname>bin</varname> includes program binaries. You can test build a Go
binary as follows:
<screen>
$ nix-build -A deis.bin
</screen>
or build all outputs with:
or build all outputs with:
<screen>
$ nix-build -A deis.all
</screen>
<varname>bin</varname> output will be installed by default with
<varname>nix-env -i</varname> or <varname>systemPackages</varname>.
</para>
<varname>bin</varname> output will be installed by default with
<varname>nix-env -i</varname> or <varname>systemPackages</varname>.
</para>
<para>
You may use Go packages installed into the active Nix profiles by adding the
following to your ~/.bashrc:
<para>
You may use Go packages installed into the active Nix profiles by adding the
following to your ~/.bashrc:
<screen>
for p in $NIX_PROFILES; do
GOPATH="$p/share/go:$GOPATH"
done
</screen>
</para>
</para>
</section>
</section>

View File

@ -188,25 +188,24 @@ building Python libraries is `buildPythonPackage`. Let's see how we can build th
```nix
{ lib, buildPythonPackage, fetchPypi }:

buildPythonPackage rec {
  pname = "toolz";
  version = "0.7.4";

  src = fetchPypi {
    inherit pname version;
    sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
  };

  doCheck = false;

  meta = with lib; {
    homepage = https://github.com/pytoolz/toolz;
    description = "List processing tools and functional utilities";
    license = licenses.bsd3;
    maintainers = with maintainers; [ fridh ];
  };
}
```
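As an aside (a sketch under the assumption that the expression above is saved as `toolz.nix`; not part of the original walkthrough), such a file is typically instantiated with `callPackage`, which supplies the `lib`, `buildPythonPackage` and `fetchPypi` arguments automatically:

```nix
# build the toolz derivation outside of python-packages.nix
with import <nixpkgs> {};
python3Packages.callPackage ./toolz.nix { }
```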
What happens here? The function `buildPythonPackage` is called and as argument

View File

@ -189,14 +189,14 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
</listitem>
<listitem>
<para>
The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can
be used to review a pull request content in a single command. It doesn't
rebase on a channel branch so it might trigger multiple source builds.
The <link xlink:href="https://github.com/Mic92/nix-review">nix-review</link>
tool can be used to review a pull request content in a single command.
<varname>PRNUMBER</varname> should be replaced by the number at the end
of the pull request title.
of the pull request title. You can also provide the full GitHub pull
request URL.
</para>
<screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
$ nix-shell -p nix-review --run "nix-review pr PRNUMBER"
</screen>
</listitem>
</itemizedlist>

View File

@ -2633,7 +2633,8 @@ addEnvHooks "$hostOffset" myBashFunction
happens. It prevents nix from cleaning up the build environment
immediately and allows the user to attach to a build environment using
the <command>cntr</command> command. Upon build error it will print
instructions on how to use <command>cntr</command>. Installing cntr and
instructions on how to use <command>cntr</command>, which can be used
to enter the environment for debugging. Installing cntr and
running the command will provide shell access to the build sandbox of the
failed build. At <filename>/var/lib/cntr</filename> the sandboxed
filesystem is mounted. All commands and files of the system are still

View File

@ -351,26 +351,26 @@ Additional information.
</section>
<section xml:id="submitting-changes-tested-compilation">
<title>Tested compilation of all pkgs that depend on this change using <command>nox-review</command></title>
<title>Tested compilation of all pkgs that depend on this change using <command>nix-review</command></title>
<para>
If you are updating a package's version, you can use nox to make sure all
packages that depend on the updated package still compile correctly. This
can be done using the nox utility. The <command>nox-review</command>
utility can look for and build all dependencies either based on uncommited
changes with the <literal>wip</literal> option or specifying a github pull
request number.
If you are updating a package's version, you can use nix-review to make sure all
packages that depend on the updated package still compile correctly.
The <command>nix-review</command> utility can look for and build all dependent
packages, either based on uncommitted changes with the <literal>wip</literal>
option or on a specified GitHub pull request number.
</para>
<para>
review uncommitted changes:
<screen>nix-shell -p nox --run "nox-review wip"</screen>
review changes from pull request number 12345:
<screen>nix-shell -p nix-review --run "nix-review pr 12345"</screen>
</para>
<para>
review changes from pull request number 12345:
<screen>nix-shell -p nox --run "nox-review pr 12345"</screen>
review uncommitted changes:
<screen>nix-shell -p nix-review --run "nix-review wip"</screen>
</para>
</section>
<section xml:id="submitting-changes-tested-execution">

View File

@ -90,7 +90,7 @@ rec {
/* Same as `concatMapStringsSep`, but the mapping function
additionally receives the position of its argument.
Type: concatMapStringsSep :: string -> (int -> string -> string) -> [string] -> string
Type: concatImapStringsSep :: string -> (int -> string -> string) -> [string] -> string
Example:
concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ]

View File

@ -523,6 +523,11 @@
email = "sivaraman.balaji@gmail.com";
name = "Balaji Sivaraman";
};
balsoft = {
email = "balsoft75@gmail.com";
github = "balsoft";
name = "Alexander Bantyev";
};
bandresen = {
email = "bandresen@gmail.com";
github = "bandresen";
@ -1294,7 +1299,7 @@
name = "Tim Dysinger";
};
dywedir = {
email = "dywedir@protonmail.ch";
email = "dywedir@gra.red";
github = "dywedir";
name = "Vladyslav M.";
};
@ -1583,6 +1588,11 @@
github = "fdns";
name = "Felipe Espinoza";
};
ffinkdevs = {
email = "fink@h0st.space";
github = "ffinkdevs";
name = "Fabian Fink";
};
fgaz = {
email = "fgaz@fgaz.me";
github = "fgaz";
@ -1765,6 +1775,11 @@
github = "Gerschtli";
name = "Tobias Happ";
};
ggpeti = {
email = "ggpeti@gmail.com";
github = "ggpeti";
name = "Peter Ferenczy";
};
gilligan = {
email = "tobias.pflug@gmail.com";
github = "gilligan";
@ -2365,6 +2380,11 @@
github = "juliendehos";
name = "Julien Dehos";
};
justinwoo = {
email = "moomoowoo@gmail.com";
github = "justinwoo";
name = "Justin Woo";
};
jwiegley = {
email = "johnw@newartisans.com";
github = "jwiegley";
@ -2658,6 +2678,11 @@
github = "limeytexan";
name = "Michael Brantley";
};
linarcx = {
email = "linarcx@gmail.com";
github = "linarcx";
name = "Kaveh Ahangar";
};
linc01n = {
email = "git@lincoln.hk";
github = "linc01n";
@ -2870,6 +2895,11 @@
github = "mathnerd314";
name = "Mathnerd314";
};
matklad = {
email = "aleksey.kladov@gmail.com";
github = "matklad";
name = "matklad";
};
matthewbauer = {
email = "mjbauer95@gmail.com";
github = "matthewbauer";
@ -2955,6 +2985,11 @@
github = "meisternu";
name = "Matt Miemiec";
};
melchips = {
email = "truphemus.francois@gmail.com";
github = "melchips";
name = "Francois Truphemus";
};
melsigl = {
email = "melanie.bianca.sigl@gmail.com";
github = "melsigl";
@ -3727,6 +3762,11 @@
github = "polyrod";
name = "Maurizio Di Pietro";
};
pombeirp = {
email = "nix@endgr.33mail.com";
github = "PombeirP";
name = "Pedro Pombeiro";
};
pradeepchhetri = {
email = "pradeep.chhetri89@gmail.com";
github = "pradeepchhetri";
@ -3827,6 +3867,16 @@
fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97";
}];
};
rafaelgg = {
email = "rafael.garcia.gallego@gmail.com";
github = "rafaelgg";
name = "Rafael García";
};
raquelgb = {
email = "raquel.garcia.bautista@gmail.com";
github = "raquelgb";
name = "Raquel García";
};
ragge = {
email = "r.dahlen@gmail.com";
github = "ragnard";
@ -3966,6 +4016,11 @@
github = "rittelle";
name = "Lennart Rittel";
};
rixed = {
email = "rixed-github@happyleptic.org";
github = "rixed";
name = "Cedric Cellier";
};
rkoe = {
email = "rk@simple-is-better.org";
github = "rkoe";
@ -4668,6 +4723,11 @@
github = "teozkr";
name = "Teo Klestrup Röijezon";
};
terlar = {
email = "terlar@gmail.com";
github = "terlar";
name = "Terje Larsen";
};
teto = {
email = "mcoudron@hotmail.com";
github = "teto";

View File

@ -6,13 +6,14 @@ debug: generated manual-combined.xml
manual-combined.xml: generated *.xml **/*.xml
rm -f ./manual-combined.xml
nix-shell --packages xmloscopy \
nix-shell --pure -Q --packages xmloscopy \
--run "xmloscopy --docbook5 ./manual.xml ./manual-combined.xml"
.PHONY: format
format:
find ../../ -iname '*.xml' -type f -print0 | xargs -0 -I{} -n1 \
xmlformat --config-file "../xmlformat.conf" -i {}
nix-shell --pure -Q --packages xmlformat \
--run "find ../../ -iname '*.xml' -type f -print0 | xargs -0 -I{} -n1 \
xmlformat --config-file '../xmlformat.conf' -i {}"
.PHONY: fix-misc-xml
fix-misc-xml:

View File

@ -200,8 +200,9 @@ swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_10;
</programlisting>
The latter option definition changes the default PostgreSQL package used
by NixOS's PostgreSQL service to 10.x. For more information on packages,
including how to add new ones, see <xref linkend="sec-custom-packages"/>.
by NixOS's PostgreSQL service to 10.x. For more information on
packages, including how to add new ones, see
<xref linkend="sec-custom-packages"/>.
</para>
</listitem>
</varlistentry>

View File

@ -21,6 +21,7 @@
<xi:include href="xfce.xml" />
<xi:include href="networking.xml" />
<xi:include href="linux-kernel.xml" />
<xi:include href="matrix.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" />
<xi:include href="kubernetes.xml" />

View File

@ -4,15 +4,13 @@
version="5.0"
xml:id="sec-kubernetes">
<title>Kubernetes</title>
<para>
The NixOS Kubernetes module is a collective term for a handful of
individual submodules implementing the Kubernetes cluster components.
The NixOS Kubernetes module is a collective term for a handful of individual
submodules implementing the Kubernetes cluster components.
</para>
<para>
There are generally two ways of enabling Kubernetes on NixOS.
One way is to enable and configure cluster components appropriately by hand:
There are generally two ways of enabling Kubernetes on NixOS. One way is to
enable and configure cluster components appropriately by hand:
<programlisting>
services.kubernetes = {
apiserver.enable = true;
@ -33,95 +31,82 @@ services.kubernetes = {
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
</programlisting>
Assigning both the master and node roles is usable if you want a single
node Kubernetes cluster for dev or testing purposes:
Assigning both the master and node roles is useful if you want a single node
Kubernetes cluster for dev or testing purposes:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
</programlisting>
Note: Assigning either role will also default both
<xref linkend="opt-services.kubernetes.flannel.enable"/> and
<xref linkend="opt-services.kubernetes.easyCerts"/> to true.
This sets up flannel as CNI and activates automatic PKI bootstrapping.
<xref linkend="opt-services.kubernetes.easyCerts"/> to true. This sets up
flannel as CNI and activates automatic PKI bootstrapping.
</para>
<para>
As of kubernetes 1.10.X it has been deprecated to open
non-tls-enabled ports on kubernetes components. Thus, from NixOS 19.03 all
plain HTTP ports have been disabled by default.
While opening insecure ports is still possible, it is recommended not to
bind these to other interfaces than loopback.
To re-enable the insecure port on the apiserver, see options:
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
As of Kubernetes 1.10.x, opening non-TLS-enabled ports on Kubernetes
components is deprecated. Thus, from NixOS 19.03 all plain HTTP ports are
disabled by default. While opening insecure ports is still possible, it is
recommended not to bind these to interfaces other than loopback. To
re-enable the insecure port on the apiserver, see the options
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/> and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
</para>
<note>
<para>
As of NixOS 19.03, it is mandatory to configure:
<xref linkend="opt-services.kubernetes.masterAddress"/>.
The masterAddress must be resolveable and routeable by all cluster nodes.
In single node clusters, this can be set to <literal>localhost</literal>.
<xref linkend="opt-services.kubernetes.masterAddress"/>. The masterAddress
must be resolvable and routable by all cluster nodes. In single node
clusters, this can be set to <literal>localhost</literal>.
</para>
</note>
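<para>
  Putting the role and masterAddress options above together, a minimal
  single-node configuration might look like the following sketch (the exact
  values are assumptions for illustration):
</para>
<programlisting>
services.kubernetes = {
  roles = [ "master" "node" ];    # defaults flannel.enable and easyCerts to true
  masterAddress = "localhost";    # must be resolvable and routable by all cluster nodes
};
</programlisting>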
<para>
Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port will
expectedly cause a permission denied error. All cluster components must
therefore be configured with x509 certificates for two-way tls communication.
The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations.
See also:
<link
Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port will
expectedly cause a permission denied error. All cluster components must
therefore be configured with x509 certificates for two-way tls communication.
The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations. See
also:
<link
xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
Using RBAC Authorization</link>.
Using RBAC Authorization</link>.
</para>
<para>
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>.
The PKI bootstrapping process involves setting up a certificate authority
(CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA-cert
for the cluster, and uses the CA-cert for signing subordinate certs issued to
each of the cluster components. Subsequently, the certmgr daemon monitors
active certificates and renews them when needed. For single node Kubernetes
clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
is sufficient and no further action is required. For joining extra node
machines to an existing cluster on the other hand, establishing initial trust
is mandatory.
</para>
<para>
To add new nodes to the cluster:
On any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
Given a token on stdin, it will copy the token to the kubernetes
secrets directory and restart the certmgr service. As requested
certificates are issued, the script will restart kubernetes cluster
components as needed for them to pick up new keypairs.
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>. The PKI bootstrapping
process involves setting up a certificate authority (CA) daemon (cfssl) on
the kubernetes master node. cfssl generates a CA-cert for the cluster, and
uses the CA-cert for signing subordinate certs issued to each of the cluster
components. Subsequently, the certmgr daemon monitors active certificates and
renews them when needed. For single node Kubernetes clusters, setting
<xref linkend="opt-services.kubernetes.easyCerts"/> = true is sufficient and
no further action is required. For joining extra node machines to an existing
cluster on the other hand, establishing initial trust is mandatory.
</para>
<para>
To add new nodes to the cluster: On any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
Given a token on stdin, it will copy the token to the kubernetes secrets
directory and restart the certmgr service. As requested certificates are
issued, the script will restart kubernetes cluster components as needed for
them to pick up new keypairs.
</para>
<note>
<para>
Multi-master (HA) clusters are not supported by the easyCerts module.
</para>
</note>
<para>
In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate the cluster.
The cluster-admin kubeconfig references an auto-generated keypair owned by
root. Thus, only root on the kubernetes master may obtain cluster-admin
rights by means of this file.
In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate the cluster.
The cluster-admin kubeconfig references an auto-generated keypair owned by
root. Thus, only root on the kubernetes master may obtain cluster-admin
rights by means of this file.
</para>
</chapter>

View File

@ -0,0 +1,203 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-matrix">
<title>Matrix</title>
<para>
<link xlink:href="https://matrix.org/">Matrix</link> is an open standard for
interoperable, decentralised, real-time communication over IP. It can be used
to power Instant Messaging, VoIP/WebRTC signalling, Internet of Things
communication - or anywhere you need a standard HTTP API for publishing and
subscribing to data whilst tracking the conversation history.
</para>
<para>
This chapter will show you how to set up your own, self-hosted Matrix
homeserver using the Synapse reference homeserver, and how to serve your own
copy of the Riot web client. See the
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> overview page for links to Riot Apps for Android and iOS,
desktop clients, as well as bridges to other networks and other projects
around Matrix.
</para>
<section xml:id="module-services-matrix-synapse">
<title>Synapse Homeserver</title>
<para>
<link xlink:href="https://github.com/matrix-org/synapse">Synapse</link> is
the reference homeserver implementation of Matrix from the core development
team at matrix.org. The following configuration example will set up a
synapse server for the <literal>example.org</literal> domain, served from
the host <literal>myhostname.example.org</literal>. For more information,
please refer to the
<link xlink:href="https://github.com/matrix-org/synapse#synapse-installation">
installation instructions of Synapse </link>.
<programlisting>
let
fqdn =
let
join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
in join config.networking.hostName config.networking.domain;
in {
networking = {
hostName = "myhostname";
domain = "example.org";
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
services.nginx = {
enable = true;
# only recommendedProxySettings and recommendedGzipSettings are strictly required,
# but the rest make sense as well
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
virtualHosts = {
# This host section can be placed on a different host than the rest,
# i.e. to delegate from the host being accessible as ${config.networking.domain}
# to another host actually running the Matrix homeserver.
"${config.networking.domain}" = {
locations."= /.well-known/matrix/server".extraConfig =
let
# use 443 instead of the default 8448 port to unite
# the client-server and server-server port for simplicity
server = { "m.server" = "${fqdn}:443"; };
in ''
add_header Content-Type application/json;
return 200 '${builtins.toJSON server}';
'';
locations."= /.well-known/matrix/client".extraConfig =
let
client = {
"m.homeserver" = { "base_url" = "https://${fqdn}"; };
"m.identity_server" = { "base_url" = "https://vector.im"; };
};
# ACAO required to allow riot-web on any URL to request this json file
in ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON client}';
'';
};
# Reverse proxy for Matrix client-server and server-server communication
${fqdn} = {
enableACME = true;
forceSSL = true;
# Or do a redirect instead of the 404, or whatever is appropriate for you.
# But do not put a Matrix Web client here! See the Riot Web section below.
locations."/".extraConfig = ''
return 404;
'';
# forward all Matrix API calls to the synapse Matrix homeserver
locations."/_matrix" = {
proxyPass = "http://[::1]:8008";
};
};
};
};
services.matrix-synapse = {
enable = true;
server_name = config.networking.domain;
listeners = [
{
port = 8008;
bind_address = "::1";
type = "http";
tls = false;
x_forwarded = true;
resources = [
{ names = [ "client" "federation" ]; compress = false; }
];
}
];
};
};
</programlisting>
</para>
<para>
If the <code>A</code> and <code>AAAA</code> DNS records on
<literal>example.org</literal> do not point to the same host as the records
for <code>myhostname.example.org</code>, you can easily move the
<code>/.well-known</code> virtualHost section of the code to the host that
is serving <literal>example.org</literal>, while the rest stays on
<literal>myhostname.example.org</literal> with no other changes required.
This pattern also allows you to seamlessly move the homeserver from
<literal>myhostname.example.org</literal> to
<literal>myotherhost.example.org</literal> by only changing the
<code>/.well-known</code> redirection target.
</para>
<para>
If you want to run a server with public registration by anybody, you can
then enable <option>services.matrix-synapse.enable_registration =
true;</option>. Otherwise, you can generate a registration secret with
<command>pwgen -s 64 1</command> and set it with
<option>services.matrix-synapse.registration_shared_secret</option>. To
create a new user or admin, run the following after you have set the secret
and have rebuilt NixOS:
<programlisting>
$ nix run nixpkgs.matrix-synapse
$ register_new_matrix_user -k &lt;your-registration-shared-secret&gt; http://localhost:8008
New user localpart: &lt;your-username&gt;
Password:
Confirm password:
Make admin [no]:
Success!
</programlisting>
In the example, this would create a user with the Matrix Identifier
<literal>@your-username:example.org</literal>. Note that the registration
secret ends up in the nix store and therefore is world-readable by any user
on your machine, so it makes sense to only temporarily activate the
<option>registration_shared_secret</option> option until a better solution
for NixOS is in place.
</para>
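<para>
  For reference, the two registration-related options mentioned above would
  be set like this (a sketch; the secret shown is a placeholder, not a value
  to reuse):
</para>
<programlisting>
services.matrix-synapse.enable_registration = true;           # open registration to anybody
# or, instead, registration only via a shared secret:
# services.matrix-synapse.registration_shared_secret = "output-of-pwgen-s-64-1";
</programlisting>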
</section>
<section xml:id="module-services-matrix-riot-web">
<title>Riot Web Client</title>
<para>
<link xlink:href="https://github.com/vector-im/riot-web/">Riot Web</link> is
the reference web client for Matrix and developed by the core team at
matrix.org. The following snippet can optionally be added to the
configuration above to complete the synapse installation with a web client
served at <code>https://riot.myhostname.example.org</code> and
<code>https://riot.example.org</code>. Alternatively, you can use the hosted
copy at <link xlink:href="https://riot.im/app">https://riot.im/app</link>,
or use other web clients or native client applications. Due to the
<literal>/.well-known</literal> URL setup done above, many clients should
fill in the required connection details automatically when you enter your
Matrix Identifier. See
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> for a list of existing clients and their supported
featureset.
<programlisting>
services.nginx.virtualHosts."riot.${fqdn}" = {
enableACME = true;
forceSSL = true;
serverAliases = [
"riot.${config.networking.domain}"
];
root = pkgs.riot-web;
};
</programlisting>
</para>
<para>
Note that the Riot developers do not recommend running Riot and your Matrix
homeserver on the same fully-qualified domain name for security reasons. In
the example, this means that you should not reuse the
<literal>myhostname.example.org</literal> virtualHost to also serve Riot,
but instead serve it on a different subdomain, like
<literal>riot.example.org</literal> in the example. See the
<link xlink:href="https://github.com/vector-im/riot-web#important-security-note">Riot
Important Security Notes</link> for more information on this subject.
</para>
</section>
</chapter>

View File

@ -112,9 +112,8 @@ true
$ nixos-option <xref linkend="opt-boot.kernelModules"/>
[ "tun" "ipv6" "loop" <replaceable>...</replaceable> ]
</screen>
Interactive exploration of the configuration is possible using
<command>nix repl</command>, a read-eval-print loop for Nix expressions.
A typical use:
Interactive exploration of the configuration is possible using <command>nix
repl</command>, a read-eval-print loop for Nix expressions. A typical use:
<screen>
$ nix repl '&lt;nixpkgs/nixos>'
@ -127,11 +126,10 @@ nix-repl> map (x: x.hostName) config.<xref linkend="opt-services.httpd.virtualHo
</para>
<para>
While abstracting your configuration, you may find it useful to generate
modules using code, instead of writing files. The example
below would have the same effect as importing a file which sets those
options.
<screen>
While abstracting your configuration, you may find it useful to generate
modules using code, instead of writing files. The example below would have
the same effect as importing a file which sets those options.
<screen>
{ config, pkgs, ... }:
let netConfig = { hostName }: {
@ -143,5 +141,5 @@ nix-repl> map (x: x.hostName) config.<xref linkend="opt-services.httpd.virtualHo
{ imports = [ (netConfig "nixos.localdomain") ]; }
</screen>
</para>
</para>
</section>

View File

@ -12,14 +12,14 @@
That is to say, expected usage is to add them to the imports list of your
<filename>/etc/configuration.nix</filename> as such:
</para>
<programlisting>
<programlisting>
imports = [
&lt;nixpkgs/nixos/modules/profiles/profile-name.nix&gt;
];
</programlisting>
<para>
Even if some of these profiles seem only useful in the context of
install media, many are actually intended to be used in real installs.
Even if some of these profiles seem only useful in the context of install
media, many are actually intended to be used in real installs.
</para>
<para>
What follows is a brief explanation on the purpose and use-case for each

View File

@ -1,15 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-all-hardware">
<title>All Hardware</title>
<para>
Enables all hardware supported by NixOS: i.e., all firmware is
included, and all devices from which one may boot are enabled in the initrd.
Its primary use is in the NixOS installation CDs.
Enables all hardware supported by NixOS: i.e., all firmware is included, and
all devices from which one may boot are enabled in the initrd. Its primary
use is in the NixOS installation CDs.
</para>
<para>
The enabled kernel modules include support for SATA and PATA, SCSI
(partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and

View File

@ -1,15 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-base">
<title>Base</title>
<para>
Defines the software packages included in the "minimal"
installation CD. It installs several utilities useful in a simple recovery or
install media, such as a text-mode web browser, and tools for manipulating
block devices, networking, hardware diagnostics, and filesystems (with their
respective kernel modules).
Defines the software packages included in the "minimal" installation CD. It
installs several utilities useful in a simple recovery or install media, such
as a text-mode web browser, and tools for manipulating block devices,
networking, hardware diagnostics, and filesystems (with their respective
kernel modules).
</para>
</section>

View File

@ -1,14 +1,14 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-clone-config">
<title>Clone Config</title>
<para>
This profile is used in installer images.
It provides an editable configuration.nix that imports all the modules that
were also used when creating the image in the first place.
As a result it allows users to edit and rebuild the live-system.
This profile is used in installer images. It provides an editable
configuration.nix that imports all the modules that were also used when
creating the image in the first place. As a result it allows users to edit
and rebuild the live-system.
</para>
</section>

View File

@ -1,13 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-demo">
<title>Demo</title>
<para>
This profile just enables a <systemitem class="username">demo</systemitem> user, with password <literal>demo</literal>, uid <literal>1000</literal>, <systemitem class="groupname">wheel</systemitem>
group and <link linkend="opt-services.xserver.displayManager.sddm.autoLogin">
autologin in the SDDM display manager</link>.
This profile just enables a <systemitem class="username">demo</systemitem>
user, with password <literal>demo</literal>, uid <literal>1000</literal>,
<systemitem class="groupname">wheel</systemitem> group and
<link linkend="opt-services.xserver.displayManager.sddm.autoLogin"> autologin
in the SDDM display manager</link>.
</para>
</section>

View File

@ -1,15 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-docker-container">
<title>Docker Container</title>
<para>
This is the profile from which the Docker images are generated. It prepares a
working system by importing the <link linkend="sec-profile-minimal">Minimal</link> and
<link linkend="sec-profile-clone-config">Clone Config</link> profiles, and setting appropriate
configuration options that are useful inside a container context, like
<xref linkend="opt-boot.isContainer"/>.
working system by importing the
<link linkend="sec-profile-minimal">Minimal</link> and
<link linkend="sec-profile-clone-config">Clone Config</link> profiles, and
setting appropriate configuration options that are useful inside a container
context, like <xref linkend="opt-boot.isContainer"/>.
</para>
</section>

View File

@ -1,20 +1,21 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-graphical">
<title>Graphical</title>
<para>
Defines a NixOS configuration with the Plasma 5 desktop. It's used by the
graphical installation CD.
</para>
<para>
It sets <xref linkend="opt-services.xserver.enable"/>,
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/>,
<xref linkend="opt-services.xserver.desktopManager.plasma5.enable"/> (
<link linkend="opt-services.xserver.desktopManager.plasma5.enableQt4Support">
without Qt4 Support</link>), and
without Qt4 Support</link>), and
<xref linkend="opt-services.xserver.libinput.enable"/> to true. It also
includes glxinfo and firefox in the system packages list.
</para>

View File

@ -1,22 +1,24 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-hardened">
<title>Hardened</title>
<para>
A profile with most (vanilla) hardening options enabled by default,
potentially at the cost of features and performance.
</para>
<para>
This includes a hardened kernel, and limiting the system information
available to processes through the <filename>/sys</filename> and
<filename>/proc</filename> filesystems. It also disables the User Namespaces
feature of the kernel, which stops Nix from being able to build anything
(this particular setting can be overridden via
<xref linkend="opt-security.allowUserNamespaces"/>). See the <literal
<xref linkend="opt-security.allowUserNamespaces"/>). See the
<literal
xlink:href="https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix">
profile source</literal> for further detail on which settings are altered.
profile source</literal> for further detail on which settings are altered.
</para>
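<para>
  For example (a sketch of the override mentioned above), user namespaces can
  be re-enabled on top of this profile so that Nix builds work again:
</para>
<programlisting>
security.allowUserNamespaces = true;
</programlisting>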
</section>

View File

@ -1,18 +1,19 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-headless">
<title>Headless</title>
<para>
Common configuration for headless machines (e.g., Amazon EC2 instances).
</para>
<para>
Disables <link linkend="opt-sound.enable">sound</link>,
<link linkend="opt-boot.vesa">vesa</link>, serial consoles,
<link linkend="opt-systemd.enableEmergencyMode">emergency mode</link>,
<link linkend="opt-boot.loader.grub.splashImage">grub splash images</link> and
configures the kernel to reboot automatically on panic.
<link linkend="opt-boot.loader.grub.splashImage">grub splash images</link>
and configures the kernel to reboot automatically on panic.
</para>
</section>

View File

@ -1,31 +1,34 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-installation-device">
<title>Installation Device</title>
<para>
Provides a basic configuration for installation devices like CDs. This means
enabling hardware scans, using the <link linkend="sec-profile-clone-config">
Clone Config profile</link> to guarantee
Clone Config profile</link> to guarantee
<filename>/etc/nixos/configuration.nix</filename> exists (for
<command>nixos-rebuild</command> to work), and including a copy of the
Nixpkgs channel snapshot used to create the install media.
</para>
<para>
Additionally, documentation for <link linkend="opt-documentation.enable">
Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS
</link> are forcefully enabled (to override the
<link linkend="sec-profile-minimal">Minimal profile</link> preference); the
NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled.
Autologin is enabled as root.
Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS
</link> are forcefully enabled (to override the
<link linkend="sec-profile-minimal">Minimal profile</link> preference); the
NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled.
Autologin is enabled as root.
</para>
<para>
A message is shown to the user to start a display manager if needed,
ssh with <xref linkend="opt-services.openssh.permitRootLogin"/> are enabled (but
A message is shown to the user to start a display manager if needed; ssh with
<xref linkend="opt-services.openssh.permitRootLogin"/> is enabled (but
does not autostart). WPA Supplicant is also enabled without autostart.
</para>
<para>
Finally, vim is installed, root is set to not have a password, the kernel is
made more silent for remote public IP installs, and several settings are

View File

@ -1,16 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-minimal">
<title>Minimal</title>
<para>
This profile defines a small NixOS configuration. It does not contain any
graphical stuff. It's a very short file that enables
<link linkend="opt-environment.noXlibs">noXlibs</link>, sets
<link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link>
to only support the user-selected locale,
<link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link> to
only support the user-selected locale,
<link linkend="opt-documentation.enable">disables packages' documentation
</link>, and <link linkend="opt-sound.enable">disables sound</link>.
</para>

View File

@ -4,10 +4,12 @@
version="5.0"
xml:id="sec-profile-qemu-guest">
<title>QEMU Guest</title>
<para>
This profile contains common configuration for virtual machines running under
QEMU (using virtio).
</para>
<para>
It makes virtio modules available on the initrd, sets the system time from
the hardware clock to work around a bug in qemu-kvm, and

View File

@ -23,16 +23,14 @@
psk = "abcdefgh";
};
"free.wifi" = {};
}
};
</programlisting>
Be aware that keys will be written to the nix store in plaintext! When no
networks are set, it will default to using a configuration file at
<literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
yourself to define wireless networks, WPA keys and so on (see
<citerefentry>
<refentrytitle>wpa_supplicant.conf</refentrytitle>
<manvolnum>5</manvolnum>
</citerefentry>).
yourself to define wireless networks, WPA keys and so on (see <citerefentry>
<refentrytitle>wpa_supplicant.conf</refentrytitle>
<manvolnum>5</manvolnum> </citerefentry>).
</para>
<para>

View File

@ -35,8 +35,8 @@
</para>
<para>
NixOSs default <emphasis>display manager</emphasis> (the program that
provides a graphical login prompt and manages the X server) is LightDM. You can
select an alternative one by picking one of the following lines:
provides a graphical login prompt and manages the X server) is LightDM. You
can select an alternative one by picking one of the following lines:
<programlisting>
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/> = true;
<xref linkend="opt-services.xserver.displayManager.slim.enable"/> = true;
@ -59,9 +59,16 @@
<screen>
# systemctl start display-manager.service
</screen>
</para>
<para>
On 64-bit systems, if you want OpenGL for 32-bit programs such as Wine,
you should also set the following:
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
</para>
<simplesect xml:id="sec-x11-graphics-cards-nvidia">
<title>NVIDIA Graphics Cards</title>
<title>Proprietary NVIDIA drivers</title>
<para>
NVIDIA provides a proprietary driver for its graphics cards that has better
3D performance than the X.org drivers. It is not enabled by default because
@ -71,6 +78,7 @@
</programlisting>
Or if you have an older card, you may have to use one of the legacy drivers:
<programlisting>
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy390" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy340" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy304" ];
<xref linkend="opt-services.xserver.videoDrivers"/> = [ "nvidiaLegacy173" ];
@ -78,16 +86,9 @@
You may need to reboot after enabling this driver to prevent a clash with
other kernel modules.
</para>
<para>
On 64-bit systems, if you want full acceleration for 32-bit programs such as
Wine, you should also set the following:
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
</para>
</simplesect>
<simplesect xml:id="sec-x11--graphics-cards-amd">
<title>AMD Graphics Cards</title>
<title>Proprietary AMD drivers</title>
<para>
AMD provides a proprietary driver for its graphics cards that has better 3D
performance than the X.org drivers. It is not enabled by default because
@ -99,11 +100,8 @@
other kernel modules.
</para>
<para>
On 64-bit systems, if you want full acceleration for 32-bit programs such as
Wine, you should also set the following:
<programlisting>
<xref linkend="opt-hardware.opengl.driSupport32Bit"/> = true;
</programlisting>
Note: for recent AMD GPUs you most likely want to keep either the defaults
or <literal>"amdgpu"</literal> (both free).
</para>
</simplesect>
<simplesect xml:id="sec-x11-touchpads">

View File

@ -77,10 +77,10 @@
Shared folders can be given a name and a path in the host system in the
VirtualBox settings (Machine / Settings / Shared Folders, then click on the
"Add" icon). Add the following to the
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you
do not add <literal>"nofail"</literal>, the system will no boot properly.
The same goes for disabling <literal>rngd</literal> which is normally used
to get randomness but this does not work in virtual machines.
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you do
not add <literal>"nofail"</literal>, the system will no boot properly. The
same goes for disabling <literal>rngd</literal> which is normally used to get
randomness but this does not work in virtual machines.
</para>
<programlisting>
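# Sketch: auto-mount a VirtualBox shared folder with the "nofail" option and
# disable rngd, as described above. The share name and mount point below are
# assumptions for illustration; adjust them to your setup.
fileSystems."/mnt/shared" = {
  fsType = "vboxsf";
  device = "nameofthesharedfolder";
  options = [ "rw" "nofail" ];
};
services.rngd.enable = false;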

View File

@ -70,9 +70,9 @@
<arg>
<option>--rollback</option>
</arg>
<arg>
<option>--builders</option>
<replaceable>builder-spec</replaceable>
<option>--builders</option> <replaceable>builder-spec</replaceable>
</arg>
<sbr />
<arg>
@ -198,7 +198,7 @@ $ nix-build /path/to/nixpkgs/nixos -A system
</term>
<listitem>
<para>
Opens <filename>configuration.nix</filename> in the default editor.
Opens <filename>configuration.nix</filename> in the default editor.
</para>
</listitem>
</varlistentry>
@ -334,25 +334,23 @@ $ ./result/bin/run-*-vm
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>--builders</option>
<replaceable>builder-spec</replaceable>
</term>
<listitem>
<para>
Allow ad-hoc remote builders for building the new system.
This requires the user executing <command>nixos-rebuild</command> (usually
root) to be configured as a trusted user in the Nix daemon. This can be
achieved by using the <literal>nix.trustedUsers</literal> NixOS option.
Examples values for that option are described in the
<literal>Remote builds chapter</literal> in the Nix manual,
(i.e. <command>--builders "ssh://bigbrother x86_64-linux"</command>).
By specifying an empty string existing builders specified in
<filename>/etc/nix/machines</filename> can be ignored:
<command>--builders ""</command> for example when they are not
reachable due to network connectivity.
</para>
</listitem>
<term>
<option>--builders</option> <replaceable>builder-spec</replaceable>
</term>
<listitem>
<para>
Allow ad-hoc remote builders for building the new system. This requires
the user executing <command>nixos-rebuild</command> (usually root) to be
configured as a trusted user in the Nix daemon. This can be achieved by
using the <literal>nix.trustedUsers</literal> NixOS option. Example
values for that option are described in the <literal>Remote builds
chapter</literal> in the Nix manual, e.g. <command>--builders
"ssh://bigbrother x86_64-linux"</command>. By specifying an empty string,
existing builders specified in <filename>/etc/nix/machines</filename> can
be ignored, for example when they are not reachable due to network
connectivity: <command>--builders ""</command>.
</para>
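<para>
  A sketch of the corresponding trusted-user setting (the exact user list is
  an assumption):
</para>
<programlisting>
nix.trustedUsers = [ "root" "@wheel" ];
</programlisting>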
</listitem>
</varlistentry>
<varlistentry>
<term>

View File

@ -639,7 +639,8 @@ $ nix-instantiate -E '(import &lt;nixpkgsunstable&gt; {}).gitFull'
</listitem>
<listitem>
<para>
Groups <literal>kvm</literal> and <literal>render</literal> are introduced now, as systemd requires them.
Groups <literal>kvm</literal> and <literal>render</literal> are introduced
now, as systemd requires them.
</para>
</listitem>
</itemizedlist>

View File

@ -20,25 +20,27 @@
<itemizedlist>
<listitem>
<para>
The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6.
The default Python 3 interpreter is now CPython 3.7 instead of CPython
3.6.
</para>
</listitem>
<listitem>
<para>
Added the Pantheon desktop environment.
It can be enabled through <varname>services.xserver.desktopManager.pantheon.enable</varname>.
Added the Pantheon desktop environment. It can be enabled through
<varname>services.xserver.desktopManager.pantheon.enable</varname>.
</para>
<note>
<para>
<varname>services.xserver.desktopManager.pantheon</varname> default enables lightdm
as a display manager and using Pantheon's greeter.
<varname>services.xserver.desktopManager.pantheon</varname> by default
enables lightdm as the display manager, using Pantheon's greeter.
</para>
<para>
This is because of limitations with the screenlocking implementation, whereas the
screenlocker would be non-functional without it.
This is because of limitations with the screenlocking implementation, as
the screenlocker would be non-functional without it.
</para>
<para>
Because of that it is recommended to retain this precaution, however if you'd like to change this set:
Because of that, it is recommended to retain this precaution; however, if
you'd like to change this, set:
</para>
<itemizedlist>
<listitem>
@ -52,17 +54,19 @@
</para>
</listitem>
</itemizedlist>
<para>to <literal>false</literal> and enable your preferred display manager.</para>
<para>
to <literal>false</literal> and enable your preferred display manager.
</para>
</note>
</listitem>
<listitem>
<para>
A major refactoring of the Kubernetes module has been completed.
Refactorings primarily focus on decoupling components and enhancing
security. Two-way TLS and RBAC has been enabled by default for all
components, which slightly changes the way the module is configured.
See: <xref linkend="sec-kubernetes"/> for details.
</para>
<para>
A major refactoring of the Kubernetes module has been completed.
Refactorings primarily focus on decoupling components and enhancing
security. Two-way TLS and RBAC have been enabled by default for all
components, which slightly changes the way the module is configured. See:
<xref linkend="sec-kubernetes"/> for details.
</para>
</listitem>
</itemizedlist>
</section>
@ -87,10 +91,11 @@
<listitem>
<para>
There is a new <varname>security.googleOsLogin</varname> module for using
<link xlink:href="https://cloud.google.com/compute/docs/instances/managing-instance-access">OS Login</link>
to manage SSH access to Google Compute Engine instances, which supersedes
the imperative and broken <literal>google-accounts-daemon</literal> used
in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
<link xlink:href="https://cloud.google.com/compute/docs/instances/managing-instance-access">OS
Login</link> to manage SSH access to Google Compute Engine instances,
which supersedes the imperative and broken
<literal>google-accounts-daemon</literal> used in
<literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
</para>
</listitem>
<listitem>
@ -101,8 +106,9 @@
<listitem>
<para>
There is a new <varname>services.cockroachdb</varname> module for running
CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well,
available on <literal>x86_64-linux</literal> and
<literal>aarch64-linux</literal>.
</para>
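<para>
A single-node sketch; anything beyond <varname>enable</varname> (listen
addresses, certificates, clustering) is left to the module's own options:
<programlisting>
services.cockroachdb.enable = true;
</programlisting>
</para>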
</listitem>
</itemizedlist>
@ -110,15 +116,15 @@
<itemizedlist>
<listitem>
<para>
<literal>./security/duosec.nix</literal>
</para>
</listitem>
<listitem>
<para>
The <link xlink:href="https://duo.com/docs/duounix">PAM module for Duo
Security</link> has been enabled for use. One can configure it using the
<option>security.duosec</option> options along with the corresponding PAM
option in
<option>security.pam.services.&lt;name?&gt;.duoSecurity.enable</option>.
</para>
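<para>
For example, to require Duo for SSH logins, something along these lines
should work (the <literal>sshd</literal> PAM service name is an assumption
for illustration; the <option>security.duosec</option> credential options
are omitted here and must be filled in as documented):
<programlisting>
security.pam.services.sshd.duoSecurity.enable = true;
</programlisting>
</para>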
</listitem>
@ -184,36 +190,37 @@
</listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets
<varname>strictDeps = true</varname> to help distinguish between native
and non-native dependencies in order to improve cross-compilation
compatibility. Note however that this may break user expressions.
</para>
</listitem>
<listitem>
<para>
The <varname>buildPythonPackage</varname> function now sets <varname>LANG
= C.UTF-8</varname> to enable Unicode support. The
<varname>glibcLocales</varname> package is no longer needed as a build
input.
</para>
</listitem>
<listitem>
<para>
The Syncthing state and configuration data has been moved from
<varname>services.syncthing.dataDir</varname> to the newly defined
<varname>services.syncthing.configDir</varname>, which defaults to
<literal>/var/lib/syncthing/.config/syncthing</literal>. This change makes
it possible to share synced directories using ACLs without Syncthing
resetting the permissions on every start.
</para>
</listitem>
<listitem>
<para>
The <literal>ntp</literal> module now has sane default restrictions. If
you're relying on the previous defaults, which permitted all queries and
commands from all firewall-permitted sources, you can set
<varname>services.ntp.restrictDefault</varname> and
<varname>services.ntp.restrictSource</varname> to <literal>[]</literal>.
</para>
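<para>
A sketch of restoring the previous, unrestricted behaviour (only do this if
you really need it):
<programlisting>
services.ntp.enable = true;
services.ntp.restrictDefault = [];
services.ntp.restrictSource = [];
</programlisting>
</para>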
</listitem>
<listitem>
@ -241,17 +248,21 @@
</listitem>
<listitem>
<para>
Options
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.userName</literal>
and
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.modulePackages</literal>
were removed. They were never used for anything and can therefore safely
be removed.
</para>
</listitem>
<listitem>
<para>
Package <literal>wasm</literal> has been renamed
<literal>proglodyte-wasm</literal>. The package <literal>wasm</literal>
will be pointed to <literal>ocamlPackages.wasm</literal> in 19.09, so make
sure to update your configuration if you want to keep
<literal>proglodyte-wasm</literal>.
</para>
</listitem>
<listitem>
@ -279,37 +290,41 @@
</listitem>
<listitem>
<para>
Package <literal>consul-ui</literal> and passthrough
<literal>consul.ui</literal> have been removed. The package
<literal>consul</literal> now uses upstream releases that vendor the UI
into the binary. See
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link>
for details.
</para>
</listitem>
<listitem>
<para>
Slurm introduces the new option
<literal>services.slurm.stateSaveLocation</literal>, which is now set to
<literal>/var/spool/slurm</literal> by default (instead of
<literal>/var/spool</literal>). Make sure to move all files to the new
directory or to set the option accordingly.
</para>
<para>
The slurmctld now runs as user <literal>slurm</literal> instead of
<literal>root</literal>. If you want to keep slurmctld running as
<literal>root</literal>, set <literal>services.slurm.user =
root</literal>.
</para>
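<para>
In Nix syntax that is (a sketch; note that the value is a string):
<programlisting>
services.slurm.user = "root";
</programlisting>
</para>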
<para>
The options <literal>services.slurm.nodeName</literal> and
<literal>services.slurm.partitionName</literal> are now sets of strings to
correctly reflect the fact that each of these options can occur more than
once in the configuration.
</para>
</listitem>
<listitem>
<para>
The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0
and has undergone some major changes. The <literal>services.solr</literal>
module has been updated to reflect these changes. Please review
http://lucene.apache.org/solr/ carefully before upgrading.
</para>
</listitem>
<listitem>
@ -321,46 +336,49 @@
</listitem>
<listitem>
<para>
The option
<literal>services.xserver.displayManager.job.logToFile</literal> which was
previously set to <literal>true</literal> when using the display managers
<literal>lightdm</literal>, <literal>sddm</literal> or
<literal>xpra</literal> has been reset to the default value
(<literal>false</literal>).
</para>
</listitem>
<listitem>
<para>
Network interface indiscriminate NixOS firewall options
(<literal>networking.firewall.allow*</literal>) are now preserved when
also setting interface specific rules such as
<literal>networking.firewall.interfaces.en0.allow*</literal>. These rules
continue to use the pseudo device "default"
(<literal>networking.firewall.interfaces.default.*</literal>), and
assigning to this pseudo device will override the
(<literal>networking.firewall.allow*</literal>) options.
</para>
</listitem>
<listitem>
<para>
The <literal>nscd</literal> service now disables all caching of
<literal>passwd</literal> and <literal>group</literal> databases by
default. This was interfering with the correct functioning of the
<literal>libnss_systemd.so</literal> module which is used by
<literal>systemd</literal> to manage uids and usernames in the presence of
<literal>DynamicUser=</literal> in systemd services. This was already the
default behaviour in the presence of <literal>services.sssd.enable =
true</literal> because nscd caching would interfere with
<literal>sssd</literal> in unpredictable ways as well. Because we're using
nscd not for caching, but for convincing glibc to find NSS modules in the
nix store instead of an absolute path, we have decided to disable caching
globally now, as it's usually not the behaviour the user wants and can
lead to surprising behaviour. Furthermore, negative caching of host
lookups is also disabled now by default. This should fix the issue of DNS
lookups failing in the presence of an unreliable network.
</para>
<para>
If the old behaviour is desired, this can be restored by setting the
<literal>services.nscd.config</literal> option with the desired caching
parameters.
<programlisting>
services.nscd.config =
''
server-user nscd
@ -393,95 +411,109 @@
shared hosts yes
'';
</programlisting>
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/50316">#50316</link>
See
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/50316">#50316</link>
for details.
</para>
</listitem>
<listitem>
<para>
GitLab Shell previously used the nix store paths for the
<literal>gitlab-shell</literal> command in its
<literal>authorized_keys</literal> file, which might stop working after
garbage collection. To circumvent that, we regenerated that file on each
startup. As <literal>gitlab-shell</literal> has now been changed to use
<literal>/var/run/current-system/sw/bin/gitlab-shell</literal>, this is
not necessary anymore, but there might be leftover lines with a nix store
path. Regenerate the <literal>authorized_keys</literal> file via
<command>sudo -u git -H gitlab-rake gitlab:shell:setup</command> in that
case.
</para>
</listitem>
<listitem>
<para>
The <literal>pam_unix</literal> account module is now loaded with its
control field set to <literal>required</literal> instead of
<literal>sufficient</literal>, so that later PAM account modules that
might do more extensive checks are being executed. Previously, the whole
account module verification was exited prematurely in case an NSS module
provided the account name to <literal>pam_unix</literal>. The LDAP and
SSSD NixOS modules already add their NSS modules when enabled. In case
your setup breaks due to some later PAM account module previously
shadowed, or failing NSS lookups, please file a bug. You can get back the
old behaviour by manually setting <literal>
<![CDATA[security.pam.services.<name?>.text]]>
</literal>.
</para>
</listitem>
<listitem>
<para>
The <literal>pam_unix</literal> password module is now loaded with its
control field set to <literal>sufficient</literal> instead of
<literal>required</literal>, so that passwords managed only by later PAM
password modules can be changed. Previously, for example, changing an
LDAP account's password through PAM was not possible: the whole password
module verification was exited prematurely by <literal>pam_unix</literal>,
preventing <literal>pam_ldap</literal> from managing the password as it
should.
</para>
</listitem>
<listitem>
<para>
<literal>fish</literal> has been upgraded to 3.0. It comes with a number
of improvements and backwards incompatible changes. See the
<literal>fish</literal>
<link xlink:href="https://github.com/fish-shell/fish-shell/releases/tag/3.0.0">release
notes</link> for more information.
</para>
</listitem>
<listitem>
<para>
The ibus-table input method has had a change in config format, which
causes all previous settings to be lost. See
<link xlink:href="https://github.com/mike-fabian/ibus-table/commit/f9195f877c5212fef0dfa446acb328c45ba5852b">this
commit message</link> for details.
</para>
</listitem>
<listitem>
<para>
NixOS module system type <literal>types.optionSet</literal> and
<literal>lib.mkOption</literal> argument <literal>options</literal> are
deprecated. Use <literal>types.submodule</literal> instead.
(<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
</para>
</listitem>
<listitem>
<para>
<literal>matrix-synapse</literal> has been updated to version 0.99. It
will <link xlink:href="https://github.com/matrix-org/synapse/pull/4509">no
longer generate a self-signed certificate on first launch</link> and will
be
<link xlink:href="https://matrix.org/blog/2019/02/05/synapse-0-99-0/">the
last version to accept self-signed certificates</link>. As such, it is now
recommended to use a proper certificate verified by a root CA (for example
Let's Encrypt). The new <link linkend="module-services-matrix">manual
chapter on Matrix</link> contains a working example of using nginx as a
reverse proxy in front of <literal>matrix-synapse</literal>, using Let's
Encrypt certificates.
</para>
</listitem>
<listitem>
<para>
<literal>mailutils</literal> now works by default when
<literal>sendmail</literal> is not in a setuid wrapper. As a consequence,
the <literal>sendmailPath</literal> argument, having lost its main use,
has been removed.
</para>
</listitem>
<listitem>
<para>
<literal>graylog</literal> has been upgraded from version 2.* to 3.*. Some
setups making use of extraConfig (especially those exposing Graylog via
reverse proxies) need to be updated as upstream removed/replaced some
settings. See
<link xlink:href="http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration">Upgrading
Graylog</link> for details.
</para>
</listitem>
</itemizedlist>
@ -498,197 +530,219 @@
<listitem>
<para>
The <option>services.matomo</option> module gained the option
<option>services.matomo.package</option> which determines the used Matomo
version.
</para>
<para>
The Matomo module now also comes with the systemd service
<literal>matomo-archive-processing.service</literal> and a timer that
automatically triggers archive processing every hour. This means that you
can safely
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
disable browser triggers for Matomo archiving </link> at
<literal>Administration > System > General Settings</literal>.
</para>
<para>
Additionally, you can enable the option to
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
delete old visitor logs </link> at <literal>Administration > System >
Privacy</literal>, but make sure that you run <literal>systemctl start
matomo-archive-processing.service</literal> at least once without errors
if you have already collected data before, so that the reports get
archived before the source data gets deleted.
</para>
</listitem>
<listitem>
<para>
<literal>composableDerivation</literal> along with supporting library
functions has been removed.
</para>
</listitem>
<listitem>
<para>
The deprecated <literal>truecrypt</literal> package has been removed and
the <literal>truecrypt</literal> attribute is now an alias for
<literal>veracrypt</literal>. VeraCrypt is backward-compatible with
TrueCrypt volumes. Note that <literal>cryptsetup</literal> also supports
loading TrueCrypt volumes.
</para>
</listitem>
<listitem>
<para>
The Kubernetes DNS addon, kube-dns, has been replaced with CoreDNS. This
change is made in accordance with Kubernetes making CoreDNS the official
default starting from
<link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes
v1.11</link>. Please beware that upgrading the DNS addon on existing
clusters might induce minor downtime while the DNS addon terminates and
re-initializes. Also note that the DNS service now runs with 2 pod
replicas by default. The desired number of replicas can be configured
using <option>services.kubernetes.addons.dns.replicas</option>.
</para>
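<para>
For example, to scale the DNS addon up (a sketch; the rest of the
Kubernetes configuration is assumed to be in place already):
<programlisting>
services.kubernetes.addons.dns.replicas = 3;
</programlisting>
</para>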
</listitem>
<listitem>
<para>
The quassel-webserver package and module was removed from nixpkgs due to
the lack of maintainers.
</para>
</listitem>
<listitem>
<para>
The manual gained a <link linkend="module-services-matrix"> new chapter on
self-hosting <literal>matrix-synapse</literal> and
<literal>riot-web</literal> </link>, the most prevalent server and client
implementations for the
<link xlink:href="https://matrix.org/">Matrix</link> federated
communication network.
</para>
</listitem>
<listitem>
<para>
The astah-community package was removed from nixpkgs due to it being
discontinued and the downloads not being available anymore.
</para>
</listitem>
<listitem>
<para>
The httpd service now saves log files with a .log file extension by
default for easier integration with the logrotate service.
</para>
</listitem>
<listitem>
<para>
The owncloud server packages and httpd subservice module were removed from
nixpkgs due to the lack of maintainers.
</para>
</listitem>
<listitem>
<para>
It is now possible to use ZRAM devices as general purpose ephemeral block
devices, not only as swap. Using more than 1 device as ZRAM swap is no
longer recommended, but is still possible by setting
<literal>zramSwap.swapDevices</literal> explicitly.
</para>
<para>
The ZRAM algorithm can now be changed.
</para>
<para>
Changes to the ZRAM algorithm are applied during <literal>nixos-rebuild
switch</literal>, so make sure you have enough swap space on disk to
survive ZRAM device rebuild. Alternatively, use <literal>nixos-rebuild
boot; reboot</literal>.
</para>
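<para>
A sketch of an explicit ZRAM swap setup under the new options (the
<varname>zramSwap.algorithm</varname> name and the value shown are
assumptions based on the note above; pick an algorithm your kernel
supports):
<programlisting>
zramSwap.enable = true;
zramSwap.swapDevices = 1;
zramSwap.algorithm = "zstd";
</programlisting>
</para>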
</listitem>
<listitem>
<para>
Flat volumes are now disabled by default in
<literal>hardware.pulseaudio</literal>. This has been done to prevent
applications, which are unaware of this feature, setting their volumes to
100% on startup causing harm to your audio hardware and potentially your
ears.
</para>
<note>
<para>
With this change application specific volumes are relative to the master
volume which can be adjusted independently, whereas before they were
absolute; meaning that in effect, it scaled the device-volume with the
volume of the loudest application.
</para>
</note>
</listitem>
<listitem>
<para>
The <link xlink:href="https://github.com/DanielAdolfsson/ndppd"><literal>ndppd</literal></link> module
now supports <link linkend="opt-services.ndppd.enable">all config options</link> provided by the current
upstream version as service options. Additionally the <literal>ndppd</literal> package doesn't contain
the systemd unit configuration from upstream anymore, the unit is completely configured by the NixOS module now.
The
<link xlink:href="https://github.com/DanielAdolfsson/ndppd"><literal>ndppd</literal></link>
module now supports <link linkend="opt-services.ndppd.enable">all config
options</link> provided by the current upstream version as service
options. Additionally the <literal>ndppd</literal> package doesn't contain
the systemd unit configuration from upstream anymore, the unit is
completely configured by the NixOS module now.
</para>
</listitem>
<listitem>
<para>
New installs of NixOS will default to the Redmine 4.x series unless
otherwise specified in <literal>services.redmine.package</literal>, while
existing installs of NixOS will default to the Redmine 3.x series.
</para>
</listitem>
<listitem>
<para>
The <link linkend="opt-services.grafana.enable">Grafana module</link> now supports declarative
<link xlink:href="http://docs.grafana.org/administration/provisioning/">datasource and dashboard</link>
provisioning.
The <link linkend="opt-services.grafana.enable">Grafana module</link> now
supports declarative
<link xlink:href="http://docs.grafana.org/administration/provisioning/">datasource
and dashboard</link> provisioning.
</para>
</listitem>
<listitem>
<para>
The use of insecure ports on kubernetes has been deprecated. Thus options:
<varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname> have been
renamed to <varname>.insecurePort</varname>, and the default of both
options has changed to 0 (disabled).
</para>
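<para>
If you still rely on an insecure port, a sketch of re-enabling it while
keeping it bound to the loopback interface (option names follow the rename
described here; double-check the exact spelling of the insecure bind
address option against the module):
<programlisting>
services.kubernetes.apiserver.insecurePort = 8080;
services.kubernetes.apiserver.insecureBindAddress = "127.0.0.1";
</programlisting>
</para>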
</listitem>
<listitem>
<para>
Note that the default value of
<varname>services.kubernetes.apiserver.bindAddress</varname> has changed
from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be accessible from
outside the master node itself. If the apiserver insecurePort is enabled,
it is strongly recommended to only bind on the loopback interface. See:
<varname>services.kubernetes.apiserver.insecurebindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The options
<varname>services.kubernetes.apiserver.allowPrivileged</varname> and
<varname>services.kubernetes.kubelet.allowPrivileged</varname> now
default to false, disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The kubernetes module no longer adds the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
<listitem>
<para>
The <literal>intel</literal> driver has been removed from the default list
of <link linkend="opt-services.xserver.videoDrivers">X.org video
drivers</link>. The <literal>modesetting</literal> driver should take over
automatically; it is better maintained upstream and has fewer problems with
advanced X11 features. This can lead to a change in the output names used
by <literal>xrandr</literal>. Some performance regressions on some GPU
models might happen. Some OpenCL and VA-API applications might also break
(Beignet seems to provide OpenCL support with
<literal>modesetting</literal> driver, too). Kernel mode setting API does
not support backlight control, so <literal>xbacklight</literal> tool will
not work; backlight level can be controlled directly via
<literal>/sys/</literal> or with <literal>brightnessctl</literal>. Users
who need this functionality more than multi-output XRandR are advised to
add <literal>intel</literal> to <literal>videoDrivers</literal> and report
an issue (or provide additional details in an existing one).
</para>
</listitem>
<listitem>
<para>
Openmpi has been updated to version 4.0.0, which removes some deprecated
MPI-1 symbols. This may break some older applications that still rely on
those symbols. An upgrade guide can be found
<link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
</para>
<para>
The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by
default. You can set the protocols used by the nginx service using
<xref linkend="opt-services.nginx.sslProtocols"/>.
</para>
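<para>
For example, to stay on TLS 1.2 only for now (a sketch; the value is a
space-separated protocol list as accepted by nginx):
<programlisting>
services.nginx.sslProtocols = "TLSv1.2";
</programlisting>
</para>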
</listitem>
<listitem>
<para>
A new subcommand <command>nixos-rebuild edit</command> was added.
</para>
</listitem>
</itemizedlist>
</section>

View File

@ -49,7 +49,19 @@
xml:id="sec-release-19.09-incompatibilities">
<title>Backward Incompatibilities</title>
<para>
When upgrading from a previous release, please be aware of the following
incompatible changes:
</para>
<itemizedlist>
<listitem>
<para>
Buildbot no longer supports Python 2, as support was dropped upstream in
version 2.0.0. Configurations may need to be modified to make them
compatible with Python 3.
</para>
</listitem>
<listitem>
<para>
PostgreSQL now uses
@ -74,14 +86,17 @@
<listitem>
<para>
The <option>documentation</option> module gained an option named
<option>documentation.nixos.includeAllModules</option> which makes the
generated <citerefentry>
<refentrytitle>configuration.nix</refentrytitle>
<manvolnum>5</manvolnum></citerefentry> manual page include all options
from all NixOS modules included in a given
<literal>configuration.nix</literal> configuration file. Currently, it is
set to <literal>false</literal> by default as enabling it frequently
prevents evaluation. But the plan is to eventually have it set to
<literal>true</literal> by default. Please set it to
<literal>true</literal> now in your <literal>configuration.nix</literal>
and fix all the bugs it uncovers.
</para>
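<para>
That is, a one-line addition to your configuration:
<programlisting>
documentation.nixos.includeAllModules = true;
</programlisting>
</para>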
</listitem>
</itemizedlist>

View File

@ -53,6 +53,7 @@ in {
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
partitionTableType = if config.ec2.hvm then "legacy" else "none";
diskSize = cfg.sizeMB;
fsType = "ext4";
configFile = pkgs.writeText "configuration.nix"
''
{

View File

@ -166,6 +166,8 @@ let
"# No refind for ${targetArch}"
;
grubPkgs = if config.boot.loader.grub.forcei686 then pkgs.pkgsi686Linux else pkgs;
grubMenuCfg = ''
#
# Menu configuration
@ -241,7 +243,7 @@ let
# Modules that may or may not be available per-platform.
echo "Adding additional modules:"
for mod in efi_uga; do
if [ -f ${pkgs.grub2_efi}/lib/grub/${pkgs.grub2_efi.grubTarget}/$mod.mod ]; then
if [ -f ${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget}/$mod.mod ]; then
echo " - $mod"
MODULES+=" $mod"
fi
@ -249,9 +251,9 @@ let
# Make our own efi program, we can't rely on "grub-install" since it seems to
# probe for devices, even with --skip-fs-probe.
${pkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${pkgs.grub2_efi.grubTarget} \
${grubPkgs.grub2_efi}/bin/grub-mkimage -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${grubPkgs.grub2_efi.grubTarget} \
$MODULES
cp ${pkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
cp ${grubPkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
cat <<EOF > $out/EFI/boot/grub.cfg
@ -362,7 +364,7 @@ let
# Name used by UEFI for architectures.
targetArch =
if pkgs.stdenv.isi686 then
if pkgs.stdenv.isi686 || config.boot.loader.grub.forcei686 then
"ia32"
else if pkgs.stdenv.isx86_64 then
"x64"
@ -506,7 +508,7 @@ in
# here and it causes a cyclic dependency.
boot.loader.grub.enable = false;
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi ]
environment.systemPackages = [ grubPkgs.grub2 grubPkgs.grub2_efi ]
++ optional canx86BiosBoot pkgs.syslinux
;

View File

@ -35,6 +35,7 @@
./config/users-groups.nix
./config/vpnc.nix
./config/zram.nix
./hardware/acpilight.nix
./hardware/all-firmware.nix
./hardware/bladeRF.nix
./hardware/brightnessctl.nix
@ -136,6 +137,7 @@
./programs/vim.nix
./programs/wavemon.nix
./programs/way-cooler.nix
./programs/waybar.nix
./programs/wireshark.nix
./programs/xfs_quota.nix
./programs/xonsh.nix
@ -310,6 +312,7 @@
./services/hardware/ratbagd.nix
./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix
./services/hardware/sane_extra_backends/dsseries.nix
./services/hardware/tcsd.nix
./services/hardware/tlp.nix
./services/hardware/thinkfan.nix
@ -577,6 +580,7 @@
./services/networking/keepalived/default.nix
./services/networking/keybase.nix
./services/networking/kippo.nix
./services/networking/knot.nix
./services/networking/kresd.nix
./services/networking/lambdabot.nix
./services/networking/libreswan.nix
@ -764,6 +768,7 @@
./services/web-servers/nginx/default.nix
./services/web-servers/nginx/gitweb.nix
./services/web-servers/phpfpm/default.nix
./services/web-servers/unit/default.nix
./services/web-servers/shellinabox.nix
./services/web-servers/tomcat.nix
./services/web-servers/traefik.nix
@ -871,9 +876,11 @@
./tasks/trackpoint.nix
./tasks/powertop.nix
./testing/service-runner.nix
./virtualisation/anbox.nix
./virtualisation/container-config.nix
./virtualisation/containers.nix
./virtualisation/docker.nix
./virtualisation/docker-containers.nix
./virtualisation/ecs-agent.nix
./virtualisation/libvirtd.nix
./virtualisation/lxc.nix

View File

@ -0,0 +1,20 @@
{ lib, pkgs, config, ... }:
with lib;
{
options.programs.waybar = {
enable = mkEnableOption "waybar";
};
config = mkIf config.programs.waybar.enable {
systemd.user.services.waybar = {
description = "Waybar as systemd service";
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
script = "${pkgs.waybar}/bin/waybar";
};
};
meta.maintainers = [ maintainers.FlorianFranzen ];
}

View File

@ -87,9 +87,9 @@
<para>
<emphasis>Please keep in mind that this is not compatible with
<literal>programs.zsh.ohMyZsh.custom</literal> as it requires an immutable
store path while <literal>custom</literal> shall remain mutable! An
evaluation failure will be thrown if both <literal>custom</literal> and
<literal>customPkgs</literal> are set.</emphasis>
</para>
</section>

View File

@ -79,6 +79,33 @@ in
type = types.lines;
};
histSize = mkOption {
default = 2000;
description = ''
Change history size.
'';
type = types.int;
};
histFile = mkOption {
default = "$HOME/.zsh_history";
description = ''
Change history file.
'';
type = types.str;
};
setOptions = mkOption {
type = types.listOf types.str;
default = [
"HIST_IGNORE_DUPS" "SHARE_HISTORY" "HIST_FCNTL_LOCK"
];
example = [ "EXTENDED_HISTORY" "RM_STAR_WAIT" ];
description = ''
Configure zsh options.
'';
};
enableCompletion = mkOption {
default = true;
description = ''
@ -162,12 +189,12 @@ in
. /etc/zinputrc
# history defaults
SAVEHIST=2000
HISTSIZE=2000
HISTFILE=$HOME/.zsh_history
# Don't export these, otherwise other shells (bash) will try to use same histfile
SAVEHIST=${toString cfg.histSize}
HISTSIZE=${toString cfg.histSize}
HISTFILE=${cfg.histFile}
setopt HIST_IGNORE_DUPS SHARE_HISTORY HIST_FCNTL_LOCK
${optionalString (cfg.setOptions != []) "setopt ${concatStringsSep " " cfg.setOptions}"}
HELPDIR="${pkgs.zsh}/share/zsh/$ZSH_VERSION/help"

View File

@ -199,10 +199,10 @@ in {
package = mkOption {
type = types.package;
default = pkgs.pythonPackages.buildbot-full;
defaultText = "pkgs.pythonPackages.buildbot-full";
default = pkgs.python3Packages.buildbot-full;
defaultText = "pkgs.python3Packages.buildbot-full";
description = "Package to use for buildbot.";
example = literalExample "pkgs.python3Packages.buildbot-full";
example = literalExample "pkgs.python3Packages.buildbot";
};
packages = mkOption {

View File

@ -118,10 +118,10 @@ in {
package = mkOption {
type = types.package;
default = pkgs.pythonPackages.buildbot-worker;
defaultText = "pkgs.pythonPackages.buildbot-worker";
default = pkgs.python3Packages.buildbot-worker;
defaultText = "pkgs.python3Packages.buildbot-worker";
description = "Package to use for buildbot worker.";
example = literalExample "pkgs.python3Packages.buildbot-worker";
example = literalExample "pkgs.python2Packages.buildbot-worker";
};
packages = mkOption {

View File

@ -9,6 +9,8 @@ let
in
{
meta.maintainers = pkgs.pantheon.maintainers;
###### interface
options = {

View File

@ -6,6 +6,8 @@ with lib;
{
meta.maintainers = pkgs.pantheon.maintainers;
###### interface
options = {

View File

@ -6,6 +6,8 @@ with lib;
{
meta.maintainers = pkgs.pantheon.maintainers;
###### interface
options = {

View File

@ -0,0 +1,26 @@
{ config, lib, pkgs, ... }:
with lib;
{
options = {
hardware.sane.dsseries.enable =
mkEnableOption "Brother DSSeries scan backend" // {
description = ''
When enabled, will automatically register the "dsseries" SANE backend.
This supports the Brother DSmobile scanner series, including the
DS-620, DS-720D, DS-820W, and DS-920DW scanners.
'';
};
};
config = mkIf (config.hardware.sane.enable && config.hardware.sane.dsseries.enable) {
hardware.sane.extraBackends = [ pkgs.dsseries ];
services.udev.packages = [ pkgs.dsseries ];
boot.kernelModules = [ "sg" ];
};
}

View File

@ -14,9 +14,10 @@ let
log.fields.service = "registry";
storage = {
cache.blobdescriptor = blobCache;
filesystem.rootdirectory = cfg.storagePath;
delete.enabled = cfg.enableDelete;
};
} // (if cfg.storagePath != null
then { filesystem.rootdirectory = cfg.storagePath; }
else {});
http = {
addr = "${cfg.listenAddress}:${builtins.toString cfg.port}";
headers.X-Content-Type-Options = ["nosniff"];
@ -61,9 +62,12 @@ in {
};
storagePath = mkOption {
type = types.path;
type = types.nullOr types.path;
default = "/var/lib/docker-registry";
description = "Docker registry storage path.";
description = ''
Docker registry storage path for the filesystem storage backend. Set to
null to configure another backend via extraConfig.
'';
};
enableDelete = mkOption {
@ -140,9 +144,12 @@ in {
startAt = optional cfg.enableGarbageCollect cfg.garbageCollectDates;
};
users.users.docker-registry = {
createHome = true;
home = cfg.storagePath;
};
users.users.docker-registry =
if cfg.storagePath != null
then {
createHome = true;
home = cfg.storagePath;
}
else {};
};
}

View File

@ -235,7 +235,7 @@ in {
'';
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
exec ${datadogPkg}/bin/agent start -c /etc/datadog-agent/datadog.yaml
exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
'';
serviceConfig.PermissionsStartOnly = true;
};

View File

@ -0,0 +1,95 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.knot;
configFile = pkgs.writeText "knot.conf" cfg.extraConfig;
socketFile = "/run/knot/knot.sock";
knotConfCheck = file: pkgs.runCommand "knot-config-checked"
{ buildInputs = [ cfg.package ]; } ''
ln -s ${configFile} $out
knotc --config=${configFile} conf-check
'';
knot-cli-wrappers = pkgs.stdenv.mkDerivation {
name = "knot-cli-wrappers";
buildInputs = [ pkgs.makeWrapper ];
buildCommand = ''
mkdir -p $out/bin
makeWrapper ${cfg.package}/bin/knotc "$out/bin/knotc" \
--add-flags "--config=${configFile}" \
--add-flags "--socket=${socketFile}"
makeWrapper ${cfg.package}/bin/keymgr "$out/bin/keymgr" \
--add-flags "--config=${configFile}"
for executable in kdig khost kjournalprint knsec3hash knsupdate kzonecheck
do
ln -s "${cfg.package}/bin/$executable" "$out/bin/$executable"
done
mkdir -p "$out/share"
ln -s '${cfg.package}/share/man' "$out/share/"
'';
};
in {
options = {
services.knot = {
enable = mkEnableOption "Knot authoritative-only DNS server";
extraArgs = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List of additional command line parameters for knotd
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Extra lines to be added verbatim to knot.conf
'';
};
package = mkOption {
type = types.package;
default = pkgs.knot-dns;
description = ''
Which Knot DNS package to use
'';
};
};
};
config = mkIf config.services.knot.enable {
systemd.services.knot = {
unitConfig.Documentation = "man:knotd(8) man:knot.conf(5) man:knotc(8) https://www.knot-dns.cz/docs/${cfg.package.version}/html/";
description = cfg.package.meta.description;
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" ];
after = ["network.target" ];
serviceConfig = {
Type = "notify";
ExecStart = "${cfg.package}/bin/knotd --config=${knotConfCheck configFile} --socket=${socketFile} ${concatStringsSep " " cfg.extraArgs}";
ExecReload = "${knot-cli-wrappers}/bin/knotc reload";
CapabilityBoundingSet = "CAP_NET_BIND_SERVICE CAP_SETPCAP";
AmbientCapabilities = "CAP_NET_BIND_SERVICE CAP_SETPCAP";
NoNewPrivileges = true;
DynamicUser = "yes";
RuntimeDirectory = "knot";
StateDirectory = "knot";
StateDirectoryMode = "0700";
PrivateDevices = true;
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
SystemCallArchitectures = "native";
Restart = "on-abort";
};
};
environment.systemPackages = [ knot-cli-wrappers ];
};
}

View File

@ -86,7 +86,7 @@ in {
startAt = cfg.interval;
serviceConfig = {
ExecStart =
"${pkgs.python3Packages.elasticsearch-curator}/bin/curator" +
"${pkgs.elasticsearch-curator}/bin/curator" +
" --config ${curatorConfig} ${curatorAction}";
};
};

View File

@ -12,15 +12,15 @@
An automatic setup is not supported by Matomo, so you need to configure Matomo
itself in the browser-based Matomo setup.
</para>
<section xml:id="module-services-matomo-database-setup">
<title>Database Setup</title>
<para>
You also need to configure a MariaDB or MySQL database and -user for Matomo
yourself, and enter those credentials in your browser. You can use
passwordless database authentication via the UNIX_SOCKET authentication
plugin with the following SQL commands:
<programlisting>
# For MariaDB
INSTALL PLUGIN unix_socket SONAME 'auth_socket';
CREATE DATABASE matomo;
@ -46,30 +46,32 @@
database is not on the same host.
</para>
</section>
<section xml:id="module-services-matomo-archive-processing">
<title>Archive Processing</title>
<para>
This module comes with the systemd service
<literal>matomo-archive-processing.service</literal> and a timer that
automatically triggers archive processing every hour. This means that you
can safely
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
disable browser triggers for Matomo archiving </link> at
<literal>Administration > System > General Settings</literal>.
</para>
<para>
With automatic archive processing, you can now also enable the option to
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
delete old visitor logs </link> at <literal>Administration > System >
Privacy</literal>, but make sure that you run <literal>systemctl start
matomo-archive-processing.service</literal> at least once without errors if
you have already collected data before, so that the reports get archived
before the source data gets deleted.
</para>
</section>
<section xml:id="module-services-matomo-backups">
<title>Backup</title>
<para>
You only need to take backups of your MySQL database and the
<filename>/var/lib/matomo/config/config.ini.php</filename> file. Use a user
@ -78,9 +80,9 @@
<link xlink:href="https://matomo.org/faq/how-to-install/faq_138/" />.
</para>
</section>
<section xml:id="module-services-matomo-issues">
<title>Issues</title>
<itemizedlist>
<listitem>
<para>
@ -97,7 +99,6 @@
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-matomo-other-web-servers">
<title>Using other Web Servers than nginx</title>

View File

@ -4,24 +4,26 @@
version="5.0"
xml:id="module-services-nextcloud">
<title>Nextcloud</title>
<para>
<link xlink:href="https://nextcloud.com/">Nextcloud</link> is an open-source, self-hostable cloud
platform. The server setup can be automated using
<link linkend="opt-services.nextcloud.enable">services.nextcloud</link>. A desktop client is packaged
at <literal>pkgs.nextcloud-client</literal>.
<link xlink:href="https://nextcloud.com/">Nextcloud</link> is an open-source,
self-hostable cloud platform. The server setup can be automated using
<link linkend="opt-services.nextcloud.enable">services.nextcloud</link>. A
desktop client is packaged at <literal>pkgs.nextcloud-client</literal>.
</para>
<section xml:id="module-services-nextcloud-basic-usage">
<title>Basic usage</title>
<para>
Nextcloud is a PHP-based application which requires an HTTP server
(<literal><link linkend="opt-services.nextcloud.enable">services.nextcloud</link></literal> optionally supports
<literal><link linkend="opt-services.nginx.enable">services.nginx</link></literal>) and a database
(it's recommended to use <literal><link linkend="opt-services.postgresql.enable">services.postgresql</link></literal>).
(<literal><link linkend="opt-services.nextcloud.enable">services.nextcloud</link></literal>
optionally supports
<literal><link linkend="opt-services.nginx.enable">services.nginx</link></literal>)
and a database (it's recommended to use
<literal><link linkend="opt-services.postgresql.enable">services.postgresql</link></literal>).
</para>
<para>
A very basic configuration may look like this:
<programlisting>{ pkgs, ... }:
{
services.nextcloud = {
@ -55,45 +57,59 @@
<link linkend="opt-networking.firewall.allowedTCPPorts">networking.firewall.allowedTCPPorts</link> = [ 80 443 ];
}</programlisting>
</para>
<para>
The options <literal>hostName</literal> and <literal>nginx.enable</literal>
are used internally to configure an HTTP server using
<literal><link xlink:href="https://php-fpm.org/">PHP-FPM</link></literal>
and <literal>nginx</literal>. The <literal>config</literal> attribute set is
used for the <literal>config.php</literal> which is used for the
application's configuration. <emphasis>Beware: this isn't entirely pure
since the config is modified by the application's runtime!</emphasis>
</para>
<para>
In case the application serves multiple hosts (those are checked with
<literal><link xlink:href="http://php.net/manual/en/reserved.variables.server.php">$_SERVER['HTTP_HOST']</link></literal>)
those can be added using
<literal><link linkend="opt-services.nextcloud.config.extraTrustedDomains">services.nextcloud.config.extraTrustedDomains</link></literal>.
</para>
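<para>
A sketch of serving an additional host name (the domain is a placeholder):
<programlisting>
services.nextcloud.config.extraTrustedDomains = [ "cloud.example.org" ];
</programlisting>
</para>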
</section>
<section xml:id="module-services-nextcloud-pitfalls-during-upgrade">
<title>Pitfalls</title>
<para>
Unfortunately Nextcloud appears to be very stateful when it comes to
managing its own configuration. The config file lives in the home directory
of the <literal>nextcloud</literal> user (by default
<literal>/var/lib/nextcloud/config/config.php</literal>) and is also used to
track several states of the application (e.g. whether installed or not).
</para>
<para>
Right now changes to the <literal>services.nextcloud.config</literal>
attribute set won't take effect after the first install (except
<literal><link linkend="opt-services.nextcloud.config.extraTrustedDomains">services.nextcloud.config.extraTrustedDomains</link></literal>)
since the actual configuration file is generated by the NextCloud installer
which also sets up critical parts such as the database structure.
</para>
<para>
<emphasis>Warning: don't delete <literal>config.php</literal>! This file
tracks the application's state and a deletion can cause unwanted
side-effects!</emphasis>
</para>
<para>
<emphasis>Warning: don't rerun <literal>nextcloud-occ
maintenance:install</literal>! This command tries to install the application
and can cause unwanted side-effects!</emphasis>
</para>
<para>
The issues are known and reported in <link xlink:href="https://github.com/NixOS/nixpkgs/issues/49783">#49783</link>, for now it's unfortunately necessary to manually work around these issues.
The issues are known and reported in
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/49783">#49783</link>,
for now it's unfortunately necessary to manually work around these issues.
</para>
</section>
</chapter>

View File

@ -86,11 +86,19 @@ in with lib; {
default = false;
description = "Serve and listen only through HTTPS.";
};
videoPaths = mkOption {
type = types.listOf types.path;
default = [];
example = [ "/home/okina/Videos/tehe_pero.webm" ];
description = "Videos that will be symlinked into www/videos.";
};
};
config = mkIf cfg.enable {
security.sudo.enable = cfg.enable;
services.postgresql.enable = cfg.enable;
services.postgresql.package = pkgs.postgresql_11;
services.meguca.passwordFile = mkDefault (pkgs.writeText "meguca-password-file" cfg.password);
services.meguca.postgresArgsFile = mkDefault (pkgs.writeText "meguca-postgres-args" cfg.postgresArgs);
services.meguca.postgresArgs = mkDefault "user=meguca password=${cfg.password} dbname=meguca sslmode=disable";
@ -102,8 +110,16 @@ in with lib; {
preStart = ''
# Ensure the data folder exists (creating it if necessary) and that links and permissions are correct
mkdir -p ${escapeShellArg cfg.dataDir}
ln -sf ${pkgs.meguca}/share/meguca/www ${escapeShellArg cfg.dataDir}
mkdir -p ${escapeShellArg cfg.dataDir}/www
rm -rf ${escapeShellArg cfg.dataDir}/www/videos
ln -sf ${pkgs.meguca}/share/meguca/www/* ${escapeShellArg cfg.dataDir}/www
unlink ${escapeShellArg cfg.dataDir}/www/videos
mkdir -p ${escapeShellArg cfg.dataDir}/www/videos
for vid in ${escapeShellArgs cfg.videoPaths}; do
ln -sf $vid ${escapeShellArg cfg.dataDir}/www/videos
done
chmod 750 ${escapeShellArg cfg.dataDir}
chown -R meguca:meguca ${escapeShellArg cfg.dataDir}

View File

@ -197,7 +197,7 @@ let
listenString = { addr, port, ssl, extraParameters ? [], ... }:
"listen ${addr}:${toString port} "
+ optionalString ssl "ssl "
+ optionalString vhost.http2 "http2 "
+ optionalString (ssl && vhost.http2) "http2 "
+ optionalString vhost.default "default_server "
+ optionalString (extraParameters != []) (concatStringsSep " " extraParameters)
+ ";";
@ -276,6 +276,7 @@ let
${optionalString (config.tryFiles != null) "try_files ${config.tryFiles};"}
${optionalString (config.root != null) "root ${config.root};"}
${optionalString (config.alias != null) "alias ${config.alias};"}
${optionalString (config.return != null) "return ${config.return};"}
${config.extraConfig}
${optionalString (config.proxyPass != null && cfg.recommendedProxySettings) "include ${recommendedProxyConfig};"}
}

View File

@ -64,6 +64,15 @@ with lib;
'';
};
return = mkOption {
type = types.nullOr types.str;
default = null;
example = "301 http://example.com$request_uri;";
description = ''
Adds a return directive, for e.g. redirections.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";

View File

@ -0,0 +1,125 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.unit;
configFile = pkgs.writeText "unit.json" cfg.config;
in {
options = {
services.unit = {
enable = mkEnableOption "Unit App Server";
package = mkOption {
type = types.package;
default = pkgs.unit;
defaultText = "pkgs.unit";
description = "Unit package to use.";
};
user = mkOption {
type = types.str;
default = "unit";
description = "User account under which unit runs.";
};
group = mkOption {
type = types.str;
default = "unit";
description = "Group account under which unit runs.";
};
stateDir = mkOption {
default = "/var/spool/unit";
description = "Unit data directory.";
};
logDir = mkOption {
default = "/var/log/unit";
description = "Unit log directory.";
};
config = mkOption {
type = types.str;
default = ''
{
"listeners": {},
"applications": {}
}
'';
example = literalExample ''
{
"listeners": {
"*:8300": {
"application": "example-php-72"
}
},
"applications": {
"example-php-72": {
"type": "php 7.2",
"processes": 4,
"user": "nginx",
"group": "nginx",
"root": "/var/www",
"index": "index.php",
"options": {
"file": "/etc/php.d/default.ini",
"admin": {
"max_execution_time": "30",
"max_input_time": "30",
"display_errors": "off",
"display_startup_errors": "off",
"open_basedir": "/dev/urandom:/proc/cpuinfo:/proc/meminfo:/etc/ssl/certs:/var/www",
"disable_functions": "exec,passthru,shell_exec,system"
}
}
}
}
}
'';
description = "Unit configuration in JSON format. More details here https://unit.nginx.org/configuration";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
systemd.tmpfiles.rules = [
"d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.logDir}' 0750 ${cfg.user} ${cfg.group} - -"
];
systemd.services.unit = {
description = "Unit App Server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ curl ];
preStart = ''
test -f '/run/unit/control.unit.sock' || rm -f '/run/unit/control.unit.sock'
'';
postStart = ''
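# Push the declarative JSON configuration to the running unitd instance
# through its control socket.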
curl -X PUT --data-binary '@${configFile}' --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
'';
serviceConfig = {
User = cfg.user;
Group = cfg.group;
AmbientCapabilities = "CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID";
CapabilityBoundingSet = "CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID";
ExecStart = ''
${cfg.package}/bin/unitd --control 'unix:/run/unit/control.unit.sock' --pid '/run/unit/unit.pid' \
--log '${cfg.logDir}/unit.log' --state '${cfg.stateDir}' --no-daemon \
--user ${cfg.user} --group ${cfg.group}
'';
RuntimeDirectory = "unit";
RuntimeDirectoryMode = "0750";
};
};
users.users = optionalAttrs (cfg.user == "unit") (singleton {
name = "unit";
group = cfg.group;
});
users.groups = optionalAttrs (cfg.group == "unit") (singleton {
name = "unit";
});
};
}

View File

@ -14,6 +14,9 @@ let
in
{
meta.maintainers = pkgs.pantheon.maintainers;
options = {
services.xserver.desktopManager.pantheon = {
@ -108,26 +111,26 @@ in
([ pkgs.pantheon.switchboard-plug-power ])
(mkIf config.services.printing.enable ([pkgs.system-config-printer]) )
];
services.pantheon.contractor.enable = true;
services.pantheon.contractor.enable = mkDefault true;
services.geoclue2.enable = mkDefault true;
# pantheon has pantheon-agent-geoclue2
services.geoclue2.enableDemoAgent = false;
services.gnome3.at-spi2-core.enable = true;
services.gnome3.evolution-data-server.enable = true;
services.gnome3.file-roller.enable = true;
services.gnome3.file-roller.enable = mkDefault true;
# TODO: gnome-keyring's xdg autostarts will still be in the environment (from elementary-session-settings) if disabled forcefully
services.gnome3.gnome-keyring.enable = true;
services.gnome3.gnome-settings-daemon.enable = true;
services.gnome3.gnome-settings-daemon.package = pkgs.pantheon.elementary-settings-daemon;
services.gnome3.gvfs.enable = true;
services.gnome3.rygel.enable = true;
services.gsignond.enable = true;
services.gnome3.rygel.enable = mkDefault true;
services.gsignond.enable = mkDefault true;
services.gsignond.plugins = with pkgs.gsignondPlugins; [ lastfm mail oauth ];
services.udisks2.enable = true;
services.upower.enable = config.powerManagement.enable;
services.xserver.libinput.enable = mkDefault true;
services.xserver.updateDbusEnvironment = true;
services.zeitgeist.enable = true;
services.zeitgeist.enable = mkDefault true;
networking.networkmanager.enable = mkDefault true;
networking.networkmanager.basePackages =
@ -152,19 +155,15 @@ in
"/share"
];
environment.systemPackages = pkgs.pantheon.artwork ++ pkgs.pantheon.desktop ++ pkgs.pantheon.services ++ cfg.sessionPath
++ (pkgs.gnome3.removePackagesByName pkgs.pantheon.apps config.environment.pantheon.excludePackages)
++ (with pkgs.gnome3;
[
adwaita-icon-theme
dconf
epiphany
environment.systemPackages =
pkgs.pantheon.artwork ++ pkgs.pantheon.desktop ++ pkgs.pantheon.services ++ cfg.sessionPath
++ (with pkgs; gnome3.removePackagesByName
([
gnome3.geary
gnome3.epiphany
gnome3.gnome-font-viewer
evince
geary
gnome-bluetooth
gnome-font-viewer
gnome-power-manager
])
] ++ pantheon.apps) config.environment.pantheon.excludePackages)
++ (with pkgs;
[
adwaita-qt
@ -172,6 +171,8 @@ in
glib
glib-networking
gnome-menus
gnome3.adwaita-icon-theme
gnome3.dconf
gtk3.out
hicolor-icon-theme
lightlocker
@ -187,6 +188,7 @@ in
roboto-mono
pantheon.elementary-redacted-script # needed by screenshot-tool
];
fonts.fontconfig.defaultFonts = {
monospace = [ "Roboto Mono" ];
sansSerif = [ "Open Sans" ];

View File

@ -25,7 +25,7 @@ in
{ name = "dwm";
start =
''
${pkgs.dwm}/bin/dwm &
dwm &
waitPID=$!
'';
};

View File

@ -246,7 +246,7 @@ in
default = [ "ati" "cirrus" "vesa" "vmware" "modesetting" ];
example = [
"ati_unfree" "amdgpu" "amdgpu-pro"
"nv" "nvidia" "nvidiaLegacy340" "nvidiaLegacy304"
"nv" "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
];
# TODO(@oxij): think how to easily add the rest, like those nvidia things
relatedPackages = concatLists
@ -259,6 +259,11 @@ in
The names of the video drivers the configuration
supports. They will be tried in order until one that
supports your card is found.
Don't combine those with "incompatible" OpenGL implementations,
e.g. free ones (mesa-based) with proprietary ones.
For unfree "nvidia*", the supported GPU lists are on
https://www.nvidia.com/object/unix.html
'';
};

View File

@ -8,13 +8,17 @@ let
efi = config.boot.loader.efi;
realGrub = if cfg.version == 1 then pkgs.grub
else if cfg.zfsSupport then pkgs.grub2.override { zfsSupport = true; }
grubPkgs =
# Package set of targeted architecture
if cfg.forcei686 then pkgs.pkgsi686Linux else pkgs;
realGrub = if cfg.version == 1 then grubPkgs.grub
else if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; }
else if cfg.trustedBoot.enable
then if cfg.trustedBoot.isHPLaptop
then pkgs.trustedGrub-for-HP
else pkgs.trustedGrub
else pkgs.grub2;
then grubPkgs.trustedGrub-for-HP
else grubPkgs.trustedGrub
else grubPkgs.grub2;
grub =
# Don't include GRUB if we're only generating a GRUB menu (e.g.,
@ -58,14 +62,10 @@ let
version extraConfig extraPerEntryConfig extraEntries forceInstall useOSProber
extraEntriesBeforeNixOS extraPrepareConfig extraInitrd configurationLimit copyKernels
default fsIdentifier efiSupport efiInstallAsRemovable gfxmodeEfi gfxmodeBios;
path = (makeBinPath ([
pkgs.coreutils pkgs.gnused pkgs.gnugrep pkgs.findutils pkgs.diffutils pkgs.btrfs-progs
pkgs.utillinux ]
++ (optional (cfg.efiSupport && (cfg.version == 2)) pkgs.efibootmgr)
++ (optionals cfg.useOSProber [pkgs.busybox pkgs.os-prober])
)) + ":" + (makeSearchPathOutput "bin" "sbin" [
pkgs.mdadm pkgs.utillinux
]);
path = with pkgs; makeBinPath (
[ coreutils gnused gnugrep findutils diffutils btrfs-progs utillinux mdadm ]
++ optional (cfg.efiSupport && (cfg.version == 2)) efibootmgr
++ optionals cfg.useOSProber [ busybox os-prober ]);
font = if cfg.font == null then ""
else (if lib.last (lib.splitString "." cfg.font) == "pf2"
then cfg.font
@ -512,6 +512,15 @@ in
'';
};
forcei686 = mkOption {
default = false;
type = types.bool;
description = ''
Whether to force the use of an ia32 boot loader on x64 systems. Required
to install and run NixOS on 64-bit x86 systems with 32-bit (U)EFI.
'';
};
trustedBoot = {
enable = mkOption {

View File

@ -209,10 +209,17 @@ in
assertions = let
ls = sep: concatMapStringsSep sep (x: x.mountPoint);
notAutoResizable = fs: fs.autoResize && !(hasPrefix "ext" fs.fsType || fs.fsType == "f2fs");
in [
{ assertion = ! (fileSystems' ? "cycle");
message = "The fileSystems option can't be topologically sorted: mountpoint dependency path ${ls " -> " fileSystems'.cycle} loops to ${ls ", " fileSystems'.loops}";
}
{ assertion = ! (any notAutoResizable fileSystems);
message = let
fs = head (filter notAutoResizable fileSystems);
in
"Mountpoint '${fs.mountPoint}': 'autoResize = true' is not supported for 'fsType = \"${fs.fsType}\"':${if fs.fsType == "auto" then " fsType has to be explicitly set and" else ""} only the ext filesystems and f2fs support it.";
}
];
# Export for use in other modules

View File

@ -31,6 +31,7 @@ in
fileSystems."/" = {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
autoResize = true;
};

View File

@ -0,0 +1,144 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.virtualisation.anbox;
kernelPackages = config.boot.kernelPackages;
addrOpts = v: addr: pref: name: {
address = mkOption {
default = addr;
type = types.str;
description = ''
IPv${toString v} ${name} address.
'';
};
prefixLength = mkOption {
default = pref;
type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
description = ''
Subnet mask of the ${name} address, specified as the number of
bits in the prefix (<literal>${if v == 4 then "24" else "64"}</literal>).
'';
};
};
in
{
options.virtualisation.anbox = {
enable = mkEnableOption "Anbox";
image = mkOption {
default = pkgs.anbox.image;
example = literalExample "pkgs.anbox.image";
type = types.package;
description = ''
Base android image for Anbox.
'';
};
extraInit = mkOption {
type = types.lines;
default = "";
description = ''
Extra shell commands to be run inside the container image during init.
'';
};
ipv4 = {
container = addrOpts 4 "192.168.250.2" 24 "Container";
gateway = addrOpts 4 "192.168.250.1" 24 "Host";
dns = mkOption {
default = "1.1.1.1";
type = types.string;
description = ''
Container DNS server.
'';
};
};
};
config = mkIf cfg.enable {
assertions = singleton {
assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
message = "Anbox needs user namespace support to work properly";
};
environment.systemPackages = with pkgs; [ anbox ];
boot.kernelModules = [ "ashmem_linux" "binder_linux" ];
boot.extraModulePackages = [ kernelPackages.anbox ];
services.udev.extraRules = ''
KERNEL=="ashmem", NAME="%k", MODE="0666"
KERNEL=="binder*", NAME="%k", MODE="0666"
'';
virtualisation.lxc.enable = true;
networking.bridges.anbox0.interfaces = [];
networking.interfaces.anbox0.ipv4.addresses = [ cfg.ipv4.gateway ];
networking.nat = {
enable = true;
internalInterfaces = [ "anbox0" ];
};
systemd.services.anbox-container-manager = let
anboxloc = "/var/lib/anbox";
in {
description = "Anbox Container Management Daemon";
environment.XDG_RUNTIME_DIR="${anboxloc}";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
preStart = let
initsh = let
ip = cfg.ipv4.container.address;
gw = cfg.ipv4.gateway.address;
dns = cfg.ipv4.dns;
in
pkgs.writeText "nixos-init" (''
#!/system/bin/sh
setprop nixos.version ${config.system.nixos.version}
# we don't have radio
setprop ro.radio.noril yes
stop ril-daemon
# speed up boot
setprop debug.sf.nobootanimation 1
'' + cfg.extraInit);
initshloc = "${anboxloc}/rootfs-overlay/system/etc/init.goldfish.sh";
in ''
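# Install the generated init script into Anbox's rootfs overlay so it is
# executed when the Android container boots.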
mkdir -p ${anboxloc}
mkdir -p $(dirname ${initshloc})
[ -f ${initshloc} ] && rm ${initshloc}
cp ${initsh} ${initshloc}
chown 100000:100000 ${initshloc}
chmod +x ${initshloc}
'';
serviceConfig = {
ExecStart = ''
${pkgs.anbox}/bin/anbox container-manager \
--data-path=${anboxloc} \
--android-image=${cfg.image} \
--container-network-address=${cfg.ipv4.container.address} \
--container-network-gateway=${cfg.ipv4.gateway.address} \
--container-network-dns-servers=${cfg.ipv4.dns} \
--use-rootfs-overlay \
--privileged
'';
};
};
};
}

View File

@ -36,8 +36,9 @@ let
#! ${pkgs.runtimeShell} -e
# Initialise the container side of the veth pair.
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ] || [ -n "$HOST_BRIDGE" ]; then
if [ -n "$HOST_ADDRESS" ] || [ -n "$HOST_ADDRESS6" ] ||
[ -n "$LOCAL_ADDRESS" ] || [ -n "$LOCAL_ADDRESS6" ] ||
[ -n "$HOST_BRIDGE" ]; then
ip link set host0 name eth0
ip link set dev eth0 up
@ -88,7 +89,8 @@ let
extraFlags+=" --private-network"
fi
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ]; then
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ] ||
[ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
extraFlags+=" --network-veth"
fi
@ -159,7 +161,8 @@ let
# Clean up existing machined registration and interfaces.
machinectl terminate "$INSTANCE" 2> /dev/null || true
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ]; then
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ] ||
[ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
ip link del dev "ve-$INSTANCE" 2> /dev/null || true
ip link del dev "vb-$INSTANCE" 2> /dev/null || true
fi
@ -208,7 +211,8 @@ let
'';
in
''
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ]; then
if [ -n "$HOST_ADDRESS" ] || [ -n "$LOCAL_ADDRESS" ] ||
[ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
if [ -z "$HOST_BRIDGE" ]; then
ifaceHost=ve-$INSTANCE
ip link set dev $ifaceHost up

View File

@ -0,0 +1,233 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.docker-containers;
dockerContainer =
{ name, config, ... }: {
options = {
image = mkOption {
type = types.str;
description = "Docker image to run.";
example = "library/hello-world";
};
cmd = mkOption {
type = with types; listOf str;
default = [];
description = "Commandline arguments to pass to the image's entrypoint.";
example = literalExample ''
["--port=9000"]
'';
};
entrypoint = mkOption {
type = with types; nullOr str;
description = "Overwrite the default entrypoint of the image.";
default = null;
example = "/bin/my-app";
};
environment = mkOption {
type = with types; attrsOf str;
default = {};
description = "Environment variables to set for this container.";
example = literalExample ''
{
DATABASE_HOST = "db.example.com";
DATABASE_PORT = "3306";
}
'';
};
log-driver = mkOption {
type = types.str;
default = "none";
description = ''
Logging driver for the container. The default of
<literal>"none"</literal> means that the container's logs will be
handled as part of the systemd unit. Setting this to
<literal>"journald"</literal> will result in duplicate logging, but
the container's logs will be visible to the <command>docker
logs</command> command.
For more details and a full list of logging drivers, refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">
Docker engine documentation</link>
'';
};
ports = mkOption {
type = with types; listOf str;
default = [];
description = ''
Network ports to publish from the container to the outer host.
</para>
<para>
Valid formats:
</para>
<itemizedlist>
<listitem>
<para>
<literal>&lt;ip&gt;:&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;ip&gt;::&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;containerPort&gt;</literal>
</para>
</listitem>
</itemizedlist>
<para>
Both <literal>hostPort</literal> and
<literal>containerPort</literal> can be specified as a range of
ports. When specifying ranges for both, the number of container
ports in the range must match the number of host ports in the
range. Example: <literal>1234-1236:1234-1236/tcp</literal>
</para>
<para>
When specifying a range for <literal>hostPort</literal> only, the
<literal>containerPort</literal> must <emphasis>not</emphasis> be a
range. In this case, the container port is published somewhere
within the specified <literal>hostPort</literal> range. Example:
<literal>1234-1236:1234/tcp</literal>
</para>
<para>
Refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#expose-incoming-ports">
Docker engine documentation</link> for full details.
'';
example = literalExample ''
[
"8080:9000"
]
'';
};
user = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Override the username or UID (and optionally groupname or GID) used
in the container.
'';
example = "nobody:nogroup";
};
volumes = mkOption {
type = with types; listOf str;
default = [];
description = ''
List of volumes to attach to this container.
Note that this is a list of <literal>"src:dst"</literal> strings to
allow for <literal>src</literal> to refer to
<literal>/nix/store</literal> paths, which would be difficult with an
attribute set. There are also a variety of mount options available
as a third field; please refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#volume-shared-filesystems">
docker engine documentation</link> for details.
'';
example = literalExample ''
[
"volume_name:/path/inside/container"
"/path/on/host:/path/inside/container"
]
'';
};
workdir = mkOption {
type = with types; nullOr str;
default = null;
description = "Override the default working directory for the container.";
example = "/var/lib/hello_world";
};
extraDockerOptions = mkOption {
type = with types; listOf str;
default = [];
description = "Extra options for <command>docker run</command>.";
example = literalExample ''
["--network=host"]
'';
};
};
};
mkService = name: container: {
wantedBy = [ "multi-user.target" ];
after = [ "docker.service" "docker.socket" ];
requires = [ "docker.service" "docker.socket" ];
serviceConfig = {
ExecStart = concatStringsSep " \\\n " ([
"${pkgs.docker}/bin/docker run"
"--rm"
"--name=%n"
"--log-driver=${container.log-driver}"
] ++ optional (! isNull container.entrypoint)
"--entrypoint=${escapeShellArg container.entrypoint}"
++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
++ map (p: "-p ${escapeShellArg p}") container.ports
++ optional (! isNull container.user) "-u ${escapeShellArg container.user}"
++ map (v: "-v ${escapeShellArg v}") container.volumes
++ optional (! isNull container.workdir) "-w ${escapeShellArg container.workdir}"
++ map escapeShellArg container.extraDockerOptions
++ [container.image]
++ map escapeShellArg container.cmd
);
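# A leading "-" makes systemd ignore a non-zero exit status, e.g. when
# there is no leftover container to remove before starting.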
ExecStartPre = "-${pkgs.docker}/bin/docker rm -f %n";
ExecStop = "${pkgs.docker}/bin/docker stop %n";
ExecStopPost = "-${pkgs.docker}/bin/docker rm -f %n";
### There is no generalized way of supporting `reload` for docker
### containers. Some containers may respond well to SIGHUP sent to their
### init process, but it is not guaranteed; some apps have other reload
### mechanisms, some don't have a reload signal at all, and some docker
### images just have broken signal handling. The best compromise in this
### case is probably to leave ExecReload undefined, so `systemctl reload`
### will at least result in an error instead of potentially undefined
### behaviour.
###
### Advanced users can still override this part of the unit to implement
### a custom reload handler, since the result of all this is a normal
### systemd service from the perspective of the NixOS module system.
###
# ExecReload = ...;
###
TimeoutStartSec = 0;
TimeoutStopSec = 120;
Restart = "always";
};
};
in {
options.docker-containers = mkOption {
default = {};
type = types.attrsOf (types.submodule dockerContainer);
description = "Docker containers to run as systemd services.";
};
config = mkIf (cfg != {}) {
systemd.services = mapAttrs' (n: v: nameValuePair "docker-${n}" (mkService n v)) cfg;
virtualisation.docker.enable = true;
};
}

View File

@ -20,6 +20,7 @@ in
config = {
fileSystems."/" = {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
autoResize = true;
};

View File

@ -64,6 +64,7 @@ in rec {
#(all nixos.tests.containers)
(all nixos.tests.containers-imperative)
(all nixos.tests.containers-ipv4)
(all nixos.tests.containers-ipv6)
nixos.tests.chromium.x86_64-linux or []
(all nixos.tests.firefox)
(all nixos.tests.firewall)

View File

@ -33,6 +33,7 @@ in rec {
inherit (nixos'.tests)
containers-imperative
containers-ipv4
containers-ipv6
firewall
ipv6
login

View File

@ -43,6 +43,7 @@ in
clickhouse = handleTest ./clickhouse.nix {};
cloud-init = handleTest ./cloud-init.nix {};
codimd = handleTest ./codimd.nix {};
colord = handleTest ./colord.nix {};
containers-bridge = handleTest ./containers-bridge.nix {};
containers-extra_veth = handleTest ./containers-extra_veth.nix {};
containers-hosts = handleTest ./containers-hosts.nix {};
@ -58,6 +59,7 @@ in
dhparams = handleTest ./dhparams.nix {};
dnscrypt-proxy = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy.nix {};
docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
docker-containers = handleTestOn ["x86_64-linux"] ./docker-containers.nix {};
docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {};
docker-preloader = handleTestOn ["x86_64-linux"] ./docker-preloader.nix {};
docker-registry = handleTest ./docker-registry.nix {};
@ -117,6 +119,7 @@ in
kernel-latest = handleTest ./kernel-latest.nix {};
kernel-lts = handleTest ./kernel-lts.nix {};
keymap = handleTest ./keymap.nix {};
knot = handleTest ./knot.nix {};
kubernetes.dns = handleTestOn ["x86_64-linux"] ./kubernetes/dns.nix {};
# kubernetes.e2e should eventually replace kubernetes.rbac when it works
#kubernetes.e2e = handleTestOn ["x86_64-linux"] ./kubernetes/e2e.nix {};
@ -172,6 +175,7 @@ in
osquery = handleTest ./osquery.nix {};
osrm-backend = handleTest ./osrm-backend.nix {};
ostree = handleTest ./ostree.nix {};
overlayfs = handleTest ./overlayfs.nix {};
pam-oath-login = handleTest ./pam-oath-login.nix {};
pam-u2f = handleTest ./pam-u2f.nix {};
pantheon = handleTest ./pantheon.nix {};

View File

@ -5,116 +5,109 @@
with import ../lib/testing.nix { inherit system pkgs; };
let
# Test ensures buildbot master comes up correctly and workers can connect
mkBuildbotTest = python: makeTest {
name = "buildbot";
# Test ensures buildbot master comes up correctly and workers can connect
makeTest {
name = "buildbot";
nodes = {
bbmaster = { pkgs, ... }: {
services.buildbot-master = {
enable = true;
package = python.pkgs.buildbot-full;
nodes = {
bbmaster = { pkgs, ... }: {
services.buildbot-master = {
enable = true;
# NOTE: use fake repo due to no internet in hydra ci
factorySteps = [
"steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
"steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
];
changeSource = [
"changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
];
};
networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
environment.systemPackages = with pkgs; [ git python.pkgs.buildbot-full ];
};
bbworker = { pkgs, ... }: {
services.buildbot-worker = {
enable = true;
masterUrl = "bbmaster:9989";
};
environment.systemPackages = with pkgs; [ git python.pkgs.buildbot-worker ];
};
gitrepo = { pkgs, ... }: {
services.openssh.enable = true;
networking.firewall.allowedTCPPorts = [ 22 9418 ];
environment.systemPackages = with pkgs; [ git ];
# NOTE: use fake repo due to no internet in hydra ci
factorySteps = [
"steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
"steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
];
changeSource = [
"changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
];
};
networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
environment.systemPackages = with pkgs; [ git python3Packages.buildbot-full ];
};
testScript = ''
#Start up and populate fake repo
$gitrepo->waitForUnit("multi-user.target");
print($gitrepo->execute(" \
git config --global user.name 'Nobody Fakeuser' && \
git config --global user.email 'nobody\@fakerepo.com' && \
rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo && \
mkdir -pv /srv/repos/fakerepo ~/.ssh && \
ssh-keyscan -H gitrepo > ~/.ssh/known_hosts && \
cat ~/.ssh/known_hosts && \
cd /srv/repos/fakerepo && \
git init && \
echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh && \
cat fakerepo.sh && \
touch .git/git-daemon-export-ok && \
git add fakerepo.sh .git/git-daemon-export-ok && \
git commit -m fakerepo && \
git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr & \
"));
# Test gitrepo
$bbmaster->waitForUnit("network-online.target");
#$bbmaster->execute("nc -z gitrepo 9418");
print($bbmaster->execute(" \
rm -rfv /tmp/fakerepo && \
git clone git://gitrepo/fakerepo /tmp/fakerepo && \
pwd && \
ls -la && \
ls -la /tmp/fakerepo \
"));
# Test start master and connect worker
$bbmaster->waitForUnit("buildbot-master.service");
$bbmaster->waitUntilSucceeds("curl -s --head http://bbmaster:8010") =~ /200 OK/;
$bbworker->waitForUnit("network-online.target");
$bbworker->execute("nc -z bbmaster 8010");
$bbworker->execute("nc -z bbmaster 9989");
$bbworker->waitForUnit("buildbot-worker.service");
print($bbworker->execute("ls -la /home/bbworker/worker"));
# Test stop buildbot master and worker
print($bbmaster->execute(" \
systemctl -l --no-pager status buildbot-master && \
systemctl stop buildbot-master \
"));
$bbworker->fail("nc -z bbmaster 8010");
$bbworker->fail("nc -z bbmaster 9989");
print($bbworker->execute(" \
systemctl -l --no-pager status buildbot-worker && \
systemctl stop buildbot-worker && \
ls -la /home/bbworker/worker \
"));
# Test buildbot daemon mode
$bbmaster->execute("buildbot create-master /tmp");
$bbmaster->execute("mv -fv /tmp/master.cfg.sample /tmp/master.cfg");
$bbmaster->execute("sed -i 's/8010/8011/' /tmp/master.cfg");
$bbmaster->execute("buildbot start /tmp");
$bbworker->execute("nc -z bbmaster 8011");
$bbworker->waitUntilSucceeds("curl -s --head http://bbmaster:8011") =~ /200 OK/;
$bbmaster->execute("buildbot stop /tmp");
$bbworker->fail("nc -z bbmaster 8011");
'';
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ nand0p ];
bbworker = { pkgs, ... }: {
services.buildbot-worker = {
enable = true;
masterUrl = "bbmaster:9989";
};
environment.systemPackages = with pkgs; [ git python3Packages.buildbot-worker ];
};
gitrepo = { pkgs, ... }: {
services.openssh.enable = true;
networking.firewall.allowedTCPPorts = [ 22 9418 ];
environment.systemPackages = with pkgs; [ git ];
};
};
in {
python2 = mkBuildbotTest pkgs.python2;
python3 = mkBuildbotTest pkgs.python3;
testScript = ''
#Start up and populate fake repo
$gitrepo->waitForUnit("multi-user.target");
print($gitrepo->execute(" \
git config --global user.name 'Nobody Fakeuser' && \
git config --global user.email 'nobody\@fakerepo.com' && \
rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo && \
mkdir -pv /srv/repos/fakerepo ~/.ssh && \
ssh-keyscan -H gitrepo > ~/.ssh/known_hosts && \
cat ~/.ssh/known_hosts && \
cd /srv/repos/fakerepo && \
git init && \
echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh && \
cat fakerepo.sh && \
touch .git/git-daemon-export-ok && \
git add fakerepo.sh .git/git-daemon-export-ok && \
git commit -m fakerepo && \
git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr & \
"));
# Test gitrepo
$bbmaster->waitForUnit("network-online.target");
#$bbmaster->execute("nc -z gitrepo 9418");
print($bbmaster->execute(" \
rm -rfv /tmp/fakerepo && \
git clone git://gitrepo/fakerepo /tmp/fakerepo && \
pwd && \
ls -la && \
ls -la /tmp/fakerepo \
"));
# Test start master and connect worker
$bbmaster->waitForUnit("buildbot-master.service");
$bbmaster->waitUntilSucceeds("curl -s --head http://bbmaster:8010") =~ /200 OK/;
$bbworker->waitForUnit("network-online.target");
$bbworker->execute("nc -z bbmaster 8010");
$bbworker->execute("nc -z bbmaster 9989");
$bbworker->waitForUnit("buildbot-worker.service");
print($bbworker->execute("ls -la /home/bbworker/worker"));
# Test stop buildbot master and worker
print($bbmaster->execute(" \
systemctl -l --no-pager status buildbot-master && \
systemctl stop buildbot-master \
"));
$bbworker->fail("nc -z bbmaster 8010");
$bbworker->fail("nc -z bbmaster 9989");
print($bbworker->execute(" \
systemctl -l --no-pager status buildbot-worker && \
systemctl stop buildbot-worker && \
ls -la /home/bbworker/worker \
"));
# Test buildbot daemon mode
$bbmaster->execute("buildbot create-master /tmp");
$bbmaster->execute("mv -fv /tmp/master.cfg.sample /tmp/master.cfg");
$bbmaster->execute("sed -i 's/8010/8011/' /tmp/master.cfg");
$bbmaster->execute("buildbot start /tmp");
$bbworker->execute("nc -z bbmaster 8011");
$bbworker->waitUntilSucceeds("curl -s --head http://bbmaster:8011") =~ /200 OK/;
$bbmaster->execute("buildbot stop /tmp");
$bbworker->fail("nc -z bbmaster 8011");
'';
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ nand0p ];
}

nixos/tests/colord.nix (new file, 18 lines)
View File

@ -0,0 +1,18 @@
# run installed tests
import ./make-test.nix ({ pkgs, ... }:
{
name = "colord";
meta = {
maintainers = pkgs.colord.meta.maintainers;
};
machine = { pkgs, ... }: {
environment.systemPackages = with pkgs; [ gnome-desktop-testing ];
};
testScript = ''
$machine->succeed("gnome-desktop-testing-runner -d '${pkgs.colord.installedTests}/share'");
'';
})

View File

@ -1,10 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:blogChannel="http://backend.userland.com/blogChannelModule" version="2.0"><channel><title>NixOS News</title><link>https://nixos.org</link><description>News for NixOS, the purely functional Linux distribution.</description><image><title>NixOS</title><url>https://nixos.org/logo/nixos-logo-only-hires.png</url><link>https://nixos.org/</link></image><item><title>
NixOS 18.09 released
</title><link>https://nixos.org/news.html</link><description>
<a href="https://github.com/NixOS/nixos-artwork/blob/master/releases/18.09-jellyfish/jellyfish.png">
<img class="inline" src="logo/nixos-logo-18.09-jellyfish-lores.png" alt="18.09 Jellyfish logo" with="100" height="87"/>
</a>
<rss xmlns:blogChannel="http://backend.userland.com/blogChannelModule" version="2.0">
<channel>
<title>NixOS News</title><link>https://nixos.org</link>
<description>News for NixOS, the purely functional Linux distribution.</description>
<image>
<title>NixOS</title>
<url>https://nixos.org/logo/nixos-logo-only-hires.png</url><link>https://nixos.org/</link>
</image>
<item>
<title>NixOS 18.09 released</title><link>https://nixos.org/news.html</link>
<description>
<a href="https://github.com/NixOS/nixos-artwork/blob/master/releases/18.09-jellyfish/jellyfish.png">
<img class="inline" src="logo/nixos-logo-18.09-jellyfish-lores.png" alt="18.09 Jellyfish logo" with="100" height="87"/>
</a>
NixOS 18.09 “Jellyfish” has been released, the tenth stable release branch.
See the <a href="/nixos/manual/release-notes.html#sec-release-18.09">release notes</a>
for details. You can get NixOS 18.09 ISOs and VirtualBox appliances
@ -12,4 +20,8 @@
For information on how to upgrade from older release branches
to 18.09, check out the
<a href="/nixos/manual/index.html#sec-upgrading">manual section on upgrading</a>.
</description><pubDate>Sat Oct 06 2018 00:00:00 GMT</pubDate></item></channel></rss>
</description>
<pubDate>Sat Oct 06 2018 00:00:00 GMT</pubDate>
</item>
</channel>
</rss>

View File

@ -0,0 +1,29 @@
# Test Docker containers as systemd units
import ./make-test.nix ({ pkgs, lib, ... }: {
name = "docker-containers";
meta = {
maintainers = with lib.maintainers; [ benley ];
};
nodes = {
docker = { pkgs, ... }:
{
virtualisation.docker.enable = true;
virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.nginx ];
docker-containers.nginx = {
image = "nginx-container";
ports = ["8181:80"];
};
};
};
testScript = ''
startAll;
$docker->waitForUnit("docker-nginx.service");
$docker->waitForOpenPort(8181);
$docker->waitUntilSucceeds("curl http://localhost:8181|grep Hello");
'';
})

View File

@ -33,11 +33,13 @@ in {
longitude = "0.0";
elevation = 0;
auth_providers = [
{ type = "legacy_api_password"; }
{
type = "legacy_api_password";
api_password = apiPassword;
}
];
};
frontend = { };
http.api_password = apiPassword;
mqtt = { # Use hbmqtt as broker
password = mqttPassword;
};

nixos/tests/knot.nix (new file, 197 lines)
View File

@ -0,0 +1,197 @@
import ./make-test.nix ({ pkgs, lib, ...} :
let
common = {
networking.firewall.enable = false;
networking.useDHCP = false;
};
exampleZone = pkgs.writeTextDir "example.com.zone" ''
@ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
@ NS ns1
@ NS ns2
ns1 A 192.168.0.1
ns1 AAAA fd00::1
ns2 A 192.168.0.2
ns2 AAAA fd00::2
www A 192.0.2.1
www AAAA 2001:DB8::1
sub NS ns.example.com.
'';
delegatedZone = pkgs.writeTextDir "sub.example.com.zone" ''
@ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
@ NS ns1.example.com.
@ NS ns2.example.com.
@ A 192.0.2.2
@ AAAA 2001:DB8::2
'';
knotZonesEnv = pkgs.buildEnv {
name = "knot-zones";
paths = [ exampleZone delegatedZone ];
};
in {
name = "knot";
nodes = {
master = { lib, ... }: {
imports = [ common ];
networking.interfaces.eth1 = {
ipv4.addresses = lib.mkForce [
{ address = "192.168.0.1"; prefixLength = 24; }
];
ipv6.addresses = lib.mkForce [
{ address = "fd00::1"; prefixLength = 64; }
];
};
services.knot.enable = true;
services.knot.extraArgs = [ "-v" ];
services.knot.extraConfig = ''
server:
listen: 0.0.0.0@53
listen: ::@53
acl:
- id: slave_acl
address: 192.168.0.2
action: transfer
remote:
- id: slave
address: 192.168.0.2@53
template:
- id: default
storage: ${knotZonesEnv}
notify: [slave]
acl: [slave_acl]
dnssec-signing: on
# Input-only zone files
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
# prevents modification of the zonefiles, since the zonefiles are immutable
zonefile-sync: -1
zonefile-load: difference
journal-content: changes
# move databases below the state directory, because they need to be writable
journal-db: /var/lib/knot/journal
kasp-db: /var/lib/knot/kasp
timer-db: /var/lib/knot/timer
zone:
- domain: example.com
file: example.com.zone
- domain: sub.example.com
file: sub.example.com.zone
log:
- target: syslog
any: info
'';
};
slave = { lib, ... }: {
imports = [ common ];
networking.interfaces.eth1 = {
ipv4.addresses = lib.mkForce [
{ address = "192.168.0.2"; prefixLength = 24; }
];
ipv6.addresses = lib.mkForce [
{ address = "fd00::2"; prefixLength = 64; }
];
};
services.knot.enable = true;
services.knot.extraArgs = [ "-v" ];
services.knot.extraConfig = ''
server:
listen: 0.0.0.0@53
listen: ::@53
acl:
- id: notify_from_master
address: 192.168.0.1
action: notify
remote:
- id: master
address: 192.168.0.1@53
template:
- id: default
master: master
acl: [notify_from_master]
# zonefileless setup
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
zonefile-sync: -1
zonefile-load: none
journal-content: all
# move databases below the state directory, because they need to be writable
journal-db: /var/lib/knot/journal
kasp-db: /var/lib/knot/kasp
timer-db: /var/lib/knot/timer
zone:
- domain: example.com
file: example.com.zone
- domain: sub.example.com
file: sub.example.com.zone
log:
- target: syslog
any: info
'';
};
client = { lib, nodes, ... }: {
imports = [ common ];
networking.interfaces.eth1 = {
ipv4.addresses = [
{ address = "192.168.0.3"; prefixLength = 24; }
];
ipv6.addresses = [
{ address = "fd00::3"; prefixLength = 64; }
];
};
environment.systemPackages = [ pkgs.knot-dns ];
};
};
testScript = { nodes, ... }: let
master4 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv4.addresses).address;
master6 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv6.addresses).address;
slave4 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv4.addresses).address;
slave6 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv6.addresses).address;
in ''
startAll;
$client->waitForUnit("network.target");
$master->waitForUnit("knot.service");
$slave->waitForUnit("knot.service");
sub assertResponse {
my ($knot, $query_type, $query, $expected) = @_;
my $out = $client->succeed("khost -t $query_type $query $knot");
$client->log("$knot replies with: $out");
chomp $out;
die "DNS query for $query ($query_type) against $knot gave '$out' instead of '$expected'"
if ($out !~ $expected);
}
foreach ("${master4}", "${master6}", "${slave4}", "${slave6}") {
subtest $_, sub {
assertResponse($_, "SOA", "example.com", qr/start of authority.*?noc\.example\.com/);
assertResponse($_, "A", "example.com", qr/has no [^ ]+ record/);
assertResponse($_, "AAAA", "example.com", qr/has no [^ ]+ record/);
assertResponse($_, "A", "www.example.com", qr/address 192.0.2.1$/);
assertResponse($_, "AAAA", "www.example.com", qr/address 2001:db8::1$/);
assertResponse($_, "NS", "sub.example.com", qr/nameserver is ns\d\.example\.com.$/);
assertResponse($_, "A", "sub.example.com", qr/address 192.0.2.2$/);
assertResponse($_, "AAAA", "sub.example.com", qr/address 2001:db8::2$/);
assertResponse($_, "RRSIG", "www.example.com", qr/RR set signature is/);
assertResponse($_, "DNSKEY", "example.com", qr/DNSSEC key is/);
};
}
'';
})

nixos/tests/overlayfs.nix (new file, 57 lines)
View File

@ -0,0 +1,57 @@
import ./make-test.nix ({ pkgs, ... }: {
name = "overlayfs";
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ bachp ];
machine = { pkgs, ... }: {
virtualisation.emptyDiskImages = [ 512 ];
networking.hostId = "deadbeef";
environment.systemPackages = with pkgs; [ parted ];
};
testScript = ''
$machine->succeed("ls /dev");
$machine->succeed("mkdir -p /tmp/mnt");
# Test ext4 + overlayfs
$machine->succeed(
"mkfs.ext4 -F -L overlay-ext4 /dev/vdb",
"mount -t ext4 /dev/vdb /tmp/mnt",
"mkdir -p /tmp/mnt/upper /tmp/mnt/lower /tmp/mnt/work /tmp/mnt/merged",
# Setup some existing files
"echo 'Replace' > /tmp/mnt/lower/replace.txt",
"echo 'Append' > /tmp/mnt/lower/append.txt",
"echo 'Overwrite' > /tmp/mnt/lower/overwrite.txt",
"mount -t overlay overlay -o lowerdir=/tmp/mnt/lower,upperdir=/tmp/mnt/upper,workdir=/tmp/mnt/work /tmp/mnt/merged",
# Test new
"echo 'New' > /tmp/mnt/merged/new.txt",
"[[ \"\$(cat /tmp/mnt/merged/new.txt)\" == \"New\" ]]",
# Test replace
"[[ \"\$(cat /tmp/mnt/merged/replace.txt)\" == \"Replace\" ]]",
"echo 'Replaced' > /tmp/mnt/merged/replace-tmp.txt",
"mv /tmp/mnt/merged/replace-tmp.txt /tmp/mnt/merged/replace.txt",
"[[ \"\$(cat /tmp/mnt/merged/replace.txt)\" == \"Replaced\" ]]",
# Overwrite
"[[ \"\$(cat /tmp/mnt/merged/overwrite.txt)\" == \"Overwrite\" ]]",
"echo 'Overwritten' > /tmp/mnt/merged/overwrite.txt",
"[[ \"\$(cat /tmp/mnt/merged/overwrite.txt)\" == \"Overwritten\" ]]",
# Test append
"[[ \"\$(cat /tmp/mnt/merged/append.txt)\" == \"Append\" ]]",
"echo 'ed' >> /tmp/mnt/merged/append.txt",
#"cat /tmp/mnt/merged/append.txt && exit 1",
"[[ \"\$(cat /tmp/mnt/merged/append.txt)\" == \"Append\ned\" ]]",
"umount /tmp/mnt/merged",
"umount /tmp/mnt",
"udevadm settle"
);
'';
})

View File

@ -108,6 +108,8 @@ in {
# cluster in the database before slurmctld is restarted
subtest "add_account", sub {
$control->succeed("sacctmgr -i add cluster default");
# check for cluster entry
$control->succeed("sacctmgr list cluster | awk '{ print \$1 }' | grep default");
};
subtest "can_start_slurmctld", sub {
@ -133,6 +135,7 @@ in {
subtest "check_slurm_dbd", sub {
# find the srun job from above in the database
sleep 2;
$submit->succeed("sacct | grep hostname");
};
'';

View File

@ -11,6 +11,8 @@ stdenv.mkDerivation rec {
sha256 = "1v1qwv4x5agjba82s1vknmdgq67y26wzdwbmwwqavv7f7y3y860h";
};
enableParallelBuilding = false;
qmakeFlags = ["USE_UPNP=-"];
# I think that openssl and zlib are required, but come through other

View File

@ -4,14 +4,14 @@
, pname ? "ADLplug" }:
stdenv.mkDerivation rec {
name = "${pname}-${version}";
version = "v1.0.0-beta.5";
inherit pname;
version = "1.0.0";
src = fetchFromGitHub {
owner = "jpcima";
repo = "ADLplug";
rev = version;
sha256 = "1f8v61nv33xwpzmmk38dkr3fvm2j2xf0a74agxnl9p1yvy3a9w3s";
rev = "v${version}";
sha256 = "1rpd7v1rx74cv7nhs70ah0bly314rjzj70cp30mvhns2hzk66s3c";
fetchSubmodules = true;
};

View File

@ -3,11 +3,11 @@
stdenv.mkDerivation rec {
name = packageName + "-" + version ;
packageName = "aj-snapshot" ;
version = "0.9.8";
version = "0.9.9";
src = fetchurl {
url = "mirror://sourceforge/${packageName}/${name}.tar.bz2";
sha256 = "0wilky1g2mb88v2z0520s7sw1dsn10iwanc8id5p6z1xsnhg7b6p";
sha256 = "0z8wd5yvxdmw1h1rj6km9h01xd4xmp4d86gczlix7hsc7zrf0wil";
};
doCheck = false;

View File

@ -1,19 +1,20 @@
{
stdenv, fetchurl, docbook_xsl,
stdenv, fetchFromGitHub, docbook_xsl,
docbook_xml_dtd_45, python, pygments,
libxslt
}:
stdenv.mkDerivation rec {
version = "6.12.0";
name = "csound-manual-${version}";
pname = "csound-manual";
version = "unstable-2019-02-22";
src = fetchurl {
url = "https://github.com/csound/manual/archive/${version}.tar.gz";
sha256 = "1v1scp468rnfbcajnp020kdj8zigimc2mbcwzxxqi8sf8paccdrp";
src = fetchFromGitHub {
owner = "csound";
repo = "manual";
rev = "3b0bdc83f9245261b4b85a57c3ed636d5d924a4f";
sha256 = "074byjhaxraapyg54dxgg7hi1d4978aa9c1rmyi50p970nsxnacn";
};
prePatch = ''
substituteInPlace manual.xml \
--replace "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" \
@ -41,4 +42,3 @@ stdenv.mkDerivation rec {
platforms = stdenv.lib.platforms.all;
};
}

View File

@ -1,25 +0,0 @@
{ stdenv, fetchgit, python2Packages }:
python2Packages.buildPythonApplication rec {
pname = "lastwatch";
version = "0.4.1";
src = fetchgit {
url = "git://github.com/aszlig/LastWatch.git";
rev = "refs/tags/v${version}";
sha256 = "0nlng3595j5jvnikk8i5hb915zak5zsmfn2306cc4gfcns9xzjwp";
};
propagatedBuildInputs = with python2Packages; [
pyinotify
pylast
mutagen
];
meta = {
homepage = https://github.com/aszlig/LastWatch;
description = "An inotify-based last.fm audio scrobbler";
license = stdenv.lib.licenses.gpl2;
platforms = stdenv.lib.platforms.linux;
};
}

View File

@ -5,7 +5,7 @@
python3.pkgs.buildPythonApplication rec {
pname = "lollypop";
version = "0.9.923";
version = "1.0";
format = "other";
doCheck = false;
@ -14,7 +14,7 @@ python3.pkgs.buildPythonApplication rec {
url = "https://gitlab.gnome.org/World/lollypop";
rev = "refs/tags/${version}";
fetchSubmodules = true;
sha256 = "0jgz36lrhigcsr9vs5sp4ngv8rir3zqicygymjv7d61d6pclkx1z";
sha256 = "00hjxpgmhzhyjjdpm92cbbxwnc17xdhhk8svk5ih3n18yk5655fs";
};
nativeBuildInputs = [

View File

@ -2,11 +2,11 @@
pythonPackages.buildPythonApplication rec {
pname = "Mopidy-Iris";
version = "3.32.5";
version = "3.33.0";
src = pythonPackages.fetchPypi {
inherit pname version;
sha256 = "0vs8x26zcakk6c31sc774h2lcdw3syp236vyymmx1jnfsh1jaqpn";
sha256 = "0g00rjkmsnza4gjjdm0cwrpw3gqvmjj58157dvrh7f8k7j0gdvdm";
};
propagatedBuildInputs = [
@ -14,14 +14,10 @@ pythonPackages.buildPythonApplication rec {
mopidy-local-images
] ++ (with pythonPackages; [
configobj
pylast
spotipy
raven
requests
tornado_4
]);
postPatch = "sed -i /tornado/d setup.py";
# no tests implemented
doCheck = false;

View File

@ -0,0 +1,33 @@
{ stdenv, fetchFromGitHub, cmake, eigen, libav_all }:
stdenv.mkDerivation rec {
pname = "musly";
version = "unstable-2017-04-26";
src = fetchFromGitHub {
owner = "dominikschnitzer";
repo = "musly";
rev = "f911eacbbe0b39ebe87cb37d0caef09632fa40d6";
sha256 = "1q42wvdwy2pac7bhfraqqj2czw7w2m33ms3ifjl8phm7d87i8825";
};
nativeBuildInputs = [ cmake ];
buildInputs = [ eigen (libav_all.override { vaapiSupport = stdenv.isLinux; }).libav_11 ];
fixupPhase = if stdenv.isDarwin then ''
install_name_tool -change libmusly.dylib $out/lib/libmusly.dylib $out/bin/musly
install_name_tool -change libmusly_resample.dylib $out/lib/libmusly_resample.dylib $out/bin/musly
install_name_tool -change libmusly_resample.dylib $out/lib/libmusly_resample.dylib $out/lib/libmusly.dylib
'' else "";
meta = with stdenv.lib; {
homepage = https://www.musly.org;
description = "A fast and high-quality audio music similarity library written in C/C++";
longDescription = ''
Musly analyzes the audio signal of music pieces to estimate their similarity.
No meta-data about the music piece is included in the similarity estimation.
To use Musly in your application, have a look at the library documentation
or try the command line application included in the package and start generating
some automatic music playlists right away.
'';
license = licenses.mpl20;
maintainers = with maintainers; [ ggpeti ];
platforms = with platforms; darwin ++ linux;
};
}

View File

@ -3,13 +3,13 @@
stdenv.mkDerivation rec {
name = "ncpamixer-${version}";
version = "1.3";
version = "1.3.3";
src = fetchFromGitHub {
owner = "fulhax";
repo = "ncpamixer";
rev = version;
sha256 = "02v8vsx26w3wrzkg61457diaxv1hyzsh103p53j80la9vglamdsh";
sha256 = "19pxfvfhhrbfk1wz5awx60y51jccrgrcvlq7lb622sw2z0wzw4ac";
};
buildInputs = [ ncurses libpulseaudio ];

View File

@ -0,0 +1,69 @@
{ stdenv, fetchFromGitHub, pkgconfig, meson, gtk3, at-spi2-core, dbus, gst_all_1, sphinxbase, pocketsphinx, ninja, gettext, appstream-glib, python3, glib, gobject-introspection, gsettings-desktop-schemas, itstool, wrapGAppsHook, makeWrapper, hicolor-icon-theme }:
stdenv.mkDerivation rec {
pname = "parlatype";
version = "1.6-beta";
src = fetchFromGitHub {
owner = "gkarsay";
repo = pname;
rev = "v${version}";
sha256 = "0bi0djic9kf178s7vl3y83v4rzhvynlvyf64n94fy80n2f100dj9";
};
nativeBuildInputs = [
pkgconfig
meson
ninja
gettext
appstream-glib
python3
gobject-introspection
itstool
wrapGAppsHook
];
buildInputs = [
gtk3
at-spi2-core
dbus
gst_all_1.gstreamer
gst_all_1.gst-plugins-base
gst_all_1.gst-plugins-good
gst_all_1.gst-plugins-bad
gst_all_1.gst-plugins-ugly
gst_all_1.gst-libav
sphinxbase
pocketsphinx
glib
gsettings-desktop-schemas
hicolor-icon-theme
];
mesonFlags = [ "-Dlibreoffice=false" ];
postPatch = ''
chmod +x data/meson_post_install.py
patchShebangs data/meson_post_install.py
'';
doCheck = false;
enableParallelBuilding = true;
buildPhase = ''
export GST_PLUGIN_SYSTEM_PATH_1_0="$out/lib/gstreamer-1.0/:$GST_PLUGIN_SYSTEM_PATH_1_0"
'';
meta = with stdenv.lib; {
description = "GNOME audio player for transcription";
longDescription = ''
Parlatype is a minimal audio player for manual speech transcription, written for the GNOME desktop environment.
It plays audio sources to transcribe them in your favourite text application.
It is intended to be useful for journalists, students, scientists and whoever needs to transcribe audio files.
'';
homepage = https://gkarsay.github.io/parlatype/;
license = licenses.gpl3Plus;
maintainers = [ maintainers.melchips ];
platforms = platforms.linux;
};
}

View File

@ -0,0 +1,50 @@
{ stdenv, fetchFromGitHub, audiofile, libvorbis, fltk, fftw, fftwFloat,
minixml, pkgconfig, libmad, libjack2, portaudio, libsamplerate }:
stdenv.mkDerivation {
pname = "paulstretch";
version = "2.2-2";
src = fetchFromGitHub {
owner = "paulnasca";
repo = "paulstretch_cpp";
rev = "7f5c3993abe420661ea0b808304b0e2b4b0048c5";
sha256 = "06dy03dbz1yznhsn0xvsnkpc5drzwrgxbxdx0hfpsjn2xcg0jrnc";
};
nativeBuildInputs = [ pkgconfig ];
buildInputs = [
audiofile
libvorbis
fltk
fftw
fftwFloat
minixml
libmad
libjack2
portaudio
libsamplerate
];
buildPhase = ''
bash compile_linux_fftw_jack.sh
'';
installPhase = ''
install -Dm555 ./paulstretch $out/bin/paulstretch
'';
meta = with stdenv.lib; {
description = "Produces high quality extreme sound stretching";
longDescription = ''
This is a program for stretching the audio. It is suitable only for
extreme sound stretching of the audio (like 50x) and for applying
special effects by "spectral smoothing" the sounds.
It can transform any sound/music to a texture.
'';
homepage = http://hypermammut.sourceforge.net/paulstretch/;
platforms = platforms.linux;
license = licenses.gpl2;
};
}

View File

@ -23,13 +23,13 @@ let
in stdenv.mkDerivation rec {
name = "pulseaudio-modules-bt-${version}";
version = "unstable-2019-01-05";
version = "unstable-2019-03-15";
src = fetchFromGitHub {
owner = "EHfive";
repo = "pulseaudio-modules-bt";
rev = "4b0cde160c96f40d860fef267a6ded49ae045be0";
sha256 = "15jw5nf2dhqqdwzyh2x5kdkrq7f3qn140gw6gmspcai9kplhk24w";
rev = "0b397c26eb4fd5dc611bd3e2baa79776de646856";
sha256 = "09q0xh9iz0crik6xpln9lijirf62aljxa1jrds1i1zgflyfidd0z";
fetchSubmodules = true;
};

View File

@ -1,20 +1,11 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d869979..185144d 100644
index 8d20dbf..63fe7ba 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -143,13 +143,13 @@ INSTALL(TARGETS
@@ -213,5 +213,4 @@ INSTALL(TARGETS
module-bluez5-device
module-bluetooth-discover
module-bluetooth-policy
- LIBRARY DESTINATION ${PulseAudio_modlibexecdir})
-
+ LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/pulse-${PulseAudio_VERSION}/modules/)
if(NOT ${HAVE_SYSTEM_LDAC})
INSTALL(TARGETS
ldacBT_enc
ldacBT_abr
- LIBRARY DESTINATION ${PulseAudio_modlibexecdir})
+ LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/pulse-${PulseAudio_VERSION}/modules/)
endif()

View File

@ -5,11 +5,11 @@
stdenv.mkDerivation rec {
pname = "qtractor";
version = "0.9.4";
version = "0.9.5";
src = fetchurl {
url = "mirror://sourceforge/${pname}/${pname}-${version}.tar.gz";
sha256 = "05xrzr48b19mghbpbzjqw5fy6pl9140bm5m929lrsi4rq5hp3xgg";
sha256 = "1zsikhqj5xzhw2x3b6pqlmcwz3hxx07lbbif8v3m3j41snzjic22";
};
nativeBuildInputs = [

View File

@ -40,13 +40,13 @@ let
in
stdenv.mkDerivation rec {
name = "radiotray-ng-${version}";
version = "0.2.4";
version = "0.2.5";
src = fetchFromGitHub {
owner = "ebruck";
repo = "radiotray-ng";
rev = "v${version}";
sha256 = "1jk80fv8ivwdx7waivls0mczn0rx4wv0fy7a28k77m88i5gkfgyw";
sha256 = "1crvpn1mgrv7bd2k683mpgs59785mkrjvmp1f14iyq4qrr0f9zzi";
};
nativeBuildInputs = [ cmake pkgconfig wrapGAppsHook makeWrapper ];
@ -61,6 +61,8 @@ stdenv.mkDerivation rec {
] ++ gstInputs
++ pythonInputs;
patches = [ ./no-dl-googletest.patch ];
postPatch = ''
for x in debian/CMakeLists.txt include/radiotray-ng/common.hpp data/*.desktop; do
substituteInPlace $x --replace /usr $out
@ -80,8 +82,7 @@ stdenv.mkDerivation rec {
enableParallelBuilding = true;
checkInputs = [ gtest ];
# doCheck = stdenv.hostPlatform == stdenv.buildPlatform;
doCheck = false; # fails to pick up supplied gtest, tries to download it instead
doCheck = !stdenv.isAarch64; # single failure that I can't explain
preFixup = ''
gappsWrapperArgs+=(--suffix PATH : ${stdenv.lib.makeBinPath [ dbus ]})

View File

@ -0,0 +1,55 @@
From 2ce91cd2244e61d54e0c0a3b26851912240b0667 Mon Sep 17 00:00:00 2001
From: Will Dietz <w@wdtz.org>
Date: Sat, 16 Mar 2019 11:40:00 -0500
Subject: [PATCH] don't download googletest
---
CMakeLists.txt | 18 ------------------
tests/CMakeLists.txt | 1 -
2 files changed, 19 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fc1b9de..301c266 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -70,25 +70,7 @@ endif()
# build tests? Then we need googletest...
if (BUILD_TESTS)
- include(ExternalProject)
-
- ExternalProject_Add(googletest
- PREFIX "${CMAKE_CURRENT_BINARY_DIR}/googletest"
- URL https://github.com/google/googletest/archive/release-1.8.1.tar.gz
- URL_HASH SHA256=9bf1fe5182a604b4135edc1a425ae356c9ad15e9b23f9f12a02e80184c3a249c
- TIMEOUT 30
- DOWNLOAD_NO_PROGRESS true
- INSTALL_COMMAND "")
-
- ExternalProject_Get_Property(googletest SOURCE_DIR)
- include_directories(${SOURCE_DIR}/googlemock/include ${SOURCE_DIR}/googletest/include)
- ExternalProject_Get_Property(googletest BINARY_DIR)
- link_directories(${BINARY_DIR}/googlemock ${BINARY_DIR}/googlemock/gtest)
set(GMOCK_BOTH_LIBRARIES gmock_main gmock gtest)
- set_property(DIRECTORY PROPERTY CLEAN_NO_CUSTOM "${CMAKE_CURRENT_BINARY_DIR}/googletest")
- unset(SOURCE_DIR)
- unset(BINARY_DIR)
-
enable_testing()
add_subdirectory(tests)
add_subdirectory(tests/runners/)
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 859c048..58ab5c2 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -2,7 +2,6 @@ include(GoogleTest)
function(add_gmock_test target)
add_executable(${target} ${ARGN})
- add_dependencies(${target} googletest)
target_link_libraries(${target} config playlist bookmarks event_bus ${GMOCK_BOTH_LIBRARIES} ${XDG_BASEDIR_LIBRARIES} ${Boost_LIBRARIES} ${CURL_LIBRARIES} ${JSONCPP_LIBRARIES} pthread)
target_include_directories(${target} PRIVATE ${JSONCPP_INCLUDE_DIRS})
gtest_discover_tests(${target})
--
2.21.GIT

View File

@ -1,6 +1,6 @@
{ stdenv, fetchurl, autoPatchelfHook, makeWrapper
, alsaLib, xorg
, gnome3, pango, gdk_pixbuf, cairo, glib, freetype
, gnome3, gtk3, pango, gdk_pixbuf, cairo, glib, freetype
, libpulseaudio, xdg_utils
}:
@ -31,7 +31,7 @@ stdenv.mkDerivation rec {
];
runtimeDependencies = [
gnome3.gtk
gtk3
];
dontBuild = true;

View File

@ -4,6 +4,7 @@
, perlPackages
, gtk3
, intltool
, libpeas
, libsoup
, gnome3
, totem-pl-parser
@ -48,7 +49,7 @@ in stdenv.mkDerivation rec {
json-glib
gtk3
gnome3.libpeas
libpeas
totem-pl-parser
gnome3.adwaita-icon-theme

View File

@ -1,12 +1,15 @@
{ fetchurl, stdenv, squashfsTools, xorg, alsaLib, makeWrapper, openssl, freetype
, glib, pango, cairo, atk, gdk_pixbuf, gtk2, cups, nspr, nss, libpng
, libgcrypt, systemd, fontconfig, dbus, expat, ffmpeg, curl, zlib, gnome3
, libgcrypt, systemd, fontconfig, dbus, expat, ffmpeg_3, curl, zlib, gnome3
, at-spi2-atk
}:
let
# TO UPDATE: just execute the ./update.sh script (won't do anything if there is no update)
# "rev" decides what is actually being downloaded
# If an update breaks things, one of those might have valuable info:
# https://aur.archlinux.org/packages/spotify/
# https://community.spotify.com/t5/Desktop-Linux
version = "1.0.96.181.gf6bc1b6b-12";
# To get the latest stable revision:
# curl -H 'X-Ubuntu-Series: 16' 'https://api.snapcraft.io/api/v1/snaps/details/spotify?channel=stable' | jq '.download_url,.version,.last_updated'
@ -26,7 +29,7 @@ let
curl
dbus
expat
ffmpeg
ffmpeg_3
fontconfig
freetype
gdk_pixbuf
@ -118,8 +121,8 @@ stdenv.mkDerivation {
ln -s ${nspr.out}/lib/libnspr4.so $libdir/libnspr4.so
ln -s ${nspr.out}/lib/libplc4.so $libdir/libplc4.so
ln -s ${ffmpeg.out}/lib/libavcodec.so.56 $libdir/libavcodec-ffmpeg.so.56
ln -s ${ffmpeg.out}/lib/libavformat.so.56 $libdir/libavformat-ffmpeg.so.56
ln -s ${ffmpeg_3.out}/lib/libavcodec.so* $libdir
ln -s ${ffmpeg_3.out}/lib/libavformat.so* $libdir
rpath="$out/share/spotify:$libdir"
@ -154,7 +157,7 @@ stdenv.mkDerivation {
homepage = https://www.spotify.com/;
description = "Play music from the Spotify music service";
license = licenses.unfree;
maintainers = with maintainers; [ eelco ftrvxmtrx sheenobu mudri ];
maintainers = with maintainers; [ eelco ftrvxmtrx sheenobu mudri timokau ];
platforms = [ "x86_64-linux" ];
};
}

View File

@ -3,12 +3,12 @@
, libGLU, lv2, gtk2, cairo, pango, fftwFloat, zita-convolver }:
stdenv.mkDerivation rec {
version = "20190105";
version = "20190206";
name = "x42-plugins-${version}";
src = fetchurl {
url = "https://gareus.org/misc/x42-plugins/${name}.tar.xz";
sha256 = "1bb7k3ly4qa05zgkbpm7d3x9cjch1fklgh279m6hp0ac3hhncdxp";
sha256 = "0rsp8lm8zr20l410whr98d61401rkphgpl8llbn5p2wsiw0q9aqd";
};
nativeBuildInputs = [ pkgconfig ];

View File

@ -1,6 +1,6 @@
{ stdenv, fetchFromGitLab, substituteAll, meson, ninja, pkgconfig, vala_0_40, gettext
, gnome3, libnotify, itstool, glib, gtk3, libxml2
, coreutils, libsecret, pcre, libxkbcommon, wrapGAppsHook
, coreutils, libpeas, libsecret, pcre, libxkbcommon, wrapGAppsHook
, libpthreadstubs, libXdmcp, epoxy, at-spi2-core, dbus, libgpgerror
, appstream-glib, desktop-file-utils, duplicity
}:
@ -35,7 +35,7 @@ stdenv.mkDerivation rec {
];
buildInputs = [
libnotify gnome3.libpeas glib gtk3 libsecret
libnotify libpeas glib gtk3 libsecret
pcre libxkbcommon libpthreadstubs libXdmcp epoxy gnome3.nautilus
at-spi2-core dbus gnome3.gnome-online-accounts libgpgerror
];

View File

@ -11,8 +11,6 @@ stdenv.mkDerivation rec {
pname = "lightdm";
version = "1.28.0";
name = "${pname}-${version}";
outputs = [ "out" "dev" ];
src = fetchFromGitHub {
@ -40,9 +38,9 @@ stdenv.mkDerivation rec {
accountsservice
audit
glib
libXdmcp
libgcrypt
libxcb
libXdmcp
libxklavier
pam
polkit
@ -50,7 +48,6 @@ stdenv.mkDerivation rec {
++ optional withQt5 qtbase;
patches = [
./run-dir.patch
# Adds option to disable writing dmrc files
(fetchpatch {
url = "https://src.fedoraproject.org/rpms/lightdm/raw/4cf0d2bed8d1c68970b0322ccd5dbbbb7a0b12bc/f/lightdm-1.25.1-disable_dmrc.patch";

View File

@ -1,13 +0,0 @@
diff --git a/data/lightdm.conf b/data/lightdm.conf
index 16b80f7..b3af435 100644
--- a/data/lightdm.conf
+++ b/data/lightdm.conf
@@ -28,7 +28,7 @@
#guest-account-script=guest-account
#logind-check-graphical=false
#log-directory=/var/log/lightdm
-#run-directory=/var/run/lightdm
+run-directory=/run/lightdm
#cache-directory=/var/cache/lightdm
#sessions-directory=/usr/share/lightdm/sessions:/usr/share/xsessions:/usr/share/wayland-sessions
#remote-sessions-directory=/usr/share/lightdm/remote-sessions

Some files were not shown because too many files have changed in this diff.