Merge remote-tracking branch 'upstream/master' into xmr-stak

This commit is contained in:
Bernard Fortz 2018-10-19 10:48:09 +02:00
commit e576406357
960 changed files with 26827 additions and 12212 deletions

View File

@ -1,8 +0,0 @@
;;; Directory Local Variables
;;; For more information see (info "(emacs) Directory Variables")
((nil
(bug-reference-bug-regexp . "\\(\\(?:[Ii]ssue \\|[Ff]ixe[ds] \\|[Rr]esolve[ds]? \\|[Cc]lose[ds]? \\|[Pp]\\(?:ull [Rr]equest\\|[Rr]\\) \\|(\\)#\\([0-9]+\\))?\\)")
(bug-reference-url-format . "https://github.com/NixOS/nixpkgs/issues/%s"))
(nix-mode
(tab-width . 2)))

View File

@ -20,6 +20,8 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
(Motivation for change. Additional information.)
```
For consistency, there should not be a period at the end of the commit message's summary line (the first line of the commit message).
Examples:
* nginx: init at 2.0.1

View File

@ -18,12 +18,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
======================================================================
Note: the license above does not apply to the packages built by the
Nix Packages collection, merely to the package descriptions (i.e., Nix
expressions, build scripts, etc.). It also might not apply to patches
included in Nixpkgs, which may be derivative works of the packages to
which they apply. The aforementioned artifacts are all covered by the
licenses of the respective packages.

View File

@ -39,3 +39,9 @@ Communication:
* [Discourse Forum](https://discourse.nixos.org/)
* [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
Note: MIT license does not apply to the packages built by Nixpkgs, merely to
the package descriptions (Nix expressions, build scripts, and so on). It also
might not apply to patches included in Nixpkgs, which may be derivative works
of the packages to which they apply. The aforementioned artifacts are all
covered by the licenses of the respective packages.

View File

@ -18,7 +18,7 @@ if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.
For more information, please see the NixOS release notes at
https://nixos.org/nixos/manual or locally at
${toString ./doc/manual/release-notes}.
${toString ./nixos/doc/manual/release-notes}.
If you need further help, see https://nixos.org/nixos/support.html
''

View File

@ -966,5 +966,766 @@ lib.attrsets.mapAttrsToList (name: value: "${name}=${value}")
itself to attribute sets. Also, the first argument of the argument function
is a <emphasis>list</emphasis> of the names of the containing attributes.
</para>
<variablelist>
<varlistentry>
<term>
<varname>f</varname>
</term>
<listitem>
<para>
<literal>[ String ] -> Any -> Any</literal>
</para>
<para>
Given a list of attribute names and a value, return a new value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name_path</varname>
</term>
<listitem>
<para>
The list of attribute names leading to this value.
</para>
<para>
For example, the <varname>name_path</varname> for the
<literal>example</literal> string in the attribute set <literal>{ foo
= { bar = "example"; }; }</literal> is <literal>[ "foo" "bar"
]</literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>value</varname>
</term>
<listitem>
<para>
The attribute's value.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>set</varname>
</term>
<listitem>
<para>
The attribute set to recursively map over.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.mapAttrsRecursive-example">
<title>A contrived example of using <function>lib.attrsets.mapAttrsRecursive</function></title>
<programlisting><![CDATA[
mapAttrsRecursive
(path: value: concatStringsSep "-" (path ++ [value]))
{
n = {
a = "A";
m = {
b = "B";
c = "C";
};
};
d = "D";
}
=> {
n = {
a = "n-a-A";
m = {
b = "n-m-b-B";
c = "n-m-c-C";
};
};
d = "d-D";
}
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.mapAttrsRecursiveCond">
<title><function>lib.attrsets.mapAttrsRecursiveCond</function></title>
<subtitle><literal>mapAttrsRecursiveCond :: (AttrSet -> Bool) -> ([ String ] -> Any -> Any) -> AttrSet -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.mapAttrsRecursiveCond" />
<para>
Like <function>mapAttrsRecursive</function>, but it takes an additional
predicate function that tells it whether to recurse into an attribute set.
If the predicate returns false, <function>mapAttrsRecursiveCond</function>
does not recurse, but does apply the map function. If it returns true, it
does recurse, and does not apply the map function.
</para>
<variablelist>
<varlistentry>
<term>
<varname>cond</varname>
</term>
<listitem>
<para>
<literal>(AttrSet -> Bool)</literal>
</para>
<para>
Determine whether <function>mapAttrsRecursiveCond</function> should recurse
deeper into the attribute set.
</para>
<variablelist>
<varlistentry>
<term>
<varname>attributeset</varname>
</term>
<listitem>
<para>
An attribute set.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>f</varname>
</term>
<listitem>
<para>
<literal>[ String ] -> Any -> Any</literal>
</para>
<para>
Given a list of attribute names and a value, return a new value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name_path</varname>
</term>
<listitem>
<para>
The list of attribute names leading to this value.
</para>
<para>
For example, the <varname>name_path</varname> for the
<literal>example</literal> string in the attribute set <literal>{ foo
= { bar = "example"; }; }</literal> is <literal>[ "foo" "bar"
]</literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>value</varname>
</term>
<listitem>
<para>
The attribute's value.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>set</varname>
</term>
<listitem>
<para>
The attribute set to recursively map over.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.mapAttrsRecursiveCond-example">
<title>Only convert attribute values to JSON if the containing attribute set is marked for recursion</title>
<programlisting><![CDATA[
lib.attrsets.mapAttrsRecursiveCond
({ recurse ? false, ... }: recurse)
(name: value: builtins.toJSON value)
{
dorecur = {
recurse = true;
hello = "there";
};
dontrecur = {
converted-to = "json";
};
}
=> {
dorecur = {
hello = "\"there\"";
recurse = "true";
};
dontrecur = "{\"converted-to\":\"json\"}";
}
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.genAttrs">
<title><function>lib.attrsets.genAttrs</function></title>
<subtitle><literal>genAttrs :: [ String ] -> (String -> Any) -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.genAttrs" />
<para>
Generate an attribute set by mapping a function over a list of attribute
names.
</para>
<variablelist>
<varlistentry>
<term>
<varname>names</varname>
</term>
<listitem>
<para>
Names of values in the resulting attribute set.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>f</varname>
</term>
<listitem>
<para>
<literal>String -> Any</literal>
</para>
<para>
Takes the name of the attribute and returns the attribute's value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the attribute to generate a value for.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.genAttrs-example">
<title>Generate an attrset based on names only</title>
<programlisting><![CDATA[
lib.attrsets.genAttrs [ "foo" "bar" ] (name: "x_${name}")
=> { foo = "x_foo"; bar = "x_bar"; }
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.isDerivation">
<title><function>lib.attrsets.isDerivation</function></title>
<subtitle><literal>isDerivation :: Any -> Bool</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.isDerivation" />
<para>
Check whether the argument is a derivation. Any set with <code>{ type =
"derivation"; }</code> counts as a derivation.
</para>
<variablelist>
<varlistentry>
<term>
<varname>value</varname>
</term>
<listitem>
<para>
The value which is possibly a derivation.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.isDerivation-example-true">
<title>A package is a derivation</title>
<programlisting><![CDATA[
lib.attrsets.isDerivation (import <nixpkgs> {}).ruby
=> true
]]></programlisting>
</example>
<example xml:id="function-library-lib.attrsets.isDerivation-example-false">
<title>Anything else is not a derivation</title>
<programlisting><![CDATA[
lib.attrsets.isDerivation "foobar"
=> false
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.toDerivation">
<title><function>lib.attrsets.toDerivation</function></title>
<subtitle><literal>toDerivation :: Path -> Derivation</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.toDerivation" />
<para>
Converts a store path to a fake derivation.
</para>
<variablelist>
<varlistentry>
<term>
<varname>path</varname>
</term>
<listitem>
<para>
A store path to convert to a derivation.
</para>
</listitem>
</varlistentry>
</variablelist>
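<para>
As a sketch (the store path shown is hypothetical), the result is a set that
<function>lib.attrsets.isDerivation</function> accepts:
</para>
<example xml:id="function-library-lib.attrsets.toDerivation-example">
<title>Treating a store path as a derivation (illustrative)</title>
<programlisting><![CDATA[
lib.attrsets.isDerivation
  (lib.attrsets.toDerivation /nix/store/<hash>-hello-2.10)
=> true
]]></programlisting>
</example>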
</section>
<section xml:id="function-library-lib.attrsets.optionalAttrs">
<title><function>lib.attrsets.optionalAttrs</function></title>
<subtitle><literal>optionalAttrs :: Bool -> AttrSet -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.optionalAttrs" />
<para>
Conditionally return an attribute set or an empty attribute set.
</para>
<variablelist>
<varlistentry>
<term>
<varname>cond</varname>
</term>
<listitem>
<para>
Condition under which the <varname>as</varname> attribute set is
returned.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>as</varname>
</term>
<listitem>
<para>
The attribute set to return if <varname>cond</varname> is true.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.optionalAttrs-example-true">
<title>Return the provided attribute set when <varname>cond</varname> is true</title>
<programlisting><![CDATA[
lib.attrsets.optionalAttrs true { my = "set"; }
=> { my = "set"; }
]]></programlisting>
</example>
<example xml:id="function-library-lib.attrsets.optionalAttrs-example-false">
<title>Return an empty attribute set when <varname>cond</varname> is false</title>
<programlisting><![CDATA[
lib.attrsets.optionalAttrs false { my = "set"; }
=> { }
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.zipAttrsWithNames">
<title><function>lib.attrsets.zipAttrsWithNames</function></title>
<subtitle><literal>zipAttrsWithNames :: [ String ] -> (String -> [ Any ] -> Any) -> [ AttrSet ] -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrsWithNames" />
<para>
Merge sets of attributes and use the function <varname>f</varname> to merge
attribute values where the attribute name is in <varname>names</varname>.
</para>
<variablelist>
<varlistentry>
<term>
<varname>names</varname>
</term>
<listitem>
<para>
A list of attribute names to zip.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>f</varname>
</term>
<listitem>
<para>
<literal>String -> [ Any ] -> Any</literal>
</para>
<para>
Accepts an attribute name, all the values, and returns a combined value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the attribute each value came from.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>vs</varname>
</term>
<listitem>
<para>
A list of values collected from the list of attribute sets.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>sets</varname>
</term>
<listitem>
<para>
A list of attribute sets to zip together.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.zipAttrsWithNames-example">
<title>Summing a list of attribute sets of numbers</title>
<programlisting><![CDATA[
lib.attrsets.zipAttrsWithNames
[ "a" "b" ]
(name: vals: "${name} ${toString (builtins.foldl' (a: b: a + b) 0 vals)}")
[
{ a = 1; b = 1; c = 1; }
{ a = 10; }
{ b = 100; }
{ c = 1000; }
]
=> { a = "a 11"; b = "b 101"; }
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.zipAttrsWith">
<title><function>lib.attrsets.zipAttrsWith</function></title>
<subtitle><literal>zipAttrsWith :: (String -> [ Any ] -> Any) -> [ AttrSet ] -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrsWith" />
<para>
Merge sets of attributes and use the function <varname>f</varname> to merge
attribute values. Similar to
<xref
linkend="function-library-lib.attrsets.zipAttrsWithNames" /> where
all key names are passed for <varname>names</varname>.
</para>
<variablelist>
<varlistentry>
<term>
<varname>f</varname>
</term>
<listitem>
<para>
<literal>String -> [ Any ] -> Any</literal>
</para>
<para>
Accepts an attribute name, all the values, and returns a combined value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the attribute each value came from.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>vs</varname>
</term>
<listitem>
<para>
A list of values collected from the list of attribute sets.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>sets</varname>
</term>
<listitem>
<para>
A list of attribute sets to zip together.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.zipAttrsWith-example">
<title>Summing a list of attribute sets of numbers</title>
<programlisting><![CDATA[
lib.attrsets.zipAttrsWith
(name: vals: "${name} ${toString (builtins.foldl' (a: b: a + b) 0 vals)}")
[
{ a = 1; b = 1; c = 1; }
{ a = 10; }
{ b = 100; }
{ c = 1000; }
]
=> { a = "a 11"; b = "b 101"; c = "c 1001"; }
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.zipAttrs">
<title><function>lib.attrsets.zipAttrs</function></title>
<subtitle><literal>zipAttrs :: [ AttrSet ] -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrs" />
<para>
Merge sets of attributes and combine each attribute value into a list.
Similar to <xref linkend="function-library-lib.attrsets.zipAttrsWith" />
where the merge function returns a list of all values.
</para>
<variablelist>
<varlistentry>
<term>
<varname>sets</varname>
</term>
<listitem>
<para>
A list of attribute sets to zip together.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.zipAttrs-example">
<title>Combining a list of attribute sets</title>
<programlisting><![CDATA[
lib.attrsets.zipAttrs
[
{ a = 1; b = 1; c = 1; }
{ a = 10; }
{ b = 100; }
{ c = 1000; }
]
=> { a = [ 1 10 ]; b = [ 1 100 ]; c = [ 1 1000 ]; }
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.recursiveUpdateUntil">
<title><function>lib.attrsets.recursiveUpdateUntil</function></title>
<subtitle><literal>recursiveUpdateUntil :: ( [ String ] -> AttrSet -> AttrSet -> Bool ) -> AttrSet -> AttrSet -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.recursiveUpdateUntil" />
<para>
Does the same as the update operator <literal>//</literal> except that
attributes are merged until the given predicate is verified. The predicate
should accept 3 arguments which are the path to reach the attribute, a part
of the first attribute set and a part of the second attribute set. When the
predicate is verified, the value of the first attribute set is replaced by
the value of the second attribute set.
</para>
<variablelist>
<varlistentry>
<term>
<varname>pred</varname>
</term>
<listitem>
<para>
<literal>[ String ] -> AttrSet -> AttrSet -> Bool</literal>
</para>
<variablelist>
<varlistentry>
<term>
<varname>path</varname>
</term>
<listitem>
<para>
The path to the values in the left and right hand sides.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>l</varname>
</term>
<listitem>
<para>
The left hand side value.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>r</varname>
</term>
<listitem>
<para>
The right hand side value.
</para>
</listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>lhs</varname>
</term>
<listitem>
<para>
The left hand attribute set of the merge.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>rhs</varname>
</term>
<listitem>
<para>
The right hand attribute set of the merge.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.recursiveUpdateUntil-example">
<title>Recursively merging two attribute sets</title>
<programlisting><![CDATA[
lib.attrsets.recursiveUpdateUntil (path: l: r: path == ["foo"])
{
# first attribute set
foo.bar = 1;
foo.baz = 2;
bar = 3;
}
{
# second attribute set
foo.bar = 1;
foo.quz = 2;
baz = 4;
}
=> {
foo.bar = 1; # 'foo.*' from the second set
foo.quz = 2; #
bar = 3; # 'bar' from the first set
baz = 4; # 'baz' from the second set
}
]]></programlisting>
</example>
</section>
<section xml:id="function-library-lib.attrsets.recursiveUpdate">
<title><function>lib.attrsets.recursiveUpdate</function></title>
<subtitle><literal>recursiveUpdate :: AttrSet -> AttrSet -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.recursiveUpdate" />
<para>
A recursive variant of the update operator <literal>//</literal>. The
recursion stops when one of the attribute values is not an attribute set, in
which case the right hand side value takes precedence over the left hand
side value.
</para>
<variablelist>
<varlistentry>
<term>
<varname>lhs</varname>
</term>
<listitem>
<para>
The left hand attribute set of the merge.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>rhs</varname>
</term>
<listitem>
<para>
The right hand attribute set of the merge.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.recursiveUpdate-example">
<title>Recursively merging two attribute sets</title>
<programlisting><![CDATA[
recursiveUpdate
{
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/hda";
}
{
boot.loader.grub.device = "";
}
=> {
boot.loader.grub.enable = true;
boot.loader.grub.device = "";
}
]]></programlisting>
</example>
</section>
</section>

View File

@ -186,7 +186,7 @@ building Python libraries is `buildPythonPackage`. Let's see how we can build th
`toolz` package.
```nix
{ # ...
{ lib, buildPythonPackage, fetchPypi }:
toolz = buildPythonPackage rec {
pname = "toolz";
@ -199,8 +199,8 @@ building Python libraries is `buildPythonPackage`. Let's see how we can build th
doCheck = false;
meta = {
homepage = "https://github.com/pytoolz/toolz/";
meta = with lib; {
homepage = https://github.com/pytoolz/toolz;
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
@ -267,12 +267,13 @@ that we introduced with the `let` expression.
#### Handling dependencies
Our example, `toolz`, does not have any dependencies on other Python
packages or system libraries. According to the manual, `buildPythonPackage`
uses the arguments `buildInputs` and `propagatedBuildInputs` to specify dependencies. If something is
exclusively a build-time dependency, then the dependency should be included as a
`buildInput`, but if it is (also) a runtime dependency, then it should be added
to `propagatedBuildInputs`. Test dependencies are considered build-time dependencies.
Our example, `toolz`, does not have any dependencies on other Python packages or
system libraries. According to the manual, `buildPythonPackage` uses the
arguments `buildInputs` and `propagatedBuildInputs` to specify dependencies. If
something is exclusively a build-time dependency, then the dependency should be
included as a `buildInput`, but if it is (also) a runtime dependency, then it
should be added to `propagatedBuildInputs`. Test dependencies are considered
build-time dependencies and passed to `checkInputs`.
The following example shows which arguments are given to `buildPythonPackage` in
order to build [`datashape`](https://github.com/blaze/datashape).
@ -292,7 +293,7 @@ order to build [`datashape`](https://github.com/blaze/datashape).
checkInputs = with self; [ pytest ];
propagatedBuildInputs = with self; [ numpy multipledispatch dateutil ];
meta = {
meta = with lib; {
homepage = https://github.com/ContinuumIO/datashape;
description = "A data description language";
license = licenses.bsd2;
@ -326,7 +327,7 @@ when building the bindings and are therefore added as `buildInputs`.
buildInputs = with self; [ pkgs.libxml2 pkgs.libxslt ];
meta = {
meta = with lib; {
description = "Pythonic binding for the libxml2 and libxslt libraries";
homepage = https://lxml.de;
license = licenses.bsd3;
@ -370,9 +371,9 @@ and `CFLAGS`.
export CFLAGS="-I${pkgs.fftw.dev}/include -I${pkgs.fftwFloat.dev}/include -I${pkgs.fftwLongDouble.dev}/include"
'';
meta = {
meta = with lib; {
description = "A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms";
homepage = http://hgomersall.github.com/pyFFTW/;
homepage = http://hgomersall.github.com/pyFFTW;
license = with licenses; [ bsd2 bsd3 ];
maintainers = with maintainers; [ fridh ];
};
@ -478,8 +479,6 @@ don't explicitly define which `python` derivation should be used. In the above
example we use `buildPythonPackage` that is part of the set `python35Packages`,
and in this case the `python35` interpreter is automatically used.
## Reference
### Interpreters
@ -549,31 +548,31 @@ The `buildPythonPackage` function is implemented in
The following is an example:
```nix
{ lib, buildPythonPackage, fetchPypi, hypothesis, setuptools_scm, attrs, py, setuptools, six, pluggy }:
buildPythonPackage rec {
version = "3.3.1";
pname = "pytest";
preCheck = ''
# don't test bash builtins
rm testing/test_argcomplete.py
'';
version = "3.3.1";
src = fetchPypi {
inherit pname version;
sha256 = "cf8436dc59d8695346fcd3ab296de46425ecab00d64096cebe79fb51ecb2eb93";
};
postPatch = ''
# don't test bash builtins
rm testing/test_argcomplete.py
'';
checkInputs = [ hypothesis ];
buildInputs = [ setuptools_scm ];
propagatedBuildInputs = [ attrs py setuptools six pluggy ];
meta = with stdenv.lib; {
meta = with lib; {
maintainers = with maintainers; [ domenkozar lovek323 madjar lsix ];
description = "Framework for writing tests";
};
}
```
The `buildPythonPackage` mainly does four things:
@ -655,6 +654,39 @@ Another difference is that `buildPythonPackage` by default prefixes the names of
the packages with the version of the interpreter. Because this is irrelevant for
applications, the prefix is omitted.
When packaging a python application with `buildPythonApplication`, it should be
called with `callPackage` and passed `python` or `pythonPackages` (possibly
specifying an interpreter version), like this:
```nix
{ lib, python3Packages }:
python3Packages.buildPythonApplication rec {
pname = "luigi";
version = "2.7.9";
src = python3Packages.fetchPypi {
inherit pname version;
sha256 = "035w8gqql36zlan0xjrzz9j4lh9hs0qrsgnbyw07qs7lnkvbdv9x";
};
propagatedBuildInputs = with python3Packages; [ tornado_4 pythondaemon ];
meta = with lib; {
...
};
}
```
This is then added to `all-packages.nix` just as any other application would be.
```nix
luigi = callPackage ../applications/networking/cluster/luigi { };
```
Since the package is an application, a consumer doesn't need to care about
python versions or modules, which is why they don't go in `pythonPackages`.
#### `toPythonApplication` function
A distinction is made between applications and libraries, however, sometimes a

View File

@ -413,7 +413,8 @@ packageOverrides = pkgs: {
in your <filename>/etc/nixos/configuration.nix</filename>. You'll also need
<programlisting>hardware.pulseaudio.support32Bit = true;</programlisting>
if you are using PulseAudio - this will enable 32bit ALSA apps integration.
To use the Steam controller or other Steam supported controllers such as the DualShock 4 or Nintendo Switch Pro, you need to add
To use the Steam controller or other Steam supported controllers such as
the DualShock 4 or Nintendo Switch Pro, you need to add
<programlisting>hardware.steam-hardware.enable = true;</programlisting>
to your configuration.
</para>

View File

@ -196,7 +196,7 @@ rec {
newScope = scope: newScope (self // scope);
callPackage = self.newScope {};
overrideScope = g: lib.warn
"`overrideScope` (from `lib.makeScope`) is deprecated. Do `overrideScope' (self: self: { })` instead of `overrideScope (super: self: { })`. All other overrides have the parameters in that order, including other definitions of `overrideScope`. This was the only definition violating the pattern."
"`overrideScope` (from `lib.makeScope`) is deprecated. Do `overrideScope' (self: super: { })` instead of `overrideScope (super: self: { })`. All other overrides have the parameters in that order, including other definitions of `overrideScope`. This was the only definition violating the pattern."
(makeScope newScope (lib.fixedPoints.extends (lib.flip g) f));
overrideScope' = g: makeScope newScope (lib.fixedPoints.extends g f);
packages = f;

View File

@ -143,6 +143,7 @@ rec {
}@args: v: with builtins;
let isPath = v: typeOf v == "path";
in if isInt v then toString v
else if isFloat v then "~${toString v}"
else if isString v then ''"${libStr.escape [''"''] v}"''
else if true == v then "true"
else if false == v then "false"

View File

@ -387,6 +387,14 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "ISC License";
};
# Proprietary binaries; free to redistribute without modification.
issl = {
fullName = "Intel Simplified Software License";
url = https://software.intel.com/en-us/license/intel-simplified-software-license;
free = false;
};
lgpl2 = spdx {
spdxId = "LGPL-2.0";
fullName = "GNU Library General Public License v2 only";
@ -500,6 +508,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Non-Profit Open Software License 3.0";
};
ocamlpro_nc = {
fullName = "OCamlPro Non Commercial license version 1";
url = "https://alt-ergo.ocamlpro.com/http/alt-ergo-2.2.0/OCamlPro-Non-Commercial-License.pdf";
free = false;
};
ofl = spdx {
spdxId = "OFL-1.1";
fullName = "SIL Open Font License 1.1";
@ -571,6 +585,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Ruby License";
};
sendmail = spdx {
spdxId = "Sendmail";
fullName = "Sendmail License";
};
sgi-b-20 = spdx {
spdxId = "SGI-B-2.0";
fullName = "SGI Free Software License B v2.0";

View File

@ -46,6 +46,25 @@ rec {
# Misc boolean options
useAndroidPrebuilt = false;
useiOSPrebuilt = false;
# Output from uname
uname = {
# uname -s
system = {
"linux" = "Linux";
"windows" = "Windows";
"darwin" = "Darwin";
"netbsd" = "NetBSD";
"freebsd" = "FreeBSD";
"openbsd" = "OpenBSD";
}.${final.parsed.kernel.name} or null;
# uname -p
processor = final.parsed.cpu.name;
# uname -r
release = null;
};
} // mapAttrs (n: v: v final.parsed) inspect.predicates
// args;
in assert final.useAndroidPrebuilt -> final.isAndroid;

View File

@ -15,6 +15,8 @@ let
"x86_64-cygwin" "x86_64-darwin" "x86_64-freebsd" "x86_64-linux"
"x86_64-netbsd" "x86_64-openbsd" "x86_64-solaris"
"x86_64-windows" "i686-windows"
];
allParsed = map parse.mkSystemFromString all;
@ -37,12 +39,13 @@ in rec {
darwin = filterDoubles predicates.isDarwin;
freebsd = filterDoubles predicates.isFreeBSD;
# Should be better, but MinGW is unclear.
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; });
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
illumos = filterDoubles predicates.isSunOS;
linux = filterDoubles predicates.isLinux;
netbsd = filterDoubles predicates.isNetBSD;
openbsd = filterDoubles predicates.isOpenBSD;
unix = filterDoubles predicates.isUnix;
windows = filterDoubles predicates.isWindows;
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux" "powerpc64le-linux"];
}

View File

@ -28,7 +28,7 @@ rec {
};
armv7l-hf-multiplatform = rec {
config = "armv7a-unknown-linux-gnueabihf";
config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.armv7l-hf-multiplatform;
};

View File

@ -369,6 +369,7 @@ runTests {
testToPretty = {
expr = mapAttrs (const (generators.toPretty {})) rec {
int = 42;
float = 0.1337;
bool = true;
string = ''fno"rd'';
path = /. + "/foo";
@ -381,6 +382,7 @@ runTests {
};
expected = rec {
int = "42";
float = "~0.133700";
bool = "true";
string = ''"fno\"rd"'';
path = "/foo";

View File

@ -12,20 +12,21 @@ let
expected = lib.sort lib.lessThan y;
};
in with lib.systems.doubles; lib.runTests {
all = assertTrue (mseteq all (linux ++ darwin ++ cygwin ++ freebsd ++ openbsd ++ netbsd ++ illumos));
testall = mseteq all (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ windows);
arm = assertTrue (mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv7l-linux" ]);
i686 = assertTrue (mseteq i686 [ "i686-linux" "i686-freebsd" "i686-netbsd" "i686-openbsd" "i686-cygwin" ]);
mips = assertTrue (mseteq mips [ "mipsel-linux" ]);
x86_64 = assertTrue (mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" ]);
testarm = mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv7l-linux" ];
testi686 = mseteq i686 [ "i686-linux" "i686-freebsd" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" ];
testmips = mseteq mips [ "mipsel-linux" ];
testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" ];
cygwin = assertTrue (mseteq cygwin [ "i686-cygwin" "x86_64-cygwin" ]);
darwin = assertTrue (mseteq darwin [ "x86_64-darwin" ]);
freebsd = assertTrue (mseteq freebsd [ "i686-freebsd" "x86_64-freebsd" ]);
gnu = assertTrue (mseteq gnu (linux /* ++ kfreebsd ++ ... */));
illumos = assertTrue (mseteq illumos [ "x86_64-solaris" ]);
linux = assertTrue (mseteq linux [ "i686-linux" "x86_64-linux" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux" "mipsel-linux" ]);
netbsd = assertTrue (mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ]);
openbsd = assertTrue (mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ]);
unix = assertTrue (mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos));
testcygwin = mseteq cygwin [ "i686-cygwin" "x86_64-cygwin" ];
testdarwin = mseteq darwin [ "x86_64-darwin" ];
testfreebsd = mseteq freebsd [ "i686-freebsd" "x86_64-freebsd" ];
testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
testillumos = mseteq illumos [ "x86_64-solaris" ];
testlinux = mseteq linux [ "i686-linux" "x86_64-linux" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "aarch64-linux" "mipsel-linux" ];
testnetbsd = mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ];
testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];
testunix = mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ cygwin);
}

View File

@ -194,7 +194,10 @@ rec {
# separator between the values).
separatedString = sep: mkOptionType rec {
name = "separatedString";
description = "string";
description = if sep == ""
then "Concatenated string" # for types.string.
else "strings concatenated with ${builtins.toJSON sep}"
;
check = isString;
merge = loc: defs: concatStringsSep sep (getValues defs);
functor = (defaultFunctor name) // {

View File

@ -520,6 +520,11 @@
github = "bgamari";
name = "Ben Gamari";
};
bhall = {
email = "brendan.j.hall@bath.edu";
github = "brendan-hall";
name = "Brendan Hall";
};
bhipple = {
email = "bhipple@protonmail.com";
github = "bhipple";
@ -981,6 +986,11 @@
github = "deepfire";
name = "Kosyrev Serge";
};
delroth = {
email = "delroth@gmail.com";
github = "delroth";
name = "Pierre Bourdon";
};
deltaevo = {
email = "deltaduartedavid@gmail.com";
github = "DeltaEvo";
@ -1195,6 +1205,11 @@
github = "eduarrrd";
name = "Eduard Bachmakov";
};
edude03 = {
email = "michael@melenion.com";
github = "edude03";
name = "Michael Francis";
};
edwtjo = {
email = "ed@cflags.cc";
github = "edwtjo";
@ -1362,6 +1377,11 @@
github = "expipiplus1";
name = "Joe Hermaszewski";
};
f--t = {
email = "git@f-t.me";
github = "f--t";
name = "f--t";
};
f-breidenstein = {
email = "mail@felixbreidenstein.de";
github = "f-breidenstein";
@ -1872,6 +1892,11 @@
github = "jdagilliland";
name = "Jason Gilliland";
};
jdehaas = {
email = "qqlq@nullptr.club";
github = "jeroendehaas";
name = "Jeroen de Haas";
};
jefdaj = {
email = "jefdaj@gmail.com";
github = "jefdaj";
@ -2148,6 +2173,11 @@
github = "kiloreux";
name = "Kiloreux Emperex";
};
kimburgess = {
email = "kim@acaprojects.com";
github = "kimburgess";
name = "Kim Burgess";
};
kini = {
email = "keshav.kini@gmail.com";
github = "kini";
@ -3057,7 +3087,7 @@
name = "Oliver Dunkl";
};
offline = {
email = "jakahudoklin@gmail.com";
email = "jaka@x-truder.net";
github = "offlinehacker";
name = "Jaka Hudoklin";
};
@ -3085,6 +3115,11 @@
github = "olynch";
name = "Owen Lynch";
};
OPNA2608 = {
email = "christoph.neidahl@gmail.com";
github = "OPNA2608";
name = "Christoph Neidahl";
};
orbekk = {
email = "kjetil.orbekk@gmail.com";
github = "orbekk";
@ -3120,6 +3155,16 @@
github = "oyren";
name = "Moritz Scheuren";
};
pacien = {
email = "b4gx3q.nixpkgs@pacien.net";
github = "pacien";
name = "Pacien Tran-Girard";
};
paddygord = {
email = "pgpatrickgordon@gmail.com";
github = "paddygord";
name = "Patrick Gordon";
};
paholg = {
email = "paho@paholg.com";
github = "paholg";
@ -4624,7 +4669,7 @@
name = "Dmitry V.";
};
yegortimoshenko = {
email = "yegortimoshenko@gmail.com";
email = "yegortimoshenko@riseup.net";
github = "yegortimoshenko";
name = "Yegor Timoshenko";
};
@ -4673,6 +4718,11 @@
github = "maggesi";
name = "Marco Maggesi";
};
zachcoyle = {
email = "zach.coyle@gmail.com";
github = "zachcoyle";
name = "Zach Coyle";
};
zagy = {
email = "cz@flyingcircus.io";
github = "zagy";

View File

@ -52,4 +52,8 @@ $ ping -c1 10.233.4.2
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
</programlisting>
</para>
<para>
You may need to restart your system for the changes to take effect.
</para>
</section>

View File

@ -73,7 +73,8 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
</para>
<para>
To change the configuration of the container, you can edit
There are several ways to change the configuration of the container. First,
on the host, you can edit
<literal>/var/lib/container/<replaceable>name</replaceable>/etc/nixos/configuration.nix</literal>,
and run
<screen>
@ -86,7 +87,8 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
<xref linkend="opt-services.httpd.enable"/> = true;
<xref linkend="opt-services.httpd.adminAddr"/> = "foo@example.org";
<xref linkend="opt-networking.firewall.allowedTCPPorts"/> = [ 80 ];
'
'
# curl http://$(nixos-container show-ip foo)/
&lt;!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">…
</screen>
@ -95,13 +97,11 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
</para>
<para>
Note that in previous versions of NixOS (17.09 and earlier) one could also
use all nix-related commands (like <command>nixos-rebuild switch</command>)
from inside the container. However, since the release of Nix 2.0 this is not
supported anymore. Supporting Nix commands inside the container might be
possible again in future versions. See
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/40355">the github
issue</link> for tracking progress on this issue.
Alternatively, you can change the configuration from within the container
itself by running <command>nixos-rebuild switch</command> inside the
container. Note that the container by default does not have a copy of the
NixOS channel, so you should run <command>nix-channel --update</command>
first.
</para>
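<para>
For example, assuming a container named <literal>foo</literal> (the prompt
shown inside the container is only illustrative):
<screen>
# nixos-container root-login foo
[root@foo:~]# nix-channel --update
[root@foo:~]# nixos-rebuild switch
</screen>
</para>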
<para>

View File

@ -115,10 +115,17 @@
</listitem>
<listitem>
<para>
Add a <emphasis>swap</emphasis> partition. The size required will vary
according to needs; here an 8GiB one is created. The space left in front
(512MiB) will be used by the boot partition.
<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap 512MiB 8.5GiB</screen>
Add the <emphasis>root</emphasis> partition. This will fill the disk
except for the end part, where the swap will live, and the space left in
front (512MiB), which will be used by the boot partition.
<screen language="commands"># parted /dev/sda -- mkpart primary 512MiB -8GiB</screen>
</para>
</listitem>
<listitem>
<para>
Next, add a <emphasis>swap</emphasis> partition. The size required will
vary according to needs; here an 8GiB one is created.
<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
<note>
<para>
The swap partition size rules are no different than for other Linux
@ -127,20 +134,13 @@
</note>
</para>
</listitem>
<listitem>
<para>
Next, add the <emphasis>root</emphasis> partition. This will fill the
remainder ending part of the disk.
<screen language="commands"># parted /dev/sda -- mkpart primary 8.5GiB -1MiB</screen>
</para>
</listitem>
<listitem>
<para>
Finally, the <emphasis>boot</emphasis> partition. NixOS by default uses
the ESP (EFI system partition) as its <emphasis>/boot</emphasis>
partition. It uses the initially reserved 512MiB at the start of the
disk.
<screen language="commands"># parted /dev/sda -- mkpart ESP fat32 1M 512MiB
<screen language="commands"># parted /dev/sda -- mkpart ESP fat32 1MiB 512MiB
# parted /dev/sda -- set 3 boot on</screen>
</para>
</listitem>
@ -177,9 +177,16 @@
</listitem>
<listitem>
<para>
Add a <emphasis>swap</emphasis> partition. The size required will vary
according to needs; here an 8GiB one is created.
<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap 1M 8GiB</screen>
Add the <emphasis>root</emphasis> partition. This will fill the disk
except for the end part, where the swap will live.
<screen language="commands"># parted /dev/sda -- mkpart primary 1MiB -8GiB</screen>
</para>
</listitem>
<listitem>
<para>
Finally, add a <emphasis>swap</emphasis> partition. The size required
will vary according to needs; here an 8GiB one is created.
<screen language="commands"># parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
<note>
<para>
The swap partition size rules are no different than for other Linux
@ -188,13 +195,6 @@
</note>
</para>
</listitem>
<listitem>
<para>
Finally, add the <emphasis>root</emphasis> partition. This will fill the
remainder of the disk.
<screen language="commands"># parted /dev/sda -- mkpart primary 8GiB -1s</screen>
</para>
</listitem>
</orderedlist>
</para>
@ -486,17 +486,17 @@ $ nix-env -i w3m</screen>
<title>Example partition schemes for NixOS on <filename>/dev/sda</filename> (MBR)</title>
<screen language="commands">
# parted /dev/sda -- mklabel msdos
# parted /dev/sda -- mkpart primary linux-swap 1M 8GiB
# parted /dev/sda -- mkpart primary 8GiB -1s</screen>
# parted /dev/sda -- mkpart primary 1MiB -8GiB
# parted /dev/sda -- mkpart primary linux-swap -8GiB 100%</screen>
</example>
<example xml:id="ex-partition-scheme-UEFI">
<title>Example partition schemes for NixOS on <filename>/dev/sda</filename> (UEFI)</title>
<screen language="commands">
# parted /dev/sda -- mklabel gpt
# parted /dev/sda -- mkpart primary linux-swap 512MiB 8.5GiB
# parted /dev/sda -- mkpart primary 8.5GiB -1MiB
# parted /dev/sda -- mkpart ESP fat32 1M 512MiB
# parted /dev/sda -- mkpart primary 512MiB -8GiB
# parted /dev/sda -- mkpart primary linux-swap -8GiB 100%
# parted /dev/sda -- mkpart ESP fat32 1MiB 512MiB
# parted /dev/sda -- set 3 boot on</screen>
</example>

View File

@ -475,6 +475,48 @@ $ nix-instantiate -E '(import &lt;nixpkgsunstable&gt; {}).gitFull'
</para>
<itemizedlist>
<listitem>
<para>
Some licenses that were incorrectly not marked as unfree now are. This is
the case for:
<itemizedlist>
<listitem>
<para>
cc-by-nc-sa-20: Creative Commons Attribution Non Commercial Share Alike
2.0
</para>
</listitem>
<listitem>
<para>
cc-by-nc-sa-25: Creative Commons Attribution Non Commercial Share Alike
2.5
</para>
</listitem>
<listitem>
<para>
cc-by-nc-sa-30: Creative Commons Attribution Non Commercial Share Alike
3.0
</para>
</listitem>
<listitem>
<para>
cc-by-nc-sa-40: Creative Commons Attribution Non Commercial Share Alike
4.0
</para>
</listitem>
<listitem>
<para>
cc-by-nd-30: Creative Commons Attribution-No Derivative Works v3.00
</para>
</listitem>
<listitem>
<para>
msrla: Microsoft Research License Agreement
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
The deprecated <varname>services.cassandra</varname> module has seen a

View File

@ -106,12 +106,29 @@
</para>
</listitem>
<listitem>
<para>
The <literal>light</literal> module no longer uses setuid binaries, but
udev rules. As a consequence users of that module have to belong to the
<literal>video</literal> group in order to use the executable
(i.e. <literal>users.users.yourusername.extraGroups = ["video"];</literal>).
</para>
<para>
The <literal>light</literal> module no longer uses setuid binaries, but
udev rules. As a consequence users of that module have to belong to the
<literal>video</literal> group in order to use the executable (i.e.
<literal>users.users.yourusername.extraGroups = ["video"];</literal>).
</para>
</listitem>
<listitem>
<para>
Buildbot now supports Python 3 and its packages have been moved to
<literal>pythonPackages</literal>. The options
<option>services.buildbot-master.package</option> and
<option>services.buildbot-worker.package</option> can be used to select
the Python 2 or 3 version of the package (see the sketch after this list).
</para>
</listitem>
<listitem>
<para>
Options
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.userName</literal> and
<literal>services.znc.confOptions.networks.<replaceable>name</replaceable>.modulePackages</literal>
were removed. They were never used for anything and can therefore safely be removed.
</para>
</listitem>
</itemizedlist>
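<para>
As a sketch of the Buildbot change above, selecting the Python 3 variant of
the package (the attribute name follows the module's own example and may
differ on your channel):
<programlisting>
services.buildbot-master.package = pkgs.python3Packages.buildbot-full;
</programlisting>
</para>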
</section>
@ -125,7 +142,11 @@
<itemizedlist>
<listitem>
<para />
<para>
The <option>services.matomo</option> module gained the option
<option>services.matomo.package</option> which determines the used
Matomo version.
</para>
</listitem>
</itemizedlist>
</section>

View File

@ -79,7 +79,7 @@ in {
options = {
krb5 = {
enable = mkEnableOption "Whether to enable Kerberos V.";
enable = mkEnableOption "building krb5.conf, configuration file for Kerberos V";
kerberos = mkOption {
type = types.package;

View File

@ -154,6 +154,18 @@ in {
'';
};
extraModules = mkOption {
type = types.listOf types.package;
default = [];
example = literalExample "[ pkgs.pulseaudio-modules-bt ]";
description = ''
Extra pulseaudio modules to use. This is intended for out-of-tree
pulseaudio modules like extra bluetooth codecs.
Extra modules take precedence over built-in pulseaudio modules.
'';
};
daemon = {
logLevel = mkOption {
type = types.str;
@ -236,6 +248,18 @@ in {
systemd.packages = [ overriddenPackage ];
})
(mkIf (cfg.extraModules != []) {
hardware.pulseaudio.daemon.config.dl-search-path = let
overriddenModules = builtins.map
(drv: drv.override { pulseaudio = overriddenPackage; })
cfg.extraModules;
modulePaths = builtins.map
(drv: "${drv}/lib/pulse-${overriddenPackage.version}/modules")
# User-provided extra modules take precedence
(overriddenModules ++ [ overriddenPackage ]);
in lib.concatStringsSep ":" modulePaths;
})
(mkIf hasZeroconf {
services.avahi.enable = true;
})

View File

@ -108,14 +108,14 @@ in
};
environment.shellAliases = mkOption {
default = {};
example = { ll = "ls -l"; };
example = { l = null; ll = "ls -l"; };
description = ''
An attribute set that maps aliases (the top level attribute names in
this option) to command strings or directly to build outputs. The
aliases are added to all users' shells.
Aliases mapped to <code>null</code> are ignored.
'';
type = types.attrs; # types.attrsOf types.stringOrPath;
type = with types; attrsOf (nullOr (either str path));
};
environment.binsh = mkOption {
@ -157,6 +157,12 @@ in
# terminal instead of logging out of X11).
environment.variables = config.environment.sessionVariables;
environment.shellAliases = mapAttrs (name: mkDefault) {
ls = "ls --color=tty";
ll = "ls -l";
l = "ls -alh";
};
environment.etc."shells".text =
''
${concatStringsSep "\n" (map utils.toShellPath cfg.shells)}

View File

@ -140,7 +140,7 @@ in
if [ -x $out/bin/glib-compile-schemas -a -w $out/share/glib-2.0/schemas ]; then
$out/bin/glib-compile-schemas $out/share/glib-2.0/schemas
fi
${config.environment.extraSetup}
'';
};

View File

@ -24,11 +24,11 @@ with lib;
environment.extraSetup = ''
if [ -w $out/share/mime ] && [ -d $out/share/mime/packages ]; then
XDG_DATA_DIRS=$out/share ${pkgs.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
XDG_DATA_DIRS=$out/share ${pkgs.buildPackages.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
fi
if [ -w $out/share/applications ]; then
${pkgs.desktop-file-utils}/bin/update-desktop-database $out/share/applications
${pkgs.buildPackages.desktop-file-utils}/bin/update-desktop-database $out/share/applications
fi
'';
};

View File

@ -1,6 +1,6 @@
# This module provides the proprietary NVIDIA X11 / OpenGL drivers.
{ config, lib, pkgs, pkgs_i686, ... }:
{ stdenv, config, lib, pkgs, pkgs_i686, ... }:
with lib;
@ -23,7 +23,11 @@ let
else null;
nvidia_x11 = nvidiaForKernel config.boot.kernelPackages;
nvidia_libs32 = (nvidiaForKernel pkgs_i686.linuxPackages).override { libsOnly = true; kernel = null; };
nvidia_libs32 =
if versionOlder nvidia_x11.version "391" then
((nvidiaForKernel pkgs_i686.linuxPackages).override { libsOnly = true; kernel = null; }).out
else
(nvidiaForKernel config.boot.kernelPackages).lib32;
enabled = nvidia_x11 != null;
@ -98,7 +102,7 @@ in
assertions = [
{
assertion = config.services.xserver.displayManager.gdm.wayland;
message = "NVidia drivers don't support wayland";
message = "NVIDIA drivers don't support wayland";
}
{
assertion = !optimusCfg.enable ||
@ -161,7 +165,7 @@ in
};
hardware.opengl.package = nvidia_x11.out;
hardware.opengl.package32 = nvidia_libs32.out;
hardware.opengl.package32 = nvidia_libs32;
environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
++ lib.filter (p: p != null) [ nvidia_x11.persistenced ];

View File

@ -13,6 +13,7 @@ extraBuildFlags=()
mountPoint=/mnt
channelPath=
system=
while [ "$#" -gt 0 ]; do
i="$1"; shift 1

View File

@ -166,7 +166,7 @@ in
if [ -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
${pkgs.texinfo}/bin/install-info $i $out/share/info/dir
${pkgs.buildPackages.texinfo}/bin/install-info $i $out/share/info/dir
done
fi
'';

View File

@ -149,6 +149,7 @@
./security/duosec.nix
./security/hidepid.nix
./security/lock-kernel-modules.nix
./security/misc.nix
./security/oath.nix
./security/pam.nix
./security/pam_usb.nix
@ -284,6 +285,7 @@
./services/hardware/tlp.nix
./services/hardware/thinkfan.nix
./services/hardware/trezord.nix
./services/hardware/triggerhappy.nix
./services/hardware/u2f.nix
./services/hardware/udev.nix
./services/hardware/udisks2.nix
@ -633,7 +635,7 @@
./services/networking/zerobin.nix
./services/networking/zeronet.nix
./services/networking/zerotierone.nix
./services/networking/znc.nix
./services/networking/znc/default.nix
./services/printing/cupsd.nix
./services/scheduling/atd.nix
./services/scheduling/chronos.nix
@ -734,12 +736,14 @@
./services/x11/display-managers/lightdm.nix
./services/x11/display-managers/sddm.nix
./services/x11/display-managers/slim.nix
./services/x11/display-managers/startx.nix
./services/x11/display-managers/xpra.nix
./services/x11/fractalart.nix
./services/x11/hardware/libinput.nix
./services/x11/hardware/multitouch.nix
./services/x11/hardware/synaptics.nix
./services/x11/hardware/wacom.nix
./services/x11/gdk-pixbuf.nix
./services/x11/redshift.nix
./services/x11/urxvtd.nix
./services/x11/window-managers/awesome.nix

View File

@ -6,12 +6,18 @@
with lib;
{
meta = {
maintainers = [ maintainers.joachifm ];
};
boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened;
security.hideProcessInformation = mkDefault true;
security.lockKernelModules = mkDefault true;
security.allowUserNamespaces = mkDefault false;
security.apparmor.enable = mkDefault true;
boot.kernelParams = [
@ -55,18 +61,6 @@ with lib;
# ... or at least apply some hardening to it
boot.kernel.sysctl."net.core.bpf_jit_harden" = mkDefault true;
# A recurring problem with user namespaces is that there are
# still code paths where the kernel's permission checking logic
# fails to account for namespacing, instead permitting a
# namespaced process to act outside the namespace with the
# same privileges as it would have inside it. This is particularly
# bad in the common case of running as root within the namespace.
#
# Setting the number of allowed user namespaces to 0 effectively disables
# the feature at runtime. Attempting to create a user namespace
# with unshare will then fail with "no space left on device".
boot.kernel.sysctl."user.max_user_namespaces" = mkDefault 0;
# Raise ASLR entropy for 64bit & 32bit, respectively.
#
# Note: mmap_rnd_compat_bits may not exist on 64bit.

View File

@ -33,7 +33,8 @@ let
'';
bashAliases = concatStringsSep "\n" (
mapAttrsFlatten (k: v: "alias ${k}='${v}'") cfg.shellAliases
mapAttrsFlatten (k: v: "alias ${k}=${escapeShellArg v}")
(filterAttrs (k: v: !isNull v) cfg.shellAliases)
);
in
@ -59,12 +60,12 @@ in
*/
shellAliases = mkOption {
default = config.environment.shellAliases;
default = {};
description = ''
Set of aliases for bash shell. See <option>environment.shellAliases</option>
for an option format description.
Set of aliases for bash shell, which overrides <option>environment.shellAliases</option>.
See <option>environment.shellAliases</option> for an option format description.
'';
type = types.attrs; # types.attrsOf types.stringOrPath;
type = with types; attrsOf (nullOr (either str path));
};
shellInit = mkOption {
@ -125,6 +126,8 @@ in
programs.bash = {
shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
shellInit = ''
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
. ${config.system.build.setEnvironment}

View File

@ -9,7 +9,8 @@ let
cfg = config.programs.fish;
fishAliases = concatStringsSep "\n" (
mapAttrsFlatten (k: v: "alias ${k} '${v}'") cfg.shellAliases
mapAttrsFlatten (k: v: "alias ${k} ${escapeShellArg v}")
(filterAttrs (k: v: !isNull v) cfg.shellAliases)
);
in
@ -53,12 +54,12 @@ in
};
shellAliases = mkOption {
default = config.environment.shellAliases;
default = {};
description = ''
Set of aliases for fish shell. See <option>environment.shellAliases</option>
for an option format description.
Set of aliases for fish shell, which overrides <option>environment.shellAliases</option>.
See <option>environment.shellAliases</option> for an option format description.
'';
type = types.attrs;
type = with types; attrsOf (nullOr (either str path));
};
shellInit = mkOption {
@ -99,6 +100,8 @@ in
config = mkIf cfg.enable {
programs.fish.shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
environment.etc."fish/foreign-env/shellInit".text = cfge.shellInit;
environment.etc."fish/foreign-env/loginShellInit".text = cfge.loginShellInit;
environment.etc."fish/foreign-env/interactiveShellInit".text = cfge.interactiveShellInit;

View File

@ -8,12 +8,6 @@ with lib;
config = {
environment.shellAliases =
{ ls = "ls --color=tty";
ll = "ls -l";
l = "ls -alh";
};
environment.shellInit =
''
# Set up the per-user profile.

View File

@ -29,8 +29,8 @@ in
config = mkIf cfg.enable {
environment.systemPackages = with pkgs; [ thefuck ];
environment.shellInit = initScript;
programs.bash.interactiveShellInit = initScript;
programs.zsh.interactiveShellInit = mkIf prg.zsh.enable initScript;
programs.fish.interactiveShellInit = mkIf prg.fish.enable ''
${pkgs.thefuck}/bin/thefuck --alias | source

View File

@ -11,7 +11,8 @@ let
cfg = config.programs.zsh;
zshAliases = concatStringsSep "\n" (
mapAttrsFlatten (k: v: "alias ${k}=${escapeShellArg v}") cfg.shellAliases
mapAttrsFlatten (k: v: "alias ${k}=${escapeShellArg v}")
(filterAttrs (k: v: !isNull v) cfg.shellAliases)
);
in
@ -34,13 +35,12 @@ in
};
shellAliases = mkOption {
default = config.environment.shellAliases;
default = {};
description = ''
Set of aliases for zsh shell. Overrides the default value taken from
<option>environment.shellAliases</option>.
Set of aliases for zsh shell, which overrides <option>environment.shellAliases</option>.
See <option>environment.shellAliases</option> for an option format description.
'';
type = types.attrs; # types.attrsOf types.stringOrPath;
type = with types; attrsOf (nullOr (either str path));
};
shellInit = mkOption {
@ -106,6 +106,8 @@ in
config = mkIf cfg.enable {
programs.zsh.shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
environment.etc."zshenv".text =
''
# /etc/zshenv: DO NOT EDIT -- this file has been generated automatically.

View File

@ -3,6 +3,10 @@
with lib;
{
meta = {
maintainers = [ maintainers.joachifm ];
};
options = {
security.lockKernelModules = mkOption {
type = types.bool;

View File

@ -0,0 +1,39 @@
{ config, lib, ... }:
with lib;
{
meta = {
maintainers = [ maintainers.joachifm ];
};
options = {
security.allowUserNamespaces = mkOption {
type = types.bool;
default = true;
description = ''
Whether to allow creation of user namespaces. A recurring problem
with user namespaces is the presence of code paths where the kernel's
permission checking logic fails to account for namespacing, instead
permitting a namespaced process to act outside the namespace with the
same privileges as it would have inside it. This is particularly
damaging in the common case of running as root within the namespace.
When user namespace creation is disallowed, attempting to create
a user namespace fails with "no space left on device" (ENOSPC).
'';
};
};
config = mkIf (!config.security.allowUserNamespaces) {
# Setting the number of allowed user namespaces to 0 effectively disables
# the feature at runtime. Note that root may raise the limit again
# at any time.
boot.kernel.sysctl."user.max_user_namespaces" = 0;
assertions = [
{ assertion = config.nix.useSandbox -> config.security.allowUserNamespaces;
message = "`nix.useSandbox = true` conflicts with `!security.allowUserNamespaces`.";
}
];
};
}

View File

@ -6,8 +6,12 @@ with lib;
let
cfg = config.services.buildbot-master;
python = cfg.package.pythonModule;
escapeStr = s: escape ["'"] s;
masterCfg = if cfg.masterCfg == null then pkgs.writeText "master.cfg" ''
defaultMasterCfg = pkgs.writeText "master.cfg" ''
from buildbot.plugins import *
factory = util.BuildFactory()
c = BuildmasterConfig = dict(
@ -27,8 +31,28 @@ let
factory.addStep(step)
${cfg.extraConfig}
''
else cfg.masterCfg;
'';
tacFile = pkgs.writeText "buildbot-master.tac" ''
import os
from twisted.application import service
from buildbot.master import BuildMaster
basedir = '${cfg.buildbotDir}'
configfile = '${cfg.masterCfg}'
# Default umask for server
umask = None
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
m = BuildMaster(basedir, configfile, umask)
m.setServiceParent(application)
'';
in {
options = {
@ -66,9 +90,9 @@ in {
};
masterCfg = mkOption {
type = types.nullOr types.path;
type = types.path;
description = "Optionally pass master.cfg path. Other options in this configuration will be ignored.";
default = null;
default = defaultMasterCfg;
example = "/etc/nixos/buildbot/master.cfg";
};
@ -175,18 +199,25 @@ in {
package = mkOption {
type = types.package;
default = pkgs.buildbot-full;
defaultText = "pkgs.buildbot-full";
default = pkgs.pythonPackages.buildbot-full;
defaultText = "pkgs.pythonPackages.buildbot-full";
description = "Package to use for buildbot.";
example = literalExample "pkgs.buildbot-full";
example = literalExample "pkgs.python3Packages.buildbot-full";
};
packages = mkOption {
default = with pkgs; [ python27Packages.twisted git ];
default = [ pkgs.git ];
example = literalExample "[ pkgs.git ]";
type = types.listOf types.package;
description = "Packages to add to PATH for the buildbot process.";
};
pythonPackages = mkOption {
default = pythonPackages: with pythonPackages; [ ];
defaultText = "pythonPackages: with pythonPackages; [ ]";
description = "Packages to add the to the PYTHONPATH of the buildbot process.";
example = literalExample "pythonPackages: with pythonPackages; [ requests ]";
};
};
};
@ -210,14 +241,15 @@ in {
description = "Buildbot Continuous Integration Server.";
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = cfg.packages;
path = cfg.packages ++ cfg.pythonPackages python.pkgs;
environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ cfg.package ])}/${python.sitePackages}";
preStart = ''
env > envvars
mkdir -vp ${cfg.buildbotDir}
ln -sfv ${masterCfg} ${cfg.buildbotDir}/master.cfg
rm -fv $cfg.buildbotDir}/buildbot.tac
${cfg.package}/bin/buildbot create-master ${cfg.buildbotDir}
mkdir -vp "${cfg.buildbotDir}"
# Link the tac file so buildbot command line tools recognize the directory
ln -sf "${tacFile}" "${cfg.buildbotDir}/buildbot.tac"
${cfg.package}/bin/buildbot create-master --db "${cfg.dbUrl}" "${cfg.buildbotDir}"
rm -f buildbot.tac.new master.cfg.sample
'';
serviceConfig = {
@ -225,12 +257,11 @@ in {
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.home;
ExecStart = "${cfg.package}/bin/buildbot start --nodaemon ${cfg.buildbotDir}";
# NOTE: call twistd directly with stdout logging for systemd
ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${tacFile}";
};
};
};
meta.maintainers = with lib.maintainers; [ nand0p mic92 ];
}
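A hedged sketch of a master configuration exercising the new pythonPackages option; the package names are illustrative and masterCfg is left at the generated defaultMasterCfg.

```nix
{ pkgs, ... }:
{
  services.buildbot-master = {
    enable = true;
    # Extra tools on PATH for build steps.
    packages = [ pkgs.git ];
    # Extra Python modules added to the master's PYTHONPATH (new option).
    pythonPackages = pythonPackages: with pythonPackages; [ requests ];
  };
}
```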

View File

@ -7,6 +7,40 @@ with lib;
let
cfg = config.services.buildbot-worker;
python = cfg.package.pythonModule;
tacFile = pkgs.writeText "aur-buildbot-worker.tac" ''
import os
from io import open
from buildbot_worker.bot import Worker
from twisted.application import service
basedir = '${cfg.buildbotDir}'
# note: this line is matched against to check that this is a worker
# directory; do not edit it.
application = service.Application('buildbot-worker')
master_url_split = '${cfg.masterUrl}'.split(':')
buildmaster_host = master_url_split[0]
port = int(master_url_split[1])
workername = '${cfg.workerUser}'
with open('${cfg.workerPassFile}', 'r', encoding='utf-8') as passwd_file:
passwd = passwd_file.read().strip('\r\n')
keepalive = 600
umask = None
maxdelay = 300
numcpus = None
allow_shutdown = None
s = Worker(buildmaster_host, port, workername, passwd, basedir,
keepalive, umask=umask, maxdelay=maxdelay,
numcpus=numcpus, allow_shutdown=allow_shutdown)
s.setServiceParent(application)
'';
in {
options = {
services.buildbot-worker = {
@ -59,6 +93,23 @@ in {
description = "Specifies the Buildbot Worker password.";
};
workerPassFile = mkOption {
type = types.path;
description = "File used to store the Buildbot Worker password";
};
hostMessage = mkOption {
default = null;
type = types.nullOr types.str;
description = "Description of this worker";
};
adminMessage = mkOption {
default = null;
type = types.nullOr types.str;
description = "Name of the administrator of this worker";
};
masterUrl = mkOption {
default = "localhost:9989";
type = types.str;
@ -67,23 +118,24 @@ in {
package = mkOption {
type = types.package;
default = pkgs.buildbot-worker;
defaultText = "pkgs.buildbot-worker";
default = pkgs.pythonPackages.buildbot-worker;
defaultText = "pkgs.pythonPackages.buildbot-worker";
description = "Package to use for buildbot worker.";
example = literalExample "pkgs.buildbot-worker";
example = literalExample "pkgs.python3Packages.buildbot-worker";
};
packages = mkOption {
default = with pkgs; [ python27Packages.twisted git ];
default = with pkgs; [ git ];
example = literalExample "[ pkgs.git ]";
type = types.listOf types.package;
description = "Packages to add to PATH for the buildbot process.";
};
};
};
config = mkIf cfg.enable {
services.buildbot-worker.workerPassFile = mkDefault (pkgs.writeText "buildbot-worker-password" cfg.workerPass);
users.groups = optional (cfg.group == "bbworker") {
name = "bbworker";
};
@ -104,11 +156,16 @@ in {
after = [ "network.target" "buildbot-master.service" ];
wantedBy = [ "multi-user.target" ];
path = cfg.packages;
environment.PYTHONPATH = "${python.withPackages (p: [ cfg.package ])}/${python.sitePackages}";
preStart = ''
mkdir -vp ${cfg.buildbotDir}
rm -fv $cfg.buildbotDir}/buildbot.tac
${cfg.package}/bin/buildbot-worker create-worker ${cfg.buildbotDir} ${cfg.masterUrl} ${cfg.workerUser} ${cfg.workerPass}
mkdir -vp "${cfg.buildbotDir}/info"
${optionalString (cfg.hostMessage != null) ''
ln -sf "${pkgs.writeText "buildbot-worker-host" cfg.hostMessage}" "${cfg.buildbotDir}/info/host"
''}
${optionalString (cfg.adminMessage != null) ''
ln -sf "${pkgs.writeText "buildbot-worker-admin" cfg.adminMessage}" "${cfg.buildbotDir}/info/admin"
''}
'';
serviceConfig = {
@ -116,11 +173,9 @@ in {
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.home;
Environment = "PYTHONPATH=${cfg.package}/lib/python2.7/site-packages:${pkgs.python27Packages.future}/lib/python2.7/site-packages";
# NOTE: call twistd directly with stdout logging for systemd
#ExecStart = "${cfg.package}/bin/buildbot-worker start --nodaemon ${cfg.buildbotDir}";
ExecStart = "${pkgs.python27Packages.twisted}/bin/twistd -n -l - -y ${cfg.buildbotDir}/buildbot.tac";
ExecStart = "${python.pkgs.twisted}/bin/twistd --nodaemon --pidfile= --logfile - --python ${tacFile}";
};
};
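A sketch of a worker pointing at that master, with hypothetical host/admin strings; workerPassFile is overridden so the secret does not end up in the Nix store (the default is a writeText file derived from workerPass).

```nix
{ ... }:
{
  services.buildbot-worker = {
    enable = true;
    masterUrl = "localhost:9989";
    # Hypothetical out-of-store secret file, read by the generated tac file.
    workerPassFile = "/var/keys/buildbot-worker-password";
    # Published under the worker's info/host and info/admin files.
    hostMessage = "NixOS CI worker";
    adminMessage = "admin@example.org";
  };
}
```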

View File

@ -36,7 +36,7 @@ in {
description = "Profile Sync daemon";
wants = [ "psd-resync.service" "local-fs.target" ];
wantedBy = [ "default.target" ];
path = with pkgs; [ rsync kmod gawk nettools profile-sync-daemon ];
path = with pkgs; [ rsync kmod gawk nettools utillinux profile-sync-daemon ];
unitConfig = {
RequiresMountsFor = [ "/home/" ];
};
@ -55,7 +55,7 @@ in {
wants = [ "psd-resync.timer" ];
partOf = [ "psd.service" ];
wantedBy = [ "default.target" ];
path = with pkgs; [ rsync kmod gawk nettools profile-sync-daemon ];
path = with pkgs; [ rsync kmod gawk nettools utillinux profile-sync-daemon ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon resync";

View File

@ -65,6 +65,10 @@ in {
serviceConfig = {
RuntimeDirectory = "lirc";
# socket lives in runtime directory; we have to keep it available
RuntimeDirectoryPreserve = true;
ExecStart = ''
${pkgs.lirc}/bin/lircd --nodaemon \
${escapeShellArgs cfg.extraArguments} \

View File

@ -0,0 +1,114 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.triggerhappy;
socket = "/run/thd.socket";
configFile = pkgs.writeText "triggerhappy.conf" ''
${concatMapStringsSep "\n"
({ keys, event, cmd, ... }:
''${concatMapStringsSep "+" (x: "KEY_" + x) keys} ${toString { press = 1; hold = 2; release = 0; }.${event}} ${cmd}''
)
cfg.bindings}
${cfg.extraConfig}
'';
bindingCfg = { config, ... }: {
options = {
keys = mkOption {
type = types.listOf types.str;
description = "List of keys to match. Key names as defined in linux/input-event-codes.h";
};
event = mkOption {
type = types.enum ["press" "hold" "release"];
default = "press";
description = "Event to match.";
};
cmd = mkOption {
type = types.str;
description = "What to run.";
};
};
};
in
{
###### interface
options = {
services.triggerhappy = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the <command>triggerhappy</command> hotkey daemon.
'';
};
bindings = mkOption {
type = types.listOf (types.submodule bindingCfg);
default = [];
example = lib.literalExample ''
[ { keys = ["PLAYPAUSE"]; cmd = "''${pkgs.mpc_cli}/bin/mpc -q toggle"; } ]
'';
description = ''
Key bindings for <command>triggerhappy</command>.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Literal contents to append to the end of the <command>triggerhappy</command> configuration file.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.sockets.triggerhappy = {
description = "Triggerhappy Socket";
wantedBy = [ "sockets.target" ];
socketConfig.ListenDatagram = socket;
};
systemd.services.triggerhappy = {
wantedBy = [ "multi-user.target" ];
after = [ "local-fs.target" ];
description = "Global hotkey daemon";
serviceConfig = {
ExecStart = "${pkgs.triggerhappy}/bin/thd --user nobody --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
};
};
services.udev.packages = lib.singleton (pkgs.writeTextFile {
name = "triggerhappy-udev-rules";
destination = "/etc/udev/rules.d/61-triggerhappy.rules";
text = ''
ACTION=="add", SUBSYSTEM=="input", KERNEL=="event[0-9]*", ATTRS{name}!="triggerhappy", \
RUN+="${pkgs.triggerhappy}/bin/th-cmd --socket ${socket} --passfd --udev"
'';
});
};
}
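For illustration, a small configuration using the new module; the binding mirrors the option example above and assumes mpd/mpc is in use.

```nix
{ pkgs, ... }:
{
  services.triggerhappy = {
    enable = true;
    bindings = [
      # Toggle playback when the media play/pause key is pressed.
      { keys = [ "PLAYPAUSE" ]; event = "press"; cmd = "${pkgs.mpc_cli}/bin/mpc -q toggle"; }
    ];
  };
}
```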

View File

@ -7,7 +7,7 @@
# to be set.
#
# For further information please consult the documentation in the
# upstream repository at: https://github.com/aprilabank/journaldriver/
# upstream repository at: https://github.com/tazjin/journaldriver/
{ config, lib, pkgs, ...}:

View File

@ -602,7 +602,7 @@ in
target = "postfix";
};
# This makes comfortable for root to run 'postqueue' for example.
# This makes it comfortable to run 'postqueue/postdrop' for example.
systemPackages = [ pkgs.postfix ];
};
@ -616,6 +616,22 @@ in
setgid = true;
};
security.wrappers.postqueue = {
program = "postqueue";
source = "${pkgs.postfix}/bin/postqueue";
group = setgidGroup;
setuid = false;
setgid = true;
};
security.wrappers.postdrop = {
program = "postdrop";
source = "${pkgs.postfix}/bin/postdrop";
group = setgidGroup;
setuid = false;
setgid = true;
};
users.users = optional (user == "postfix")
{ name = "postfix";
description = "Postfix mail server user";

View File

@ -52,7 +52,7 @@ in
enable = mkOption {
type = types.bool;
default = cfg.rspamd.enable;
default = false;
description = "Whether to run the rmilter daemon.";
};

View File

@ -159,7 +159,7 @@ in
services.rspamd = {
enable = mkEnableOption "Whether to run the rspamd daemon.";
enable = mkEnableOption "rspamd, the Rapid spam filtering system";
debug = mkOption {
type = types.bool;

View File

@ -55,7 +55,7 @@ in
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = "true";
ExecStart = "${pkgs.emby}/bin/emby";
ExecStart = "${pkgs.emby}/bin/emby -programdata ${cfg.dataDir}";
Restart = "on-failure";
};
};

View File

@ -53,6 +53,7 @@ let
repos_path: "${cfg.statePath}/repositories"
secret_file: "${cfg.statePath}/config/gitlab_shell_secret"
log_file: "${cfg.statePath}/log/gitlab-shell.log"
custom_hooks_dir: "${cfg.statePath}/custom_hooks"
redis:
bin: ${pkgs.redis}/bin/redis-cli
host: 127.0.0.1
@ -562,6 +563,9 @@ in {
mkdir -p ${cfg.statePath}/shell
mkdir -p ${cfg.statePath}/db
mkdir -p ${cfg.statePath}/uploads
mkdir -p ${cfg.statePath}/custom_hooks/pre-receive.d
mkdir -p ${cfg.statePath}/custom_hooks/post-receive.d
mkdir -p ${cfg.statePath}/custom_hooks/update.d
rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
mkdir -p ${cfg.statePath}/config

View File

@ -399,8 +399,8 @@ in
systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
systemd.services.nix-daemon =
{ path = [ nix pkgs.utillinux ]
++ optionals cfg.distributedBuilds [ config.programs.ssh.package pkgs.gzip ]
{ path = [ nix pkgs.utillinux config.programs.ssh.package ]
++ optionals cfg.distributedBuilds [ pkgs.gzip ]
++ optionals (!isNix20) [ pkgs.openssl.bin ];
environment = cfg.envVars

View File

@ -40,6 +40,8 @@ in
systemd.services.nix-optimise =
{ description = "Nix Store Optimiser";
# No point running it inside a nixos-container. It should be on the host instead.
unitConfig.ConditionVirtualization = "!container";
serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
startAt = optionals cfg.automatic cfg.dates;
};

View File

@ -5,7 +5,7 @@ with lib;
let
cfg = config.services.redmine;
bundle = "${pkgs.redmine}/share/redmine/bin/bundle";
bundle = "${cfg.package}/share/redmine/bin/bundle";
databaseYml = pkgs.writeText "database.yml" ''
production:
@ -15,6 +15,7 @@ let
port: ${toString cfg.database.port}
username: ${cfg.database.user}
password: #dbpass#
${optionalString (cfg.database.socket != null) "socket: ${cfg.database.socket}"}
'';
configurationYml = pkgs.writeText "configuration.yml" ''
@ -29,6 +30,19 @@ let
${cfg.extraConfig}
'';
unpackTheme = unpack "theme";
unpackPlugin = unpack "plugin";
unpack = id: (name: source:
pkgs.stdenv.mkDerivation {
name = "redmine-${id}-${name}";
buildInputs = [ pkgs.unzip ];
buildCommand = ''
mkdir -p $out
cd $out
unpackFile ${source}
'';
});
in
{
@ -40,6 +54,14 @@ in
description = "Enable the Redmine service.";
};
package = mkOption {
type = types.package;
default = pkgs.redmine;
defaultText = "pkgs.redmine";
description = "Which Redmine package to use.";
example = "pkgs.redmine.override { ruby = pkgs.ruby_2_3; }";
};
user = mkOption {
type = types.str;
default = "redmine";
@ -52,6 +74,12 @@ in
description = "Group under which Redmine is ran.";
};
port = mkOption {
type = types.int;
default = 3000;
description = "Port on which Redmine is ran.";
};
stateDir = mkOption {
type = types.str;
default = "/var/lib/redmine";
@ -66,6 +94,41 @@ in
See https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration
'';
example = literalExample ''
email_delivery:
delivery_method: smtp
smtp_settings:
address: mail.example.com
port: 25
'';
};
themes = mkOption {
type = types.attrsOf types.path;
default = {};
description = "Set of themes.";
example = literalExample ''
{
dkuk-redmine_alex_skin = builtins.fetchurl {
url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
};
}
'';
};
plugins = mkOption {
type = types.attrsOf types.path;
default = {};
description = "Set of plugins.";
example = literalExample ''
{
redmine_env_auth = builtins.fetchurl {
url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
};
}
'';
};
database = {
@ -78,7 +141,7 @@ in
host = mkOption {
type = types.str;
default = "127.0.0.1";
default = (if cfg.database.socket != null then "localhost" else "127.0.0.1");
description = "Database host address.";
};
@ -119,6 +182,13 @@ in
<option>database.user</option>.
'';
};
socket = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/mysqld/mysqld.sock";
description = "Path to the unix socket file to use for authentication.";
};
};
};
};
@ -126,17 +196,20 @@ in
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.database.passwordFile != null || cfg.database.password != "";
message = "either services.redmine.database.passwordFile or services.redmine.database.password must be set";
{ assertion = cfg.database.passwordFile != null || cfg.database.password != "" || cfg.database.socket != null;
message = "one of services.redmine.database.socket, services.redmine.database.passwordFile, or services.redmine.database.password must be set";
}
{ assertion = cfg.database.socket != null -> (cfg.database.type == "mysql2");
message = "Socket authentication is only available for the mysql2 database type";
}
];
environment.systemPackages = [ pkgs.redmine ];
environment.systemPackages = [ cfg.package ];
systemd.services.redmine = {
after = [ "network.target" (if cfg.database.type == "mysql2" then "mysql.service" else "postgresql.service") ];
wantedBy = [ "multi-user.target" ];
environment.HOME = "${pkgs.redmine}/share/redmine";
environment.HOME = "${cfg.package}/share/redmine";
environment.RAILS_ENV = "production";
environment.RAILS_CACHE = "${cfg.stateDir}/cache";
environment.REDMINE_LANG = "en";
@ -151,43 +224,80 @@ in
subversion
];
preStart = ''
# start with a fresh config directory every time
rm -rf ${cfg.stateDir}/config
cp -r ${pkgs.redmine}/share/redmine/config.dist ${cfg.stateDir}/config
# ensure cache directory exists for db:migrate command
mkdir -p "${cfg.stateDir}/cache"
# create the basic state directory layout pkgs.redmine expects
mkdir -p /run/redmine
# create the basic directory layout the redmine package expects
mkdir -p /run/redmine/public
for i in config files log plugins tmp; do
mkdir -p ${cfg.stateDir}/$i
ln -fs ${cfg.stateDir}/$i /run/redmine/$i
mkdir -p "${cfg.stateDir}/$i"
ln -fs "${cfg.stateDir}/$i" /run/redmine/
done
# ensure cache directory exists for db:migrate command
mkdir -p ${cfg.stateDir}/cache
for i in plugin_assets themes; do
mkdir -p "${cfg.stateDir}/public/$i"
ln -fs "${cfg.stateDir}/public/$i" /run/redmine/public/
done
# start with a fresh config directory
# the config directory is copied instead of linked as some mutable data is stored in there
rm -rf "${cfg.stateDir}/config/"*
cp -r ${cfg.package}/share/redmine/config.dist/* "${cfg.stateDir}/config/"
# link in the application configuration
ln -fs ${configurationYml} ${cfg.stateDir}/config/configuration.yml
ln -fs ${configurationYml} "${cfg.stateDir}/config/configuration.yml"
chmod -R ug+rwX,o-rwx+x ${cfg.stateDir}/
# handle database.passwordFile
# link in all user specified themes
rm -rf "${cfg.stateDir}/public/themes/"*
for theme in ${concatStringsSep " " (mapAttrsToList unpackTheme cfg.themes)}; do
ln -fs $theme/* "${cfg.stateDir}/public/themes"
done
# link in redmine provided themes
ln -sf ${cfg.package}/share/redmine/public/themes.dist/* "${cfg.stateDir}/public/themes/"
# link in all user specified plugins
rm -rf "${cfg.stateDir}/plugins/"*
for plugin in ${concatStringsSep " " (mapAttrsToList unpackPlugin cfg.plugins)}; do
ln -fs $plugin/* "${cfg.stateDir}/plugins/''${plugin##*-redmine-plugin-}"
done
# ensure correct permissions for most files
chmod -R ug+rwX,o-rwx+x "${cfg.stateDir}/"
# handle database.passwordFile & permissions
DBPASS=$(head -n1 ${cfg.database.passwordFile})
cp -f ${databaseYml} ${cfg.stateDir}/config/database.yml
sed -e "s,#dbpass#,$DBPASS,g" -i ${cfg.stateDir}/config/database.yml
chmod 440 ${cfg.stateDir}/config/database.yml
cp -f ${databaseYml} "${cfg.stateDir}/config/database.yml"
sed -e "s,#dbpass#,$DBPASS,g" -i "${cfg.stateDir}/config/database.yml"
chmod 440 "${cfg.stateDir}/config/database.yml"
# generate a secret token if required
if ! test -e "${cfg.stateDir}/config/initializers/secret_token.rb"; then
${bundle} exec rake generate_secret_token
chmod 440 ${cfg.stateDir}/config/initializers/secret_token.rb
chmod 440 "${cfg.stateDir}/config/initializers/secret_token.rb"
fi
# ensure everything is owned by ${cfg.user}
chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
${bundle} exec rake db:migrate
${bundle} exec rake redmine:load_default_data
# ensure everything is owned by ${cfg.user}
chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}"
# execute redmine required commands prior to starting the application
# NOTE: su is required in case mysql socket authentication is used
/run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake db:migrate'
/run/wrappers/bin/su -s ${pkgs.bash}/bin/bash -m -l redmine -c '${bundle} exec rake redmine:load_default_data'
# log files don't exist until after first command has been executed
# correct ownership of files generated by calling exec rake ...
chown -R ${cfg.user}:${cfg.group} "${cfg.stateDir}/log"
'';
serviceConfig = {
@ -196,13 +306,13 @@ in
User = cfg.user;
Group = cfg.group;
TimeoutSec = "300";
WorkingDirectory = "${pkgs.redmine}/share/redmine";
ExecStart="${bundle} exec rails server webrick -e production -P ${cfg.stateDir}/redmine.pid";
WorkingDirectory = "${cfg.package}/share/redmine";
ExecStart="${bundle} exec rails server webrick -e production -p ${toString cfg.port} -P '${cfg.stateDir}/redmine.pid'";
};
};
users.extraUsers = optionalAttrs (cfg.user == "redmine") (singleton
users.users = optionalAttrs (cfg.user == "redmine") (singleton
{ name = "redmine";
group = cfg.group;
home = cfg.stateDir;
@ -210,7 +320,7 @@ in
uid = config.ids.uids.redmine;
});
users.extraGroups = optionalAttrs (cfg.group == "redmine") (singleton
users.groups = optionalAttrs (cfg.group == "redmine") (singleton
{ name = "redmine";
gid = config.ids.gids.redmine;
});
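A sketch of the reworked Redmine options, reusing the theme example from the module itself and the mysql2 socket authentication path; the concrete values are illustrative.

```nix
{ pkgs, ... }:
{
  services.redmine = {
    enable = true;
    package = pkgs.redmine;
    port = 3000;
    database = {
      type = "mysql2";
      # With a socket set, no password is needed (see the assertions above).
      socket = "/run/mysqld/mysqld.sock";
    };
    themes = {
      dkuk-redmine_alex_skin = builtins.fetchurl {
        url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
        sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
      };
    };
  };
}
```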

View File

@ -46,10 +46,12 @@ in
Group = "weechat";
RemainAfterExit = "yes";
};
script = "exec ${pkgs.screen}/bin/screen -Dm -S ${cfg.sessionName} ${cfg.binary}";
script = "exec ${config.security.wrapperDir}/screen -Dm -S ${cfg.sessionName} ${cfg.binary}";
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" ];
};
security.wrappers.screen.source = "${pkgs.screen}/bin/screen";
};
meta.doc = ./weechat.xml;

View File

@ -54,7 +54,7 @@
</programlisting>
Now, the session can be re-attached like this:
<programlisting>
screen -r weechat-screen
screen -x weechat/weechat-screen
</programlisting>
</para>

View File

@ -30,6 +30,7 @@ let
postfix = import ./exporters/postfix.nix { inherit config lib pkgs; };
snmp = import ./exporters/snmp.nix { inherit config lib pkgs; };
surfboard = import ./exporters/surfboard.nix { inherit config lib pkgs; };
tor = import ./exporters/tor.nix { inherit config lib pkgs; };
unifi = import ./exporters/unifi.nix { inherit config lib pkgs; };
varnish = import ./exporters/varnish.nix { inherit config lib pkgs; };
};
@ -123,15 +124,13 @@ let
systemd.services."prometheus-${name}-exporter" = mkMerge ([{
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Restart = mkDefault "always";
PrivateTmp = mkDefault true;
WorkingDirectory = mkDefault /tmp;
} // mkIf (!(serviceOpts.serviceConfig.DynamicUser or false)) {
User = conf.user;
Group = conf.group;
};
} serviceOpts ]);
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
} serviceOpts ] ++ optional (serviceOpts.serviceConfig.DynamicUser or false) {
serviceConfig.User = conf.user;
serviceConfig.Group = conf.group;
});
};
in
{
@ -172,5 +171,8 @@ in
}) exporterOpts)
);
meta.doc = ./exporters.xml;
meta = {
doc = ./exporters.xml;
maintainers = [ maintainers.willibutz ];
};
}

View File

@ -60,10 +60,10 @@ in
DynamicUser = true;
ExecStart = ''
${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
-config.file ${configFile} \
-log.format ${cfg.logFormat} \
-log.level ${cfg.logLevel} \
-web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
--config.file=${configFile} \
--log.format=${cfg.logFormat} \
--log.level=${cfg.logLevel} \
--web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};

View File

@ -0,0 +1,40 @@
{ config, lib, pkgs }:
with lib;
let
cfg = config.services.prometheus.exporters.tor;
in
{
port = 9130;
extraOpts = {
torControlAddress = mkOption {
type = types.str;
default = "127.0.0.1";
description = ''
Tor control IP address or hostname.
'';
};
torControlPort = mkOption {
type = types.int;
default = 9051;
description = ''
Tor control port.
'';
};
};
serviceOpts = {
serviceConfig = {
DynamicUser = true;
ExecStart = ''
${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \
-b ${cfg.listenAddress} \
-p ${toString cfg.port} \
-a ${cfg.torControlAddress} \
-c ${toString cfg.torControlPort} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};
};
}
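A sketch of enabling the new exporter against a local Tor control port; enable, port and listenAddress come from the shared exporter options shown earlier in this file.

```nix
{
  services.prometheus.exporters.tor = {
    enable = true;
    # Defaults spelled out explicitly: scrape the local Tor control port.
    torControlAddress = "127.0.0.1";
    torControlPort = 9051;
  };
}
```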

View File

@ -69,6 +69,7 @@ in
path = [ pkgs.varnish ];
serviceConfig = {
DynamicUser = true;
RestartSec = mkDefault 1;
ExecStart = ''
${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \

View File

@ -149,11 +149,13 @@ in
packages = {
module = mkOption {
default = config.boot.kernelPackages.openafs;
defaultText = "config.boot.kernelPackages.openafs";
type = types.package;
description = "OpenAFS kernel module package. MUST match the userland package!";
};
programs = mkOption {
default = getBin pkgs.openafs;
defaultText = "config.boot.kernelPackages.openafs";
type = types.package;
description = "OpenAFS programs package. MUST match the kernel module package!";
};

View File

@ -80,6 +80,7 @@ in {
package = mkOption {
default = pkgs.openafs.server or pkgs.openafs;
defaultText = "pkgs.openafs.server or pkgs.openafs";
type = types.package;
description = "OpenAFS package for the server binaries";
};

View File

@ -90,7 +90,7 @@ in
BANDB_DBPATH = "${cfg.statedir}/ban.db";
};
serviceConfig = {
ExecStart = "${charybdis}/bin/charybdis-ircd -foreground -logfile /dev/stdout -configfile ${configFile}";
ExecStart = "${charybdis}/bin/charybdis -foreground -logfile /dev/stdout -configfile ${configFile}";
Group = cfg.group;
User = cfg.user;
PermissionsStartOnly = true; # preStart needs to run with root permissions

View File

@ -157,9 +157,9 @@ in
{ description = "hostapd wireless AP";
path = [ pkgs.hostapd ];
wantedBy = [ "network.target" ];
after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service" "sys-subsystem-net-devices-${cfg.interface}.device" ];
after = [ "sys-subsystem-net-devices-${cfg.interface}.device" ];
bindsTo = [ "sys-subsystem-net-devices-${cfg.interface}.device" ];
requiredBy = [ "network-link-${cfg.interface}.service" ];
serviceConfig =
{ ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}";

View File

@ -50,7 +50,7 @@ in
enable = mkOption {
type = types.bool;
default = false;
description = "If enabled, start the Murmur Service.";
description = "If enabled, start the Murmur Mumble server.";
};
autobanAttempts = mkOption {

View File

@ -5,7 +5,7 @@ with lib;
{
options = {
services.pptpd = {
enable = mkEnableOption "Whether pptpd should be run on startup.";
enable = mkEnableOption "pptpd, the Point-to-Point Tunneling Protocol daemon";
serverIp = mkOption {
type = types.string;

View File

@ -16,6 +16,14 @@ in {
available on http://127.0.0.1:8384/.
'';
guiAddress = mkOption {
type = types.str;
default = "127.0.0.1:8384";
description = ''
Address to serve the GUI.
'';
};
systemService = mkOption {
type = types.bool;
default = true;
@ -23,7 +31,7 @@ in {
};
user = mkOption {
type = types.string;
type = types.str;
default = defaultUser;
description = ''
Syncthing will be run under this user (user will be created if it doesn't exist.
@ -32,7 +40,7 @@ in {
};
group = mkOption {
type = types.string;
type = types.str;
default = "nogroup";
description = ''
Syncthing will be run under this group (group will not be created if it doesn't exist.
@ -41,7 +49,7 @@ in {
};
all_proxy = mkOption {
type = types.nullOr types.string;
type = with types; nullOr str;
default = null;
example = "socks5://address.com:1234";
description = ''
@ -132,7 +140,12 @@ in {
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
ExecStart = "${cfg.package}/bin/syncthing -no-browser -home=${cfg.dataDir}";
ExecStart = ''
${cfg.package}/bin/syncthing \
-no-browser \
-gui-address=${cfg.guiAddress} \
-home=${cfg.dataDir}
'';
};
};
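A sketch using the new guiAddress option to expose the web GUI beyond localhost; the address is an assumption, not a recommendation.

```nix
{
  services.syncthing = {
    enable = true;
    # Serve the GUI on all interfaces instead of the 127.0.0.1:8384 default.
    guiAddress = "0.0.0.0:8384";
  };
}
```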

View File

@ -148,14 +148,6 @@ in
}
));
networking.interfaces = flip mapAttrs' cfg.networks (network: data: nameValuePair
("tinc.${network}")
({
virtual = true;
virtualType = "${data.interfaceType}";
})
);
systemd.services = flip mapAttrs' cfg.networks (network: data: nameValuePair
("tinc.${network}")
({

View File

@ -5,7 +5,7 @@ with lib;
{
options = {
services.xl2tpd = {
enable = mkEnableOption "Whether xl2tpd should be run on startup.";
enable = mkEnableOption "xl2tpd, the Layer 2 Tunnelling Protocol Daemon";
serverIp = mkOption {
type = types.string;

View File

@ -36,7 +36,7 @@ in
services.xrdp = {
enable = mkEnableOption "Whether xrdp should be run on startup.";
enable = mkEnableOption "xrdp, the Remote Desktop Protocol server";
package = mkOption {
type = types.package;

View File

@ -1,431 +0,0 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.znc;
defaultUser = "znc"; # Default user to own process.
# Default user and pass:
# un=znc
# pw=nixospass
defaultUserName = "znc";
defaultPassBlock = "
<Pass password>
Method = sha256
Hash = e2ce303c7ea75c571d80d8540a8699b46535be6a085be3414947d638e48d9e93
Salt = l5Xryew4g*!oa(ECfX2o
</Pass>
";
modules = pkgs.buildEnv {
name = "znc-modules";
paths = cfg.modulePackages;
};
# Keep znc.conf in nix store, then symlink or copy into `dataDir`, depending on `mutable`.
mkZncConf = confOpts: ''
Version = 1.6.3
${concatMapStrings (n: "LoadModule = ${n}\n") confOpts.modules}
<Listener l>
Port = ${toString confOpts.port}
IPv4 = true
IPv6 = true
SSL = ${boolToString confOpts.useSSL}
${lib.optionalString (confOpts.uriPrefix != null) "URIPrefix = ${confOpts.uriPrefix}"}
</Listener>
<User ${confOpts.userName}>
${confOpts.passBlock}
Admin = true
Nick = ${confOpts.nick}
AltNick = ${confOpts.nick}_
Ident = ${confOpts.nick}
RealName = ${confOpts.nick}
${concatMapStrings (n: "LoadModule = ${n}\n") confOpts.userModules}
${ lib.concatStringsSep "\n" (lib.mapAttrsToList (name: net: ''
<Network ${name}>
${concatMapStrings (m: "LoadModule = ${m}\n") net.modules}
Server = ${net.server} ${lib.optionalString net.useSSL "+"}${toString net.port} ${net.password}
${concatMapStrings (c: "<Chan #${c}>\n</Chan>\n") net.channels}
${lib.optionalString net.hasBitlbeeControlChannel ''
<Chan &bitlbee>
</Chan>
''}
${net.extraConf}
</Network>
'') confOpts.networks) }
</User>
${confOpts.extraZncConf}
'';
zncConfFile = pkgs.writeTextFile {
name = "znc.conf";
text = if cfg.zncConf != ""
then cfg.zncConf
else mkZncConf cfg.confOptions;
};
networkOpts = { ... }: {
options = {
server = mkOption {
type = types.str;
example = "chat.freenode.net";
description = ''
IRC server address.
'';
};
port = mkOption {
type = types.int;
default = 6697;
example = 6697;
description = ''
IRC server port.
'';
};
userName = mkOption {
default = "";
example = "johntron";
type = types.string;
description = ''
A nick identity specific to the IRC server.
'';
};
password = mkOption {
type = types.str;
default = "";
description = ''
IRC server password, such as for a Slack gateway.
'';
};
useSSL = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use SSL to connect to the IRC server.
'';
};
modulePackages = mkOption {
type = types.listOf types.package;
default = [];
example = [ "pkgs.zncModules.push" "pkgs.zncModules.fish" ];
description = ''
External ZNC modules to build.
'';
};
modules = mkOption {
type = types.listOf types.str;
default = [ "simple_away" ];
example = literalExample "[ simple_away sasl ]";
description = ''
ZNC modules to load.
'';
};
channels = mkOption {
type = types.listOf types.str;
default = [];
example = [ "nixos" ];
description = ''
IRC channels to join.
'';
};
hasBitlbeeControlChannel = mkOption {
type = types.bool;
default = false;
description = ''
Whether to add the special Bitlbee operations channel.
'';
};
extraConf = mkOption {
default = "";
type = types.lines;
example = ''
Encoding = ^UTF-8
FloodBurst = 4
FloodRate = 1.00
IRCConnectEnabled = true
Ident = johntron
JoinDelay = 0
Nick = johntron
'';
description = ''
Extra config for the network.
'';
};
};
};
in
{
###### Interface
options = {
services.znc = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable a ZNC service for a user.
'';
};
user = mkOption {
default = "znc";
example = "john";
type = types.string;
description = ''
The name of an existing user account to use to own the ZNC server process.
If not specified, a default user will be created to own the process.
'';
};
group = mkOption {
default = "";
example = "users";
type = types.string;
description = ''
Group to own the ZNCserver process.
'';
};
dataDir = mkOption {
default = "/var/lib/znc/";
example = "/home/john/.znc/";
type = types.path;
description = ''
The data directory. Used for configuration files and modules.
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for ZNC.
'';
};
zncConf = mkOption {
default = "";
example = "See: http://wiki.znc.in/Configuration";
type = types.lines;
description = ''
Config file as generated with `znc --makeconf` to use for the whole ZNC configuration.
If specified, `confOptions` will be ignored, and this value, as-is, will be used.
If left empty, a conf file with default values will be used.
'';
};
confOptions = {
modules = mkOption {
type = types.listOf types.str;
default = [ "webadmin" "adminlog" ];
example = [ "partyline" "webadmin" "adminlog" "log" ];
description = ''
A list of modules to include in the `znc.conf` file.
'';
};
userModules = mkOption {
type = types.listOf types.str;
default = [ "chansaver" "controlpanel" ];
example = [ "chansaver" "controlpanel" "fish" "push" ];
description = ''
A list of user modules to include in the `znc.conf` file.
'';
};
userName = mkOption {
default = defaultUserName;
example = "johntron";
type = types.string;
description = ''
The user name used to log in to the ZNC web admin interface.
'';
};
networks = mkOption {
default = { };
type = with types; attrsOf (submodule networkOpts);
description = ''
IRC networks to connect the user to.
'';
example = {
"freenode" = {
server = "chat.freenode.net";
port = 6697;
useSSL = true;
modules = [ "simple_away" ];
};
};
};
nick = mkOption {
default = "znc-user";
example = "john";
type = types.string;
description = ''
The IRC nick.
'';
};
passBlock = mkOption {
example = defaultPassBlock;
type = types.string;
description = ''
Generate with `nix-shell -p znc --command "znc --makepass"`.
This is the password used to log in to the ZNC web admin interface.
'';
};
port = mkOption {
default = 5000;
example = 5000;
type = types.int;
description = ''
Specifies the port on which to listen.
'';
};
useSSL = mkOption {
default = true;
type = types.bool;
description = ''
Indicates whether the ZNC server should use SSL when listening on the specified port. A self-signed certificate will be generated.
'';
};
uriPrefix = mkOption {
type = types.nullOr types.str;
default = null;
example = "/znc/";
description = ''
An optional URI prefix for the ZNC web interface. Can be
used to make ZNC available behind a reverse proxy.
'';
};
extraZncConf = mkOption {
default = "";
type = types.lines;
description = ''
Extra config to `znc.conf` file.
'';
};
};
modulePackages = mkOption {
type = types.listOf types.package;
default = [ ];
example = literalExample "[ pkgs.zncModules.fish pkgs.zncModules.push ]";
description = ''
A list of global znc module packages to add to znc.
'';
};
mutable = mkOption {
default = true;
type = types.bool;
description = ''
Indicates whether to allow the contents of the `dataDir` directory to be changed
by the user at run-time.
If true, modifications to the ZNC configuration after its initial creation are not
overwritten by a NixOS system rebuild.
If false, the ZNC configuration is rebuilt by every system rebuild.
If the user wants to manage the ZNC service using the web admin interface, this value
should be set to true.
'';
};
extraFlags = mkOption {
default = [ ];
example = [ "--debug" ];
type = types.listOf types.str;
description = ''
Extra flags to use when executing znc command.
'';
};
};
};
###### Implementation
config = mkIf cfg.enable {
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.confOptions.port ];
};
systemd.services.znc = {
description = "ZNC Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.service" ];
serviceConfig = {
User = cfg.user;
Group = cfg.group;
Restart = "always";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
};
preStart = ''
${pkgs.coreutils}/bin/mkdir -p ${cfg.dataDir}/configs
# If mutable, regenerate conf file every time.
${optionalString (!cfg.mutable) ''
${pkgs.coreutils}/bin/echo "znc is set to be system-managed. Now deleting old znc.conf file to be regenerated."
${pkgs.coreutils}/bin/rm -f ${cfg.dataDir}/configs/znc.conf
''}
# Ensure essential files exist.
if [[ ! -f ${cfg.dataDir}/configs/znc.conf ]]; then
${pkgs.coreutils}/bin/echo "No znc.conf file found in ${cfg.dataDir}. Creating one now."
${pkgs.coreutils}/bin/cp --no-clobber ${zncConfFile} ${cfg.dataDir}/configs/znc.conf
${pkgs.coreutils}/bin/chmod u+rw ${cfg.dataDir}/configs/znc.conf
${pkgs.coreutils}/bin/chown ${cfg.user} ${cfg.dataDir}/configs/znc.conf
fi
if [[ ! -f ${cfg.dataDir}/znc.pem ]]; then
${pkgs.coreutils}/bin/echo "No znc.pem file found in ${cfg.dataDir}. Creating one now."
${pkgs.znc}/bin/znc --makepem --datadir ${cfg.dataDir}
fi
# Symlink modules
rm ${cfg.dataDir}/modules || true
ln -fs ${modules}/lib/znc ${cfg.dataDir}/modules
'';
script = "${pkgs.znc}/bin/znc --foreground --datadir ${cfg.dataDir} ${toString cfg.extraFlags}";
};
users.users = optional (cfg.user == defaultUser)
{ name = defaultUser;
description = "ZNC server daemon owner";
group = defaultUser;
uid = config.ids.uids.znc;
home = cfg.dataDir;
createHome = true;
};
users.groups = optional (cfg.user == defaultUser)
{ name = defaultUser;
gid = config.ids.gids.znc;
members = [ defaultUser ];
};
};
}

View File

@ -0,0 +1,306 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.znc;
defaultUser = "znc";
modules = pkgs.buildEnv {
name = "znc-modules";
paths = cfg.modulePackages;
};
listenerPorts = concatMap (l: optional (l ? Port) l.Port)
(attrValues (cfg.config.Listener or {}));
# Converts the config option to a string
semanticString = let
sortedAttrs = set: sort (l: r:
if l == "extraConfig" then false # Always put extraConfig last
else if isAttrs set.${l} == isAttrs set.${r} then l < r
else isAttrs set.${r} # Attrsets should be last, makes for a nice config
# This last case occurs when any side (but not both) is an attrset
# The order of these is correct when the attrset is on the right
# which we're just returning
) (attrNames set);
# Specifies an attrset that encodes the value according to its type
encode = name: value: {
null = [];
bool = [ "${name} = ${boolToString value}" ];
int = [ "${name} = ${toString value}" ];
# extraConfig should be inserted verbatim
string = [ (if name == "extraConfig" then value else "${name} = ${value}") ];
# Values like `Foo = [ "bar" "baz" ];` should be transformed into
# Foo=bar
# Foo=baz
list = concatMap (encode name) value;
# Values like `Foo = { bar = { Baz = "baz"; Qux = "qux"; Florps = null; }; };` should be transformed into
# <Foo bar>
# Baz=baz
# Qux=qux
# </Foo>
set = concatMap (subname: [
"<${name} ${subname}>"
] ++ map (line: "\t${line}") (toLines value.${subname}) ++ [
"</${name}>"
]) (filter (v: v != null) (attrNames value));
}.${builtins.typeOf value};
# One level "above" encode, acts upon a set and uses encode on each name,value pair
toLines = set: concatMap (name: encode name set.${name}) (sortedAttrs set);
in
concatStringsSep "\n" (toLines cfg.config);
semanticTypes = with types; rec {
zncAtom = nullOr (either (either int bool) str);
zncAttr = attrsOf (nullOr zncConf);
zncAll = either (either zncAtom (listOf zncAtom)) zncAttr;
zncConf = attrsOf (zncAll // {
# Since this is a recursive type and the description by default contains
# the description of its subtypes, infinite recursion would occur without
# explicitly breaking this cycle
description = "znc values (null, atoms (str, int, bool), list of atoms, or attrsets of znc values)";
});
};
in
{
imports = [ ./options.nix ];
options = {
services.znc = {
enable = mkEnableOption "ZNC";
user = mkOption {
default = "znc";
example = "john";
type = types.str;
description = ''
The name of an existing user account to use to own the ZNC server
process. If not specified, a default user will be created.
'';
};
group = mkOption {
default = defaultUser;
example = "users";
type = types.str;
description = ''
Group to own the ZNC process.
'';
};
dataDir = mkOption {
default = "/var/lib/znc/";
example = "/home/john/.znc/";
type = types.path;
description = ''
The state directory for ZNC. The config and the modules will be linked
to from this directory as well.
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for ZNC. This works with the
ports of listeners specified in
<option>services.znc.config.Listener</option>.
'';
};
config = mkOption {
type = semanticTypes.zncConf;
default = {};
example = literalExample ''
{
LoadModule = [ "webadmin" "adminlog" ];
User.paul = {
Admin = true;
Nick = "paul";
AltNick = "paul1";
LoadModule = [ "chansaver" "controlpanel" ];
Network.freenode = {
Server = "chat.freenode.net +6697";
LoadModule = [ "simple_away" ];
Chan = {
"#nixos" = { Detached = false; };
"##linux" = { Disabled = true; };
};
};
Pass.password = {
Method = "sha256";
Hash = "e2ce303c7ea75c571d80d8540a8699b46535be6a085be3414947d638e48d9e93";
Salt = "l5Xryew4g*!oa(ECfX2o";
};
};
}
'';
description = ''
Configuration for ZNC, see
<literal>https://wiki.znc.in/Configuration</literal> for details. The
Nix value declared here will be translated directly to the xml-like
format ZNC expects. This is much more flexible than the legacy options
under <option>services.znc.confOptions.*</option>, but also can't do
any type checking.
</para>
<para>
You can use <command>nix-instantiate --eval --strict '&lt;nixpkgs/nixos&gt;' -A config.services.znc.config</command>
to view the current value. By default it contains a listener for port
5000 with SSL enabled.
</para>
<para>
Nix attributes called <literal>extraConfig</literal> will be inserted
verbatim into the resulting config file.
</para>
<para>
If <option>services.znc.useLegacyConfig</option> is turned on, the
option values in <option>services.znc.confOptions.*</option> will
gracefully be applied to this option.
</para>
<para>
If you intend to update the configuration through this option, be sure
to enable <option>services.znc.mutable</option>, otherwise none of the
changes here will be applied after the initial deploy.
'';
};
configFile = mkOption {
type = types.path;
example = "~/.znc/configs/znc.conf";
description = ''
Configuration file for ZNC. It is recommended to use the
<option>config</option> option instead.
</para>
<para>
Setting this option will override any auto-generated config file
through the <option>confOptions</option> or <option>config</option>
options.
'';
};
modulePackages = mkOption {
type = types.listOf types.package;
default = [ ];
example = literalExample "[ pkgs.zncModules.fish pkgs.zncModules.push ]";
description = ''
A list of global znc module packages to add to znc.
'';
};
mutable = mkOption {
default = true; # TODO: Default to true when config is set, make sure to not delete the old config if present
type = types.bool;
description = ''
Indicates whether to allow the contents of the
<literal>dataDir</literal> directory to be changed by the user at
run-time.
</para>
<para>
If enabled, modifications to the ZNC configuration after its initial
creation are not overwritten by a NixOS rebuild. If disabled, the
ZNC configuration is rebuilt on every NixOS rebuild.
</para>
<para>
If the user wants to manage the ZNC service using the web admin
interface, this option should be enabled.
'';
};
extraFlags = mkOption {
default = [ ];
example = [ "--debug" ];
type = types.listOf types.str;
description = ''
Extra arguments to use for executing znc.
'';
};
};
};
###### Implementation
config = mkIf cfg.enable {
services.znc = {
configFile = mkDefault (pkgs.writeText "znc-generated.conf" semanticString);
config = {
Version = (builtins.parseDrvName pkgs.znc.name).version;
Listener.l.Port = mkDefault 5000;
Listener.l.SSL = mkDefault true;
};
};
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall listenerPorts;
systemd.services.znc = {
description = "ZNC Server";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
serviceConfig = {
User = cfg.user;
Group = cfg.group;
Restart = "always";
ExecStart = "${pkgs.znc}/bin/znc --foreground --datadir ${cfg.dataDir} ${escapeShellArgs cfg.extraFlags}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
};
preStart = ''
mkdir -p ${cfg.dataDir}/configs
# If mutable, regenerate conf file every time.
${optionalString (!cfg.mutable) ''
echo "znc is set to be system-managed. Now deleting old znc.conf file to be regenerated."
rm -f ${cfg.dataDir}/configs/znc.conf
''}
# Ensure essential files exist.
if [[ ! -f ${cfg.dataDir}/configs/znc.conf ]]; then
echo "No znc.conf file found in ${cfg.dataDir}. Creating one now."
cp --no-clobber ${cfg.configFile} ${cfg.dataDir}/configs/znc.conf
chmod u+rw ${cfg.dataDir}/configs/znc.conf
chown ${cfg.user} ${cfg.dataDir}/configs/znc.conf
fi
if [[ ! -f ${cfg.dataDir}/znc.pem ]]; then
echo "No znc.pem file found in ${cfg.dataDir}. Creating one now."
${pkgs.znc}/bin/znc --makepem --datadir ${cfg.dataDir}
fi
# Symlink modules
rm ${cfg.dataDir}/modules || true
ln -fs ${modules}/lib/znc ${cfg.dataDir}/modules
'';
};
users.users = optional (cfg.user == defaultUser)
{ name = defaultUser;
description = "ZNC server daemon owner";
group = defaultUser;
uid = config.ids.uids.znc;
home = cfg.dataDir;
createHome = true;
};
users.groups = optional (cfg.user == defaultUser)
{ name = defaultUser;
gid = config.ids.gids.znc;
members = [ defaultUser ];
};
};
}
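A minimal sketch of the new semantic configuration with a hypothetical user; per the encoding rules above, the LoadModule list becomes repeated `LoadModule = ...` lines and `Listener.l` renders as a `<Listener l>...</Listener>` block in the generated znc.conf.

```nix
{
  services.znc = {
    enable = true;
    mutable = false;        # regenerate znc.conf from this config on rebuild
    useLegacyConfig = false;
    openFirewall = true;    # opens the Listener ports declared below
    config = {
      LoadModule = [ "webadmin" "adminlog" ];
      Listener.l = { Port = 5000; SSL = true; };
      User.alice = {
        Admin = true;
        Nick = "alice";
        AltNick = "alice_";
      };
    };
  };
}
```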

View File

@ -0,0 +1,268 @@
{ lib, config, ... }:
with lib;
let
cfg = config.services.znc;
networkOpts = {
options = {
server = mkOption {
type = types.str;
example = "chat.freenode.net";
description = ''
IRC server address.
'';
};
port = mkOption {
type = types.ints.u16;
default = 6697;
description = ''
IRC server port.
'';
};
password = mkOption {
type = types.str;
default = "";
description = ''
IRC server password, such as for a Slack gateway.
'';
};
useSSL = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use SSL to connect to the IRC server.
'';
};
modules = mkOption {
type = types.listOf types.str;
default = [ "simple_away" ];
example = literalExample "[ simple_away sasl ]";
description = ''
ZNC network modules to load.
'';
};
channels = mkOption {
type = types.listOf types.str;
default = [];
example = [ "nixos" ];
description = ''
IRC channels to join.
'';
};
hasBitlbeeControlChannel = mkOption {
type = types.bool;
default = false;
description = ''
Whether to add the special Bitlbee operations channel.
'';
};
extraConf = mkOption {
default = "";
type = types.lines;
example = ''
Encoding = ^UTF-8
FloodBurst = 4
FloodRate = 1.00
IRCConnectEnabled = true
Ident = johntron
JoinDelay = 0
Nick = johntron
'';
description = ''
Extra config for the network. Consider using
<option>services.znc.config</option> instead.
'';
};
};
};
in
{
options = {
services.znc = {
useLegacyConfig = mkOption {
default = true;
type = types.bool;
description = ''
Whether to propagate the legacy options under
<option>services.znc.confOptions.*</option> to the znc config. If this
is turned on, the znc config will contain a user with the default name
"znc", global modules "webadmin" and "adminlog" will be enabled by
default, and more, all controlled through the
<option>services.znc.confOptions.*</option> options.
You can use <command>nix-instantiate --eval --strict '&lt;nixpkgs/nixos&gt;' -A config.services.znc.config</command>
to view the current value of the config.
</para>
<para>
In any case, if you need more flexibility,
<option>services.znc.config</option> can be used to override/add to
all of the legacy options.
'';
};
confOptions = {
modules = mkOption {
type = types.listOf types.str;
default = [ "webadmin" "adminlog" ];
example = [ "partyline" "webadmin" "adminlog" "log" ];
description = ''
A list of modules to include in the `znc.conf` file.
'';
};
userModules = mkOption {
type = types.listOf types.str;
default = [ "chansaver" "controlpanel" ];
example = [ "chansaver" "controlpanel" "fish" "push" ];
description = ''
A list of user modules to include in the `znc.conf` file.
'';
};
userName = mkOption {
default = "znc";
example = "johntron";
type = types.str;
description = ''
The user name used to log in to the ZNC web admin interface.
'';
};
networks = mkOption {
default = { };
type = with types; attrsOf (submodule networkOpts);
description = ''
IRC networks to connect the user to.
'';
example = literalExample ''
{
"freenode" = {
server = "chat.freenode.net";
port = 6697;
useSSL = true;
modules = [ "simple_away" ];
};
};
'';
};
nick = mkOption {
default = "znc-user";
example = "john";
type = types.str;
description = ''
The IRC nick.
'';
};
passBlock = mkOption {
example = literalExample ''
&lt;Pass password&gt;
Method = sha256
Hash = e2ce303c7ea75c571d80d8540a8699b46535be6a085be3414947d638e48d9e93
Salt = l5Xryew4g*!oa(ECfX2o
&lt;/Pass&gt;
'';
type = types.str;
description = ''
Generate with `nix-shell -p znc --command "znc --makepass"`.
This is the password used to log in to the ZNC web admin interface.
You can also set this through
<option>services.znc.config.User.&lt;username&gt;.Pass.Method</option>
and co.
'';
};
port = mkOption {
default = 5000;
type = types.int;
description = ''
Specifies the port on which to listen.
'';
};
useSSL = mkOption {
default = true;
type = types.bool;
description = ''
Indicates whether the ZNC server should use SSL when listening on
the specified port. A self-signed certificate will be generated.
'';
};
uriPrefix = mkOption {
type = types.nullOr types.str;
default = null;
example = "/znc/";
description = ''
An optional URI prefix for the ZNC web interface. Can be
used to make ZNC available behind a reverse proxy.
'';
};
extraZncConf = mkOption {
default = "";
type = types.lines;
description = ''
Extra config to `znc.conf` file.
'';
};
};
};
};
config = mkIf cfg.useLegacyConfig {
services.znc.config = let
c = cfg.confOptions;
# defaults here should override defaults set in the non-legacy part
mkDefault = mkOverride 900;
in {
LoadModule = mkDefault c.modules;
Listener.l = {
Port = mkDefault c.port;
IPv4 = mkDefault true;
IPv6 = mkDefault true;
SSL = mkDefault c.useSSL;
};
User.${c.userName} = {
Admin = mkDefault true;
Nick = mkDefault c.nick;
AltNick = mkDefault "${c.nick}_";
Ident = mkDefault c.nick;
RealName = mkDefault c.nick;
LoadModule = mkDefault c.userModules;
Network = mapAttrs (name: net: {
LoadModule = mkDefault net.modules;
Server = mkDefault "${net.server} ${optionalString net.useSSL "+"}${toString net.port} ${net.password}";
Chan = optionalAttrs net.hasBitlbeeControlChannel { "&bitlbee" = mkDefault {}; } //
listToAttrs (map (n: nameValuePair "#${n}" (mkDefault {})) net.channels);
extraConfig = if net.extraConf == "" then mkDefault null else net.extraConf;
}) c.networks;
extraConfig = [ c.passBlock ] ++ optional (c.extraZncConf != "") c.extraZncConf;
};
};
};
imports = [
(mkRemovedOptionModule ["services" "znc" "zncConf"] ''
Instead of `services.znc.zncConf = "... foo ...";`, use
`services.znc.configFile = pkgs.writeText "znc.conf" "... foo ...";`.
'')
];
}

View File

@ -250,7 +250,7 @@ in
drivers = mkOption {
type = types.listOf types.path;
default = [];
example = literalExample "[ pkgs.gutenprint pkgs.hplip pkgs.splix ]";
example = literalExample "with pkgs; [ gutenprint hplip splix cups-googlecloudprint ]";
description = ''
CUPS drivers to use. Drivers provided by CUPS, cups-filters,
Ghostscript and Samba are added unconditionally. If this list contains

View File

@ -16,7 +16,7 @@ in
services.saslauthd = {
enable = mkEnableOption "Whether to enable the Cyrus SASL authentication daemon.";
enable = mkEnableOption "saslauthd, the Cyrus SASL authentication daemon";
package = mkOption {
default = pkgs.cyrus_sasl.bin;

View File

@ -4,6 +4,8 @@ let
cfg = config.services.kmscon;
autologinArg = lib.optionalString (cfg.autologinUser != null) "-f ${cfg.autologinUser}";
configDir = pkgs.writeTextFile { name = "kmscon-config"; destination = "/kmscon.conf"; text = cfg.extraConfig; };
in {
options = {
@ -39,6 +41,15 @@ in {
default = "";
example = "--term xterm-256color";
};
autologinUser = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Username of the account that will be automatically logged in at the console.
If unspecified, a login prompt is shown as usual.
'';
};
};
};
@ -61,7 +72,7 @@ in {
[Service]
ExecStart=
ExecStart=${pkgs.kmscon}/bin/kmscon "--vt=%I" ${cfg.extraOptions} --seats=seat0 --no-switchvt --configdir ${configDir} --login -- ${pkgs.shadow}/bin/login -p
ExecStart=${pkgs.kmscon}/bin/kmscon "--vt=%I" ${cfg.extraOptions} --seats=seat0 --no-switchvt --configdir ${configDir} --login -- ${pkgs.shadow}/bin/login -p ${autologinArg}
UtmpIdentifier=%I
TTYPath=/dev/%I
TTYReset=yes
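A sketch of the new autologinUser option with a hypothetical account name.

```nix
{
  services.kmscon = {
    enable = true;
    # Log this account in automatically on the kmscon virtual terminals.
    autologinUser = "alice";
  };
}
```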

View File

@ -34,6 +34,13 @@ in {
'';
};
package = mkOption {
type = types.package;
description = "Matomo package to use";
default = pkgs.matomo;
defaultText = "pkgs.matomo";
};
webServerUser = mkOption {
type = types.nullOr types.str;
default = null;
@ -124,7 +131,7 @@ in {
# the update part of the script can only work if the database is already up and running
requires = [ databaseService ];
after = [ databaseService ];
path = [ pkgs.matomo ];
path = [ cfg.package ];
serviceConfig = {
Type = "oneshot";
User = user;
@ -151,7 +158,7 @@ in {
# Use User-Private Group scheme to protect matomo data, but allow administration / backup via matomo group
# Copy config folder
chmod g+s "${dataDir}"
cp -r "${pkgs.matomo}/config" "${dataDir}/"
cp -r "${cfg.package}/config" "${dataDir}/"
chmod -R u+rwX,g+rwX,o-rwx "${dataDir}"
# check whether user setup has already been done
@ -164,7 +171,7 @@ in {
systemd.services.${phpExecutionUnit} = {
# stop phpfpm on package upgrade, do database upgrade via matomo_setup_update, and then restart
restartTriggers = [ pkgs.matomo ];
restartTriggers = [ cfg.package ];
# stop config.ini.php from getting written with read permission for others
serviceConfig.UMask = "0007";
};
@ -195,7 +202,7 @@ in {
"${user}.${fqdn}" = mkMerge [ cfg.nginx {
# don't allow to override the root easily, as it will almost certainly break matomo.
# disadvantage: not shown as default in docs.
root = mkForce "${pkgs.matomo}/share";
root = mkForce "${cfg.package}/share";
# define locations here instead of as the submodule option's default
# so that they can easily be extended with additional locations if required

View File

@ -114,6 +114,21 @@ in {
'';
};
poolConfig = mkOption {
type = types.lines;
default = ''
pm = dynamic
pm.max_children = 32
pm.start_servers = 2
pm.min_spare_servers = 2
pm.max_spare_servers = 4
pm.max_requests = 500
'';
description = ''
Options for nextcloud's PHP pool. See the documentation on <literal>php-fpm.conf</literal> for details on configuration directives.
'';
};
config = {
dbtype = mkOption {
type = types.enum [ "sqlite" "pgsql" "mysql" ];
@ -339,11 +354,7 @@ in {
listen.group = nginx
user = nextcloud
group = nginx
pm = dynamic
pm.max_children = 32
pm.start_servers = 2
pm.min_spare_servers = 2
pm.max_spare_servers = 4
${cfg.poolConfig}
env[NEXTCLOUD_CONFIG_DIR] = ${cfg.home}/config
env[PATH] = /run/wrappers/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:/usr/bin:/bin
${phpAdminValues}
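A sketch overriding the new poolConfig option; the pool directives are illustrative and replace the dynamic defaults listed above.

```nix
{
  services.nextcloud = {
    enable = true;
    poolConfig = ''
      pm = static
      pm.max_children = 16
      pm.max_requests = 500
    '';
  };
}
```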

View File

@ -133,7 +133,6 @@ in {
fonts.fonts = [ pkgs.dejavu_fonts pkgs.cantarell-fonts ];
services.xserver.displayManager.gdm.enable = mkDefault true;
services.xserver.displayManager.extraSessionFilePackages = [ pkgs.gnome3.gnome-session ];
services.xserver.displayManager.sessionCommands = ''

View File

@ -185,10 +185,8 @@ in
target = "X11/xkb";
};
environment.variables = {
# Enable GTK applications to load SVG icons
GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
};
# Enable GTK applications to load SVG icons
services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
fonts.fonts = with pkgs; [ noto-fonts hack-font ];
fonts.fontconfig.defaultFonts = {

View File

@ -101,10 +101,11 @@ in
];
environment.variables = {
GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
GIO_EXTRA_MODULES = [ "${pkgs.xfce.gvfs}/lib/gio/modules" ];
};
services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
services.xserver.desktopManager.session = [{
name = "xfce";
bgSupport = true;

View File

@ -0,0 +1,159 @@
{ config, lib, pkgs, ... }:
with lib;
let
dmcfg = config.services.xserver.displayManager;
ldmcfg = dmcfg.lightdm;
cfg = ldmcfg.greeters.enso;
theme = cfg.theme.package;
icons = cfg.iconTheme.package;
cursors = cfg.cursorTheme.package;
# We need a few things in the environment for the greeter to run with
# fonts/icons.
wrappedEnsoGreeter = pkgs.runCommand "lightdm-enso-os-greeter"
{ buildInputs = [ pkgs.makeWrapper ]; }
''
# This wrapper ensures that we actually get themes
makeWrapper ${pkgs.lightdm-enso-os-greeter}/bin/pantheon-greeter \
$out/greeter \
--prefix PATH : "${pkgs.glibc.bin}/bin" \
--set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
--set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
--set GTK_EXE_PREFIX "${theme}" \
--set GTK_DATA_PREFIX "${theme}" \
--set XDG_DATA_DIRS "${theme}/share:${icons}/share:${cursors}/share" \
--set XDG_CONFIG_HOME "${theme}/share"
cat - > $out/lightdm-enso-os-greeter.desktop << EOF
[Desktop Entry]
Name=LightDM Greeter
Comment=This runs the LightDM Greeter
Exec=$out/greeter
Type=Application
EOF
'';
ensoGreeterConf = pkgs.writeText "lightdm-enso-os-greeter.conf" ''
[greeter]
default-wallpaper=${ldmcfg.background}
gtk-theme=${cfg.theme.name}
icon-theme=${cfg.iconTheme.name}
cursor-theme=${cfg.cursorTheme.name}
blur=${toString cfg.blur}
brightness=${toString cfg.brightness}
${cfg.extraConfig}
'';
in {
options = {
services.xserver.displayManager.lightdm.greeters.enso = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable enso-os-greeter as the lightdm greeter
'';
};
theme = {
package = mkOption {
type = types.package;
default = pkgs.gnome3.gnome-themes-extra;
defaultText = "pkgs.gnome3.gnome-themes-extra";
description = ''
The package path that contains the theme given in the name option.
'';
};
name = mkOption {
type = types.str;
default = "Adwaita";
description = ''
Name of the theme to use for the lightdm-enso-os-greeter
'';
};
};
iconTheme = {
package = mkOption {
type = types.package;
default = pkgs.papirus-icon-theme;
defaultText = "pkgs.papirus-icon-theme";
description = ''
The package path that contains the icon theme given in the name option.
'';
};
name = mkOption {
type = types.str;
default = "ePapirus";
description = ''
Name of the icon theme to use for the lightdm-enso-os-greeter
'';
};
};
cursorTheme = {
package = mkOption {
type = types.package;
default = pkgs.capitaine-cursors;
defaultText = "pkgs.capitaine-cursors";
description = ''
The package path that contains the cursor theme given in the name option.
'';
};
name = mkOption {
type = types.str;
default = "capitane-cursors";
description = ''
Name of the cursor theme to use for the lightdm-enso-os-greeter
'';
};
};
blur = mkOption {
type = types.bool;
default = false;
description = ''
Whether or not to enable blur
'';
};
brightness = mkOption {
type = types.int;
default = 7;
description = ''
Brightness of the greeter background.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Extra configuration that should be put in the greeter.conf
configuration file
'';
};
};
};
config = mkIf (ldmcfg.enable && cfg.enable) {
environment.etc."lightdm/greeter.conf".source = ensoGreeterConf;
services.xserver.displayManager.lightdm = {
greeter = mkDefault {
package = wrappedEnsoGreeter;
name = "lightdm-enso-os-greeter";
};
greeters = {
gtk = {
enable = mkDefault false;
};
};
};
};
}
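A minimal sketch of enabling the new greeter from a system configuration, using the option names introduced above; the theme and brightness values are only examples:

  # Enable the enso-os greeter under lightdm; theme choices are illustrative.
  services.xserver.displayManager.lightdm = {
    enable = true;
    greeters.enso = {
      enable = true;
      blur = true;        # blur the wallpaper behind the login box
      brightness = 5;
      theme.name = "Adwaita";
      iconTheme.name = "ePapirus";
    };
  };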

View File

@ -80,6 +80,7 @@ in
imports = [
./lightdm-greeters/gtk.nix
./lightdm-greeters/mini.nix
./lightdm-greeters/enso-os.nix
];
options = {

View File

@ -0,0 +1,44 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.displayManager.startx;
in
{
###### interface
options = {
services.xserver.displayManager.startx = {
enable = mkOption {
default = false;
description = ''
Whether to enable the dummy "startx" pseudo-display manager,
which allows users to start X manually via the "startx" command
from a vt shell. The X server runs under the user's id, not as root.
The user must provide a ~/.xinitrc file containing session startup
commands; see startx(1). This is not automatically generated
from the desktopManager and windowManager settings.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
services.xserver = {
exportConfiguration = true;
displayManager.job.execCmd = "";
displayManager.lightdm.enable = lib.mkForce false;
};
systemd.services.display-manager.enable = false;
environment.systemPackages = with pkgs; [ xorg.xinit ];
};
}
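A minimal sketch of switching this pseudo display manager on; the user then supplies ~/.xinitrc and runs startx from a virtual terminal:

  # No graphical login manager; users start X themselves via startx(1).
  services.xserver.enable = true;
  services.xserver.displayManager.startx.enable = true;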

View File

@ -0,0 +1,45 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.gdk-pixbuf;
# Get packages to generate the cache for. We always include gdk_pixbuf.
effectivePackages = unique ([pkgs.gdk_pixbuf] ++ cfg.modulePackages);
# Generate the cache file by running gdk-pixbuf-query-loaders for each
# package and concatenating the results.
loadersCache = pkgs.runCommand "gdk-pixbuf-loaders.cache" {} ''
(
for package in ${concatStringsSep " " effectivePackages}; do
module_dir="$package/${pkgs.gdk_pixbuf.moduleDir}"
if [[ ! -d $module_dir ]]; then
echo "Warning (services.xserver.gdk-pixbuf): missing module directory $module_dir" 1>&2
continue
fi
GDK_PIXBUF_MODULEDIR="$module_dir" \
${pkgs.gdk_pixbuf.dev}/bin/gdk-pixbuf-query-loaders
done
) > "$out"
'';
in
{
options = {
services.xserver.gdk-pixbuf.modulePackages = mkOption {
type = types.listOf types.package;
default = [ ];
description = "Packages providing GDK-Pixbuf modules, for cache generation.";
};
};
# If there is any package configured in modulePackages, we generate the
# loaders.cache based on that and set the environment variable
# GDK_PIXBUF_MODULE_FILE to point to it.
config = mkIf (cfg.modulePackages != []) {
environment.variables = {
GDK_PIXBUF_MODULE_FILE = "${loadersCache}";
};
};
}
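The desktop-manager hunks earlier in this diff already use the new option; as a standalone sketch, any module or user configuration can request additional loaders and the cache plus GDK_PIXBUF_MODULE_FILE are then derived automatically:

  # Ask the gdk-pixbuf module to include the SVG loader in the generated cache.
  services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];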

View File

@ -7,19 +7,19 @@ let
commonFunctions = ''
die() {
echo "$@" >&2
exit 1
echo "$@" >&2
exit 1
}
dev_exist() {
local target="$1"
if [ -e $target ]; then
return 0
else
local uuid=$(echo -n $target | sed -e 's,UUID=\(.*\),\1,g')
local dev=$(blkid --uuid $uuid)
return $?
fi
local target="$1"
if [ -e $target ]; then
return 0
else
local uuid=$(echo -n $target | sed -e 's,UUID=\(.*\),\1,g')
blkid --uuid $uuid >/dev/null
return $?
fi
}
wait_target() {
@ -51,30 +51,30 @@ let
}
wait_yubikey() {
local secs="''${1:-10}"
local secs="''${1:-10}"
ykinfo -v 1>/dev/null 2>&1
if [ $? != 0 ]; then
echo -n "Waiting $secs seconds for Yubikey to appear..."
local success=false
for try in $(seq $secs); do
echo -n .
sleep 1
ykinfo -v 1>/dev/null 2>&1
if [ $? == 0 ]; then
success=true
break
fi
done
if [ $success == true ]; then
echo " - success";
return 0
else
echo " - failure";
return 1
fi
fi
return 0
ykinfo -v 1>/dev/null 2>&1
if [ $? != 0 ]; then
echo -n "Waiting $secs seconds for Yubikey to appear..."
local success=false
for try in $(seq $secs); do
echo -n .
sleep 1
ykinfo -v 1>/dev/null 2>&1
if [ $? == 0 ]; then
success=true
break
fi
done
if [ $success == true ]; then
echo " - success";
return 0
else
echo " - failure";
return 1
fi
fi
return 0
}
'';

View File

@ -251,9 +251,9 @@ let
postInstall = ''
echo checking syntax
# check both with bash
${pkgs.bash}/bin/sh -n $target
${pkgs.buildPackages.bash}/bin/sh -n $target
# and with ash shell, just in case
${extraUtils}/bin/ash -n $target
${pkgs.buildPackages.busybox}/bin/ash -n $target
'';
inherit udevRules extraUtils modulesClosure;

View File

@ -74,7 +74,7 @@ let
importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
poolReady() {
pool="$1"
state="$("${zpoolCmd}" import | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
if [[ "$state" = "ONLINE" ]]; then
return 0
else

View File

@ -85,7 +85,8 @@ let
after = [ "network-pre.target" "systemd-udevd.service" "systemd-sysctl.service" ];
before = [ "network.target" "shutdown.target" ];
wants = [ "network.target" ];
partOf = map (i: "network-addresses-${i.name}.service") interfaces;
# exclude bridges from the partOf relationship to fix container networking bug #47210
partOf = map (i: "network-addresses-${i.name}.service") (filter (i: !(hasAttr i.name cfg.bridges)) interfaces);
conflicts = [ "shutdown.target" ];
wantedBy = [ "multi-user.target" ] ++ optional hasDefaultGatewaySet "network-online.target";
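To illustrate the filter, assume a hypothetical machine with interfaces eth0 and br0 where br0 is declared under networking.bridges; the bridge is then excluded from the partOf list:

  # Self-contained sketch with hypothetical names; evaluates to
  # [ "network-addresses-eth0.service" ] because br0 is filtered out.
  map (i: "network-addresses-${i.name}.service")
    (builtins.filter (i: !(hasAttr i.name { br0 = {}; }))
      [ { name = "eth0"; } { name = "br0"; } ])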

View File

@ -145,8 +145,12 @@ let cfg = config.ec2; in
environment.systemPackages = [ pkgs.cryptsetup ];
boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
# EC2 has its own NTP server provided by the hypervisor
networking.timeServers = [ "169.254.169.123" ];
# udisks has become too bloated to have in a headless system
# (e.g. it depends on GTK+).
services.udisks2.enable = false;
};
}

View File

@ -22,6 +22,13 @@ with lib;
# Not supported in systemd-nspawn containers.
security.audit.enable = false;
# Make sure that root user in container will talk to host nix-daemon
environment.etc."profile".text = ''
export NIX_REMOTE=daemon
'';
};
}

View File

@ -130,6 +130,7 @@ let
--bind-ro=/nix/var/nix/daemon-socket \
--bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
--bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
--link-journal=try-guest \
--setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
--setenv HOST_BRIDGE="$HOST_BRIDGE" \
--setenv HOST_ADDRESS="$HOST_ADDRESS" \

View File

@ -257,5 +257,22 @@ let self = {
"18.03".sa-east-1.hvm-ebs = "ami-163e1f7a";
"18.03".ap-south-1.hvm-ebs = "ami-6a390b05";
latest = self."18.03";
# 18.09.910.c15e342304a
"18.09".eu-west-1.hvm-ebs = "ami-0f412186fb8a0ec97";
"18.09".eu-west-2.hvm-ebs = "ami-0dada3805ce43c55e";
"18.09".eu-west-3.hvm-ebs = "ami-074df85565f2e02e2";
"18.09".eu-central-1.hvm-ebs = "ami-07c9b884e679df4f8";
"18.09".us-east-1.hvm-ebs = "ami-009c9c3f1af480ff3";
"18.09".us-east-2.hvm-ebs = "ami-08199961085ea8bc6";
"18.09".us-west-1.hvm-ebs = "ami-07aa7f56d612ddd38";
"18.09".us-west-2.hvm-ebs = "ami-01c84b7c368ac24d1";
"18.09".ca-central-1.hvm-ebs = "ami-04f66113f76198f6c";
"18.09".ap-southeast-1.hvm-ebs = "ami-0892c7e24ebf2194f";
"18.09".ap-southeast-2.hvm-ebs = "ami-010730f36424b0a2c";
"18.09".ap-northeast-1.hvm-ebs = "ami-0cdba8e998f076547";
"18.09".ap-northeast-2.hvm-ebs = "ami-0400a698e6a9f4a15";
"18.09".sa-east-1.hvm-ebs = "ami-0e4a8a47fd6db6112";
"18.09".ap-south-1.hvm-ebs = "ami-0880a678d3f555313";
latest = self."18.09";
}; in self

View File

@ -25,7 +25,7 @@ in {
systemd.services.qemu-guest-agent = {
description = "Run the QEMU Guest Agent";
serviceConfig = {
ExecStart = "${pkgs.kvm.ga}/bin/qemu-ga";
ExecStart = "${pkgs.qemu.ga}/bin/qemu-ga";
Restart = "always";
RestartSec = 0;
};

View File

@ -12,7 +12,7 @@ in {
virtualbox = {
baseImageSize = mkOption {
type = types.int;
default = 100 * 1024;
default = 10 * 1024;
description = ''
The size of the VirtualBox base image in MiB.
'';
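For scale, the new default is an order of magnitude smaller than before:

  # 10 * 1024 MiB = 10240 MiB = 10 GiB (the old default, 100 * 1024 MiB, was 100 GiB)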

View File

@ -5,7 +5,7 @@ let
in {
options = {
services.xe-guest-utilities = {
enable = mkEnableOption "Whether to enable the Xen guest utilities daemon.";
enable = mkEnableOption "the Xen guest utilities daemon";
};
};
config = mkIf cfg.enable {
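The change above relies on mkEnableOption prepending its own "Whether to enable ... ." boilerplate, so passing a full sentence duplicated it. Roughly, the new call expands to something like the following (a sketch of the generated option, not the exact library source):

  # Approximately what mkEnableOption "the Xen guest utilities daemon" yields:
  enable = mkOption {
    type = types.bool;
    default = false;
    description = "Whether to enable the Xen guest utilities daemon.";
  };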

View File

@ -257,7 +257,7 @@ in rec {
tests.boot = callSubTests tests/boot.nix {};
tests.boot-stage1 = callTest tests/boot-stage1.nix {};
tests.borgbackup = callTest tests/borgbackup.nix {};
tests.buildbot = callTest tests/buildbot.nix {};
tests.buildbot = callSubTests tests/buildbot.nix {};
tests.cadvisor = callTestOnMatchingSystems ["x86_64-linux"] tests/cadvisor.nix {};
tests.ceph = callTestOnMatchingSystems ["x86_64-linux"] tests/ceph.nix {};
tests.certmgr = callSubTests tests/certmgr.nix {};
@ -390,12 +390,14 @@ in rec {
tests.predictable-interface-names = callSubTests tests/predictable-interface-names.nix {};
tests.printing = callTest tests/printing.nix {};
tests.prometheus = callTest tests/prometheus.nix {};
tests.prometheus-exporters = callTest tests/prometheus-exporters.nix {};
tests.prosody = callTest tests/prosody.nix {};
tests.proxy = callTest tests/proxy.nix {};
tests.quagga = callTest tests/quagga.nix {};
tests.quake3 = callTest tests/quake3.nix {};
tests.rabbitmq = callTest tests/rabbitmq.nix {};
tests.radicale = callTest tests/radicale.nix {};
tests.redmine = callTest tests/redmine.nix {};
tests.rspamd = callSubTests tests/rspamd.nix {};
tests.runInMachine = callTest tests/run-in-machine.nix {};
tests.rxe = callTest tests/rxe.nix {};

View File

@ -1,111 +1,117 @@
# Test ensures buildbot master comes up correctly and workers can connect
{ system ? builtins.currentSystem }:
import ./make-test.nix ({ pkgs, ... } : {
name = "buildbot";
with import ../lib/testing.nix { inherit system; };
nodes = {
bbmaster = { pkgs, ... }: {
services.buildbot-master = {
enable = true;
package = pkgs.buildbot-full;
let
# Test ensures buildbot master comes up correctly and workers can connect
mkBuildbotTest = python: makeTest {
name = "buildbot";
# NOTE: use fake repo due to no internet in hydra ci
factorySteps = [
"steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
"steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
];
changeSource = [
"changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
];
nodes = {
bbmaster = { pkgs, ... }: {
services.buildbot-master = {
enable = true;
package = python.pkgs.buildbot-full;
# NOTE: use fake repo due to no internet in hydra ci
factorySteps = [
"steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
"steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
];
changeSource = [
"changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
];
};
networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
environment.systemPackages = with pkgs; [ git python.pkgs.buildbot-full ];
};
networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
environment.systemPackages = with pkgs; [ git buildbot-full ];
};
bbworker = { pkgs, ... }: {
services.buildbot-worker = {
enable = true;
masterUrl = "bbmaster:9989";
bbworker = { pkgs, ... }: {
services.buildbot-worker = {
enable = true;
masterUrl = "bbmaster:9989";
};
environment.systemPackages = with pkgs; [ git python.pkgs.buildbot-worker ];
};
gitrepo = { pkgs, ... }: {
services.openssh.enable = true;
networking.firewall.allowedTCPPorts = [ 22 9418 ];
environment.systemPackages = with pkgs; [ git ];
};
environment.systemPackages = with pkgs; [ git buildbot-worker ];
};
gitrepo = { pkgs, ... }: {
services.openssh.enable = true;
networking.firewall.allowedTCPPorts = [ 22 9418 ];
environment.systemPackages = with pkgs; [ git ];
};
testScript = ''
#Start up and populate fake repo
$gitrepo->waitForUnit("multi-user.target");
print($gitrepo->execute(" \
git config --global user.name 'Nobody Fakeuser' && \
git config --global user.email 'nobody\@fakerepo.com' && \
rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo && \
mkdir -pv /srv/repos/fakerepo ~/.ssh && \
ssh-keyscan -H gitrepo > ~/.ssh/known_hosts && \
cat ~/.ssh/known_hosts && \
cd /srv/repos/fakerepo && \
git init && \
echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh && \
cat fakerepo.sh && \
touch .git/git-daemon-export-ok && \
git add fakerepo.sh .git/git-daemon-export-ok && \
git commit -m fakerepo && \
git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr & \
"));
# Test gitrepo
$bbmaster->waitForUnit("network-online.target");
#$bbmaster->execute("nc -z gitrepo 9418");
print($bbmaster->execute(" \
rm -rfv /tmp/fakerepo && \
git clone git://gitrepo/fakerepo /tmp/fakerepo && \
pwd && \
ls -la && \
ls -la /tmp/fakerepo \
"));
# Test start master and connect worker
$bbmaster->waitForUnit("buildbot-master.service");
$bbmaster->waitUntilSucceeds("curl -s --head http://bbmaster:8010") =~ /200 OK/;
$bbworker->waitForUnit("network-online.target");
$bbworker->execute("nc -z bbmaster 8010");
$bbworker->execute("nc -z bbmaster 9989");
$bbworker->waitForUnit("buildbot-worker.service");
print($bbworker->execute("ls -la /home/bbworker/worker"));
# Test stop buildbot master and worker
print($bbmaster->execute(" \
systemctl -l --no-pager status buildbot-master && \
systemctl stop buildbot-master \
"));
$bbworker->fail("nc -z bbmaster 8010");
$bbworker->fail("nc -z bbmaster 9989");
print($bbworker->execute(" \
systemctl -l --no-pager status buildbot-worker && \
systemctl stop buildbot-worker && \
ls -la /home/bbworker/worker \
"));
# Test buildbot daemon mode
$bbmaster->execute("buildbot create-master /tmp");
$bbmaster->execute("mv -fv /tmp/master.cfg.sample /tmp/master.cfg");
$bbmaster->execute("sed -i 's/8010/8011/' /tmp/master.cfg");
$bbmaster->execute("buildbot start /tmp");
$bbworker->execute("nc -z bbmaster 8011");
$bbworker->waitUntilSucceeds("curl -s --head http://bbmaster:8011") =~ /200 OK/;
$bbmaster->execute("buildbot stop /tmp");
$bbworker->fail("nc -z bbmaster 8011");
'';
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ nand0p ];
};
testScript = ''
#Start up and populate fake repo
$gitrepo->waitForUnit("multi-user.target");
print($gitrepo->execute(" \
git config --global user.name 'Nobody Fakeuser' && \
git config --global user.email 'nobody\@fakerepo.com' && \
rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo && \
mkdir -pv /srv/repos/fakerepo ~/.ssh && \
ssh-keyscan -H gitrepo > ~/.ssh/known_hosts && \
cat ~/.ssh/known_hosts && \
cd /srv/repos/fakerepo && \
git init && \
echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh && \
cat fakerepo.sh && \
touch .git/git-daemon-export-ok && \
git add fakerepo.sh .git/git-daemon-export-ok && \
git commit -m fakerepo && \
git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr & \
"));
# Test gitrepo
$bbmaster->waitForUnit("network-online.target");
#$bbmaster->execute("nc -z gitrepo 9418");
print($bbmaster->execute(" \
rm -rfv /tmp/fakerepo && \
git clone git://gitrepo/fakerepo /tmp/fakerepo && \
pwd && \
ls -la && \
ls -la /tmp/fakerepo \
"));
# Test start master and connect worker
$bbmaster->waitForUnit("buildbot-master.service");
$bbmaster->waitUntilSucceeds("curl -s --head http://bbmaster:8010") =~ /200 OK/;
$bbworker->waitForUnit("network-online.target");
$bbworker->execute("nc -z bbmaster 8010");
$bbworker->execute("nc -z bbmaster 9989");
$bbworker->waitForUnit("buildbot-worker.service");
print($bbworker->execute("ls -la /home/bbworker/worker"));
# Test stop buildbot master and worker
print($bbmaster->execute(" \
systemctl -l --no-pager status buildbot-master && \
systemctl stop buildbot-master \
"));
$bbworker->fail("nc -z bbmaster 8010");
$bbworker->fail("nc -z bbmaster 9989");
print($bbworker->execute(" \
systemctl -l --no-pager status buildbot-worker && \
systemctl stop buildbot-worker && \
ls -la /home/bbworker/worker \
"));
# Test buildbot daemon mode
# NOTE: daemon mode tests disabled due to broken PYTHONPATH child inheritance
#
#$bbmaster->execute("buildbot create-master /tmp");
#$bbmaster->execute("mv -fv /tmp/master.cfg.sample /tmp/master.cfg");
#$bbmaster->execute("sed -i 's/8010/8011/' /tmp/master.cfg");
#$bbmaster->execute("buildbot start /tmp");
#$bbworker->execute("nc -z bbmaster 8011");
#$bbworker->waitUntilSucceeds("curl -s --head http://bbmaster:8011") =~ /200 OK/;
#$bbmaster->execute("buildbot stop /tmp");
#$bbworker->fail("nc -z bbmaster 8011");
'';
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ nand0p ];
})
in {
python2 = mkBuildbotTest pkgs.python2;
python3 = mkBuildbotTest pkgs.python3;
}

View File

@ -40,8 +40,7 @@ import ./make-test.nix ({ pkgs, lib, ... }:
subtest "CodiMD sqlite", sub {
$codimdSqlite->waitForUnit("codimd.service");
$codimdSqlite->waitForOpenPort(3000);
$codimdSqlite->sleep(10); # avoid 503 during startup
$codimdSqlite->succeed("curl -sSf http://localhost:3000/new");
$codimdSqlite->waitUntilSucceeds("curl -sSf http://localhost:3000/new");
};
subtest "CodiMD postgres", sub {
@ -49,8 +48,7 @@ import ./make-test.nix ({ pkgs, lib, ... }:
$codimdPostgres->waitForUnit("codimd.service");
$codimdPostgres->waitForOpenPort(5432);
$codimdPostgres->waitForOpenPort(3000);
$codimdPostgres->sleep(10); # avoid 503 during startup
$codimdPostgres->succeed("curl -sSf http://localhost:3000/new");
$codimdPostgres->waitUntilSucceeds("curl -sSf http://localhost:3000/new");
};
'';
})

View File

@ -86,6 +86,9 @@ import ./make-test.nix ({ pkgs, ...} : {
# Execute commands via the root shell.
$machine->succeed("nixos-container run $id1 -- uname") =~ /Linux/ or die;
# Execute a nix command via the root shell. (regression test for #40355)
$machine->succeed("nixos-container run $id1 -- nix-instantiate -E 'derivation { name = \"empty\"; builder = \"false\"; system = \"false\"; }'");
# Stop and start (regression test for #4989)
$machine->succeed("nixos-container stop $id1");
$machine->succeed("nixos-container start $id1");

Some files were not shown because too many files have changed in this diff.