Merge remote-tracking branch 'upstream/master' into darwin-android-ndk-for-master

John Ericson 2018-09-17 22:48:25 -04:00
commit 226d574870
1184 changed files with 58296 additions and 25402 deletions

View File

@ -1 +1 @@
18.09
19.03

View File

@ -325,7 +325,7 @@
};
};
}
</screen>
</screen>
<para>
To install it into our environment, you can just run <literal>nix-env -iA
@ -347,7 +347,7 @@
};
};
}
</screen>
</screen>
<para>
<literal>pathsToLink</literal> tells Nixpkgs to only link the paths listed
@ -383,7 +383,7 @@
};
};
}
</screen>
</screen>
<para>
This provides us with some useful documentation for using our packages.
@ -395,15 +395,15 @@
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -421,7 +421,7 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
};
};
}
</screen>
</screen>
<para>
For this to work fully, you must also have this script sourced when you are
@ -438,7 +438,7 @@ if [ -d $HOME/.nix-profile/etc/profile.d ]; then
fi
done
fi
</screen>
</screen>
<para>
Now just run <literal>source $HOME/.profile</literal> and you can start
@ -459,16 +459,16 @@ fi
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
@ -485,17 +485,17 @@ cp ${myProfile} $out/etc/profile.d/my-profile.sh
pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" "info" ];
postBuild = ''
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
'';
};
};
}
</screen>
</screen>
<para>
<literal>postBuild</literal> tells Nixpkgs to run a command after building

View File

@ -47,9 +47,13 @@
<para>
In Nixpkgs, these three platforms are defined as attribute sets under the
names <literal>buildPlatform</literal>, <literal>hostPlatform</literal>, and
<literal>targetPlatform</literal>. They are always defined as attributes in
the standard environment. That means one can access them like:
names <literal>buildPlatform</literal>, <literal>hostPlatform</literal>,
and <literal>targetPlatform</literal>. All three are always defined as
attributes in the standard environment, and at the top level. That means
one can get at them just like a dependency in a function that is imported
with <literal>callPackage</literal>:
<programlisting>{ stdenv, buildPlatform, hostPlatform, fooDep, barDep, .. }: ...buildPlatform...</programlisting>
, or just off <varname>stdenv</varname>:
<programlisting>{ stdenv, fooDep, barDep, .. }: ...stdenv.buildPlatform...</programlisting>
.
</para>

View File

@ -1,7 +1,7 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-functions">
xml:id="chap-functions">
<title>Functions reference</title>
<para>
The nixpkgs repository has several utility functions to manipulate Nix
@ -31,12 +31,16 @@
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}</programlisting>
<programlisting>
import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};
</programlisting>
<programlisting>
mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}
</programlisting>
</para>
<para>
@ -61,9 +65,11 @@
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
<programlisting>
helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});
</programlisting>
</para>
<para>
@ -134,14 +140,16 @@
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
<programlisting>
mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});
</programlisting>
</para>
<para>
@ -181,8 +189,10 @@
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
<programlisting>
f = { a, b }: { result = a+b; };
c = lib.makeOverridable f { a = 1; b = 2; };
</programlisting>
</para>
<para>
@ -482,29 +492,29 @@ merge:"diff3"
<example xml:id='ex-dockerTools-buildImage'>
<title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
}
</programlisting>
};
}
</programlisting>
</example>
<para>
@ -647,15 +657,15 @@ merge:"diff3"
<example xml:id='ex-dockerTools-pullImage'>
<title>Docker pull</title>
<programlisting>
pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-4' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-5' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-6' />
}
</programlisting>
pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-4' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-5' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-6' />
}
</programlisting>
</example>
<calloutlist>
@ -677,9 +687,9 @@ merge:"diff3"
exactly which image you want. By default it will match the OS and
architecture of the host the command is run on.
<programlisting>
$ nix-shell --packages skopeo jq --command "skopeo --override-os linux --override-arch x86_64 inspect docker://docker.io/nixos/nix:1.11 | jq -r '.Digest'"
sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
</programlisting>
$ nix-shell --packages skopeo jq --command "skopeo --override-os linux --override-arch x86_64 inspect docker://docker.io/nixos/nix:1.11 | jq -r '.Digest'"
sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b
</programlisting>
This argument is required.
</para>
</callout>
@ -737,13 +747,13 @@ merge:"diff3"
<example xml:id='ex-dockerTools-exportImage'>
<title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
name = someLayeredImage.name;
}
</programlisting>
</example>
@ -774,19 +784,19 @@ merge:"diff3"
<example xml:id='ex-dockerTools-shadowSetup'>
<title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>

View File

@ -64,9 +64,6 @@ When the `Cargo.lock`, provided by upstream, is not in sync with the
added in `cargoPatches` will also be prepended to the patches in `patches` at
build-time.
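A minimal sketch of how such a patch could be wired in (the package name, source, hashes, and patch file below are placeholders, not taken from any real package):

```
rustPlatform.buildRustPackage rec {
  name = "myproject-1.0.0";
  src = fetchFromGitHub {
    owner = "example";
    repo = "myproject";
    rev = "v1.0.0";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };

  # Patch that adds or updates Cargo.lock so it matches upstream's sources;
  # it is also prepended to `patches` at build time.
  cargoPatches = [ ./add-Cargo.lock.patch ];

  cargoSha256 = "0000000000000000000000000000000000000000000000000000";
}
```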
To install crates with nix there is also an experimental project called
[nixcrates](https://github.com/fractalide/nixcrates).
## Compiling Rust crates using Nix instead of Cargo
### Simple operation

View File

@ -5,11 +5,16 @@ date: 2016-06-25
---
# User's Guide to Vim Plugins/Addons/Bundles/Scripts in Nixpkgs
You'll get a vim(-your-suffix) in PATH also loading the plugins you want.
Both Neovim and Vim can be configured to include your favorite plugins
and additional libraries.
Loading can be deferred; see examples.
Vim packages, VAM (=vim-addon-manager) and Pathogen are supported to load
packages.
At the moment we support three different methods for managing plugins:
- Vim packages (*recommended*)
- VAM (=vim-addon-manager)
- Pathogen
## Custom configuration
@ -25,7 +30,19 @@ vim_configurable.customize {
}
```
## Vim packages
For Neovim the `configure` argument can be overridden to achieve the same:
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
};
}
```
## Managing plugins with Vim packages
To store your plugins in Vim packages, the following example can be used:
@ -38,13 +55,50 @@ vim_configurable.customize {
opt = [ phpCompletion elm-vim ];
# To automatically load a plugin when opening a filetype, add vimrc lines like:
# autocmd FileType php :packadd phpCompletion
}
};
};
}
```
## VAM
For Neovim the syntax is
### dependencies by Vim plugins
```
neovim.override {
configure = {
customRC = ''
# here your custom configuration goes!
'';
packages.myVimPackage = with pkgs.vimPlugins; {
# see examples below how to use custom packages
start = [ ];
opt = [ ];
};
};
}
```
The resulting package can be added to `packageOverrides` in `~/.nixpkgs/config.nix` to make it installable:
```
{
packageOverrides = pkgs: with pkgs; {
myVim = vim_configurable.customize {
name = "vim-with-plugins";
# add here code from the example section
};
myNeovim = neovim.override {
configure = {
# add here code from the example section
};
};
};
}
```
After that you can install your special grafted `myVim` or `myNeovim` packages.
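For example, `nix-env -iA nixpkgs.myVim` should then install the customized Vim; the exact attribute path depends on the name of your channel.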
## Managing plugins with VAM
### Handling dependencies of Vim plugins
VAM introduced .json files that support dependencies without versioning,
assuming that "using the latest version" is OK most of the time.
@ -125,6 +179,18 @@ Sample output2:
]
## Adding new plugins to nixpkgs
In `pkgs/misc/vim-plugins/vim-plugin-names` we store the names of all vim
plugins for which we automatically generate packages.
The format of this file is `github username/github repository`:
for example, https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
After adding your plugin to this file, run `./update.py` in the same folder.
This will update a file called `generated.nix` and make your plugin accessible in the
`vimPlugins` attribute set (`vimPlugins.nerdtree` in our example).
If the plugin's build process requires additional steps, add an
override to `pkgs/misc/vim-plugins/default.nix` in the same directory.
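A hypothetical override might look like this; the plugin name, the `super` reference, and the added input are illustrative only and may not match the actual layout of `default.nix`:

```
# Hypothetical sketch: add an extra build tool to a generated plugin.
YouCompleteMe = super.YouCompleteMe.overrideAttrs (old: {
  buildInputs = (old.buildInputs or []) ++ [ cmake ];
});
```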
## Important repositories
- [vim-pi](https://bitbucket.org/vimcommunity/vim-pi) is a plugin repository

View File

@ -671,6 +671,8 @@ overrides = super: self: rec {
plugins = with availablePlugins; [ python perl ];
}
}</programlisting>
If the <literal>configure</literal> function returns an attrset without the <literal>plugins</literal>
attribute, <literal>availablePlugins</literal> will be used automatically.
</para>
<para>
@ -704,6 +706,55 @@ overrides = super: self: rec {
}; }
</programlisting>
</para>
<para>
WeeChat allows setting defaults on startup using the <literal>--run-command</literal> argument.
The <literal>configure</literal> method can be used to pass commands to the program:
<programlisting>weechat.override {
configure = { availablePlugins, ... }: {
init = ''
/set foo bar
/server add freenode chat.freenode.org
'';
};
}</programlisting>
Further values can be added to the list of commands when running
<literal>weechat --run-command "your-commands"</literal>.
</para>
<para>
Additionally it's possible to specify scripts to be loaded when starting <literal>weechat</literal>.
These will be loaded before the commands from <literal>init</literal>:
<programlisting>weechat.override {
configure = { availablePlugins, ... }: {
scripts = with pkgs.weechatScripts; [
weechat-xmpp weechat-matrix-bridge wee-slack
];
init = ''
/set plugins.var.python.jabber.key "val"
'';
};
}</programlisting>
</para>
<para>
In <literal>nixpkgs</literal> there's a subpackage which contains derivations for
WeeChat scripts. Such derivations expect a <literal>passthru.scripts</literal> attribute
which contains a list of all scripts inside the store path. Furthermore, all scripts
have to live in <literal>$out/share</literal>. An example derivation looks like this:
<programlisting>{ stdenv, fetchurl }:
stdenv.mkDerivation {
name = "exemplary-weechat-script";
src = fetchurl {
url = "https://scripts.tld/your-scripts.tar.gz";
sha256 = "...";
};
passthru.scripts = [ "foo.py" "bar.lua" ];
installPhase = ''
mkdir $out/share
cp foo.py $out/share
cp bar.lua $out/share
'';
}</programlisting>
</para>
</section>
<section xml:id="sec-citrix">
<title>Citrix Receiver</title>

lib/asserts.nix Normal file
View File

@ -0,0 +1,44 @@
{ lib }:
rec {
/* Print a trace message if pred is false.
Intended to be used to augment asserts with helpful error messages.
Example:
assertMsg false "nope"
=> false
stderr> trace: nope
assert (assertMsg ("foo" == "bar") "foo is not bar, silly"); ""
stderr> trace: foo is not bar, silly
stderr> assert failed at
Type:
assertMsg :: Bool -> String -> Bool
*/
# TODO(Profpatsch): add tests that check stderr
assertMsg = pred: msg:
if pred
then true
else builtins.trace msg false;
/* Specialized `assertMsg` for checking if val is one of the elements
of a list. Useful for checking enums.
Example:
let sslLibrary = "libressl"
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
=> false
stderr> trace: sslLibrary must be one of "openssl", "bearssl", but is: "libressl"
Type:
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
*/
assertOneOf = name: val: xs: assertMsg
(lib.elem val xs)
"${name} must be one of ${
lib.generators.toPretty {} xs}, but is: ${
lib.generators.toPretty {} val}";
}

View File

@ -38,10 +38,11 @@ let
systems = callLibs ./systems;
# misc
asserts = callLibs ./asserts.nix;
debug = callLibs ./debug.nix;
generators = callLibs ./generators.nix;
misc = callLibs ./deprecated.nix;
# domain-specific
fetchers = callLibs ./fetchers.nix;
@ -60,7 +61,6 @@ let
boolToString mergeAttrs flip mapNullable inNixShell min max
importJSON warn info nixpkgsVersion version mod compare
splitByAndCompare functionArgs setFunctionArgs isFunction;
inherit (fixedPoints) fix fix' extends composeExtensions
makeExtensible makeExtensibleWithCustomName;
inherit (attrsets) attrByPath hasAttrByPath setAttrByPath
@ -117,6 +117,8 @@ let
unknownModule mkOption;
inherit (types) isType setType defaultTypeMerge defaultFunctor
isOptionType mkOptionType;
inherit (asserts)
assertMsg assertOneOf;
inherit (debug) addErrorContextToAttrs traceIf traceVal traceValFn
traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal

View File

@ -355,6 +355,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Independent JPEG Group License";
};
imagemagick = spdx {
fullName = "ImageMagick License";
spdxId = "imagemagick";
};
inria-compcert = {
fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler";
url = "http://compcert.inria.fr/doc/LICENSE";
@ -546,6 +551,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Public Domain";
};
purdueBsd = {
fullName = " Purdue BSD-Style License"; # also know as lsof license
url = https://enterprise.dejacode.com/licenses/public/purdue-bsd;
};
qpl = spdx {
spdxId = "QPL-1.0";
fullName = "Q Public License 1.0";

View File

@ -509,7 +509,8 @@ rec {
=> 3
*/
last = list:
assert list != []; elemAt list (length list - 1);
assert lib.assertMsg (list != []) "lists.last: list must not be empty!";
elemAt list (length list - 1);
/* Return all elements but the last
@ -517,7 +518,9 @@ rec {
init [ 1 2 3 ]
=> [ 1 2 ]
*/
init = list: assert list != []; take (length list - 1) list;
init = list:
assert lib.assertMsg (list != []) "lists.init: list must not be empty!";
take (length list - 1) list;
/* return the image of the cross product of some lists by a function

View File

@ -410,7 +410,7 @@ rec {
components = splitString "/" url;
filename = lib.last components;
name = builtins.head (splitString sep filename);
in assert name != filename; name;
in assert name != filename; name;
/* Create an --{enable,disable}-<feat> string that can be passed to
standard GNU Autoconf scripts.
@ -468,7 +468,10 @@ rec {
strw = lib.stringLength str;
reqWidth = width - (lib.stringLength filler);
in
assert strw <= width;
assert lib.assertMsg (strw <= width)
"fixedWidthString: requested string length (${
toString width}) must not be shorter than actual length (${
toString strw})";
if strw == width then str else filler + fixedWidthString reqWidth filler str;
/* Format a number adding leading zeroes up to fixed width.
@ -501,7 +504,7 @@ rec {
isStorePath = x:
isCoercibleToString x
&& builtins.substring 0 1 (toString x) == "/"
&& dirOf (builtins.toPath x) == builtins.storeDir;
&& dirOf x == builtins.storeDir;
/* Convert string to int
Obviously, it is a bit hacky to use fromJSON that way.
@ -537,11 +540,10 @@ rec {
*/
readPathsFromFile = rootPath: file:
let
root = toString rootPath;
lines = lib.splitString "\n" (builtins.readFile file);
removeComments = lib.filter (line: line != "" && !(lib.hasPrefix "#" line));
relativePaths = removeComments lines;
absolutePaths = builtins.map (path: builtins.toPath (root + "/" + path)) relativePaths;
absolutePaths = builtins.map (path: rootPath + "/${path}") relativePaths;
in
absolutePaths;

lib/tests/check-eval.nix Normal file
View File

@ -0,0 +1,7 @@
# Throws an error if any of our lib tests fail.
let tests = [ "misc" "systems" ];
all = builtins.concatLists (map (f: import (./. + "/${f}.nix")) tests);
in if all == []
then null
else throw (builtins.toJSON all)

View File

@ -112,7 +112,7 @@ runTests {
storePathAppendix = isStorePath
"${goodPath}/bin/python";
nonAbsolute = isStorePath (concatStrings (tail (stringToCharacters goodPath)));
asPath = isStorePath (builtins.toPath goodPath);
asPath = isStorePath goodPath;
otherPath = isStorePath "/something/else";
otherVals = {
attrset = isStorePath {};
@ -357,7 +357,7 @@ runTests {
int = 42;
bool = true;
string = ''fno"rd'';
path = /. + "/foo"; # toPath returns a string
path = /. + "/foo";
null_ = null;
function = x: x;
functionArgs = { arg ? 4, foo }: arg;

View File

@ -36,18 +36,18 @@ rec {
/* bitwise and */
bitAnd = builtins.bitAnd
or import ./zip-int-bits.nix
(a: b: if a==1 && b==1 then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a==1 && b==1 then 1 else 0));
/* bitwise or */
bitOr = builtins.bitOr
or import ./zip-int-bits.nix
(a: b: if a==1 || b==1 then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a==1 || b==1 then 1 else 0));
/* bitwise xor */
bitXor = builtins.bitXor
or import ./zip-int-bits.nix
(a: b: if a!=b then 1 else 0);
or (import ./zip-int-bits.nix
(a: b: if a!=b then 1 else 0));
/* bitwise not */
bitNot = builtins.sub (-1);
@ -171,7 +171,7 @@ rec {
builtins.fromJSON (builtins.readFile path);
## Warnings and asserts
## Warnings
/* See https://github.com/NixOS/nix/issues/749. Eventually we'd like these
to expand to Nix builtins that carry metadata so that Nix can filter out

View File

@ -119,7 +119,9 @@ rec {
let
betweenDesc = lowest: highest:
"${toString lowest} and ${toString highest} (both inclusive)";
between = lowest: highest: assert lowest <= highest;
between = lowest: highest:
assert lib.assertMsg (lowest <= highest)
"ints.between: lowest must be smaller than highest";
addCheck int (x: x >= lowest && x <= highest) // {
name = "intBetween";
description = "integer between ${betweenDesc lowest highest}";
@ -439,7 +441,9 @@ rec {
# Either value of type `finalType` or `coercedType`, the latter is
# converted to `finalType` using `coerceFunc`.
coercedTo = coercedType: coerceFunc: finalType:
assert coercedType.getSubModules == null;
assert lib.assertMsg (coercedType.getSubModules == null)
"coercedTo: coercedType must not have submodules (its a ${
coercedType.description})";
mkOptionType rec {
name = "coercedTo";
description = "${finalType.description} or ${coercedType.description} convertible to it";

View File

@ -227,7 +227,7 @@
name = "Andrew Morsillo";
};
AndersonTorres = {
email = "torres.anderson.85@gmail.com";
email = "torres.anderson.85@protonmail.com";
github = "AndersonTorres";
name = "Anderson Torres";
};
@ -1847,6 +1847,11 @@
github = "jerith666";
name = "Matt McHenry";
};
jethro = {
email = "jethrokuan95@gmail.com";
github = "jethrokuan";
name = "Jethro Kuan";
};
jfb = {
email = "james@yamtime.com";
github = "tftio";
@ -3396,6 +3401,11 @@
github = "relrod";
name = "Ricky Elrod";
};
renatoGarcia = {
email = "fgarcia.renato@gmail.com";
github = "renatoGarcia";
name = "Renato Garcia";
};
renzo = {
email = "renzocarbonara@gmail.com";
github = "k0001";
@ -3888,6 +3898,11 @@
github = "StillerHarpo";
name = "Florian Engel";
};
stites = {
email = "sam@stites.io";
github = "stites";
name = "Sam Stites";
};
stumoss = {
email = "samoss@gmail.com";
github = "stumoss";
@ -4153,6 +4168,11 @@
github = "tomsmeets";
name = "Tom Smeets";
};
toonn = {
email = "nnoot@toonn.io";
github = "toonn";
name = "Toon Nolten";
};
travisbhartwell = {
email = "nafai@travishartwell.net";
github = "travisbhartwell";
@ -4508,6 +4528,11 @@
github = "y0no";
name = "Yoann Ono";
};
yarny = {
email = "41838844+Yarny0@users.noreply.github.com";
github = "Yarny0";
name = "Yarny";
};
yarr = {
email = "savraz@gmail.com";
github = "Eternity-Yarr";

View File

@ -90,7 +90,9 @@ let
fi
${buildPackages.libxslt.bin}/bin/xsltproc \
--stringparam revision '${revision}' \
-o $out ${./options-to-docbook.xsl} $optionsXML
-o intermediate.xml ${./options-to-docbook.xsl} $optionsXML
${buildPackages.libxslt.bin}/bin/xsltproc \
-o "$out" ${./postprocess-option-descriptions.xsl} intermediate.xml
'';
sources = lib.sourceFilesBySuffices ./. [".xml"];

View File

@ -19,6 +19,7 @@ starting VDE switch for network 1
&gt; startAll
&gt; testScript
&gt; $machine->succeed("touch /tmp/foo")
&gt; print($machine->succeed("pwd"), "\n") # Show stdout of command
</screen>
The function <command>testScript</command> executes the entire test script
and drops you back into the test driver command line upon its completion.
@ -33,8 +34,11 @@ $ nix-build nixos/tests/login.nix -A driver
$ ./result/bin/nixos-run-vms
</screen>
The script <command>nixos-run-vms</command> starts the virtual machines
defined by test. The root file system of the VMs is created on the fly and
kept across VM restarts in
<filename>./</filename><varname>hostname</varname><filename>.qcow2</filename>.
defined by the test.
</para>
<para>
The machine state is kept across VM restarts in
<filename>/tmp/vm-state-</filename><varname>machinename</varname>.
</para>
</section>

View File

@ -108,7 +108,7 @@ xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualis
<programlisting>
$machine->start;
$machine->waitForUnit("default.target");
$machine->succeed("uname") =~ /Linux/;
die unless $machine->succeed("uname") =~ /Linux/;
</programlisting>
The first line is actually unnecessary; machines are implicitly started when
you first execute an action on them (such as <literal>waitForUnit</literal>

View File

@ -52,10 +52,13 @@
</listitem>
</itemizedlist>
To see what channels are available, go to
<link
xlink:href="https://nixos.org/channels"/>. (Note that the URIs of the
<link xlink:href="https://nixos.org/channels"/>. (Note that the URIs of the
various channels redirect to a directory that contains the channel's latest
version and includes ISO images and VirtualBox appliances.)
version and includes ISO images and VirtualBox appliances.) Please note that
during the release process, channels that are not yet released will be
present here as well. See the Getting NixOS page
<link xlink:href="https://nixos.org/nixos/download.html"/> to find the newest
supported stable release.
</para>
<para>
When you first install NixOS, you're automatically subscribed to the NixOS

View File

@ -4,6 +4,7 @@
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:str="http://exslt.org/strings"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:nixos="tag:nixos.org"
xmlns="http://docbook.org/ns/docbook"
extension-element-prefixes="str"
>
@ -30,10 +31,12 @@
<listitem>
<para>
<xsl:value-of disable-output-escaping="yes"
select="attr[@name = 'description']/string/@value" />
</para>
<nixos:option-description>
<para>
<xsl:value-of disable-output-escaping="yes"
select="attr[@name = 'description']/string/@value" />
</para>
</nixos:option-description>
<xsl:if test="attr[@name = 'type']">
<para>

View File

@ -0,0 +1,115 @@
<?xml version="1.0"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:str="http://exslt.org/strings"
xmlns:exsl="http://exslt.org/common"
xmlns:db="http://docbook.org/ns/docbook"
xmlns:nixos="tag:nixos.org"
extension-element-prefixes="str exsl">
<xsl:output method='xml' encoding="UTF-8" />
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()" />
</xsl:copy>
</xsl:template>
<xsl:template name="break-up-description">
<xsl:param name="input" />
<xsl:param name="buffer" />
<!-- Every time we have two newlines following each other, we want to
break it into </para><para>. -->
<xsl:variable name="parbreak" select="'&#xa;&#xa;'" />
<!-- Similar to "(head:tail) = input" in Haskell. -->
<xsl:variable name="head" select="$input[1]" />
<xsl:variable name="tail" select="$input[position() &gt; 1]" />
<xsl:choose>
<xsl:when test="$head/self::text() and contains($head, $parbreak)">
<!-- If the haystack provided to str:split() directly starts or
ends with $parbreak, it doesn't generate a <token/> for that,
so we are doing this here. -->
<xsl:variable name="splitted-raw">
<xsl:if test="starts-with($head, $parbreak)"><token /></xsl:if>
<xsl:for-each select="str:split($head, $parbreak)">
<token><xsl:value-of select="node()" /></token>
</xsl:for-each>
<!-- Something like ends-with($head, $parbreak), but there is
no ends-with() in XSLT, so we need to use substring(). -->
<xsl:if test="
substring($head, string-length($head) -
string-length($parbreak) + 1) = $parbreak
"><token /></xsl:if>
</xsl:variable>
<xsl:variable name="splitted"
select="exsl:node-set($splitted-raw)/token" />
<!-- The buffer we had so far didn't contain any text nodes that
contain a $parbreak, so we can put the buffer along with the
first token of $splitted into a para element. -->
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="exsl:node-set($buffer)" />
<xsl:apply-templates select="$splitted[1]/node()" />
</para>
<!-- We have already emitted the first splitted result, so the
last result is going to be set as the new $buffer later
because its contents may not be directly followed up by a
$parbreak. -->
<xsl:for-each select="$splitted[position() &gt; 1
and position() &lt; last()]">
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="node()" />
</para>
</xsl:for-each>
<xsl:call-template name="break-up-description">
<xsl:with-param name="input" select="$tail" />
<xsl:with-param name="buffer" select="$splitted[last()]/node()" />
</xsl:call-template>
</xsl:when>
<!-- Either non-text node or one without $parbreak, which we just
want to buffer and continue recursing. -->
<xsl:when test="$input">
<xsl:call-template name="break-up-description">
<xsl:with-param name="input" select="$tail" />
<!-- This essentially appends $head to $buffer. -->
<xsl:with-param name="buffer">
<xsl:if test="$buffer">
<xsl:for-each select="exsl:node-set($buffer)">
<xsl:apply-templates select="." />
</xsl:for-each>
</xsl:if>
<xsl:apply-templates select="$head" />
</xsl:with-param>
</xsl:call-template>
</xsl:when>
<!-- No more $input, just put the remaining $buffer in a para. -->
<xsl:otherwise>
<para xmlns="http://docbook.org/ns/docbook">
<xsl:apply-templates select="exsl:node-set($buffer)" />
</para>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="nixos:option-description">
<xsl:choose>
<!--
Only process nodes that are comprised of a single <para/> element,
because if that's not the case the description already contains
</para><para> in between and we need no further processing.
-->
<xsl:when test="count(db:para) > 1">
<xsl:apply-templates select="node()" />
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="break-up-description">
<xsl:with-param name="input"
select="exsl:node-set(db:para/node())" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>

View File

@ -8,6 +8,7 @@
This section lists the release notes for each stable version of NixOS and
current unstable revision.
</para>
<xi:include href="rl-1903.xml" />
<xi:include href="rl-1809.xml" />
<xi:include href="rl-1803.xml" />
<xi:include href="rl-1709.xml" />

View File

@ -451,6 +451,14 @@ inherit (pkgs.nixos {
deprecated. Use <literal>networking.networkmanager.dns</literal> instead.
</para>
</listitem>
<listitem>
<para>
The Kubernetes package has been bumped to major version 1.11.
Please consult the
<link xlink:href="https://github.com/kubernetes/kubernetes/blob/release-1.11/CHANGELOG-1.11.md">release notes</link>
for details on new features and API changes.
</para>
</listitem>
<listitem>
<para>
The option
@ -536,6 +544,21 @@ inherit (pkgs.nixos {
to <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
NixOS option descriptions are now automatically broken up into individual
paragraphs if the text contains two consecutive newlines, so it's no
longer necessary to use <code>&lt;/para&gt;&lt;para&gt;</code> to start
a new paragraph.
</para>
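<para>
For example, a description written like the following sketch (not an actual
option) is now rendered as two paragraphs:
<programlisting>
description = ''
  First paragraph of the description.

  Second paragraph, separated by a blank line.
'';
</programlisting>
</para>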
</listitem>
<listitem>
<para>
Top-level <literal>buildPlatform</literal>, <literal>hostPlatform</literal>, and <literal>targetPlatform</literal> in Nixpkgs are deprecated.
Please use their equivalents in <literal>stdenv</literal> instead:
<literal>stdenv.buildPlatform</literal>, <literal>stdenv.hostPlatform</literal>, and <literal>stdenv.targetPlatform</literal>.
</para>
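<para>
A sketch of the corresponding change in a package expression (the extra
dependency name is illustrative):
<programlisting>
# before (deprecated)
{ stdenv, hostPlatform, fooDep }: ... hostPlatform.system ...
# after
{ stdenv, fooDep }: ... stdenv.hostPlatform.system ...
</programlisting>
</para>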
</listitem>
</itemizedlist>
</section>
</section>

View File

@ -0,0 +1,58 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03">
<title>Release 19.03 (“Koi”, 2019/03/??)</title>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-highlights">
<title>Highlights</title>
<para>
In addition to numerous new and upgraded packages, this release has the
following highlights:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-new-services">
<title>New Services</title>
<para>
The following new services were added since the last release:
</para>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-19.03-notable-changes">
<title>Other Notable Changes</title>
<itemizedlist>
<listitem>
<para />
</listitem>
</itemizedlist>
</section>
</section>

View File

@ -28,7 +28,7 @@
let extraArgs_ = extraArgs; pkgs_ = pkgs;
extraModules = let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import (builtins.toPath e))];
in if e == "" then [] else [(import e)];
in
let

View File

@ -163,15 +163,24 @@ in
/bin/sh
'';
# For resetting environment with `. /etc/set-environment` when needed
# and discoverability (see motivation of #30418).
environment.etc."set-environment".source = config.system.build.setEnvironment;
system.build.setEnvironment = pkgs.writeText "set-environment"
''
${exportedEnvVars}
''
# DO NOT EDIT -- this file has been generated automatically.
${cfg.extraInit}
# Prevent this file from being sourced by child shells.
export __NIXOS_SET_ENVIRONMENT_DONE=1
# ~/bin if it exists overrides other bin directories.
export PATH="$HOME/bin:$PATH"
'';
${exportedEnvVars}
${cfg.extraInit}
# ~/bin if it exists overrides other bin directories.
export PATH="$HOME/bin:$PATH"
'';
system.activationScripts.binsh = stringAfter [ "stdio" ]
''

View File

@ -23,12 +23,12 @@ with lib;
];
environment.extraSetup = ''
if [ -w $out/share/mime ]; then
XDG_DATA_DIRS=$out/share ${pkgs.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
if [ -w $out/share/mime ] && [ -d $out/share/mime/packages ]; then
XDG_DATA_DIRS=$out/share ${pkgs.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
fi
if [ -w $out/share/applications ]; then
${pkgs.desktop-file-utils}/bin/update-desktop-database $out/share/applications
${pkgs.desktop-file-utils}/bin/update-desktop-database $out/share/applications
fi
'';
};

View File

@ -1,6 +1,6 @@
{
x86_64-linux = "/nix/store/0d60i73mcv8z1m8d2m74yfn84980gfsa-nix-2.0.4";
i686-linux = "/nix/store/6ssafj2s5a2g9x28yld7b70vwd6vw6lb-nix-2.0.4";
aarch64-linux = "/nix/store/3wwch7bp7n7xsl8apgy2a4b16yzyij1z-nix-2.0.4";
x86_64-darwin = "/nix/store/771l8i0mz4c8kry8cz3sz8rr3alalckg-nix-2.0.4";
x86_64-linux = "/nix/store/h180y3n5k1ypxgm1pcvj243qix5j45zz-nix-2.1.1";
i686-linux = "/nix/store/v2y4k4v9ml07jmfq739wyflapg3b7b5k-nix-2.1.1";
aarch64-linux = "/nix/store/v485craglq7xm5996ci8qy5dyc17dab0-nix-2.1.1";
x86_64-darwin = "/nix/store/lc3ymlix73kaad5srjdgaxp9ngr1sg6g-nix-2.1.1";
}

View File

@ -53,7 +53,7 @@
tomcat = 16;
#audio = 17; # unused
#floppy = 18; # unused
#uucp = 19; # unused
uucp = 19;
#lp = 20; # unused
#proc = 21; # unused
pulseaudio = 22; # must match `pulseaudio' GID
@ -329,6 +329,7 @@
# kvm = 302; # unused
# render = 303; # unused
zeronet = 304;
lirc = 305;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -618,6 +619,7 @@
kvm = 302; # default udev rules from systemd requires these
render = 303; # default udev rules from systemd requires these
zeronet = 304;
lirc = 305;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

View File

@ -84,7 +84,7 @@ in
versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId));
# Note: the first letter is bumped on every release. It's an animal.
codeName = "Jellyfish";
codeName = "Koi";
};
# Generate /etc/os-release. See

View File

@ -245,6 +245,7 @@
./services/desktops/gnome3/gnome-user-share.nix
./services/desktops/gnome3/gpaste.nix
./services/desktops/gnome3/gvfs.nix
./services/desktops/gnome3/rygel.nix
./services/desktops/gnome3/seahorse.nix
./services/desktops/gnome3/sushi.nix
./services/desktops/gnome3/tracker.nix
@ -271,6 +272,7 @@
./services/hardware/interception-tools.nix
./services/hardware/irqbalance.nix
./services/hardware/lcd.nix
./services/hardware/lirc.nix
./services/hardware/nvidia-optimus.nix
./services/hardware/pcscd.nix
./services/hardware/pommed.nix
@ -406,6 +408,7 @@
./services/misc/taskserver
./services/misc/tzupdate.nix
./services/misc/uhub.nix
./services/misc/weechat.nix
./services/misc/xmr-stak.nix
./services/misc/zookeeper.nix
./services/monitoring/apcupsd.nix
@ -494,6 +497,7 @@
./services/networking/dnsdist.nix
./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix
./services/networking/epmd.nix
./services/networking/fakeroute.nix
./services/networking/ferm.nix
./services/networking/firefox/sync-server.nix
@ -515,9 +519,11 @@
./services/networking/heyefi.nix
./services/networking/hostapd.nix
./services/networking/htpdate.nix
./services/networking/hylafax/default.nix
./services/networking/i2pd.nix
./services/networking/i2p.nix
./services/networking/iodine.nix
./services/networking/iperf3.nix
./services/networking/ircd-hybrid/default.nix
./services/networking/iwd.nix
./services/networking/keepalived/default.nix
@ -552,6 +558,7 @@
./services/networking/nsd.nix
./services/networking/ntopng.nix
./services/networking/ntpd.nix
./services/networking/nullidentdmod.nix
./services/networking/nylon.nix
./services/networking/ocserv.nix
./services/networking/oidentd.nix
@ -676,6 +683,7 @@
./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix
./services/web-apps/codimd.nix
./services/web-apps/frab.nix
./services/web-apps/mattermost.nix
./services/web-apps/nexus.nix

View File

@ -126,7 +126,9 @@ in
programs.bash = {
shellInit = ''
${config.system.build.setEnvironment.text}
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
. ${config.system.build.setEnvironment}
fi
${cfge.shellInit}
'';
@ -166,11 +168,11 @@ in
# Read system-wide modifications.
if test -f /etc/profile.local; then
. /etc/profile.local
. /etc/profile.local
fi
if [ -n "''${BASH_VERSION:-}" ]; then
. /etc/bashrc
. /etc/bashrc
fi
'';
@ -191,12 +193,12 @@ in
# We are not always an interactive shell.
if [ -n "$PS1" ]; then
${cfg.interactiveShellInit}
${cfg.interactiveShellInit}
fi
# Read system-wide modifications.
if test -f /etc/bashrc.local; then
. /etc/bashrc.local
. /etc/bashrc.local
fi
'';

View File

@ -32,6 +32,8 @@ in
environment.etc = optionals (cfg.profiles != {})
(mapAttrsToList mkDconfProfile cfg.profiles);
services.dbus.packages = [ pkgs.gnome3.dconf ];
environment.variables.GIO_EXTRA_MODULES = optional cfg.enable
"${pkgs.gnome3.dconf.lib}/lib/gio/modules";
# https://github.com/NixOS/nixpkgs/pull/31891

View File

@ -109,7 +109,9 @@ in
set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $__fish_datadir/functions
# source the NixOS environment config
fenv source ${config.system.build.setEnvironment}
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
fenv source ${config.system.build.setEnvironment}
end
# clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish
set -e fish_function_path
@ -150,7 +152,6 @@ in
and begin
${fishAliases}
set fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions $fish_function_path
fenv source /etc/fish/foreign-env/interactiveShellInit > /dev/null
set -e fish_function_path[1]

View File

@ -70,7 +70,7 @@ in
promptInit = mkOption {
default = ''
if [ "$TERM" != dumb ]; then
autoload -U promptinit && promptinit && prompt walters
autoload -U promptinit && promptinit && prompt walters
fi
'';
description = ''
@ -116,7 +116,9 @@ in
if [ -n "$__ETC_ZSHENV_SOURCED" ]; then return; fi
export __ETC_ZSHENV_SOURCED=1
${config.system.build.setEnvironment.text}
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
. ${config.system.build.setEnvironment}
fi
${cfge.shellInit}
@ -124,7 +126,7 @@ in
# Read system-wide modifications.
if test -f /etc/zshenv.local; then
. /etc/zshenv.local
. /etc/zshenv.local
fi
'';
@ -143,7 +145,7 @@ in
# Read system-wide modifications.
if test -f /etc/zprofile.local; then
. /etc/zprofile.local
. /etc/zprofile.local
fi
'';
@ -169,7 +171,7 @@ in
# Tell zsh how to find installed completions
for p in ''${(z)NIX_PROFILES}; do
fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions)
fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions)
done
${optionalString cfg.enableGlobalCompInit "autoload -U compinit && compinit"}
@ -184,7 +186,7 @@ in
# Read system-wide modifications.
if test -f /etc/zshrc.local; then
. /etc/zshrc.local
. /etc/zshrc.local
fi
'';

View File

@ -302,15 +302,15 @@ in
workdir="$(mktemp -d)"
# Create CA
openssl genrsa -des3 -passout pass:x -out $workdir/ca.pass.key 2048
openssl rsa -passin pass:x -in $workdir/ca.pass.key -out $workdir/ca.key
openssl genrsa -des3 -passout pass:xxxx -out $workdir/ca.pass.key 2048
openssl rsa -passin pass:xxxx -in $workdir/ca.pass.key -out $workdir/ca.key
openssl req -new -key $workdir/ca.key -out $workdir/ca.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=Security Department/CN=example.com"
openssl x509 -req -days 1 -in $workdir/ca.csr -signkey $workdir/ca.key -out $workdir/ca.crt
# Create key
openssl genrsa -des3 -passout pass:x -out $workdir/server.pass.key 2048
openssl rsa -passin pass:x -in $workdir/server.pass.key -out $workdir/server.key
openssl genrsa -des3 -passout pass:xxxx -out $workdir/server.pass.key 2048
openssl rsa -passin pass:xxxx -in $workdir/server.pass.key -out $workdir/server.key
openssl req -new -key $workdir/server.key -out $workdir/server.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
openssl x509 -req -days 1 -in $workdir/server.csr -CA $workdir/ca.crt \

View File

@ -8,6 +8,7 @@ let
# configuration file can be generated by http://slurm.schedmd.com/configurator.html
configFile = pkgs.writeTextDir "slurm.conf"
''
ClusterName=${cfg.clusterName}
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
@ -105,6 +106,15 @@ in
'';
};
clusterName = mkOption {
type = types.str;
default = "default";
example = "myCluster";
description = ''
Necessary to distinguish accounting records in a multi-cluster environment.
'';
};
nodeName = mkOption {
type = types.nullOr types.str;
default = null;

View File

@ -0,0 +1,30 @@
# rygel service.
{ config, lib, pkgs, ... }:
with lib;
{
###### interface
options = {
services.gnome3.rygel = {
enable = mkOption {
default = false;
description = ''
Whether to enable Rygel UPnP Mediaserver.
You will also need to allow UPnP connections in the firewall; see the following <link xlink:href="https://github.com/NixOS/nixpkgs/pull/45045#issuecomment-416030795">comment</link>.
'';
type = types.bool;
};
};
};
###### implementation
config = mkIf config.services.gnome3.rygel.enable {
environment.systemPackages = [ pkgs.gnome3.rygel ];
services.dbus.packages = [ pkgs.gnome3.rygel ];
systemd.packages = [ pkgs.gnome3.rygel ];
};
}

View File

@ -0,0 +1,85 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.lirc;
in {
###### interface
options = {
services.lirc = {
enable = mkEnableOption "LIRC daemon";
options = mkOption {
type = types.lines;
example = ''
[lircd]
nodaemon = False
'';
description = "LIRC default options descriped in man:lircd(8) (<filename>lirc_options.conf</filename>)";
};
configs = mkOption {
type = types.listOf types.lines;
description = "Configurations for lircd to load, see man:lircd.conf(5) for details (<filename>lircd.conf</filename>)";
};
extraArguments = mkOption {
type = types.listOf types.str;
default = [];
description = "Extra arguments to lircd.";
};
};
};
###### implementation
config = mkIf cfg.enable {
# Note: LIRC executables raise a warning if lirc_options.conf does not exist
environment.etc."lirc/lirc_options.conf".text = cfg.options;
environment.systemPackages = [ pkgs.lirc ];
systemd.sockets.lircd = {
description = "LIRC daemon socket";
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream = "/run/lirc/lircd";
SocketUser = "lirc";
SocketMode = "0660";
};
};
systemd.services.lircd = let
configFile = pkgs.writeText "lircd.conf" (builtins.concatStringsSep "\n" cfg.configs);
in {
description = "LIRC daemon service";
after = [ "network.target" ];
unitConfig.Documentation = [ "man:lircd(8)" ];
serviceConfig = {
RuntimeDirectory = "lirc";
ExecStart = ''
${pkgs.lirc}/bin/lircd --nodaemon \
${escapeShellArgs cfg.extraArguments} \
${configFile}
'';
User = "lirc";
};
};
users.users.lirc = {
uid = config.ids.uids.lirc;
group = "lirc";
description = "LIRC user for lircd";
};
users.groups.lirc.gid = config.ids.gids.lirc;
};
}

View File

@ -2,7 +2,7 @@
let
inherit (lib) mkIf mkOption singleton types;
inherit (pkgs) coreutils exim;
inherit (pkgs) coreutils;
cfg = config.services.exim;
in
@ -57,6 +57,16 @@ in
'';
};
package = mkOption {
type = types.package;
default = pkgs.exim;
defaultText = "pkgs.exim";
description = ''
The Exim derivation to use.
This can be used to enable features such as LDAP or PAM support.
'';
};
};
};
@ -74,7 +84,7 @@ in
spool_directory = ${cfg.spoolDir}
${cfg.config}
'';
systemPackages = [ exim ];
systemPackages = [ cfg.package ];
};
users.users = singleton {
@ -89,14 +99,14 @@ in
gid = config.ids.gids.exim;
};
security.wrappers.exim.source = "${exim}/bin/exim";
security.wrappers.exim.source = "${cfg.package}/bin/exim";
systemd.services.exim = {
description = "Exim Mail Daemon";
wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."exim.conf".source ];
serviceConfig = {
ExecStart = "${exim}/bin/exim -bdf -q30m";
ExecStart = "${cfg.package}/bin/exim -bdf -q30m";
ExecReload = "${coreutils}/bin/kill -HUP $MAINPID";
};
preStart = ''

View File

@ -0,0 +1,56 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.weechat;
in
{
options.services.weechat = {
enable = mkEnableOption "weechat";
root = mkOption {
description = "Weechat state directory.";
type = types.str;
default = "/var/lib/weechat";
};
sessionName = mkOption {
description = "Name of the `screen' session for weechat.";
default = "weechat-screen";
type = types.str;
};
binary = mkOption {
description = "Binary to execute (by default \${weechat}/bin/weechat).";
example = literalExample ''
''${pkgs.weechat}/bin/weechat-headless
'';
default = "${pkgs.weechat}/bin/weechat";
};
};
config = mkIf cfg.enable {
users = {
groups.weechat = {};
users.weechat = {
createHome = true;
group = "weechat";
home = cfg.root;
isSystemUser = true;
};
};
systemd.services.weechat = {
environment.WEECHAT_HOME = cfg.root;
serviceConfig = {
User = "weechat";
Group = "weechat";
RemainAfterExit = "yes";
};
script = "exec ${pkgs.screen}/bin/screen -Dm -S ${cfg.sessionName} ${cfg.binary}";
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" ];
};
};
meta.doc = ./weechat.xml;
}

View File

@ -0,0 +1,61 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-weechat">
<title>WeeChat</title>
<para><link xlink:href="https://weechat.org/">WeeChat</link> is a fast and extensible IRC client.</para>
<section><title>Basic Usage</title>
<para>
By default, the module creates a
<literal><link xlink:href="https://www.freedesktop.org/wiki/Software/systemd/">systemd</link></literal> unit
which runs the chat client in a detached
<literal><link xlink:href="https://www.gnu.org/software/screen/">screen</link></literal> session.
</para>
<para>
This can be done by enabling the <literal>weechat</literal> service:
<programlisting>
{ ... }:
{
<link linkend="opt-services.weechat.enable">services.weechat.enable</link> = true;
}
</programlisting>
</para>
<para>
The service is managed by a dedicated user
named <literal>weechat</literal> in the state directory
<literal>/var/lib/weechat</literal>.
</para>
</section>
<section><title>Re-attaching to WeeChat</title>
<para>
WeeChat runs in a screen session owned by a dedicated user. To explicitly
allow another user to attach to this session, the <literal>screenrc</literal> needs to be tweaked
by adding <link xlink:href="https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser">multiuser</link> support:
<programlisting>
{
<link linkend="opt-programs.screen.screenrc">programs.screen.screenrc</link> = ''
multiuser on
acladd normal_user
'';
}
</programlisting>
Now, the session can be re-attached like this:
<programlisting>
screen -r weechat-screen
</programlisting>
</para>
<para>
<emphasis>The session name can be changed using <link linkend="opt-services.weechat.sessionName">services.weechat.sessionName.</link></emphasis>
</para>
</section>
</chapter>

View File

@ -8,7 +8,6 @@ let
ddConf = {
dd_url = "https://app.datadoghq.com";
skip_ssl_validation = "no";
api_key = "";
confd_path = "/etc/datadog-agent/conf.d";
additional_checksd = "/etc/datadog-agent/checks.d";
use_dogstatsd = true;
@ -16,6 +15,7 @@ let
// optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
// optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
// optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; }
// optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
// cfg.extraConfig;
# Generate Datadog configuration files for each configured checks.
@ -125,6 +125,13 @@ in {
'';
};
enableLiveProcessCollection = mkOption {
description = ''
Whether to enable the live process collection agent.
'';
default = false;
type = types.bool;
};
checks = mkOption {
description = ''
Configuration for all Datadog checks. Keys of this attribute
@ -229,6 +236,15 @@ in {
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
});
datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
description = "Datadog Live Process Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${pkgs.datadog-process-agent}/bin/agent --config /etc/datadog-agent/datadog.yaml
'';
});
};
environment.etc = etcfiles;

View File

@ -235,7 +235,7 @@ in {
but without GF_ prefix
'';
default = {};
type = types.attrsOf types.str;
type = with types; attrsOf (either str path);
};
};

View File

@ -17,9 +17,9 @@ let
launcher = writeScriptBin "riemann" ''
#!/bin/sh
exec ${jdk}/bin/java ${concatStringsSep "\n" cfg.extraJavaOpts} \
exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \
-cp ${classpath} \
riemann.bin ${writeText "riemann-config.clj" riemannConfig}
riemann.bin ${cfg.configFile}
'';
in {
@ -37,7 +37,8 @@ in {
config = mkOption {
type = types.lines;
description = ''
Contents of the Riemann configuration file.
Contents of the Riemann configuration file. For more complicated
configurations you should use <literal>configFile</literal>.
'';
};
configFiles = mkOption {
@ -47,7 +48,15 @@ in {
Extra files containing Riemann configuration. These files will be
loaded at runtime by Riemann (with Clojure's
<literal>load-file</literal> function) at the end of the
configuration.
configuration if you use the <literal>config</literal> option; this is ignored if you
use <literal>configFile</literal>.
'';
};
configFile = mkOption {
type = types.str;
description = ''
A Riemann config file. Any files in the same directory as this file
will be added to the classpath by Riemann.
'';
};
extraClasspathEntries = mkOption {
@ -77,6 +86,10 @@ in {
group = "riemann";
};
services.riemann.configFile = mkDefault (
writeText "riemann-config.clj" riemannConfig
);
systemd.services.riemann = {
wantedBy = [ "multi-user.target" ];
path = [ inetutils ];
@ -84,6 +97,7 @@ in {
User = "riemann";
ExecStart = "${launcher}/bin/riemann";
};
serviceConfig.LimitNOFILE = 65536;
};
};

View File

@ -0,0 +1,56 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.epmd;
in
{
###### interface
options.services.epmd = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable socket activation for Erlang Port Mapper Daemon (epmd),
which acts as a name server on all hosts involved in distributed
Erlang computations.
'';
};
package = mkOption {
type = types.package;
default = pkgs.erlang;
description = ''
The Erlang package providing the epmd binary. That way you can re-use
an Erlang runtime that is already installed for other purposes.
'';
};
};
###### implementation
config = mkIf cfg.enable {
systemd.sockets.epmd = rec {
description = "Erlang Port Mapper Daemon Activation Socket";
wantedBy = [ "sockets.target" ];
before = wantedBy;
socketConfig = {
ListenStream = "4369";
Accept = "false";
};
};
systemd.services.epmd = {
description = "Erlang Port Mapper Daemon";
after = [ "network.target" ];
requires = [ "epmd.socket" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${cfg.package}/bin/epmd -systemd";
Type = "notify";
};
};
};
}

View File

@ -0,0 +1,29 @@
{ config, lib, pkgs, ... }:
{
imports = [
./options.nix
./systemd.nix
];
config = lib.modules.mkIf config.services.hylafax.enable {
environment.systemPackages = [ pkgs.hylafaxplus ];
users.users.uucp = {
uid = config.ids.uids.uucp;
group = "uucp";
description = "Unix-to-Unix CoPy system";
isSystemUser = true;
inherit (config.users.users.nobody) home;
};
assertions = [{
assertion = config.services.hylafax.modems != {};
message = ''
HylaFAX cannot be used without modems.
Please define at least one modem with
<option>config.services.hylafax.modems</option>.
'';
}];
};
}

View File

@ -0,0 +1,12 @@
{ ... }:
# see man:hylafax-config(5)
{
ModemGroup = [ ''"any:.*"'' ];
ServerTracing = "0x78701";
SessionTracing = "0x78701";
UUCPLockDir = "/var/lock";
}

View File

@ -0,0 +1,29 @@
#! @shell@ -e
# skip this if there are no modems at all
if ! stat -t "@spoolAreaPath@"/etc/config.* >/dev/null 2>&1
then
exit 0
fi
echo "faxq started, waiting for modem(s) to initialize..."
for i in `seq @timeoutSec@0 -1 0` # gracefully timeout
do
sleep 0.1
# done if status files exist, but don't mention initialization
if \
stat -t "@spoolAreaPath@"/status/* >/dev/null 2>&1 \
&& \
! grep --silent --ignore-case 'initializing server' \
"@spoolAreaPath@"/status/*
then
echo "modem(s) apparently ready"
exit 0
fi
# if i reached 0, modems probably failed to initialize
if test $i -eq 0
then
echo "warning: modem initialization timed out"
fi
done

View File

@ -0,0 +1,10 @@
{ ... }:
# see man:hfaxd(8)
{
ServerTracing = "0x91";
XferLogFile = "/clientlog";
}

View File

@ -0,0 +1,22 @@
{ pkgs, ... }:
# see man:hylafax-config(5)
{
TagLineFont = "etc/LiberationSans-25.pcf";
TagLineLocale = ''en_US.UTF-8'';
AdminGroup = "root"; # groups that can change server config
AnswerRotary = "fax"; # don't accept anything else but faxes
LogFileMode = "0640";
PriorityScheduling = true;
RecvFileMode = "0640";
ServerTracing = "0x78701";
SessionTracing = "0x78701";
UUCPLockDir = "/var/lock";
SendPageCmd = ''${pkgs.coreutils}/bin/false''; # prevent pager transmit
SendUUCPCmd = ''${pkgs.coreutils}/bin/false''; # prevent UUCP transmit
}

View File

@ -0,0 +1,375 @@
{ config, lib, pkgs, ... }:
let
inherit (lib.options) literalExample mkEnableOption mkOption;
inherit (lib.types) bool enum int lines loaOf nullOr path str submodule;
inherit (lib.modules) mkDefault mkIf mkMerge;
commonDescr = ''
Values can be either strings or integers
(which will be added to the config file verbatim)
or lists thereof
(which will be translated to multiple
lines with the same configuration key).
Boolean values are translated to "Yes" or "No".
The default contains some reasonable
configuration to yield an operational system.
'';
str1 = lib.types.addCheck str (s: s!=""); # non-empty string
int1 = lib.types.addCheck int (i: i>0); # positive integer
configAttrType =
# Options in HylaFAX configuration files can be
# booleans, strings, integers, or lists thereof
# representing multiple config directives with the same key.
# This type definition resolves all
# those types into a list of strings.
let
inherit (lib.types) attrsOf coercedTo listOf;
innerType = coercedTo bool (x: if x then "Yes" else "No")
(coercedTo int (toString) str);
in
attrsOf (coercedTo innerType lib.singleton (listOf innerType));
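# Illustrative (comment added, not in the original source): with this type,
#   { PriorityScheduling = true; ServerTracing = "0x78701"; Include = [ "a" "b" ]; }
# is normalized so every value ends up as a list of strings:
#   PriorityScheduling -> [ "Yes" ], ServerTracing -> [ "0x78701" ], Include -> [ "a" "b" ].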
cfg = config.services.hylafax;
modemConfigOptions = { name, config, ... }: {
options = {
name = mkOption {
type = str1;
example = "ttyS1";
description = ''
Name of modem device,
will be searched for in <filename>/dev</filename>.
'';
};
type = mkOption {
type = str1;
example = "cirrus";
description = ''
Name of modem configuration file,
will be searched for in <filename>config</filename>
in the spooling area directory.
'';
};
config = mkOption {
type = configAttrType;
example = {
AreaCode = "49";
LocalCode = "30";
FAXNumber = "123456";
LocalIdentifier = "LostInBerlin";
};
description = ''
Attribute set of values for the given modem.
${commonDescr}
Options defined here override options in
<option>commonModemConfig</option> for this modem.
'';
};
};
config.name = mkDefault name;
config.config.Include = [ "config/${config.type}" ];
};
defaultConfig =
let
inherit (config.security) wrapperDir;
inherit (config.services.mail.sendmailSetuidWrapper) program;
mkIfDefault = cond: value: mkIf cond (mkDefault value);
noWrapper = config.services.mail.sendmailSetuidWrapper==null;
# If a sendmail setuid wrapper exists,
# we add the path to the default configuration file.
# Otherwise, we use `false` to provoke
# an error if hylafax tries to use it.
c.sendmailPath = mkMerge [
(mkIfDefault noWrapper ''${pkgs.coreutils}/bin/false'')
(mkIfDefault (!noWrapper) ''${wrapperDir}/${program}'')
];
importDefaultConfig = file:
lib.attrsets.mapAttrs
(lib.trivial.const mkDefault)
(import file { inherit pkgs; });
c.commonModemConfig = importDefaultConfig ./modem-default.nix;
c.faxqConfig = importDefaultConfig ./faxq-default.nix;
c.hfaxdConfig = importDefaultConfig ./hfaxd-default.nix;
in
c;
localConfig =
let
c.hfaxdConfig.UserAccessFile = cfg.userAccessFile;
c.faxqConfig = lib.attrsets.mapAttrs
(lib.trivial.const (v: mkIf (v!=null) v))
{
AreaCode = cfg.areaCode;
CountryCode = cfg.countryCode;
LongDistancePrefix = cfg.longDistancePrefix;
InternationalPrefix = cfg.internationalPrefix;
};
c.commonModemConfig = c.faxqConfig;
in
c;
in
{
options.services.hylafax = {
enable = mkEnableOption ''HylaFAX server'';
autostart = mkOption {
type = bool;
default = true;
example = false;
description = ''
Autostart the HylaFAX queue manager at system start.
If this is <literal>false</literal>, the queue manager
will still be started if there are pending
jobs or if a user tries to connect to it.
'';
};
countryCode = mkOption {
type = nullOr str1;
default = null;
example = "49";
description = ''Country code for server and all modems.'';
};
areaCode = mkOption {
type = nullOr str1;
default = null;
example = "30";
description = ''Area code for server and all modems.'';
};
longDistancePrefix = mkOption {
type = nullOr str;
default = null;
example = "0";
description = ''Long distance prefix for server and all modems.'';
};
internationalPrefix = mkOption {
type = nullOr str;
default = null;
example = "00";
description = ''International prefix for server and all modems.'';
};
spoolAreaPath = mkOption {
type = path;
default = "/var/spool/fax";
description = ''
The spooling area will be created/maintained
at the location given here.
'';
};
userAccessFile = mkOption {
type = path;
default = "/etc/hosts.hfaxd";
description = ''
The <filename>hosts.hfaxd</filename>
file entry in the spooling area
will be symlinked to the location given here.
This file must exist and be
readable only by the <literal>uucp</literal> user.
See hosts.hfaxd(5) for details.
This configuration permits access for all users:
<literal>
environment.etc."hosts.hfaxd" = {
mode = "0600";
user = "uucp";
text = ".*";
};
</literal>
Note that host-based access can be controlled with
<option>config.systemd.sockets.hylafax-hfaxd.listenStreams</option>;
by default, only 127.0.0.1 is permitted to connect.
'';
};
sendmailPath = mkOption {
type = path;
example = literalExample "''${pkgs.postfix}/bin/sendmail";
# '' ; # fix vim
description = ''
Path to the <filename>sendmail</filename> program.
The default uses the local sendmail wrapper
(see <option>config.services.mail.sendmailSetuidWrapper</option>)
if available, otherwise the <filename>false</filename>
binary, which causes an error if it is used.
'';
};
hfaxdConfig = mkOption {
type = configAttrType;
example.RecvqProtection = "0400";
description = ''
Attribute set of lines for the global
hfaxd config file <filename>etc/hfaxd.conf</filename>.
${commonDescr}
'';
};
faxqConfig = mkOption {
type = configAttrType;
example = {
InternationalPrefix = "00";
LongDistancePrefix = "0";
};
description = ''
Attribute set of lines for the global
faxq config file <filename>etc/config</filename>.
${commonDescr}
'';
};
commonModemConfig = mkOption {
type = configAttrType;
example = {
InternationalPrefix = "00";
LongDistancePrefix = "0";
};
description = ''
Attribute set of default values for
modem config files <filename>etc/config.*</filename>.
${commonDescr}
Think twice before changing
paths of fax-processing scripts.
'';
};
modems = mkOption {
type = loaOf (submodule [ modemConfigOptions ]);
default = {};
example.ttyS1 = {
type = "cirrus";
config = {
FAXNumber = "123456";
LocalIdentifier = "Smith";
};
};
description = ''
Description of installed modems.
At least one modem must be defined
to enable the HylaFAX server.
'';
};
spoolExtraInit = mkOption {
type = lines;
default = "";
example = ''chmod 0755 . # everyone may read my faxes'';
description = ''
Additional shell code that is executed within the
spooling area directory right after its setup.
'';
};
faxcron.enable.spoolInit = mkEnableOption ''
Purge old files from the spooling area with
<filename>faxcron</filename>
each time the spooling area is initialized.
'';
faxcron.enable.frequency = mkOption {
type = nullOr str1;
default = null;
example = "daily";
description = ''
Purge old files from the spooling area with
<filename>faxcron</filename> with the given frequency
(see systemd.time(7)).
'';
};
faxcron.infoDays = mkOption {
type = int1;
default = 30;
description = ''
Set the expiration time for data in the
remote machine information directory in days.
'';
};
faxcron.logDays = mkOption {
type = int1;
default = 30;
description = ''
Set the expiration time for
session trace log files in days.
'';
};
faxcron.rcvDays = mkOption {
type = int1;
default = 7;
description = ''
Set the expiration time for files in
the received facsimile queue in days.
'';
};
faxqclean.enable.spoolInit = mkEnableOption ''
Purge old files from the spooling area with
<filename>faxqclean</filename>
each time the spooling area is initialized.
'';
faxqclean.enable.frequency = mkOption {
type = nullOr str1;
default = null;
example = "daily";
description = ''
Purge old files from the spooling area with
<filename>faxqclean</filename> with the given frequency
(see systemd.time(7)).
'';
};
faxqclean.archiving = mkOption {
type = enum [ "never" "as-flagged" "always" ];
default = "as-flagged";
example = "always";
description = ''
Enable or suppress job archiving:
<literal>never</literal> disables job archiving,
<literal>as-flagged</literal> archives jobs that
have been flagged for archiving by sendfax,
<literal>always</literal> forces archiving of all jobs.
See also sendfax(1) and faxqclean(8).
'';
};
faxqclean.doneqMinutes = mkOption {
type = int1;
default = 15;
example = literalExample ''24*60'';
description = ''
Set the job
age threshold (in minutes) that controls how long
jobs may reside in the doneq directory.
'';
};
faxqclean.docqMinutes = mkOption {
type = int1;
default = 60;
example = literalExample ''24*60'';
description = ''
Set the document
age threshold (in minutes) that controls how long
unreferenced files may reside in the docq directory.
'';
};
};
config.services.hylafax =
mkIf
(config.services.hylafax.enable)
(mkMerge [ defaultConfig localConfig ])
;
}
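# Hedged usage sketch (illustration only): a minimal HylaFAX setup enables the
# service and defines at least one modem, e.g.
#   services.hylafax.enable = true;
#   services.hylafax.countryCode = "49";
#   services.hylafax.modems.ttyS1 = {
#     type = "cirrus";
#     config.FAXNumber = "123456";
#   };
# plus an /etc/hosts.hfaxd file as described for the userAccessFile option.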

View File

@ -0,0 +1,111 @@
#! @shell@ -e
# The following lines create/update the HylaFAX spool directory:
# Subdirectories/files with persistent data are kept,
# other directories/files are removed/recreated,
# mostly from the template spool
# directory in the HylaFAX package.
# This block explains how the spool area is
# derived from the spool template in the HylaFAX package:
#
# + capital letter: directory; file otherwise
# + P/p: persistent directory
# + F/f: directory with symlinks per entry
# + T/t: temporary data
# + S/s: single symlink into package
# |
# | + u: change ownership to uucp:uucp
# | + U: ..also change access mode to user-only
# | |
# archive P U
# bin S
# client T u (client connection info)
# config S
# COPYRIGHT s
# dev T u (maybe some FIFOs)
# docq P U
# doneq P U
# etc F contains customized config files!
# etc/hosts.hfaxd f
# etc/xferfaxlog f
# info P u (database of called devices)
# log P u (communication logs)
# pollq P U
# recvq P u
# sendq P U
# status T u (modem status info files)
# tmp T U
shopt -s dotglob # if bash sees "*", it also includes dot files
lnsym () { ln --symbolic "$@" ; }
lnsymfrc () { ln --symbolic --force "$@" ; }
cprd () { cp --remove-destination "$@" ; }
update () { install --owner=@faxuser@ --group=@faxgroup@ "$@" ; }
## create/update spooling area
update --mode=0750 -d "@spoolAreaPath@"
cd "@spoolAreaPath@"
persist=(archive docq doneq info log pollq recvq sendq)
# remove entries that don't belong here
touch dummy # ensure "*" resolves to something
for k in *
do
keep=0
for j in "${persist[@]}" xferfaxlog clientlog faxcron.lastrun
do
if test "$k" == "$j"
then
keep=1
break
fi
done
if test "$keep" == "0"
then
rm --recursive "$k"
fi
done
# create persistent data directories (unless they exist already)
update --mode=0700 -d "${persist[@]}"
chmod 0755 info log recvq
# create ``xferfaxlog``, ``faxcron.lastrun``, ``clientlog``
touch clientlog faxcron.lastrun xferfaxlog
chown @faxuser@:@faxgroup@ clientlog faxcron.lastrun xferfaxlog
# create symlinks for frozen directories/files
lnsym --target-directory=. "@hylafax@"/spool/{COPYRIGHT,bin,config}
# create empty temporary directories
update --mode=0700 -d client dev status
update -d tmp
## create and fill etc
install -d "@spoolAreaPath@/etc"
cd "@spoolAreaPath@/etc"
# create symlinks to all files in template's etc
lnsym --target-directory=. "@hylafax@/spool/etc"/*
# set LOCKDIR in setup.cache
sed --regexp-extended 's|^(UUCP_LOCKDIR=).*$|\1'"'@lockPath@'|g" --in-place setup.cache
# etc/{xferfaxlog,lastrun} are stored in the spool root
lnsymfrc --target-directory=. ../xferfaxlog
lnsymfrc --no-target-directory ../faxcron.lastrun lastrun
# etc/hosts.hfaxd is provided by the NixOS configuration
lnsymfrc --no-target-directory "@userAccessFile@" hosts.hfaxd
# etc/config and etc/config.${DEVID} must be copied:
# hfaxd reads these file after locking itself up in a chroot
cprd --no-target-directory "@globalConfigPath@" config
cprd --target-directory=. "@modemConfigPath@"/*

View File

@ -0,0 +1,249 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkIf mkMerge;
inherit (lib) concatStringsSep optionalString;
cfg = config.services.hylafax;
mapModems = lib.flip map (lib.attrValues cfg.modems);
mkConfigFile = name: conf:
# creates hylafax config file,
# makes sure "Include" is listed *first*
let
mkLines = conf:
(lib.concatLists
(lib.flip lib.mapAttrsToList conf
(k: map (v: ''${k}: ${v}'')
)));
include = mkLines { Include = conf.Include or []; };
other = mkLines ( conf // { Include = []; } );
in
pkgs.writeText ''hylafax-config${name}''
(concatStringsSep "\n" (include ++ other));
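# Illustrative (comment added): mkConfigFile "" { Include = [ "config/cirrus" ]; AreaCode = "30"; }
# yields a file whose first line is "Include: config/cirrus", followed by "AreaCode: 30".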
globalConfigPath = mkConfigFile "" cfg.faxqConfig;
modemConfigPath =
let
mkModemConfigFile = { config, name, ... }:
mkConfigFile ''.${name}''
(cfg.commonModemConfig // config);
mkLine = { name, type, ... }@modem: ''
# check if modem config file exists:
test -f "${pkgs.hylafaxplus}/spool/config/${type}"
ln \
--symbolic \
--no-target-directory \
"${mkModemConfigFile modem}" \
"$out/config.${name}"
'';
in
pkgs.runCommand "hylafax-config-modems" {}
''mkdir --parents "$out/" ${concatStringsSep "\n" (mapModems mkLine)}'';
setupSpoolScript = pkgs.substituteAll {
name = "hylafax-setup-spool.sh";
src = ./spool.sh;
isExecutable = true;
inherit (pkgs.stdenv) shell;
hylafax = pkgs.hylafaxplus;
faxuser = "uucp";
faxgroup = "uucp";
lockPath = "/var/lock";
inherit globalConfigPath modemConfigPath;
inherit (cfg) sendmailPath spoolAreaPath userAccessFile;
};
waitFaxqScript = pkgs.substituteAll {
# This script checks the modems status files
# and waits until all modems report readiness.
name = "hylafax-faxq-wait-start.sh";
src = ./faxq-wait.sh;
isExecutable = true;
timeoutSec = toString 10;
inherit (pkgs.stdenv) shell;
inherit (cfg) spoolAreaPath;
};
sockets."hylafax-hfaxd" = {
description = "HylaFAX server socket";
documentation = [ "man:hfaxd(8)" ];
wantedBy = [ "multi-user.target" ];
listenStreams = [ "127.0.0.1:4559" ];
socketConfig.FreeBind = true;
socketConfig.Accept = true;
};
paths."hylafax-faxq" = {
description = "HylaFAX queue manager sendq watch";
documentation = [ "man:faxq(8)" "man:sendq(5)" ];
wantedBy = [ "multi-user.target" ];
pathConfig.PathExistsGlob = [ ''${cfg.spoolAreaPath}/sendq/q*'' ];
};
timers = mkMerge [
(
mkIf (cfg.faxcron.enable.frequency!=null)
{ "hylafax-faxcron".timerConfig.Persistent = true; }
)
(
mkIf (cfg.faxqclean.enable.frequency!=null)
{ "hylafax-faxqclean".timerConfig.Persistent = true; }
)
];
hardenService =
# Add some common systemd service hardening settings,
# but allow each service (here) to override
# settings by explicitly setting those to `null`.
# More hardening would be nice but makes
# customizing hylafax setups very difficult.
# If at all, it should only be added along
# with some options to customize it.
let
hardening = {
PrivateDevices = true; # breaks /dev/tty...
PrivateNetwork = true;
PrivateTmp = true;
ProtectControlGroups = true;
#ProtectHome = true; # breaks custom spool dirs
ProtectKernelModules = true;
ProtectKernelTunables = true;
#ProtectSystem = "strict"; # breaks custom spool dirs
RestrictNamespaces = true;
RestrictRealtime = true;
};
filter = key: value: (value != null) || ! (lib.hasAttr key hardening);
apply = service: lib.filterAttrs filter (hardening // (service.serviceConfig or {}));
in
service: service // { serviceConfig = apply service; };
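# Illustrative (comment added): a service below that sets
# serviceConfig.PrivateDevices = null (e.g. hylafax-faxq, which needs /dev/tty*)
# ends up without that key at all, while untouched services get the defaults
# from `hardening` above.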
services."hylafax-spool" = {
description = "HylaFAX spool area preparation";
documentation = [ "man:hylafax-server(4)" ];
script = ''
${setupSpoolScript}
cd "${cfg.spoolAreaPath}"
${cfg.spoolExtraInit}
if ! test -f "${cfg.spoolAreaPath}/etc/hosts.hfaxd"
then
echo hosts.hfaxd is missing
exit 1
fi
'';
serviceConfig.ExecStop = ''${setupSpoolScript}'';
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
unitConfig.RequiresMountsFor = [ cfg.spoolAreaPath ];
};
services."hylafax-faxq" = {
description = "HylaFAX queue manager";
documentation = [ "man:faxq(8)" ];
requires = [ "hylafax-spool.service" ];
after = [ "hylafax-spool.service" ];
wants = mapModems ( { name, ... }: ''hylafax-faxgetty@${name}.service'' );
wantedBy = mkIf cfg.autostart [ "multi-user.target" ];
serviceConfig.Type = "forking";
serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/faxq -q "${cfg.spoolAreaPath}"'';
# This delays the "readiness" of this service until
# all modems are initialized (or a timeout is reached).
# Otherwise, sending a fax with the fax service
# stopped will always yield a failed send attempt:
# The fax service is started when the job is created with
# `sendfax`, but modems need some time to initialize.
serviceConfig.ExecStartPost = [ ''${waitFaxqScript}'' ];
# faxquit fails if the pipe is already gone
# (e.g. the service is already stopping)
serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}"'';
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.RestrictRealtime = null;
};
services."hylafax-hfaxd@" = {
description = "HylaFAX server";
documentation = [ "man:hfaxd(8)" ];
after = [ "hylafax-faxq.service" ];
requires = [ "hylafax-faxq.service" ];
serviceConfig.StandardInput = "socket";
serviceConfig.StandardOutput = "socket";
serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/hfaxd -q "${cfg.spoolAreaPath}" -d -I'';
unitConfig.RequiresMountsFor = [ cfg.userAccessFile ];
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.PrivateNetwork = null;
};
services."hylafax-faxcron" = rec {
description = "HylaFAX spool area maintenance";
documentation = [ "man:faxcron(8)" ];
after = [ "hylafax-spool.service" ];
requires = [ "hylafax-spool.service" ];
wantedBy = mkIf cfg.faxcron.enable.spoolInit requires;
startAt = mkIf (cfg.faxcron.enable.frequency!=null) cfg.faxcron.enable.frequency;
serviceConfig.ExecStart = concatStringsSep " " [
''${pkgs.hylafaxplus}/spool/bin/faxcron''
''-q "${cfg.spoolAreaPath}"''
''-info ${toString cfg.faxcron.infoDays}''
''-log ${toString cfg.faxcron.logDays}''
''-rcv ${toString cfg.faxcron.rcvDays}''
];
};
services."hylafax-faxqclean" = rec {
description = "HylaFAX spool area queue cleaner";
documentation = [ "man:faxqclean(8)" ];
after = [ "hylafax-spool.service" ];
requires = [ "hylafax-spool.service" ];
wantedBy = mkIf cfg.faxqclean.enable.spoolInit requires;
startAt = mkIf (cfg.faxqclean.enable.frequency!=null) cfg.faxqclean.enable.frequency;
serviceConfig.ExecStart = concatStringsSep " " [
''${pkgs.hylafaxplus}/spool/bin/faxqclean''
''-q "${cfg.spoolAreaPath}"''
''-v''
(optionalString (cfg.faxqclean.archiving!="never") ''-a'')
(optionalString (cfg.faxqclean.archiving=="always") ''-A'')
''-j ${toString (cfg.faxqclean.doneqMinutes*60)}''
''-d ${toString (cfg.faxqclean.docqMinutes*60)}''
];
};
mkFaxgettyService = { name, ... }:
lib.nameValuePair ''hylafax-faxgetty@${name}'' rec {
description = "HylaFAX faxgetty for %I";
documentation = [ "man:faxgetty(8)" ];
bindsTo = [ "dev-%i.device" ];
requires = [ "hylafax-spool.service" ];
after = bindsTo ++ requires;
before = [ "hylafax-faxq.service" "getty.target" ];
unitConfig.StopWhenUnneeded = true;
unitConfig.AssertFileNotEmpty = ''${cfg.spoolAreaPath}/etc/config.%I'';
serviceConfig.UtmpIdentifier = "%I";
serviceConfig.TTYPath = "/dev/%I";
serviceConfig.Restart = "always";
serviceConfig.KillMode = "process";
serviceConfig.IgnoreSIGPIPE = false;
serviceConfig.ExecStart = ''-${pkgs.hylafaxplus}/spool/bin/faxgetty -q "${cfg.spoolAreaPath}" /dev/%I'';
# faxquit fails if the pipe is already gone
# (e.g. the service is already stopping)
serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}" %I'';
# disable some systemd hardening settings
serviceConfig.PrivateDevices = null;
serviceConfig.RestrictRealtime = null;
};
modemServices =
lib.listToAttrs (mapModems mkFaxgettyService);
in
{
config.systemd = mkIf cfg.enable {
inherit sockets timers paths;
services = lib.mapAttrs (lib.const hardenService) (services // modemServices);
};
}

View File

@ -8,6 +8,17 @@ let
homeDir = "/var/lib/i2pd";
strOpt = k: v: k + " = " + v;
boolOpt = k: v: k + " = " + boolToString v;
intOpt = k: v: k + " = " + toString v;
lstOpt = k: xs: k + " = " + concatStringsSep "," xs;
optionalNullString = o: s: optional (! isNull s) (strOpt o s);
optionalNullBool = o: b: optional (! isNull b) (boolOpt o b);
optionalNullInt = o: i: optional (! isNull i) (intOpt o i);
optionalEmptyList = o: l: optional ([] != l) (lstOpt o l);
mkEnableTrueOption = name: mkEnableOption name // { default = true; };
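# Illustrative (comment added): strOpt "loglevel" "warn" -> "loglevel = warn",
# boolOpt "ipv4" true -> "ipv4 = true", lstOpt "urls" [ "a" "b" ] -> "urls = a,b";
# the optionalNull* helpers return [] when the value is null, so the line is omitted.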
mkEndpointOpt = name: addr: port: {
enable = mkEnableOption name;
name = mkOption {
@ -18,42 +29,54 @@ let
address = mkOption {
type = types.str;
default = addr;
description = "Bind address for ${name} endpoint. Default: " + addr;
description = "Bind address for ${name} endpoint.";
};
port = mkOption {
type = types.int;
default = port;
description = "Bind port for ${name} endoint. Default: " + toString port;
description = "Bind port for ${name} endoint.";
};
};
mkKeyedEndpointOpt = name: addr: port: keyFile:
i2cpOpts = name: {
length = mkOption {
type = types.int;
description = "Guaranteed minimum hops for ${name} tunnels.";
default = 3;
};
quantity = mkOption {
type = types.int;
description = "Number of simultaneous ${name} tunnels.";
default = 5;
};
};
mkKeyedEndpointOpt = name: addr: port: keyloc:
(mkEndpointOpt name addr port) // {
keys = mkOption {
type = types.str;
default = "";
type = with types; nullOr str;
default = keyloc;
description = ''
File to persist ${lib.toUpper name} keys.
'';
};
inbound = i2cpOpts name;
outbound = i2cpOpts name;
latency.min = mkOption {
type = with types; nullOr int;
description = "Min latency for tunnels.";
default = null;
};
latency.max = mkOption {
type = with types; nullOr int;
description = "Max latency for tunnels.";
default = null;
};
};
commonTunOpts = let
i2cpOpts = {
length = mkOption {
type = types.int;
description = "Guaranteed minimum hops.";
default = 3;
};
quantity = mkOption {
type = types.int;
description = "Number of simultaneous tunnels.";
default = 5;
};
};
in name: {
outbound = i2cpOpts;
inbound = i2cpOpts;
commonTunOpts = name: {
outbound = i2cpOpts name;
inbound = i2cpOpts name;
crypto.tagsToSend = mkOption {
type = types.int;
description = "Number of ElGamal/AES tags to send.";
@ -70,94 +93,142 @@ let
};
} // mkEndpointOpt name "127.0.0.1" 0;
i2pdConf = pkgs.writeText "i2pd.conf" ''
# DO NOT EDIT -- this file has been generated automatically.
loglevel = ${cfg.logLevel}
ipv4 = ${boolToString cfg.enableIPv4}
ipv6 = ${boolToString cfg.enableIPv6}
notransit = ${boolToString cfg.notransit}
floodfill = ${boolToString cfg.floodfill}
netid = ${toString cfg.netid}
${if isNull cfg.bandwidth then "" else "bandwidth = ${toString cfg.bandwidth}" }
${if isNull cfg.port then "" else "port = ${toString cfg.port}"}
[limits]
transittunnels = ${toString cfg.limits.transittunnels}
[upnp]
enabled = ${boolToString cfg.upnp.enable}
name = ${cfg.upnp.name}
[precomputation]
elgamal = ${boolToString cfg.precomputation.elgamal}
[reseed]
verify = ${boolToString cfg.reseed.verify}
file = ${cfg.reseed.file}
urls = ${builtins.concatStringsSep "," cfg.reseed.urls}
[addressbook]
defaulturl = ${cfg.addressbook.defaulturl}
subscriptions = ${builtins.concatStringsSep "," cfg.addressbook.subscriptions}
${flip concatMapStrings
sec = name: "\n[" + name + "]";
notice = "# DO NOT EDIT -- this file has been generated automatically.";
i2pdConf = let
opts = [
notice
(strOpt "loglevel" cfg.logLevel)
(boolOpt "logclftime" cfg.logCLFTime)
(boolOpt "ipv4" cfg.enableIPv4)
(boolOpt "ipv6" cfg.enableIPv6)
(boolOpt "notransit" cfg.notransit)
(boolOpt "floodfill" cfg.floodfill)
(intOpt "netid" cfg.netid)
] ++ (optionalNullInt "bandwidth" cfg.bandwidth)
++ (optionalNullInt "port" cfg.port)
++ (optionalNullString "family" cfg.family)
++ (optionalNullString "datadir" cfg.dataDir)
++ (optionalNullInt "share" cfg.share)
++ (optionalNullBool "ssu" cfg.ssu)
++ (optionalNullBool "ntcp" cfg.ntcp)
++ (optionalNullString "ntcpproxy" cfg.ntcpProxy)
++ (optionalNullString "ifname" cfg.ifname)
++ (optionalNullString "ifname4" cfg.ifname4)
++ (optionalNullString "ifname6" cfg.ifname6)
++ [
(sec "limits")
(intOpt "transittunnels" cfg.limits.transittunnels)
(intOpt "coresize" cfg.limits.coreSize)
(intOpt "openfiles" cfg.limits.openFiles)
(intOpt "ntcphard" cfg.limits.ntcpHard)
(intOpt "ntcpsoft" cfg.limits.ntcpSoft)
(intOpt "ntcpthreads" cfg.limits.ntcpThreads)
(sec "upnp")
(boolOpt "enabled" cfg.upnp.enable)
(sec "precomputation")
(boolOpt "elgamal" cfg.precomputation.elgamal)
(sec "reseed")
(boolOpt "verify" cfg.reseed.verify)
] ++ (optionalNullString "file" cfg.reseed.file)
++ (optionalEmptyList "urls" cfg.reseed.urls)
++ (optionalNullString "floodfill" cfg.reseed.floodfill)
++ (optionalNullString "zipfile" cfg.reseed.zipfile)
++ (optionalNullString "proxy" cfg.reseed.proxy)
++ [
(sec "trust")
(boolOpt "enabled" cfg.trust.enable)
(boolOpt "hidden" cfg.trust.hidden)
] ++ (optionalEmptyList "routers" cfg.trust.routers)
++ (optionalNullString "family" cfg.trust.family)
++ [
(sec "websockets")
(boolOpt "enabled" cfg.websocket.enable)
(strOpt "address" cfg.websocket.address)
(intOpt "port" cfg.websocket.port)
(sec "exploratory")
(intOpt "inbound.length" cfg.exploratory.inbound.length)
(intOpt "inbound.quantity" cfg.exploratory.inbound.quantity)
(intOpt "outbound.length" cfg.exploratory.outbound.length)
(intOpt "outbound.quantity" cfg.exploratory.outbound.quantity)
(sec "ntcp2")
(boolOpt "enabled" cfg.ntcp2.enable)
(boolOpt "published" cfg.ntcp2.published)
(intOpt "port" cfg.ntcp2.port)
(sec "addressbook")
(strOpt "defaulturl" cfg.addressbook.defaulturl)
] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
++ (flip map
(collect (proto: proto ? port && proto ? address && proto ? name) cfg.proto)
(proto: ''
[${proto.name}]
enabled = ${boolToString proto.enable}
address = ${proto.address}
port = ${toString proto.port}
${if proto ? keys then "keys = ${proto.keys}" else ""}
${if proto ? auth then "auth = ${boolToString proto.auth}" else ""}
${if proto ? user then "user = ${proto.user}" else ""}
${if proto ? pass then "pass = ${proto.pass}" else ""}
${if proto ? outproxy then "outproxy = ${proto.outproxy}" else ""}
${if proto ? outproxyPort then "outproxyport = ${toString proto.outproxyPort}" else ""}
'')
}
'';
(proto: let protoOpts = [
(sec proto.name)
(boolOpt "enabled" proto.enable)
(strOpt "address" proto.address)
(intOpt "port" proto.port)
] ++ (if proto ? keys then optionalNullString "keys" proto.keys else [])
++ (if proto ? auth then optionalNullBool "auth" proto.auth else [])
++ (if proto ? user then optionalNullString "user" proto.user else [])
++ (if proto ? pass then optionalNullString "pass" proto.pass else [])
++ (if proto ? strictHeaders then optionalNullBool "strictheaders" proto.strictHeaders else [])
++ (if proto ? hostname then optionalNullString "hostname" proto.hostname else [])
++ (if proto ? outproxy then optionalNullString "outproxy" proto.outproxy else [])
++ (if proto ? outproxyPort then optionalNullInt "outproxyport" proto.outproxyPort else [])
++ (if proto ? outproxyEnable then optionalNullBool "outproxy.enabled" proto.outproxyEnable else []);
in (concatStringsSep "\n" protoOpts)
));
in
pkgs.writeText "i2pd.conf" (concatStringsSep "\n" opts);
i2pdTunnelConf = pkgs.writeText "i2pd-tunnels.conf" ''
# DO NOT EDIT -- this file has been generated automatically.
${flip concatMapStrings
tunnelConf = let opts = [
notice
(flip map
(collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
(tun: ''
[${tun.name}]
type = client
destination = ${tun.destination}
destinationport = ${toString tun.destinationPort}
keys = ${tun.keys}
address = ${tun.address}
port = ${toString tun.port}
inbound.length = ${toString tun.inbound.length}
outbound.length = ${toString tun.outbound.length}
inbound.quantity = ${toString tun.inbound.quantity}
outbound.quantity = ${toString tun.outbound.quantity}
crypto.tagsToSend = ${toString tun.crypto.tagsToSend}
'')
}
${flip concatMapStrings
(tun: let outTunOpts = [
(sec tun.name)
"type = client"
(intOpt "port" tun.port)
(strOpt "destination" tun.destination)
] ++ (if tun ? destinationPort then optionalNullInt "destinationport" tun.destinationPort else [])
++ (if tun ? keys then
optionalNullString "keys" tun.keys else [])
++ (if tun ? address then
optionalNullString "address" tun.address else [])
++ (if tun ? inbound.length then
optionalNullInt "inbound.length" tun.inbound.length else [])
++ (if tun ? inbound.quantity then
optionalNullInt "inbound.quantity" tun.inbound.quantity else [])
++ (if tun ? outbound.length then
optionalNullInt "outbound.length" tun.outbound.length else [])
++ (if tun ? outbound.quantity then
optionalNullInt "outbound.quantity" tun.outbound.quantity else [])
++ (if tun ? crypto.tagsToSend then
optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend else []);
in concatStringsSep "\n" outTunOpts))
(flip map
(collect (tun: tun ? port && tun ? address) cfg.inTunnels)
(tun: ''
[${tun.name}]
type = server
destination = ${tun.destination}
keys = ${tun.keys}
host = ${tun.address}
port = ${toString tun.port}
inport = ${toString tun.inPort}
accesslist = ${builtins.concatStringsSep "," tun.accessList}
'')
}
'';
(tun: let inTunOpts = [
(sec tun.name)
"type = server"
(intOpt "port" tun.port)
(strOpt "host" tun.address)
] ++ (if tun ? destination then
optionalNullString "destination" tun.destination else [])
++ (if tun ? keys then
optionalNullString "keys" tun.keys else [])
++ (if tun ? inPort then
optionalNullInt "inport" tun.inPort else [])
++ (if tun ? accessList then
optionalEmptyList "accesslist" tun.accessList else []);
in concatStringsSep "\n" inTunOpts))];
in pkgs.writeText "i2pd-tunnels.conf" opts;
i2pdSh = pkgs.writeScriptBin "i2pd" ''
#!/bin/sh
exec ${pkgs.i2pd}/bin/i2pd \
${if isNull cfg.address then "" else "--host="+cfg.address} \
--service \
--conf=${i2pdConf} \
--tunconf=${i2pdTunnelConf}
--tunconf=${tunnelConf}
'';
in
@ -170,9 +241,7 @@ in
services.i2pd = {
enable = mkOption {
type = types.bool;
default = false;
enable = mkEnableOption "I2Pd daemon" // {
description = ''
Enables I2Pd as a running service upon activation.
Please read http://i2pd.readthedocs.io/en/latest/ for further
@ -192,6 +261,8 @@ in
'';
};
logCLFTime = mkEnableOption "Full CLF-formatted date and time to log";
address = mkOption {
type = with types; nullOr str;
default = null;
@ -200,17 +271,72 @@ in
'';
};
notransit = mkOption {
type = types.bool;
default = false;
family = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Specify a family the router belongs to.
'';
};
dataDir = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Alternative path to storage of i2pd data (RI, keys, peer profiles, ...)
'';
};
share = mkOption {
type = types.int;
default = 100;
description = ''
Limit of transit traffic as a percentage of max bandwidth.
'';
};
ifname = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Network interface to bind to.
'';
};
ifname4 = mkOption {
type = with types; nullOr str;
default = null;
description = ''
IPv4 interface to bind to.
'';
};
ifname6 = mkOption {
type = with types; nullOr str;
default = null;
description = ''
IPv6 interface to bind to.
'';
};
ntcpProxy = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Proxy URL for NTCP transport.
'';
};
ntcp = mkEnableTrueOption "ntcp";
ssu = mkEnableTrueOption "ssu";
notransit = mkEnableOption "notransit" // {
description = ''
Tells the router to not accept transit tunnels during startup.
'';
};
floodfill = mkOption {
type = types.bool;
default = false;
floodfill = mkEnableOption "floodfill" // {
description = ''
If the router is declared to be unreachable and needs introduction nodes.
'';
@ -241,51 +367,20 @@ in
'';
};
enableIPv4 = mkOption {
type = types.bool;
default = true;
enableIPv4 = mkEnableTrueOption "IPv4 connectivity";
enableIPv6 = mkEnableOption "IPv6 connectivity";
nat = mkEnableTrueOption "NAT bypass";
upnp.enable = mkEnableOption "UPnP service discovery";
upnp.name = mkOption {
type = types.str;
default = "I2Pd";
description = ''
Enables IPv4 connectivity. Enabled by default.
Name under which i2pd appears in the UPnP forwardings list.
'';
};
enableIPv6 = mkOption {
type = types.bool;
default = false;
description = ''
Enables IPv6 connectivity. Disabled by default.
'';
};
nat = mkOption {
type = types.bool;
default = true;
description = ''
Assume router is NATed. Enabled by default.
'';
};
upnp = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enables UPnP.
'';
};
name = mkOption {
type = types.str;
default = "I2Pd";
description = ''
Name i2pd appears in UPnP forwardings list.
'';
};
};
precomputation.elgamal = mkOption {
type = types.bool;
default = true;
precomputation.elgamal = mkEnableTrueOption "Precomputed ElGamal tables" // {
description = ''
Whether to use precomputed tables for ElGamal.
<command>i2pd</command> defaults to <literal>false</literal>
@ -296,76 +391,154 @@ in
'';
};
reseed = {
verify = mkOption {
type = types.bool;
default = false;
description = ''
Request SU3 signature verification
'';
};
reseed.verify = mkEnableOption "SU3 signature verification";
file = mkOption {
type = types.str;
default = "";
description = ''
Full path to SU3 file to reseed from
'';
};
urls = mkOption {
type = with types; listOf str;
default = [
"https://reseed.i2p-project.de/"
"https://i2p.mooo.com/netDb/"
"https://netdb.i2p2.no/"
"https://us.reseed.i2p2.no:444/"
"https://uk.reseed.i2p2.no:444/"
"https://i2p.manas.ca:8443/"
];
description = ''
Reseed URLs
'';
};
reseed.file = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Full path to SU3 file to reseed from.
'';
};
addressbook = {
defaulturl = mkOption {
type = types.str;
default = "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt";
description = ''
AddressBook subscription URL for initial setup
'';
};
subscriptions = mkOption {
type = with types; listOf str;
default = [
"http://inr.i2p/export/alive-hosts.txt"
"http://i2p-projekt.i2p/hosts.txt"
"http://stats.i2p/cgi-bin/newhosts.txt"
];
description = ''
AddressBook subscription URLs
'';
};
reseed.urls = mkOption {
type = with types; listOf str;
default = [];
description = ''
Reseed URLs.
'';
};
reseed.floodfill = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Path to router info of floodfill to reseed from.
'';
};
reseed.zipfile = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Path to local .zip file to reseed from.
'';
};
reseed.proxy = mkOption {
type = with types; nullOr str;
default = null;
description = ''
URL for reseed proxy, supports http/socks.
'';
};
addressbook.defaulturl = mkOption {
type = types.str;
default = "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt";
description = ''
AddressBook subscription URL for initial setup
'';
};
addressbook.subscriptions = mkOption {
type = with types; listOf str;
default = [
"http://inr.i2p/export/alive-hosts.txt"
"http://i2p-projekt.i2p/hosts.txt"
"http://stats.i2p/cgi-bin/newhosts.txt"
];
description = ''
AddressBook subscription URLs
'';
};
trust.enable = mkEnableOption "Explicit trust options";
trust.family = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Router family to trust for first hops.
'';
};
trust.routers = mkOption {
type = with types; listOf str;
default = [];
description = ''
Only connect to the listed routers.
'';
};
trust.hidden = mkEnableOption "Router concealment.";
websocket = mkEndpointOpt "websockets" "127.0.0.1" 7666;
exploratory.inbound = i2cpOpts "exploratory";
exploratory.outbound = i2cpOpts "exploratory";
ntcp2.enable = mkEnableTrueOption "NTCP2.";
ntcp2.published = mkEnableOption "NTCP2 publication.";
ntcp2.port = mkOption {
type = types.int;
default = 0;
description = ''
Port to listen on for incoming NTCP2 connections (0 = auto).
'';
};
limits.transittunnels = mkOption {
type = types.int;
default = 2500;
description = ''
Maximum number of active transit sessions
Maximum number of active transit sessions.
'';
};
limits.coreSize = mkOption {
type = types.int;
default = 0;
description = ''
Maximum size of corefile in Kb (0 - use system limit).
'';
};
limits.openFiles = mkOption {
type = types.int;
default = 0;
description = ''
Maximum number of open files (0 - use system default).
'';
};
limits.ntcpHard = mkOption {
type = types.int;
default = 0;
description = ''
Maximum number of active transit sessions.
'';
};
limits.ntcpSoft = mkOption {
type = types.int;
default = 0;
description = ''
Threshold to start probabilistic backoff with ntcp sessions (default: use system limit).
'';
};
limits.ntcpThreads = mkOption {
type = types.int;
default = 1;
description = ''
Maximum number of threads used by NTCP DH worker.
'';
};
proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // {
auth = mkOption {
type = types.bool;
default = false;
description = ''
Enable authentication for webconsole.
'';
};
auth = mkEnableOption "Webconsole authentication";
user = mkOption {
type = types.str;
default = "i2pd";
@ -373,6 +546,7 @@ in
Username for webconsole access
'';
};
pass = mkOption {
type = types.str;
default = "i2pd";
@ -380,11 +554,35 @@ in
Password for webconsole access.
'';
};
strictHeaders = mkOption {
type = with types; nullOr bool;
default = null;
description = ''
Enable strict host checking on WebUI.
'';
};
hostname = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Expected hostname for WebUI.
'';
};
};
proto.httpProxy = mkKeyedEndpointOpt "httpproxy" "127.0.0.1" 4444 "";
proto.socksProxy = (mkKeyedEndpointOpt "socksproxy" "127.0.0.1" 4447 "")
proto.httpProxy = (mkKeyedEndpointOpt "httpproxy" "127.0.0.1" 4444 "httpproxy-keys.dat")
// {
outproxy = mkOption {
type = with types; nullOr str;
default = null;
description = "Upstream outproxy bind address.";
};
};
proto.socksProxy = (mkKeyedEndpointOpt "socksproxy" "127.0.0.1" 4447 "socksproxy-keys.dat")
// {
outproxyEnable = mkEnableOption "SOCKS outproxy";
outproxy = mkOption {
type = types.str;
default = "127.0.0.1";
@ -408,8 +606,8 @@ in
{ name, ... }: {
options = {
destinationPort = mkOption {
type = types.int;
default = 0;
type = with types; nullOr int;
default = null;
description = "Connect to particular port at destination.";
};
} // commonTunOpts name;

View File

@ -0,0 +1,87 @@
{ config, lib, pkgs, ... }: with lib;
let
cfg = config.services.iperf3;
api = {
enable = mkEnableOption "iperf3 network throughput testing server";
port = mkOption {
type = types.ints.u16;
default = 5201;
description = "Server port to listen on for iperf3 client requsts.";
};
affinity = mkOption {
type = types.nullOr types.ints.unsigned;
default = null;
description = "CPU affinity for the process.";
};
bind = mkOption {
type = types.nullOr types.str;
default = null;
description = "Bind to the specific interface associated with the given address.";
};
verbose = mkOption {
type = types.bool;
default = false;
description = "Give more detailed output.";
};
forceFlush = mkOption {
type = types.bool;
default = false;
description = "Force flushing output at every interval.";
};
debug = mkOption {
type = types.bool;
default = false;
description = "Emit debugging output.";
};
rsaPrivateKey = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to the RSA private key (not password-protected) used to decrypt authentication credentials from the client.";
};
authorizedUsersFile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to the configuration file containing authorized users credentials to run iperf tests.";
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Extra flags to pass to iperf3(1).";
};
};
imp = {
systemd.services.iperf3 = {
description = "iperf3 daemon";
unitConfig.Documentation = "man:iperf3(1) https://iperf.fr/iperf-doc.php";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Restart = "on-failure";
RestartSec = 2;
DynamicUser = true;
PrivateDevices = true;
CapabilityBoundingSet = "";
NoNewPrivileges = true;
ExecStart = ''
${pkgs.iperf3}/bin/iperf \
--server \
--port ${toString cfg.port} \
${optionalString (cfg.affinity != null) "--affinity ${toString cfg.affinity}"} \
${optionalString (cfg.bind != null) "--bind ${cfg.bind}"} \
${optionalString (cfg.rsaPrivateKey != null) "--rsa-private-key-path ${cfg.rsaPrivateKey}"} \
${optionalString (cfg.authorizedUsersFile != null) "--authorized-users-path ${cfg.authorizedUsersFile}"} \
${optionalString cfg.verbose "--verbose"} \
${optionalString cfg.debug "--debug"} \
${optionalString cfg.forceFlush "--forceflush"} \
${escapeShellArgs cfg.extraFlags}
'';
};
};
};
in {
options.services.iperf3 = api;
config = mkIf cfg.enable imp;
}
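# Hedged usage sketch (illustration only): run an iperf3 server on the default
# port and open the firewall for it:
#   services.iperf3.enable = true;
#   services.iperf3.bind = "0.0.0.0";
#   networking.firewall.allowedTCPPorts = [ 5201 ];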

View File

@ -406,25 +406,25 @@ in {
{ source = configFile;
target = "NetworkManager/NetworkManager.conf";
}
{ source = "${networkmanager-openvpn}/etc/NetworkManager/VPN/nm-openvpn-service.name";
{ source = "${networkmanager-openvpn}/lib/NetworkManager/VPN/nm-openvpn-service.name";
target = "NetworkManager/VPN/nm-openvpn-service.name";
}
{ source = "${networkmanager-vpnc}/etc/NetworkManager/VPN/nm-vpnc-service.name";
{ source = "${networkmanager-vpnc}/lib/NetworkManager/VPN/nm-vpnc-service.name";
target = "NetworkManager/VPN/nm-vpnc-service.name";
}
{ source = "${networkmanager-openconnect}/etc/NetworkManager/VPN/nm-openconnect-service.name";
{ source = "${networkmanager-openconnect}/lib/NetworkManager/VPN/nm-openconnect-service.name";
target = "NetworkManager/VPN/nm-openconnect-service.name";
}
{ source = "${networkmanager-fortisslvpn}/etc/NetworkManager/VPN/nm-fortisslvpn-service.name";
{ source = "${networkmanager-fortisslvpn}/lib/NetworkManager/VPN/nm-fortisslvpn-service.name";
target = "NetworkManager/VPN/nm-fortisslvpn-service.name";
}
{ source = "${networkmanager-l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
{ source = "${networkmanager-l2tp}/lib/NetworkManager/VPN/nm-l2tp-service.name";
target = "NetworkManager/VPN/nm-l2tp-service.name";
}
{ source = "${networkmanager_strongswan}/etc/NetworkManager/VPN/nm-strongswan-service.name";
{ source = "${networkmanager_strongswan}/lib/NetworkManager/VPN/nm-strongswan-service.name";
target = "NetworkManager/VPN/nm-strongswan-service.name";
}
{ source = "${networkmanager-iodine}/etc/NetworkManager/VPN/nm-iodine-service.name";
{ source = "${networkmanager-iodine}/lib/NetworkManager/VPN/nm-iodine-service.name";
target = "NetworkManager/VPN/nm-iodine-service.name";
}
] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])

View File

@ -0,0 +1,34 @@
{ config, lib, pkgs, ... }: with lib; let
cfg = config.services.nullidentdmod;
in {
options.services.nullidentdmod = with types; {
enable = mkEnableOption "the nullidentdmod identd daemon";
userid = mkOption {
type = nullOr str;
description = "User ID to return. Set to null to return a random string each time.";
default = null;
example = "alice";
};
};
config = mkIf cfg.enable {
systemd.sockets.nullidentdmod = {
description = "Socket for identd (NullidentdMod)";
listenStreams = [ "113" ];
socketConfig.Accept = true;
wantedBy = [ "sockets.target" ];
};
systemd.services."nullidentdmod@" = {
description = "NullidentdMod service";
serviceConfig = {
DynamicUser = true;
ExecStart = "${pkgs.nullidentdmod}/bin/nullidentdmod${optionalString (cfg.userid != null) " ${cfg.userid}"}";
StandardInput = "socket";
StandardOutput = "socket";
};
};
};
}
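# Hedged usage sketch (illustration only): answer ident queries on port 113
# with a fixed user id and open the port:
#   services.nullidentdmod = { enable = true; userid = "alice"; };
#   networking.firewall.allowedTCPPorts = [ 113 ];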

View File

@ -12,6 +12,8 @@ let
log_dir = ${cfg.logDir}
'' + lib.optionalString (cfg.port != null) ''
ui_port = ${toString cfg.port}
'' + lib.optionalString (cfg.torAlways) ''
tor = always
'' + cfg.extraConfig;
};
in with lib; {
@ -35,11 +37,17 @@ in with lib; {
port = mkOption {
type = types.nullOr types.int;
default = null;
example = 15441;
description = "Optional zeronet port.";
example = 43110;
description = "Optional zeronet web UI port.";
};
tor = mkOption {
type = types.bool;
default = false;
description = "Use TOR for zeronet traffic where possible.";
};
torAlways = mkOption {
type = types.bool;
default = false;
description = "Use TOR for all zeronet traffic.";
@ -60,7 +68,11 @@ in with lib; {
services.tor = mkIf cfg.tor {
enable = true;
controlPort = 9051;
extraConfig = "CookieAuthentication 1";
extraConfig = ''
CacheDirectoryGroupReadable 1
CookieAuthentication 1
CookieAuthFileGroupReadable 1
'';
};
systemd.services.zeronet = {

View File

@ -3,78 +3,112 @@
with lib;
let
cfg = config.services.sks;
sksPkg = cfg.package;
in
{
in {
meta.maintainers = with maintainers; [ primeos calbrecht jcumming ];
options = {
services.sks = {
enable = mkEnableOption "sks";
enable = mkEnableOption ''
SKS (synchronizing key server for OpenPGP) and start the database
server. You need to create "''${dataDir}/dump/*.gpg" for the initial
import'';
package = mkOption {
default = pkgs.sks;
defaultText = "pkgs.sks";
type = types.package;
description = "
Which sks derivation to use.
";
description = "Which SKS derivation to use.";
};
dataDir = mkOption {
type = types.path;
default = "/var/db/sks";
example = "/var/lib/sks";
# TODO: The default might change to "/var/lib/sks" as this is more
# common. There's also https://github.com/NixOS/nixpkgs/issues/26256
# and "/var/db" is not FHS compliant (seems to come from BSD).
description = ''
Data directory (-basedir) for SKS, where the database and all
configuration files are located (e.g. KDB, PTree, membership and
sksconf).
'';
};
hkpAddress = mkOption {
default = [ "127.0.0.1" "::1" ];
type = types.listOf types.str;
description = "
Wich ip addresses the sks-keyserver is listening on.
";
description = ''
Domain names, IPv4 and/or IPv6 addresses to listen on for HKP
requests.
'';
};
hkpPort = mkOption {
default = 11371;
type = types.int;
description = "
Which port the sks-keyserver is listening on.
";
type = types.ints.u16;
description = "HKP port to listen on.";
};
webroot = mkOption {
type = types.nullOr types.path;
default = "${sksPkg.webSamples}/OpenPKG";
defaultText = "\${pkgs.sks.webSamples}/OpenPKG";
description = ''
Source directory (will be symlinked, if not null) for the files the
built-in webserver should serve. SKS (''${pkgs.sks.webSamples})
provides the following examples: "HTML5", "OpenPKG", and "XHTML+ES".
The index file can be named index.html, index.htm, index.xhtm, or
index.xhtml. Files with the extensions .css, .es, .js, .jpg, .jpeg,
.png, or .gif are supported. Subdirectories and filenames with
anything other than alphanumeric characters and the '.' character
will be ignored.
'';
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ sksPkg ];
users.users.sks = {
createHome = true;
home = "/var/db/sks";
isSystemUser = true;
shell = "${pkgs.coreutils}/bin/true";
users = {
users.sks = {
isSystemUser = true;
description = "SKS user";
home = cfg.dataDir;
createHome = true;
group = "sks";
useDefaultShell = true;
packages = [ sksPkg pkgs.db ];
};
groups.sks = { };
};
systemd.services = let
hkpAddress = "'" + (builtins.concatStringsSep " " cfg.hkpAddress) + "'" ;
hkpPort = builtins.toString cfg.hkpPort;
home = config.users.users.sks.home;
user = config.users.users.sks.name;
in {
sks-keyserver = {
"sks-db" = {
description = "SKS database server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
mkdir -p ${home}/dump
${pkgs.sks}/bin/sks build ${home}/dump/*.gpg -n 10 -cache 100 || true #*/
${pkgs.sks}/bin/sks cleandb || true
${pkgs.sks}/bin/sks pbuild -cache 20 -ptree_cache 70 || true
${lib.optionalString (cfg.webroot != null)
"ln -sfT \"${cfg.webroot}\" web"}
mkdir -p dump
${sksPkg}/bin/sks build dump/*.gpg -n 10 -cache 100 || true #*/
${sksPkg}/bin/sks cleandb || true
${sksPkg}/bin/sks pbuild -cache 20 -ptree_cache 70 || true
'';
serviceConfig = {
WorkingDirectory = home;
User = user;
WorkingDirectory = "~";
User = "sks";
Group = "sks";
Restart = "always";
ExecStart = "${pkgs.sks}/bin/sks db -hkp_address ${hkpAddress} -hkp_port ${hkpPort}";
ExecStart = "${sksPkg}/bin/sks db -hkp_address ${hkpAddress} -hkp_port ${hkpPort}";
};
};
};

View File

@ -208,7 +208,7 @@ in
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable tor transaprent proxy";
description = "Whether to enable tor transparent proxy";
};
listenAddress = mkOption {

View File

@ -42,7 +42,7 @@ in
protocol = "tcp";
user = "root";
server = "${pkgs.tcp_wrappers}/bin/tcpd";
serverArgs = "${pkgs.heimdalFull}/bin/kadmind";
serverArgs = "${pkgs.heimdalFull}/libexec/heimdal/kadmind";
};
systemd.services.kdc = {
@ -51,13 +51,13 @@ in
preStart = ''
mkdir -m 0755 -p ${stateDir}
'';
script = "${heimdalFull}/bin/kdc";
script = "${heimdalFull}/libexec/heimdal/kdc";
};
systemd.services.kpasswdd = {
description = "Kerberos Password Changing daemon";
wantedBy = [ "multi-user.target" ];
script = "${heimdalFull}/bin/kpasswdd";
script = "${heimdalFull}/libexec/heimdal/kpasswdd";
};
};

View File

@ -0,0 +1,958 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.codimd;
prettyJSON = conf:
pkgs.runCommand "codimd-config.json" { } ''
echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq \
'{production:del(.[]|nulls)|del(.[][]?|nulls)}' > $out
'';
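# Illustrative (comment added): prettyJSON { domain = "example.org"; port = 3000; urlPath = null; }
# writes roughly {"production":{"domain":"example.org","port":3000}}, i.e. the
# settings are wrapped under "production" and null entries are dropped by jq.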
in
{
options.services.codimd = {
enable = mkEnableOption "the CodiMD Markdown Editor";
groups = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Groups to which the codimd user should be added.
'';
};
workDir = mkOption {
type = types.path;
default = "/var/lib/codimd";
description = ''
Working directory for the CodiMD service.
'';
};
configuration = {
debug = mkEnableOption "debug mode";
domain = mkOption {
type = types.nullOr types.str;
default = null;
example = "codimd.org";
description = ''
Domain name for the CodiMD instance.
'';
};
urlPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/url/path/to/codimd";
description = ''
Path under which CodiMD is accessible.
'';
};
host = mkOption {
type = types.str;
default = "localhost";
description = ''
Address to listen on.
'';
};
port = mkOption {
type = types.int;
default = 3000;
example = "80";
description = ''
Port to listen on.
'';
};
path = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/run/codimd.sock";
description = ''
Specify where a UNIX domain socket should be placed.
'';
};
allowOrigin = mkOption {
type = types.listOf types.str;
default = [];
example = [ "localhost" "codimd.org" ];
description = ''
List of domains to whitelist.
'';
};
useSSL = mkOption {
type = types.bool;
default = false;
description = ''
Enable to use SSL server. This will also enable
<option>protocolUseSSL</option>.
'';
};
hsts = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable HSTS if HTTPS is also enabled.
'';
};
maxAgeSeconds = mkOption {
type = types.int;
default = 31536000;
description = ''
Max duration for clients to keep the HSTS status.
'';
};
includeSubdomains = mkOption {
type = types.bool;
default = true;
description = ''
Whether to include subdomains in HSTS.
'';
};
preload = mkOption {
type = types.bool;
default = true;
description = ''
Whether to allow preloading of the site's HSTS status.
'';
};
};
csp = mkOption {
type = types.nullOr types.attrs;
default = null;
example = literalExample ''
{
enable = true;
directives = {
scriptSrc = "trustworthy.scripts.example.com";
};
upgradeInsecureRequest = "auto";
addDefaults = true;
}
'';
description = ''
Specify the Content Security Policy which is passed to Helmet.
For configuration details see <link xlink:href="https://helmetjs.github.io/docs/csp/"
>https://helmetjs.github.io/docs/csp/</link>.
'';
};
protocolUseSSL = mkOption {
type = types.bool;
default = false;
description = ''
Enable to use TLS for resource paths.
This only applies when <option>domain</option> is set.
'';
};
urlAddPort = mkOption {
type = types.bool;
default = false;
description = ''
Enable to add the port to callback URLs.
This only applies when <option>domain</option> is set
and only for ports other than 80 and 443.
'';
};
useCDN = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use CDN resources or not.
'';
};
allowAnonymous = mkOption {
type = types.bool;
default = true;
description = ''
Whether to allow anonymous usage.
'';
};
allowAnonymousEdits = mkOption {
type = types.bool;
default = false;
description = ''
Whether to allow guests to edit existing notes with the `freely' permission,
when <option>allowAnonymous</option> is enabled.
'';
};
allowFreeURL = mkOption {
type = types.bool;
default = false;
description = ''
Whether to allow note creation by accessing a nonexistent note URL.
'';
};
defaultPermission = mkOption {
type = types.enum [ "freely" "editable" "limited" "locked" "private" ];
default = "editable";
description = ''
Default permissions for notes.
This only applies for signed-in users.
'';
};
dbURL = mkOption {
type = types.nullOr types.str;
default = null;
example = ''
postgres://user:pass@host:5432/dbname
'';
description = ''
Specify which database to use.
CodiMD supports mysql, postgres, sqlite and mssql.
See <link xlink:href="https://sequelize.readthedocs.io/en/v3/">
https://sequelize.readthedocs.io/en/v3/</link> for more information.
Note: This option overrides <option>db</option>.
'';
};
db = mkOption {
type = types.attrs;
default = {};
example = literalExample ''
{
dialect = "sqlite";
storage = "/var/lib/codimd/db.codimd.sqlite";
}
'';
description = ''
Specify the configuration for sequelize.
CodiMD supports mysql, postgres, sqlite and mssql.
See <link xlink:href="https://sequelize.readthedocs.io/en/v3/">
https://sequelize.readthedocs.io/en/v3/</link> for more information.
Note: This option is overridden by <option>dbURL</option>.
'';
};
sslKeyPath= mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/codimd.key";
description = ''
Path to the SSL key. Needed when <option>useSSL</option> is enabled.
'';
};
sslCertPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/codimd.crt";
description = ''
Path to the SSL cert. Needed when <option>useSSL</option> is enabled.
'';
};
sslCAPath = mkOption {
type = types.listOf types.str;
default = [];
example = [ "/var/lib/codimd/ca.crt" ];
description = ''
SSL ca chain. Needed when <option>useSSL</option> is enabled.
'';
};
dhParamPath = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/codimd/dhparam.pem";
description = ''
Path to the SSL dh params. Needed when <option>useSSL</option> is enabled.
'';
};
tmpPath = mkOption {
type = types.str;
default = "/tmp";
description = ''
Path to the temp directory CodiMD should use.
Note that <option>serviceConfig.PrivateTmp</option> is enabled for
the CodiMD systemd service by default.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
defaultNotePath = mkOption {
type = types.nullOr types.str;
default = "./public/default.md";
description = ''
Path to the default Note file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
docsPath = mkOption {
type = types.nullOr types.str;
default = "./public/docs";
description = ''
Path to the docs directory.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
indexPath = mkOption {
type = types.nullOr types.str;
default = "./public/views/index.ejs";
description = ''
Path to the index template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
hackmdPath = mkOption {
type = types.nullOr types.str;
default = "./public/views/hackmd.ejs";
description = ''
Path to the hackmd template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
errorPath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/error.ejs";
description = ''
Path to the error template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
prettyPath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/pretty.ejs";
description = ''
Path to the pretty template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
slidePath = mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "./public/views/slide.hbs";
description = ''
Path to the slide template file.
(Non-canonical paths are relative to CodiMD's base directory)
'';
};
uploadsPath = mkOption {
type = types.str;
default = "${cfg.workDir}/uploads";
defaultText = "/var/lib/codimd/uploads";
description = ''
Path under which uploaded files are saved.
'';
};
sessionName = mkOption {
type = types.str;
default = "connect.sid";
description = ''
Specify the name of the session cookie.
'';
};
sessionSecret = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Specify the secret used to sign the session cookie.
If unset, one will be generated on startup.
'';
};
sessionLife = mkOption {
type = types.int;
default = 1209600000;
description = ''
Session life time in milliseconds.
'';
};
heartbeatInterval = mkOption {
type = types.int;
default = 5000;
description = ''
Specify the socket.io heartbeat interval.
'';
};
heartbeatTimeout = mkOption {
type = types.int;
default = 10000;
description = ''
Specify the socket.io heartbeat timeout.
'';
};
documentMaxLength = mkOption {
type = types.int;
default = 100000;
description = ''
Specify the maximum document length.
'';
};
email = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable email sign-in.
'';
};
allowEmailRegister = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable email registration.
'';
};
allowGravatar = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use Gravatar as the profile picture source.
'';
};
imageUploadType = mkOption {
type = types.enum [ "imgur" "s3" "minio" "filesystem" ];
default = "filesystem";
description = ''
Specify where to upload images.
'';
};
minio = mkOption {
type = types.nullOr (types.submodule {
options = {
accessKey = mkOption {
type = types.str;
default = "";
description = ''
Minio access key.
'';
};
secretKey = mkOption {
type = types.str;
default = "";
description = ''
Minio secret key.
'';
};
endpoint = mkOption {
type = types.str;
default = "";
description = ''
Minio endpoint.
'';
};
port = mkOption {
type = types.int;
default = 9000;
description = ''
Minio listen port.
'';
};
secure = mkOption {
type = types.bool;
default = true;
description = ''
Whether to use HTTPS for Minio.
'';
};
};
});
default = null;
description = "Configure the minio third-party integration.";
};
s3 = mkOption {
type = types.nullOr (types.submodule {
options = {
accessKeyId = mkOption {
type = types.str;
default = "";
description = ''
AWS access key id.
'';
};
secretAccessKey = mkOption {
type = types.str;
default = "";
description = ''
AWS access key.
'';
};
region = mkOption {
type = types.str;
default = "";
description = ''
AWS S3 region.
'';
};
};
});
default = null;
description = "Configure the s3 third-party integration.";
};
s3bucket = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Specify the bucket name for upload types <literal>s3</literal> and <literal>minio</literal>.
'';
};
allowPDFExport = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable PDF exports.
'';
};
imgur.clientId = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Imgur API client ID.
'';
};
azure = mkOption {
type = types.nullOr (types.submodule {
options = {
connectionString = mkOption {
type = types.str;
default = "";
description = ''
Azure Blob Storage connection string.
'';
};
container = mkOption {
type = types.str;
default = "";
description = ''
Azure Blob Storage container name.
It will be created if non-existent.
'';
};
};
});
default = null;
description = "Configure the azure third-party integration.";
};
oauth2 = mkOption {
type = types.nullOr (types.submodule {
options = {
authorizationURL = mkOption {
type = types.str;
default = "";
description = ''
Specify the OAuth authorization URL.
'';
};
tokenURL = mkOption {
type = types.str;
default = "";
description = ''
Specify the OAuth token URL.
'';
};
clientID = mkOption {
type = types.str;
default = "";
description = ''
Specify the OAuth client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
Specify the OAuth client secret.
'';
};
};
});
default = null;
description = "Configure the OAuth integration.";
};
facebook = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
default = "";
description = ''
Facebook API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
Facebook API client secret.
'';
};
};
});
default = null;
description = "Configure the facebook third-party integration";
};
twitter = mkOption {
type = types.nullOr (types.submodule {
options = {
consumerKey = mkOption {
type = types.str;
default = "";
description = ''
Twitter API consumer key.
'';
};
consumerSecret = mkOption {
type = types.str;
default = "";
description = ''
Twitter API consumer secret.
'';
};
};
});
default = null;
description = "Configure the Twitter third-party integration.";
};
github = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
default = "";
description = ''
GitHub API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
GitHub API client secret.
'';
};
};
});
default = null;
description = "Configure the GitHub third-party integration.";
};
gitlab = mkOption {
type = types.nullOr (types.submodule {
options = {
baseURL = mkOption {
type = types.str;
default = "";
description = ''
GitLab API authentication endpoint.
Only needed for endpoints other than gitlab.com.
'';
};
clientID = mkOption {
type = types.str;
default = "";
description = ''
GitLab API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
GitLab API client secret.
'';
};
scope = mkOption {
type = types.enum [ "api" "read_user" ];
default = "api";
description = ''
GitLab API requested scope.
GitLab snippet import/export requires api scope.
'';
};
};
});
default = null;
description = "Configure the GitLab third-party integration.";
};
mattermost = mkOption {
type = types.nullOr (types.submodule {
options = {
baseURL = mkOption {
type = types.str;
default = "";
description = ''
Mattermost authentication endpoint.
'';
};
clientID = mkOption {
type = types.str;
default = "";
description = ''
Mattermost API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
Mattermost API client secret.
'';
};
};
});
default = null;
description = "Configure the Mattermost third-party integration.";
};
dropbox = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
default = "";
description = ''
Dropbox API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
Dropbox API client secret.
'';
};
appKey = mkOption {
type = types.str;
default = "";
description = ''
Dropbox app key.
'';
};
};
});
default = null;
description = "Configure the Dropbox third-party integration.";
};
google = mkOption {
type = types.nullOr (types.submodule {
options = {
clientID = mkOption {
type = types.str;
default = "";
description = ''
Google API client ID.
'';
};
clientSecret = mkOption {
type = types.str;
default = "";
description = ''
Google API client secret.
'';
};
};
});
default = null;
description = "Configure the Google third-party integration.";
};
ldap = mkOption {
type = types.nullOr (types.submodule {
options = {
providerName = mkOption {
type = types.str;
default = "";
description = ''
Optional name to be displayed on the login form, indicating the LDAP provider.
'';
};
url = mkOption {
type = types.str;
default = "";
example = "ldap://localhost";
description = ''
URL of LDAP server.
'';
};
bindDn = mkOption {
type = types.str;
default = "";
description = ''
Bind DN for LDAP access.
'';
};
bindCredentials = mkOption {
type = types.str;
default = "";
description = ''
Bind credentials for LDAP access.
'';
};
searchBase = mkOption {
type = types.str;
default = "";
example = "o=users,dc=example,dc=com";
description = ''
LDAP directory to begin search from.
'';
};
searchFilter = mkOption {
type = types.str;
default = "";
example = "(uid={{username}})";
description = ''
LDAP filter to search with.
'';
};
searchAttributes = mkOption {
type = types.listOf types.str;
default = [];
example = [ "displayName" "mail" ];
description = ''
LDAP attributes to search with.
'';
};
userNameField = mkOption {
type = types.str;
default = "";
description = ''
LDAP field which is used as the username on CodiMD.
By default <option>useridField</option> is used.
'';
};
useridField = mkOption {
type = types.str;
default = "";
example = "uid";
description = ''
LDAP field which is a unique identifier for users on CodiMD.
'';
};
tlsca = mkOption {
type = types.str;
default = "";
example = "server-cert.pem,root.pem";
description = ''
Root CA for LDAP TLS in PEM format.
'';
};
};
});
default = null;
description = "Configure the LDAP integration.";
};
saml = mkOption {
type = types.nullOr (types.submodule {
options = {
idpSsoUrl = mkOption {
type = types.str;
default = "";
example = "https://idp.example.com/sso";
description = ''
IdP authentication endpoint.
'';
};
idPCert = mkOption {
type = types.str;
default = "";
example = "/path/to/cert.pem";
description = ''
Path to IdP certificate file in PEM format.
'';
};
issuer = mkOption {
type = types.str;
default = "";
description = ''
Optional identity of the service provider.
This defaults to the server URL.
'';
};
identifierFormat = mkOption {
type = types.str;
default = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress";
description = ''
Optional name identifier format.
'';
};
groupAttribute = mkOption {
type = types.str;
default = "";
example = "memberOf";
description = ''
Optional attribute name for group list.
'';
};
externalGroups = mkOption {
type = types.listOf types.str;
default = [];
example = [ "Temporary-staff" "External-users" ];
description = ''
Excluded group names.
'';
};
requiredGroups = mkOption {
type = types.listOf types.str;
default = [];
example = [ "Hackmd-users" "Codimd-users" ];
description = ''
Required group names.
'';
};
attribute = {
id = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `id'.
Defaults to `NameID' of SAML response.
'';
};
username = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `username'.
Defaults to `NameID' of SAML response.
'';
};
email = mkOption {
type = types.str;
default = "";
description = ''
Attribute map for `email'.
Defaults to `NameID' of SAML response if
<option>identifierFormat</option> has
the default value.
'';
};
};
};
});
default = null;
description = "Configure the SAML integration.";
};
};
};
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.configuration.db == {} -> (
cfg.configuration.dbURL != "" && cfg.configuration.dbURL != null
);
message = "Database configuration for CodiMD missing."; }
];
users.groups.codimd = {};
users.users.codimd = {
description = "CodiMD service user";
group = "codimd";
extraGroups = cfg.groups;
home = cfg.workDir;
createHome = true;
};
systemd.services.codimd = {
description = "CodiMD Service";
wantedBy = [ "multi-user.target" ];
after = [ "networking.target" ];
preStart = ''
mkdir -p ${cfg.workDir}
chown -R codimd: ${cfg.workDir}
'';
serviceConfig = {
WorkingDirectory = cfg.workDir;
ExecStart = "${pkgs.codimd}/bin/codimd";
Environment = [
"CMD_CONFIG_FILE=${prettyJSON cfg.configuration}"
"NODE_ENV=production"
];
Restart = "always";
User = "codimd";
PermissionsStartOnly = true;
PrivateTmp = true;
};
};
};
}
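Taken together, the options above are serialized into CodiMD's JSON configuration file when the service starts (via CMD_CONFIG_FILE). A minimal sketch of enabling the service using only options defined in this module; the values are placeholders, not recommendations:

  # Hypothetical host configuration; only options shown in the module above are used.
  services.codimd = {
    enable = true;
    configuration = {
      dbURL = "sqlite:///var/lib/codimd/codimd.db";  # SQLite keeps the example self-contained
      email = true;                                  # keep email sign-in enabled
      allowEmailRegister = false;                    # but disable self-registration
      imageUploadType = "filesystem";                # uploads land under uploadsPath
    };
  };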

View File

@ -66,7 +66,7 @@ in
'';
}];
security.wrappers = (import (builtins.toPath "${e.enlightenment}/e-wrappers.nix")).security.wrappers;
security.wrappers = (import "${e.enlightenment}/e-wrappers.nix").security.wrappers;
environment.etc = singleton
{ source = xcfg.xkbDir;

View File

@ -110,6 +110,7 @@ in {
services.gnome3.gnome-terminal-server.enable = mkDefault true;
services.gnome3.gnome-user-share.enable = mkDefault true;
services.gnome3.gvfs.enable = true;
services.gnome3.rygel.enable = mkDefault true;
services.gnome3.seahorse.enable = mkDefault true;
services.gnome3.sushi.enable = mkDefault true;
services.gnome3.tracker.enable = mkDefault true;

View File

@ -266,7 +266,7 @@ in
session. Each session script can set the
<varname>waitPID</varname> shell variable to make this script
wait until the end of the user session. Each script is used
to define either a windows manager or a desktop manager. These
to define either a window manager or a desktop manager. These
can be differentiated by setting the attribute
<varname>manage</varname> either to <literal>"window"</literal>
or <literal>"desktop"</literal>.
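For illustration, a custom session of the kind described here could look as follows; a minimal sketch, assuming the list-typed services.xserver.displayManager.session option of this NixOS version, with a hypothetical session name and start command:

  services.xserver.displayManager.session = [
    {
      manage = "window";        # "window" or "desktop", as explained above
      name = "my-wm";           # hypothetical session name
      start = ''
        my-window-manager &     # placeholder window manager binary
        waitPID=$!              # keeps the session alive until the WM exits
      '';
    }
  ];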

View File

@ -197,7 +197,7 @@ in
# lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
execCmd = ''
export PATH=${lightdm}/sbin:$PATH
exec ${lightdm}/sbin/lightdm --log-dir=/var/log --run-dir=/run
exec ${lightdm}/sbin/lightdm
'';
};
@ -246,12 +246,19 @@ in
'';
users.users.lightdm = {
createHome = true;
home = "/var/lib/lightdm-data";
home = "/var/lib/lightdm";
group = "lightdm";
uid = config.ids.uids.lightdm;
};
systemd.tmpfiles.rules = [
"d /var/run/lightdm 0711 lightdm lightdm 0"
"d /var/cache/lightdm 0711 root lightdm -"
"d /var/lib/lightdm 1770 lightdm lightdm -"
"d /var/lib/lightdm-data 1775 lightdm lightdm -"
"d /var/log/lightdm 0711 root lightdm -"
];
users.groups.lightdm.gid = config.ids.gids.lightdm;
services.xserver.tty = null; # We might start multiple X servers so let the tty increment themselves..
services.xserver.display = null; # We specify our own display (and logfile) in xserver-wrapper up there

View File

@ -419,7 +419,7 @@ while (my $f = <$listActiveUsers>) {
my ($uid, $name) = ($+{uid}, $+{user});
print STDERR "reloading user units for $name...\n";
system("su", "-l", $name, "-c", "XDG_RUNTIME_DIR=/run/user/$uid @systemd@/bin/systemctl --user daemon-reload");
system("su", "-s", "@shell@", "-l", $name, "-c", "XDG_RUNTIME_DIR=/run/user/$uid @systemd@/bin/systemctl --user daemon-reload");
}
close $listActiveUsers;

View File

@ -115,6 +115,7 @@ let
inherit (pkgs) utillinux coreutils;
systemd = config.systemd.package;
inherit (pkgs.stdenv) shell;
inherit children;
kernelParams = config.boot.kernelParams;

View File

@ -208,7 +208,6 @@ let
"InitialCongestionWindow" "InitialAdvertisedReceiveWindow" "QuickAck"
"MTUBytes"
])
(assertHasField "Gateway")
];
checkDhcp = checkUnitConfig "DHCP" [
@ -249,13 +248,14 @@ let
# .network files have a [Link] section with different options than in .netlink files
checkNetworkLink = checkUnitConfig "Link" [
(assertOnlyFields [
"MACAddress" "MTUBytes" "ARP" "Unmanaged" "RequiredForOnline"
"MACAddress" "MTUBytes" "ARP" "Multicast" "Unmanaged" "RequiredForOnline"
])
(assertMacAddress "MACAddress")
(assertByteFormat "MTUBytes")
(assertValueOneOf "ARP" boolValues)
(assertValueOneOf "Multicast" boolValues)
(assertValueOneOf "Unmanaged" boolValues)
(assertValueOneOf "RquiredForOnline" boolValues)
(assertValueOneOf "RequiredForOnline" boolValues)
];
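As context for the newly accepted Multicast key, a .network unit generated through the networkd module might set it like this; a minimal sketch, assuming the systemd.network.networks option with matchConfig/linkConfig/networkConfig attributes (interface name and value form are assumptions):

  systemd.network = {
    enable = true;
    networks."40-wired" = {
      matchConfig.Name = "enp3s0";     # placeholder interface name
      linkConfig.Multicast = true;     # assumed boolean form; the check above also admits systemd's string spellings
      networkConfig.DHCP = "yes";
    };
  };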

View File

@ -341,7 +341,7 @@ in
You should try to make this ID unique among your machines. You can
generate a random 32-bit ID using the following commands:
<literal>cksum /etc/machine-id | while read c rest; do printf "%x" $c; done</literal>
<literal>head -c 8 /etc/machine-id</literal>
(this derives it from the machine-id that systemd generates) or

View File

@ -261,6 +261,7 @@ in rec {
tests.chromium = (callSubTestsOnMatchingSystems ["x86_64-linux"] tests/chromium.nix {}).stable or {};
tests.cjdns = callTest tests/cjdns.nix {};
tests.cloud-init = callTest tests/cloud-init.nix {};
tests.codimd = callTest tests/codimd.nix {};
tests.containers-ipv4 = callTest tests/containers-ipv4.nix {};
tests.containers-ipv6 = callTest tests/containers-ipv6.nix {};
tests.containers-bridge = callTest tests/containers-bridge.nix {};
@ -327,7 +328,6 @@ in rec {
tests.keymap = callSubTests tests/keymap.nix {};
tests.initrdNetwork = callTest tests/initrd-network.nix {};
tests.kafka = callSubTests tests/kafka.nix {};
tests.kernel-copperhead = callTest tests/kernel-copperhead.nix {};
tests.kernel-latest = callTest tests/kernel-latest.nix {};
tests.kernel-lts = callTest tests/kernel-lts.nix {};
tests.kubernetes.dns = callSubTestsOnMatchingSystems ["x86_64-linux"] tests/kubernetes/dns.nix {};
@ -400,7 +400,7 @@ in rec {
tests.slurm = callTest tests/slurm.nix {};
tests.smokeping = callTest tests/smokeping.nix {};
tests.snapper = callTest tests/snapper.nix {};
tests.statsd = callTest tests/statsd.nix {};
#tests.statsd = callTest tests/statsd.nix {}; # statsd is broken: #45946
tests.strongswan-swanctl = callTest tests/strongswan-swanctl.nix {};
tests.sudo = callTest tests/sudo.nix {};
tests.systemd = callTest tests/systemd.nix {};

nixos/tests/codimd.nix (new file)
View File

@ -0,0 +1,56 @@
import ./make-test.nix ({ pkgs, lib, ... }:
{
name = "codimd";
meta = with lib.maintainers; {
maintainers = [ willibutz ];
};
nodes = {
codimdSqlite = { ... }: {
services = {
codimd = {
enable = true;
configuration.dbURL = "sqlite:///var/lib/codimd/codimd.db";
};
};
};
codimdPostgres = { ... }: {
systemd.services.codimd.after = [ "postgresql.service" ];
services = {
codimd = {
enable = true;
configuration.dbURL = "postgres://codimd:snakeoilpassword@localhost:5432/codimddb";
};
postgresql = {
enable = true;
initialScript = pkgs.writeText "pg-init-script.sql" ''
CREATE ROLE codimd LOGIN PASSWORD 'snakeoilpassword';
CREATE DATABASE codimddb OWNER codimd;
'';
};
};
};
};
testScript = ''
startAll();
subtest "CodiMD sqlite", sub {
$codimdSqlite->waitForUnit("codimd.service");
$codimdSqlite->waitForOpenPort(3000);
$codimdPostgres->succeed("sleep 2"); # avoid 503 during startup
$codimdSqlite->succeed("curl -sSf http://localhost:3000/new");
};
subtest "CodiMD postgres", sub {
$codimdPostgres->waitForUnit("postgresql.service");
$codimdPostgres->waitForUnit("codimd.service");
$codimdPostgres->waitForOpenPort(5432);
$codimdPostgres->waitForOpenPort(3000);
$codimdPostgres->succeed("sleep 2"); # avoid 503 during startup
$codimdPostgres->succeed("curl -sSf http://localhost:3000/new");
};
'';
})
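The new test can also be exercised outside of release.nix; a minimal sketch of a wrapper expression, assuming it is evaluated from the root of a nixpkgs checkout (for example with nix-build on this file):

  # codimd-test.nix -- hypothetical wrapper file; make-test.nix defaults the
  # system argument to the current platform, so an empty argument set suffices.
  import ./nixos/tests/codimd.nix { }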

View File

@ -563,6 +563,7 @@ in {
"swapon -L swap",
"mkfs.ext3 -L nixos /dev/sda2",
"mount LABEL=nixos /mnt",
"mkdir -p /mnt/tmp",
);
'';
grubVersion = 1;

View File

@ -1,19 +0,0 @@
import ./make-test.nix ({ pkgs, ...} : {
name = "kernel-copperhead";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ nequissimus ];
};
machine = { pkgs, ... }:
{
boot.kernelPackages = pkgs.linuxPackages_copperhead_lts;
};
testScript =
''
$machine->succeed("uname -a");
$machine->succeed("uname -s | grep 'Linux'");
$machine->succeed("uname -a | grep '${pkgs.linuxPackages_copperhead_lts.kernel.modDirVersion}'");
$machine->succeed("uname -a | grep 'hardened'");
'';
})

View File

@ -9,12 +9,16 @@ import ./make-test.nix ({ pkgs, ...} : {
};
testScript = ''
startAll;
$machine->waitForUnit("multi-user.target");
# multi-user.target wants novacomd.service, but let's make sure
$machine->waitForUnit("novacomd.service");
# Check status and try connecting with novacom
$machine->succeed("systemctl status novacomd.service >&2");
# to prevent non-deterministic failure,
# make sure the daemon is really listening
$machine->waitForOpenPort(6968);
$machine->succeed("novacom -l");
# Stop the daemon, double-check novacom fails if daemon isn't working
@ -23,6 +27,8 @@ import ./make-test.nix ({ pkgs, ...} : {
# And back again for good measure
$machine->startJob("novacomd");
# make sure the daemon is really listening
$machine->waitForOpenPort(6968);
$machine->succeed("novacom -l");
'';
})

View File

@ -102,11 +102,17 @@ import ./make-test.nix {
testScript = ''
startAll;
$client->waitForUnit("network.target");
$client->waitForUnit("network-online.target");
$smtp1->waitForUnit('opensmtpd');
$smtp2->waitForUnit('opensmtpd');
$smtp2->waitForUnit('dovecot2');
# To prevent sporadic failures during daemon startup, make sure
# services are listening on their ports before sending requests
$smtp1->waitForOpenPort(25);
$smtp2->waitForOpenPort(25);
$smtp2->waitForOpenPort(143);
$client->succeed('send-a-test-mail');
$smtp1->waitUntilFails('smtpctl show queue | egrep .');
$smtp2->waitUntilFails('smtpctl show queue | egrep .');

View File

@ -38,6 +38,7 @@ stdenv.mkDerivation rec {
homepage = https://bitcoinabc.org/;
maintainers = with maintainers; [ lassulus ];
license = licenses.mit;
broken = stdenv.isDarwin;
platforms = platforms.unix;
};
}

View File

@ -46,6 +46,7 @@ stdenv.mkDerivation rec {
homepage = https://bitcoinclassic.com/;
maintainers = with maintainers; [ jefdaj ];
license = licenses.mit;
broken = stdenv.isDarwin;
platforms = platforms.unix;
};
}

View File

@ -62,6 +62,7 @@ stdenv.mkDerivation rec {
homepage = https://www.bitcoinunlimited.info/;
maintainers = with maintainers; [ DmitryTsygankov ];
license = licenses.mit;
broken = stdenv.isDarwin;
platforms = platforms.unix;
};
}

View File

@ -43,6 +43,7 @@ stdenv.mkDerivation rec{
homepage = https://bitcoinxt.software/;
maintainers = with maintainers; [ jefdaj ];
license = licenses.mit;
broken = stdenv.isDarwin;
platforms = platforms.unix;
};
}

View File

@ -1,6 +1,8 @@
{ stdenv, fetchurl, pkgconfig, autoreconfHook, openssl, db48, boost
, zlib, miniupnpc, qt4, utillinux, protobuf, qrencode, libevent
, withGui }:
{ stdenv, fetchurl, pkgconfig, autoreconfHook, hexdump, openssl, db48
, boost, zlib, miniupnpc, qt4, utillinux, protobuf, qrencode, libevent
, AppKit
, withGui ? !stdenv.isDarwin
}:
with stdenv.lib;
stdenv.mkDerivation rec{
@ -12,11 +14,10 @@ stdenv.mkDerivation rec{
sha256 = "0v0g2wb4nsnhddxzb63vj2bc1mgyj05vqm5imicjfz8prvgc0si8";
};
nativeBuildInputs = [ pkgconfig autoreconfHook ];
buildInputs = [ openssl db48 boost zlib
miniupnpc protobuf libevent]
++ optionals stdenv.isLinux [ utillinux ]
++ optionals withGui [ qt4 qrencode ];
nativeBuildInputs = [ pkgconfig autoreconfHook hexdump ];
buildInputs = [ openssl db48 boost zlib miniupnpc protobuf libevent ]
++ optionals withGui [ qt4 qrencode ]
++ optional stdenv.isDarwin AppKit;
configureFlags = [ "--with-boost-libdir=${boost.out}/lib" ]
++ optionals withGui [ "--with-gui=qt4" ];

View File

@ -1,4 +1,4 @@
{ callPackage, boost155, boost165, openssl_1_1_0, haskellPackages, darwin, libsForQt5, miniupnpc_2, python3, buildGo110Package }:
{ callPackage, boost155, boost165, openssl_1_1, haskellPackages, darwin, libsForQt5, miniupnpc_2, python3, buildGo110Package }:
rec {
@ -32,8 +32,11 @@ rec {
boost = boost165; withGui = false;
};
btc1 = callPackage ./btc1.nix { boost = boost165; withGui = true; };
btc1d = callPackage ./btc1.nix { boost = boost165; withGui = false; };
btc1 = callPackage ./btc1.nix {
inherit (darwin.apple_sdk.frameworks) AppKit;
boost = boost165;
};
btc1d = btc1.override { withGui = false; };
cryptop = python3.pkgs.callPackage ./cryptop { };
@ -59,8 +62,10 @@ rec {
buildGoPackage = buildGo110Package;
};
litecoin = callPackage ./litecoin.nix { withGui = true; };
litecoind = callPackage ./litecoin.nix { withGui = false; };
litecoin = callPackage ./litecoin.nix {
inherit (darwin.apple_sdk.frameworks) AppKit;
};
litecoind = litecoin.override { withGui = false; };
masari = callPackage ./masari.nix { };
@ -85,7 +90,7 @@ rec {
zcash = callPackage ./zcash {
withGui = false;
openssl = openssl_1_1_0;
openssl = openssl_1_1;
};
parity = callPackage ./parity { };

View File

@ -54,6 +54,7 @@ buildGoPackage rec {
meta = with stdenv.lib; {
homepage = https://github.com/dapphub/ethsign;
description = "Make raw signed Ethereum transactions";
broken = stdenv.isDarwin; # test with CoreFoundation 10.11
license = [licenses.gpl3];
};
}

View File

@ -2,9 +2,12 @@
, pkgconfig, autoreconfHook
, openssl, db48, boost, zlib, miniupnpc
, glib, protobuf, utillinux, qt4, qrencode
, withGui, libevent }:
, AppKit
, withGui ? true, libevent
}:
with stdenv.lib;
stdenv.mkDerivation rec {
name = "litecoin" + (toString (optional (!withGui) "d")) + "-" + version;
@ -20,6 +23,7 @@ stdenv.mkDerivation rec {
nativeBuildInputs = [ pkgconfig autoreconfHook ];
buildInputs = [ openssl db48 boost zlib
miniupnpc glib protobuf utillinux libevent ]
++ optionals stdenv.isDarwin [ AppKit ]
++ optionals withGui [ qt4 qrencode ];
configureFlags = [ "--with-boost-libdir=${boost.out}/lib" ]
@ -39,6 +43,7 @@ stdenv.mkDerivation rec {
homepage = https://litecoin.org/;
platforms = platforms.unix;
license = licenses.mit;
broken = stdenv.isDarwin;
maintainers = with maintainers; [ offline AndersonTorres ];
};
}

View File

@ -12,13 +12,13 @@ with stdenv.lib;
stdenv.mkDerivation rec {
name = "monero-gui-${version}";
version = "0.12.0.0";
version = "0.12.3.0";
src = fetchFromGitHub {
owner = "monero-project";
repo = "monero-gui";
rev = "v${version}";
sha256 = "1mg5ival8a2wdp14yib4wzqax4xyvd40zjy9anhszljds1439jhl";
sha256 = "1ry0455cgirkc6n46qnlv5p49axjllil78xmx6469nbp3a2r3z7i";
};
nativeBuildInputs = [ qmake pkgconfig ];
@ -70,7 +70,8 @@ stdenv.mkDerivation rec {
cp ${desktopItem}/share/applications/* $out/share/applications
# install translations
cp -r release/bin/translations $out/share/
mkdir -p $out/share/translations
cp translations/*.qm $out/share/translations/
# install icons
for n in 16 24 32 48 64 96 128 256; do

View File

@ -1,38 +1,27 @@
diff --git a/main.cpp b/main.cpp
index c03b160..a8ea263 100644
index 79223c0..e80b317 100644
--- a/main.cpp
+++ b/main.cpp
@@ -80,14 +80,16 @@ int main(int argc, char *argv[])
// qDebug() << "High DPI auto scaling - enabled";
//#endif
- // Log settings
- Monero::Wallet::init(argv[0], "monero-wallet-gui");
-// qInstallMessageHandler(messageHandler);
-
MainApp app(argc, argv);
qDebug() << "app startd";
+ // Log settings
+ QString logfile =
+ QStandardPaths::writableLocation(QStandardPaths::CacheLocation)
+ + "/monero-wallet-gui.log";
+ Monero::Wallet::init(argv[0], logfile.toUtf8().constData());
+
app.setApplicationName("monero-core");
app.setOrganizationDomain("getmonero.org");
app.setOrganizationName("monero-project");
diff --git a/src/libwalletqt/Wallet.cpp b/src/libwalletqt/Wallet.cpp
index 74649ce..fe1efc6 100644
--- a/src/libwalletqt/Wallet.cpp
+++ b/src/libwalletqt/Wallet.cpp
@@ -729,7 +729,7 @@ QString Wallet::getWalletLogPath() const
#ifdef Q_OS_MACOS
return QStandardPaths::standardLocations(QStandardPaths::HomeLocation).at(0) + "/Library/Logs/" + filename;
#else
- return QCoreApplication::applicationDirPath() + "/" + filename;
+ return QStandardPaths::writableLocation(QStandardPaths::CacheLocation) + filename;
@@ -115,6 +115,9 @@ int main(int argc, char *argv[])
QCommandLineOption logPathOption(QStringList() << "l" << "log-file",
QCoreApplication::translate("main", "Log to specified file"),
QCoreApplication::translate("main", "file"));
+ logPathOption.setDefaultValue(
+ QStandardPaths::writableLocation(QStandardPaths::CacheLocation)
+ + "/monero-wallet-gui.log");
parser.addOption(logPathOption);
parser.addHelpOption();
parser.process(app);
diff --git a/Logger.cpp b/Logger.cpp
index 660bafc..dae24d4 100644
--- a/Logger.cpp
+++ b/Logger.cpp
@@ -15,7 +15,7 @@ static const QString default_name = "monero-wallet-gui.log";
#elif defined(Q_OS_MAC)
static const QString osPath = QStandardPaths::standardLocations(QStandardPaths::HomeLocation).at(0) + "/Library/Logs";
#else // linux + bsd
- static const QString osPath = QStandardPaths::standardLocations(QStandardPaths::HomeLocation).at(0);
+ static const QString osPath = QStandardPaths::standardLocations(QStandardPaths::CacheLocation).at(0);
#endif
}

View File

@ -1,14 +1,13 @@
diff --git a/TranslationManager.cpp b/TranslationManager.cpp
index fa39d35..5a410f7 100644
index e7fc52a..83534cc 100644
--- a/TranslationManager.cpp
+++ b/TranslationManager.cpp
@@ -29,7 +29,7 @@ bool TranslationManager::setLanguage(const QString &language)
#ifdef Q_OS_MACX
QString dir = qApp->applicationDirPath() + "/../Resources/translations";
#else
@@ -25,7 +25,7 @@ bool TranslationManager::setLanguage(const QString &language)
return true;
}
- QString dir = qApp->applicationDirPath() + "/translations";
+ QString dir = qApp->applicationDirPath() + "/../share/translations";
#endif
QString filename = "monero-core_" + language;
qDebug("%s: loading translation file '%s' from '%s'",

View File

@ -1,4 +1,4 @@
{ stdenv, fetchFromGitHub, fetchpatch
{ stdenv, fetchgit
, cmake, pkgconfig, git
, boost, miniupnpc, openssl, unbound, cppzmq
, zeromq, pcsclite, readline
@ -11,25 +11,16 @@ with stdenv.lib;
stdenv.mkDerivation rec {
name = "monero-${version}";
version = "0.12.0.0";
version = "0.12.3.0";
src = fetchFromGitHub {
owner = "monero-project";
repo = "monero";
src = fetchgit {
url = "https://github.com/monero-project/monero.git";
rev = "v${version}";
sha256 = "1lc9mkrl1m8mdbvj88y8y5rv44vinxf7dyv221ndmw5c5gs5zfgk";
sha256 = "1609k1qn9xx37a92ai36rajds9cmdjlkqyka95hks5xjr3l5ca8i";
};
nativeBuildInputs = [ cmake pkgconfig git ];
patches = [
# fix daemon crash, remove with 0.12.1.0 update
(fetchpatch {
url = "https://github.com/monero-project/monero/commit/08343ab.diff";
sha256 = "0f1snrl2mk2czwk1ysympzr8ismjx39fcqgy13276vcmw0cfqi83";
})
];
buildInputs = [
boost miniupnpc openssl unbound
cppzmq zeromq pcsclite readline
@ -39,7 +30,7 @@ stdenv.mkDerivation rec {
"-DCMAKE_BUILD_TYPE=Release"
"-DBUILD_GUI_DEPS=ON"
"-DReadline_ROOT_DIR=${readline.dev}"
];
] ++ optional stdenv.isDarwin "-DBoost_USE_MULTITHREADED=OFF";
hardeningDisable = [ "fortify" ];

View File

@ -3,13 +3,13 @@
stdenv.mkDerivation rec {
name = "nano-wallet-${version}";
version = "15.2";
version = "16.0";
src = fetchFromGitHub {
owner = "nanocurrency";
repo = "raiblocks";
rev = "V${version}";
sha256 = "0ngsnaczw5y709zk52flp6m2c83q3kxfgz0bzi8rzfjxp10ncnz3";
sha256 = "0fk8jlas3khdh3nlv40krsjdifxp9agblvzap6k93wmm9y34h41c";
fetchSubmodules = true;
};

View File

@ -1,57 +0,0 @@
{ stdenv, lib, fetchurl, intltool, pkgconfig, gstreamer, gst-plugins-base
, gst-plugins-good, gst-plugins-bad, gst-plugins-ugly, gst-ffmpeg, glib
, mono, mono-addins, dbus-sharp-1_0, dbus-sharp-glib-1_0, notify-sharp, gtk-sharp-2_0
, boo, gdata-sharp, taglib-sharp, sqlite, gnome-sharp, gconf, gtk-sharp-beans, gio-sharp
, libmtp, libgpod, mono-zeroconf }:
stdenv.mkDerivation rec {
name = "banshee-${version}";
version = "2.6.2";
src = fetchurl {
url = "https://ftp.gnome.org/pub/GNOME/sources/banshee/2.6/banshee-${version}.tar.xz";
sha256 = "1y30p8wxx5li39i5gpq2wib0ympy8llz0gyi6ri9bp730ndhhz7p";
};
dontStrip = true;
nativeBuildInputs = [ pkgconfig intltool ];
buildInputs = [
gtk-sharp-2_0.gtk gstreamer gst-plugins-base gst-plugins-good
gst-plugins-bad gst-plugins-ugly gst-ffmpeg
mono dbus-sharp-1_0 dbus-sharp-glib-1_0 mono-addins notify-sharp
gtk-sharp-2_0 boo gdata-sharp taglib-sharp sqlite gnome-sharp gconf gtk-sharp-beans
gio-sharp libmtp libgpod mono-zeroconf
];
makeFlags = [ "PREFIX=$(out)" ];
postPatch = ''
patchShebangs data/desktop-files/update-desktop-file.sh
patchShebangs build/private-icon-theme-installer
sed -i "s,DOCDIR=.*,DOCDIR=$out/lib/monodoc," configure
'';
postInstall = let
ldLibraryPath = lib.makeLibraryPath [ gtk-sharp-2_0.gtk gtk-sharp-2_0 sqlite gconf glib gstreamer ];
monoGACPrefix = lib.concatStringsSep ":" [
mono dbus-sharp-1_0 dbus-sharp-glib-1_0 mono-addins notify-sharp gtk-sharp-2_0
boo gdata-sharp taglib-sharp sqlite gnome-sharp gconf gtk-sharp-beans
gio-sharp libmtp libgpod mono-zeroconf
];
in ''
sed -e '2a export MONO_GAC_PREFIX=${monoGACPrefix}' \
-e 's|LD_LIBRARY_PATH=|LD_LIBRARY_PATH=${ldLibraryPath}:|' \
-e "s|GST_PLUGIN_PATH=|GST_PLUGIN_PATH=$GST_PLUGIN_SYSTEM_PATH:|" \
-e 's| mono | ${mono}/bin/mono |' \
-i $out/bin/banshee
'';
meta = with lib; {
homepage = "http://banshee.fm/";
description = "A music player written in C# using GNOME technologies";
platforms = platforms.linux;
maintainers = [ maintainers.zohl ];
license = licenses.mit;
};
}

View File

@ -1,11 +1,15 @@
{ stdenv, fetchgit, meson, ninja, pkgconfig, wrapGAppsHook
, appstream-glib, desktop-file-utils, gobjectIntrospection
, python36Packages, gnome3, glib, gst_all_1 }:
{ stdenv, fetchgit, meson, ninja, pkgconfig
, python3, gtk3, gst_all_1, libsecret, libsoup
, appstream-glib, desktop-file-utils, gnome3
, gobjectIntrospection, wrapGAppsHook }:
stdenv.mkDerivation rec {
python3.pkgs.buildPythonApplication rec {
version = "0.9.522";
name = "lollypop-${version}";
format = "other";
doCheck = false;
src = fetchgit {
url = "https://gitlab.gnome.org/World/lollypop";
rev = "refs/tags/${version}";
@ -13,7 +17,7 @@ stdenv.mkDerivation rec {
sha256 = "0f2brwv884cvmxj644jcj9sg5hix3wvnjy2ndg0fh5cxyqz0kwn5";
};
nativeBuildInputs = with python36Packages; [
nativeBuildInputs = with python3.pkgs; [
appstream-glib
desktop-file-utils
gobjectIntrospection
@ -21,17 +25,22 @@ stdenv.mkDerivation rec {
ninja
pkgconfig
wrapGAppsHook
wrapPython
];
buildInputs = [ glib ] ++ (with gnome3; [
gsettings-desktop-schemas gtk3 libsecret libsoup totem-pl-parser
]) ++ (with gst_all_1; [
gst-libav gst-plugins-bad gst-plugins-base gst-plugins-good gst-plugins-ugly
buildInputs = with gst_all_1; [
gnome3.totem-pl-parser
gst-libav
gst-plugins-bad
gst-plugins-base
gst-plugins-good
gst-plugins-ugly
gstreamer
]);
gtk3
libsecret
libsoup
];
pythonPath = with python36Packages; [
pythonPath = with python3.pkgs; [
beautifulsoup4
gst-python
pillow
@ -41,11 +50,14 @@ stdenv.mkDerivation rec {
pylast
];
postFixup = "wrapPythonPrograms";
postPatch = ''
chmod +x ./meson_post_install.py
patchShebangs ./meson_post_install.py
chmod +x meson_post_install.py
patchShebangs meson_post_install.py
'';
preFixup = ''
buildPythonPath "$out/libexec/lollypop-sp $pythonPath"
patchPythonScript "$out/libexec/lollypop-sp"
'';
meta = with stdenv.lib; {

View File

@ -1,20 +0,0 @@
https://bugs.archlinux.org/task/31324
https://410333.bugs.gentoo.org/attachment.cgi?id=322456
diff -ur src.old/compression/DecompressorGZIP.cpp src/compression/DecompressorGZIP.cpp
--- src.old/compression/DecompressorGZIP.cpp 2012-08-28 17:54:46.000000000 +0200
+++ src/compression/DecompressorGZIP.cpp 2012-08-28 17:55:21.000000000 +0200
@@ -57,11 +57,11 @@
bool DecompressorGZIP::decompress(const PPSystemString& outFileName, Hints hint)
{
- gzFile *gz_input_file = NULL;
+ gzFile gz_input_file = NULL;
int len = 0;
pp_uint8 *buf;
- if ((gz_input_file = (void **)gzopen (fileName.getStrBuffer(), "r")) == NULL)
+ if ((gz_input_file = gzopen (fileName.getStrBuffer(), "r")) == NULL)
return false;
if ((buf = new pp_uint8[0x10000]) == NULL)

View File

@ -1,29 +1,26 @@
{ stdenv, fetchurl, SDL2, alsaLib, cmake, libjack2, perl
, zlib, zziplib, pkgconfig, makeWrapper
}:
{ stdenv, fetchFromGitHub, cmake, pkgconfig, makeWrapper
, SDL2, alsaLib, libjack2, lhasa, perl, rtmidi, zlib, zziplib }:
stdenv.mkDerivation rec {
version = "1.01";
version = "1.02.00";
name = "milkytracker-${version}";
src = fetchurl {
url = "https://github.com/milkytracker/MilkyTracker/archive/v${version}.00.tar.gz";
sha256 = "1dvnddsnn9c83lz4dlm0cfjpc0m524amfkbalxbswdy0qc8cj1wv";
src = fetchFromGitHub {
owner = "milkytracker";
repo = "MilkyTracker";
rev = "v${version}";
sha256 = "05a6d7l98k9i82dwrgi855dnccm3f2lkb144gi244vhk1156n0ca";
};
preBuild=''
export CPATH=${zlib.out}/lib
'';
nativeBuildInputs = [ cmake pkgconfig makeWrapper ];
buildInputs = [ SDL2 alsaLib libjack2 perl zlib zziplib ];
buildInputs = [ SDL2 alsaLib libjack2 lhasa perl rtmidi zlib zziplib ];
meta = {
meta = with stdenv.lib; {
description = "Music tracker application, similar to Fasttracker II";
homepage = http://milkytracker.org;
license = stdenv.lib.licenses.gpl3Plus;
license = licenses.gpl3Plus;
platforms = [ "x86_64-linux" "i686-linux" ];
maintainers = [ stdenv.lib.maintainers.zoomulator ];
maintainers = with maintainers; [ zoomulator ];
};
}

View File

@ -1,55 +0,0 @@
{ stdenv, fetchgit, pythonPackages, cdparanoia, cdrdao
, gst-python, gst-plugins-base, gst-plugins-good
, utillinux, makeWrapper, substituteAll, autoreconfHook }:
let
inherit (pythonPackages) python;
in stdenv.mkDerivation rec {
name = "morituri-${version}";
version = "0.2.3.20151109";
namePrefix = "";
src = fetchgit {
url = "https://github.com/thomasvs/morituri.git";
fetchSubmodules = true;
rev = "135b2f7bf27721177e3aeb1d26403f1b29116599";
sha256 = "1sl5y5j3gdbynf2v0gf9dwd2hzawj8lm8ywadid7qm34yn8lx12k";
};
pythonPath = with pythonPackages; [
pygobject2 gst-python musicbrainzngs
pycdio pyxdg setuptools
CDDB
];
nativeBuildInputs = [ autoreconfHook ];
buildInputs = [
python cdparanoia cdrdao utillinux makeWrapper
gst-plugins-base gst-plugins-good
] ++ pythonPath;
patches = [
(substituteAll {
src = ./paths.patch;
inherit cdrdao cdparanoia python utillinux;
})
];
# This package contains no binaries to patch or strip.
dontPatchELF = true;
dontStrip = true;
postInstall = ''
wrapProgram "$out/bin/rip" \
--prefix PYTHONPATH : "$PYTHONPATH" \
--prefix GST_PLUGIN_SYSTEM_PATH : "$GST_PLUGIN_SYSTEM_PATH"
'';
meta = with stdenv.lib; {
homepage = http://thomas.apestaart.org/morituri/trac/;
description = "A CD ripper aiming for accuracy over speed";
maintainers = with maintainers; [ rycee jgeerds ];
license = licenses.gpl3Plus;
platforms = platforms.linux;
};
}

View File

@ -1,87 +0,0 @@
diff --git a/doc/Makefile.am b/doc/Makefile.am
index c115c2c..78c883e 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -24,7 +24,7 @@ morituri.ics: $(top_srcdir)/morituri.doap
man_MANS = rip.1
rip.1: $(top_srcdir)/morituri/extern/python-command/scripts/help2man $(top_srcdir)/morituri
- PYTHONPATH=$(top_srcdir) $(PYTHON) $(top_srcdir)/morituri/extern/python-command/scripts/help2man morituri.rip.main.Rip rip > rip.1
+ PYTHONPATH=$(top_srcdir):$(PYTHONPATH) $(PYTHON) $(top_srcdir)/morituri/extern/python-command/scripts/help2man morituri.rip.main.Rip rip > rip.1
clean-local:
@rm -rf reference
diff --git a/morituri/common/program.py b/morituri/common/program.py
index d340fdd..15cb751 100644
--- a/morituri/common/program.py
+++ b/morituri/common/program.py
@@ -92,13 +92,13 @@ class Program(log.Loggable):
"""
Load the given device.
"""
- os.system('eject -t %s' % device)
+ os.system('@utillinux@/bin/eject -t %s' % device)
def ejectDevice(self, device):
"""
Eject the given device.
"""
- os.system('eject %s' % device)
+ os.system('@utillinux@/bin/eject %s' % device)
def unmountDevice(self, device):
"""
@@ -112,7 +112,7 @@ class Program(log.Loggable):
proc = open('/proc/mounts').read()
if device in proc:
print 'Device %s is mounted, unmounting' % device
- os.system('umount %s' % device)
+ os.system('@utillinux@/bin/umount %s' % device)
def getFastToc(self, runner, toc_pickle, device):
"""
Submodule morituri/extern/python-command contains modified content
diff --git a/morituri/program/cdparanoia.py b/morituri/program/cdparanoia.py
index 46176d5..fce14a5 100644
--- a/morituri/program/cdparanoia.py
+++ b/morituri/program/cdparanoia.py
@@ -278,7 +278,7 @@ class ReadTrackTask(log.Loggable, task.Task):
stopTrack, stopOffset)
bufsize = 1024
- argv = ["cdparanoia", "--stderr-progress",
+ argv = ["@cdparanoia@/bin/cdparanoia", "--stderr-progress",
"--sample-offset=%d" % self._offset, ]
if self._device:
argv.extend(["--force-cdrom-device", self._device, ])
@@ -551,7 +551,7 @@ _VERSION_RE = re.compile(
def getCdParanoiaVersion():
getter = common.VersionGetter('cdparanoia',
- ["cdparanoia", "-V"],
+ ["@cdparanoia@/bin/cdparanoia", "-V"],
_VERSION_RE,
"%(version)s %(release)s")
diff --git a/morituri/program/cdrdao.py b/morituri/program/cdrdao.py
index c6fba64..c4d0306 100644
--- a/morituri/program/cdrdao.py
+++ b/morituri/program/cdrdao.py
@@ -257,7 +257,7 @@ class CDRDAOTask(ctask.PopenTask):
def start(self, runner):
self.debug('Starting cdrdao with options %r', self.options)
- self.command = ['cdrdao', ] + self.options
+ self.command = ['@cdrdao@/bin/cdrdao', ] + self.options
ctask.PopenTask.start(self, runner)
@@ -515,7 +515,7 @@ _VERSION_RE = re.compile(
def getCDRDAOVersion():
getter = common.VersionGetter('cdrdao',
- ["cdrdao"],
+ ["@cdrdao@/bin/cdrdao"],
_VERSION_RE,
"%(version)s")

View File

@ -11,6 +11,7 @@ stdenv.mkDerivation rec {
preFixup = ''
wrapProgram "$out/bin/pavucontrol" \
--set GDK_PIXBUF_MODULE_FILE "$GDK_PIXBUF_MODULE_FILE" \
--prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS"
'';

View File

@ -0,0 +1,63 @@
{ stdenv
, runCommand
, fetchFromGitHub
, libpulseaudio
, pulseaudio
, pkgconfig
, libtool
, cmake
, bluez
, dbus
, sbc
}:
let
pulseSources = runCommand "pulseaudio-sources" {} ''
mkdir $out
tar -xf ${pulseaudio.src}
mv pulseaudio*/* $out/
'';
in stdenv.mkDerivation rec {
name = "pulseaudio-modules-bt-${version}";
version = "unstable-2018-09-11";
src = fetchFromGitHub {
owner = "EHfive";
repo = "pulseaudio-modules-bt";
rev = "9c6ad75382f3855916ad2feaa6b40e37356d80cc";
sha256 = "1iz4m3y6arsvwcyvqc429w252dl3apnhvl1zhyvfxlbg00d2ii0h";
fetchSubmodules = true;
};
nativeBuildInputs = [
pkgconfig
cmake
];
buildInputs = [
libpulseaudio
pulseaudio
libtool
bluez
dbus
sbc
];
NIX_CFLAGS_COMPILE = [
"-L${pulseaudio}/lib/pulseaudio"
];
prePatch = ''
rm -r pa
ln -s ${pulseSources} pa
'';
meta = with stdenv.lib; {
homepage = https://github.com/EHfive/pulseaudio-modules-bt;
description = "SBC, Sony LDAC codec (A2DP Audio) support for Pulseaudio";
platforms = platforms.linux;
license = licenses.mit;
maintainers = with maintainers; [ adisbladis ];
};
}
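For these modules to take effect they still have to be loaded by the PulseAudio daemon. A minimal sketch of wiring the package into a NixOS configuration, assuming it is exposed as pkgs.pulseaudio-modules-bt and that the hardware.pulseaudio.extraModules option is available (both names are assumptions, not part of this diff):

  hardware.pulseaudio = {
    enable = true;
    package = pkgs.pulseaudioFull;                   # Bluetooth support needs the full build
    extraModules = [ pkgs.pulseaudio-modules-bt ];   # assumed attribute name for the package above
  };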

Some files were not shown because too many files have changed in this diff.