Merge remote-tracking branch 'origin/master' into r-updates

This commit is contained in:
Justin Bedo 2023-04-06 08:51:55 +10:00
commit 2231a9c89a
No known key found for this signature in database
GPG Key ID: 2C18202C56C182BD
6152 changed files with 541651 additions and 63843 deletions

1
.gitattributes vendored
View File

@ -1,5 +1,6 @@
**/deps.nix linguist-generated
**/deps.json linguist-generated
**/deps.toml linguist-generated
**/node-packages.nix linguist-generated
pkgs/applications/editors/emacs-modes/*-generated.nix linguist-generated

8
.github/CODEOWNERS vendored
View File

@ -132,7 +132,7 @@
/pkgs/development/ruby-modules @marsam
# Rust
/pkgs/development/compilers/rust @Mic92 @LnL7 @zowoq @winterqt @figsoda
/pkgs/development/compilers/rust @Mic92 @zowoq @winterqt @figsoda
/pkgs/build-support/rust @zowoq @winterqt @figsoda
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
@ -237,7 +237,6 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/pkgs/applications/editors/vim/plugins @figsoda @jonringer
# VsCode Extensions
/pkgs/applications/editors/vscode @superherointj
/pkgs/applications/editors/vscode/extensions @jonringer
# Prometheus exporter modules and tests
@ -310,3 +309,8 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/pkgs/build-support/node/build-npm-package @winterqt
/pkgs/build-support/node/fetch-npm-deps @winterqt
/doc/languages-frameworks/javascript.section.md @winterqt
# OCaml
/pkgs/build-support/ocaml @romildo @ulrikstrid
/pkgs/development/compilers/ocaml @romildo @ulrikstrid
/pkgs/development/ocaml-modules @romildo @ulrikstrid

View File

@ -0,0 +1,24 @@
name: "Check that maintainer list is sorted"
on:
pull_request_target:
paths:
- 'maintainers/maintainer-list.nix'
permissions:
contents: read
jobs:
nixos:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
- name: Check that maintainer-list.nix is sorted
run: nix-instantiate --eval maintainers/scripts/check-maintainers-sorted.nix

View File

@ -12,7 +12,7 @@ jobs:
tf-providers:
permissions:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR, for peter-evans/create-or-update-comment to create or update comment
pull-requests: write # for peter-evans/create-pull-request to create a PR
if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
runs-on: ubuntu-latest
steps:
@ -36,6 +36,12 @@ jobs:
--argstr keep-going true \
--argstr max-workers 2 \
--argstr path terraform-providers
- name: get failed updates
run: |
echo 'FAILED<<EOF' >> $GITHUB_ENV
git ls-files --others >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
# cleanup logs of failed updates so they aren't included in the PR
- name: clean repo
run: |
git clean -f
@ -47,10 +53,16 @@ jobs:
https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}
These providers failed to update:
```
${{ env.FAILED }}
```
Check that all providers build with:
```
@ofborg build terraform.full
```
If there are more than ten commits in the PR, `ofborg` won't build it automatically and you will need to use the above command.
branch: terraform-providers-update
delete-branch: false
title: ${{ steps.setup.outputs.title }}

View File

@ -3,7 +3,7 @@
Building software with Nix often requires downloading source code and other files from the internet.
`nixpkgs` provides *fetchers* for different protocols and services. Fetchers are functions that simplify downloading files.
## Caveats
## Caveats {#chap-pkgs-fetchers-caveats}
Fetchers create [fixed output derivations](https://nixos.org/manual/nix/stable/#fixed-output-drvs) from downloaded files.
Nix can reuse the downloaded files via the hash of the resulting derivation.
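For example, a minimal sketch of a fetcher call (the URL and hash here are illustrative placeholders, not real values):
```nix
{ fetchurl }:

fetchurl {
  # The hash makes this a fixed output derivation, letting Nix
  # reuse the downloaded file instead of fetching it again.
  url = "https://example.org/releases/hello-1.0.tar.gz";
  hash = "sha256-BZqI7r0MNP29yGH5+yW2tjU9OOpOCEvwWKrWCv5CQ0I=";
}
```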

View File

@ -6,7 +6,7 @@ Nix packages are most commonly shared between machines using [HTTP, SSH, or S3](
Note that this function is meant for advanced use-cases. The more idiomatic way to work with flat-file binary caches is via the [nix-copy-closure](https://nixos.org/manual/nix/stable/command-ref/nix-copy-closure.html) command. You may also want to consider [dockerTools](#sec-pkgs-dockerTools) for your containerization needs.
## Example
## Example {#sec-pkgs-binary-cache-example}
The following derivation will construct a flat-file binary cache containing the closure of `hello`.
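A sketch of what such a derivation might look like, assuming the `mkBinaryCache` helper this chapter describes:
```nix
with import <nixpkgs> {};

# rootPaths lists the store paths whose closures the cache will contain
mkBinaryCache {
  rootPaths = [ hello ];
}
```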

View File

@ -410,7 +410,7 @@ If the derivation is fully buildable (i.e. `nix-build` can be used on it), runni
The behavior doesn't match `nix-shell` or `nix-build` exactly and this function is known not to work correctly for e.g. fixed-output derivations, content-addressed derivations, impure derivations and other special types of derivations.
:::
### Arguments
### Arguments {#ssec-pkgs-dockerTools-buildNixShellImage-arguments}
`drv`
@ -473,7 +473,7 @@ The behavior doesn't match `nix-shell` or `nix-build` exactly and this function
*Default:* (none)
### Example
### Example {#ssec-pkgs-dockerTools-buildNixShellImage-example}
The following shows how to build the `pkgs.hello` package inside a Docker container built with `buildNixShellImage`.

View File

@ -12,12 +12,12 @@ Whereas for many web servers, applications, it is possible to work with a Nix st
NixOS tests also use this function when preparing the VM. The `cptofs` method is used when `virtualisation.useBootLoader` is false (the default). Otherwise the second method is used.
## Features
## Features {#sec-make-disk-image-features}
For reference, read the function signature source code for documentation on arguments: <https://github.com/NixOS/nixpkgs/blob/master/nixos/lib/make-disk-image.nix>.
Features are separated in various sections depending on whether you opt for a Nix-store only image or a full NixOS image.
### Common
### Common {#sec-make-disk-image-features-common}
- arbitrary NixOS configuration
- automatic or bound disk size: `diskSize` parameter, `additionalSpace` can be set when `diskSize` is `auto` to add a constant amount of disk space
@ -29,7 +29,7 @@ Features are separated in various sections depending on if you opt for a Nix-sto
- the current nixpkgs can be realized as a channel in the disk image, which will change the hash of the image when the sources are updated
- additional store paths can be provided through `additionalPaths`
### Full NixOS image
### Full NixOS image {#sec-make-disk-image-features-full-image}
- arbitrary contents with permissions can be placed in the target filesystem using `contents`
- a `/etc/nixpkgs/nixos/configuration.nix` can be provided through `configFile`
@ -37,7 +37,7 @@ Features are separated in various sections depending on if you opt for a Nix-sto
- EFI variables can be mutated during image production and the result is exposed in `$out`
- boot partition size when partition table is `efi` or `hybrid`
### On bit-to-bit reproducibility
### On bit-to-bit reproducibility {#sec-make-disk-image-features-reproducibility}
Images are **NOT** deterministic; please do not hesitate to try to fix this. Sources of non-determinism include (non-exhaustive):
@ -47,7 +47,7 @@ Images are **NOT** deterministic, please do not hesitate to try to fix this, sou
A `deterministic` flag is available for best-effort determinism.
## Usage
## Usage {#sec-make-disk-image-usage}
To produce a Nix-store only image:
```nix

View File

@ -12,7 +12,7 @@ pkgs.makeSetupHook {
} ./script.sh
```
#### setup hook that depends on the hello package and runs hello and @shell@ is substituted with path to bash
#### setup hook that depends on the hello package and runs hello and @shell@ is substituted with path to bash {#sec-pkgs.makeSetupHook-usage-example}
```nix
pkgs.makeSetupHook {
@ -27,7 +27,7 @@ pkgs.makeSetupHook {
'')
```
## Attributes
## Attributes {#sec-pkgs.makeSetupHook-attributes}
* `name` Set the name of the hook.
* `propagatedBuildInputs` Runtime dependencies (such as binaries) of the hook.

View File

@ -20,7 +20,7 @@ pkgs.mkShell {
}
```
## Attributes
## Attributes {#sec-pkgs-mkShell-attributes}
* `name` (default: `nix-shell`). Set the name of the derivation.
* `packages` (default: `[]`). Add executable packages to the `nix-shell` environment.
@ -29,7 +29,7 @@ pkgs.mkShell {
... all the attributes of `stdenv.mkDerivation`.
## Building the shell
## Building the shell {#sec-pkgs-mkShell-building}
This derivation output will contain a text file that contains a reference to
all the build inputs. This is useful in CI where we want to make sure that

View File

@ -178,7 +178,7 @@ letting NixOS invoke Nixpkgs anew.
If a test machine needs to set NixOS options under `nixpkgs`, it must set only the
`nixpkgs.pkgs` option.
### Parameter
### Parameter {#tester-nixosTest-parameter}
A [NixOS VM test network](https://nixos.org/nixos/manual/index.html#sec-nixos-tests), or path to it. Example:
@ -200,7 +200,7 @@ A [NixOS VM test network](https://nixos.org/nixos/manual/index.html#sec-nixos-te
}
```
### Result
### Result {#tester-nixosTest-result}
A derivation that runs the VM test.

View File

@ -290,7 +290,7 @@ Other examples of reasons are:
- The previous download links were all broken
- Crash when starting on some X11 systems
#### Acceptable backport criteria
#### Acceptable backport criteria {#acceptable-backport-criteria}
The stable branch does have some changes which cannot be backported. Most notable are breaking changes. The desire is to have stable users be uninterrupted when updating packages.

View File

@ -216,7 +216,7 @@ you can test whether it builds correctly by writing in a comment:
@ofborg build agdaPackages.iowa-stdlib
```
### Maintaining Agda packages
### Maintaining Agda packages {#agda-maintaining-packages}
As mentioned before, the aim is to have a compatible and up-to-date package set.
These two conditions sometimes exclude each other:

View File

@ -14,7 +14,7 @@ nixpkgs follows the [official elixir deprecation schedule](https://hexdocs.pm/el
All BEAM-related expressions are available via the top-level `beam` attribute, which includes:
- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlangR22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlang_22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
- `packages`: a set of package builders (Mix and rebar3), each compiled with a specific Erlang/OTP version, e.g. `beam.packages.erlang_22`.
@ -22,7 +22,7 @@ The default Erlang compiler, defined by `beam.interpreters.erlang`, is aliased a
To create a package builder built with a custom Erlang version, use the lambda `beam.packagesWith`, which accepts an Erlang/OTP derivation and produces a package builder similar to `beam.packages.erlang`.
Many Erlang/OTP distributions available in `beam.interpreters` have versions with ODBC and/or Java enabled or without wx (no observer support). For example, there's `beam.interpreters.erlangR22_odbc_javac`, which corresponds to `beam.interpreters.erlangR22` and `beam.interpreters.erlangR22_nox`, which corresponds to `beam.interpreters.erlangR22`.
Many Erlang/OTP distributions available in `beam.interpreters` have versions with ODBC and/or Java enabled or without wx (no observer support). For example, there's `beam.interpreters.erlang_22_odbc_javac`, which corresponds to `beam.interpreters.erlang_22` and `beam.interpreters.erlang_22_nox`, which corresponds to `beam.interpreters.erlang_22`.
## Build Tools {#build-tools}
@ -128,7 +128,7 @@ You will need to run the build process once to fix the hash to correspond to you
###### FOD {#fixed-output-derivation}
A fixed output derivation will download mix dependencies from the internet. To ensure reproducibility, a hash will be supplied. Note that mix is relatively reproducible. An FOD generating a different hash on each run hasn't been observed (as opposed to npm where the chances are relatively high). See [elixir_ls](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/beam-modules/elixir-ls/default.nix) for a usage example of FOD.
A fixed output derivation will download mix dependencies from the internet. To ensure reproducibility, a hash will be supplied. Note that mix is relatively reproducible. An FOD generating a different hash on each run hasn't been observed (as opposed to npm where the chances are relatively high). See [elixir-ls](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/beam-modules/elixir-ls/default.nix) for a usage example of FOD.
Practical steps
@ -154,7 +154,7 @@ Here is how your `default.nix` file would look for a phoenix project.
with import <nixpkgs> { };
let
# beam.interpreters.erlangR23 is available if you need a particular version
# beam.interpreters.erlang_23 is available if you need a particular version
packages = beam.packagesWith beam.interpreters.erlang;
pname = "your_project";
@ -274,18 +274,18 @@ Usually, we need to create a `shell.nix` file and do our development inside of t
with pkgs;
let
elixir = beam.packages.erlangR24.elixir_1_12;
elixir = beam.packages.erlang_24.elixir_1_12;
in
mkShell {
buildInputs = [ elixir ];
}
```
### Using an overlay
### Using an overlay {#beam-using-overlays}
If you need to use an overlay to change some attributes of a derivation, e.g. if you need a bugfix from a version that is not yet available in nixpkgs, you can override attributes such as `version` (and the corresponding `hash`) and then use this overlay in your development environment:
#### `shell.nix`
#### `shell.nix` {#beam-using-overlays-shell.nix}
```nix
let

View File

@ -4,7 +4,7 @@
[R⁵RS](https://schemers.org/Documents/Standards/R5RS/HTML/)-compliant Scheme
compiler. It includes an interactive mode and a custom package format, "eggs".
## Using Eggs
## Using Eggs {#sec-chicken-using}
Eggs described in nixpkgs are available inside the
`chickenPackages.chickenEggs` attrset. Including an egg as a build input is
@ -22,7 +22,7 @@ might write:
Both `chicken` and its eggs have a setup hook which configures the environment
variables `CHICKEN_INCLUDE_PATH` and `CHICKEN_REPOSITORY_PATH`.
## Updating Eggs
## Updating Eggs {#sec-chicken-updating-eggs}
nixpkgs only knows about a subset of all published eggs. It uses
[egg2nix](https://github.com/the-kenny/egg2nix) to generate a
@ -36,7 +36,7 @@ $ cd pkgs/development/compilers/chicken/5/
$ egg2nix eggs.scm > eggs.nix
```
## Adding Eggs
## Adding Eggs {#sec-chicken-adding-eggs}
When we run `egg2nix`, we obtain one collection of eggs with
mutually-compatible versions. This means that when we add new eggs, we may

View File

@ -7,7 +7,7 @@
- do configuration akin to [Dhall Lang](https://dhall-lang.org/)
- perform data validation
## Cuelang schema quick start
## Cuelang schema quick start {#cuelang-quickstart}
Cuelang schemas are similar to JSON, here is a quick cheatsheet:
@ -21,7 +21,7 @@ Cuelang schemas are similar to JSON, here is a quick cheatsheet:
- Read <https://cuelang.org/docs/concepts/logic/> to learn more about the semantics.
- Read <https://cuelang.org/docs/references/spec/> to learn about the language specification.
## `writeCueValidator`
## `writeCueValidator` {#cuelang-writeCueValidator}
Nixpkgs provides a `pkgs.writeCueValidator` helper, which will write a validation script based on the provided Cuelang schema.
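A hedged sketch of a possible invocation follows; the schema content is made up, and the exact call shape (a schema file followed by an options set with a `document` key) is an assumption to be checked against the helper's source:
```nix
with import <nixpkgs> {};

let
  # illustrative schema: a document must provide a string `name`
  schema = writeText "schema.cue" ''
    #Config: {
      name: string
    }
  '';
in
# assumed call shape: schema first, then options selecting the definition
writeCueValidator schema { document = "#Config"; }
```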

View File

@ -11,7 +11,7 @@ with import <nixpkgs> {};
mkShell {
name = "dotnet-env";
packages = [
dotnet-sdk_3
dotnet-sdk
];
}
```
@ -27,36 +27,57 @@ mkShell {
name = "dotnet-env";
packages = [
(with dotnetCorePackages; combinePackages [
sdk_3_1
sdk_6_0
sdk_7_0
])
];
}
```
This will produce a dotnet installation that has the dotnet 3.1 6.0 sdk. The first sdk listed will have it's cli utility present in the resulting environment. Example info output:
This will produce a dotnet installation that has the dotnet 6.0 and 7.0 SDKs. The first SDK listed will have its CLI utility present in the resulting environment. Example info output:
```ShellSession
$ dotnet --info
.NET Core SDK (reflecting any global.json):
Version: 3.1.101
Commit: b377529961
.NET SDK:
Version: 7.0.202
Commit: 6c74320bc3
...
Runtime Environment:
OS Name: nixos
OS Version: 23.05
OS Platform: Linux
RID: linux-x64
Base Path: /nix/store/n2pm44xq20hz7ybsasgmd7p3yh31gnh4-dotnet-sdk-7.0.202/sdk/7.0.202/
.NET Core SDKs installed:
2.1.803 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/sdk]
3.0.102 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/sdk]
3.1.101 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/sdk]
Host:
Version: 7.0.4
Architecture: x64
Commit: 0a396acafe
.NET Core runtimes installed:
Microsoft.AspNetCore.All 2.1.15 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.AspNetCore.All]
Microsoft.AspNetCore.App 2.1.15 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
Microsoft.AspNetCore.App 3.0.2 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
Microsoft.AspNetCore.App 3.1.1 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
Microsoft.NETCore.App 2.1.15 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.NETCore.App]
Microsoft.NETCore.App 3.0.2 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.NETCore.App]
Microsoft.NETCore.App 3.1.1 [/nix/store/iiv98i2jdi226dgh4jzkkj2ww7f8jgpd-dotnet-core-combined/shared/Microsoft.NETCore.App]
.NET SDKs installed:
6.0.407 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/sdk]
7.0.202 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/sdk]
.NET runtimes installed:
Microsoft.AspNetCore.App 6.0.15 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
Microsoft.AspNetCore.App 7.0.4 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
Microsoft.NETCore.App 6.0.15 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.NETCore.App]
Microsoft.NETCore.App 7.0.4 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.NETCore.App]
Other architectures found:
None
Environment variables:
Not set
global.json file:
Not found
Learn more:
https://aka.ms/dotnet/info
Download .NET:
https://aka.ms/dotnet/download
```
## dotnet-sdk vs dotnetCorePackages.sdk {#dotnet-sdk-vs-dotnetcorepackages.sdk}
@ -88,7 +109,7 @@ To package Dotnet applications, you can use `buildDotnetModule`. This has simila
* `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
* `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
* `selfContainedBuild` allows enabling the [self-contained](https://docs.microsoft.com/en-us/dotnet/core/deploying/#publish-self-contained) build flag. By default, it is set to false and generated applications have a dependency on the selected dotnet runtime. If enabled, the dotnet runtime is bundled into the executable and the built app has no dependency on Dotnet.
* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used.
* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used. You can also set this to the result of `dotnetSdkPackages.combinePackages`, if the project uses multiple SDKs to build.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime, or an aspnetcore.
* `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. It gets restored and built, but not installed. You may need to regenerate your nuget lockfile after setting this.
@ -119,8 +140,8 @@ in buildDotnetModule rec {
projectReferences = [ referencedProject ]; # `referencedProject` must contain `nupkg` in the folder structure.
dotnet-sdk = dotnetCorePackages.sdk_3_1;
dotnet-runtime = dotnetCorePackages.net_6_0;
dotnet-sdk = dotnetCorePackages.sdk_6.0;
dotnet-runtime = dotnetCorePackages.runtime_6_0;
executables = [ "foo" ]; # This wraps "$out/lib/$pname/foo" to `$out/bin/foo`.
executables = []; # Don't install any executables.

View File

@ -116,10 +116,6 @@ For convenience, it also adds `dconf.lib` for a GIO module implementing a GSetti
- []{#ssec-gnome-hooks-gobject-introspection} `gobject-introspection` setup hook populates `GI_TYPELIB_PATH` variable with `lib/girepository-1.0` directories of dependencies, which is then added to wrapper by `wrapGAppsHook`. It also adds `share` directories of dependencies to `XDG_DATA_DIRS`, which is intended to promote GIR files but it also [pollutes the closures](https://github.com/NixOS/nixpkgs/issues/32790) of packages using `wrapGAppsHook`.
::: {.warning}
The setup hook [currently](https://github.com/NixOS/nixpkgs/issues/56943) does not work in expressions with `strictDeps` enabled, like Python packages. In those cases, you will need to disable it with `strictDeps = false;`.
:::
- []{#ssec-gnome-hooks-gst-grl-plugins} Setup hooks of `gst_all_1.gstreamer` and `grilo` will populate the `GST_PLUGIN_SYSTEM_PATH_1_0` and `GRL_PLUGIN_PATH` variables, respectively, which will then be added to the wrapper by `wrapGAppsHook`.
You can also pass additional arguments to `makeWrapper` using `gappsWrapperArgs` in `preFixup` hook:
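A sketch of that pattern (the specific wrapper flag is illustrative):
```nix
preFixup = ''
  gappsWrapperArgs+=(
    # illustrative: expose an extra environment variable to the wrapped program
    --set SOME_VARIABLE some-value
  )
'';
```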

View File

@ -160,7 +160,7 @@ All `haskell.packages.*` package sets use the same package descriptions and the
of versions by default. There are however GHC version specific override `.nix`
files to loosen this a bit.
### Dependency resolution
### Dependency resolution {#haskell-dependency-resolution}
Normally when you build Haskell packages with `cabal-install`, `cabal-install`
does dependency resolution. It will look at all Haskell package versions known
@ -230,7 +230,7 @@ specification, test suites, benchmarks etc. by compiling and invoking the
package's `Setup.hs`. It does *not* use or invoke the `cabal-install` binary,
but uses the underlying `Cabal` library instead.
### General arguments
### General arguments {#haskell-derivation-args}
`pname`
: Package name, assumed to be the same as on Hackage (if applicable)
@ -479,7 +479,7 @@ are especially useful when writing [overrides](#haskell-overriding-haskell-packa
when you want to make sure that they are definitely included. However, it is
recommended to use the more accurate ones listed above when possible.
### Meta attributes
### Meta attributes {#haskell-derivation-meta}
`haskellPackages.mkDerivation` accepts the following attributes as direct
arguments which are transparently set in `meta` of the resulting derivation. See
@ -714,7 +714,7 @@ editor plugin to achieve this.
## Overriding Haskell packages {#haskell-overriding-haskell-packages}
### Overriding a single package
### Overriding a single package {#haskell-overriding-a-single-package}
<!-- TODO(@sternenseemann): we should document /somewhere/ that base == null etc. -->
@ -803,7 +803,7 @@ lib.pipe my-haskell-package [
]
```
#### `haskell.lib.compose`
#### `haskell.lib.compose` {#haskell-haskell.lib.compose}
The base interface for all overriding is the following function:
@ -826,7 +826,7 @@ following overview. Refer to the
[documentation of `haskellPackages.mkDerivation`](#haskell-mkderivation)
for a more detailed description of the effects of the respective arguments.
##### Packaging Helpers
##### Packaging Helpers {#haskell-packaging-helpers}
`overrideSrc { src, version } drv`
: Replace the source used for building `drv` with the path or derivation given
@ -875,7 +875,7 @@ sometimes necessary when working with versioned packages in
altogether. Useful if it fails to evaluate cleanly and is causing
noise in the evaluation errors tab on Hydra.
##### Development Helpers
##### Development Helpers {#haskell-development-helpers}
`sdistTarball drv`
: Create a source distribution tarball like those found on Hackage
@ -913,7 +913,7 @@ for debugging with e.g. `gdb`.
<!-- TODO(@sternenseemann): shellAware -->
##### Trivial Helpers
##### Trivial Helpers {#haskell-trivial-helpers}
`doJailbreak drv`
: Sets the `jailbreak` argument to `true` for `drv`.
@ -998,7 +998,7 @@ benchmark component.
`dontCoverage drv`
: Sets the `doCoverage` argument to `false` for `drv`.
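As an example, a hedged sketch applying one of these helpers in an overlay (the package name is illustrative):
```nix
self: super: {
  haskellPackages = super.haskellPackages.override {
    overrides = hself: hsuper: {
      # doJailbreak lifts overly strict version bounds from the cabal file
      my-package = self.haskell.lib.compose.doJailbreak hsuper.my-package;
    };
  };
}
```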
#### Library functions in the Haskell package sets
#### Library functions in the Haskell package sets {#haskell-package-set-lib-functions}
Some library functions depend on packages from the Haskell package sets. Thus they are
exposed from those instead of from `haskell.lib.compose` which can only access what is
@ -1062,7 +1062,7 @@ it does for the unstable branches.
## F.A.Q. {#haskell-faq}
### Why is topic X not covered in this section? Why is section Y missing?
### Why is topic X not covered in this section? Why is section Y missing? {#haskell-why-not-covered}
We have been working on [moving the nixpkgs Haskell documentation back into the
nixpkgs manual](https://github.com/NixOS/nixpkgs/issues/121403). Since this

View File

@ -25,6 +25,7 @@
<xi:include href="ios.section.xml" />
<xi:include href="java.section.xml" />
<xi:include href="javascript.section.xml" />
<xi:include href="lisp.section.xml" />
<xi:include href="lua.section.xml" />
<xi:include href="maven.section.xml" />
<xi:include href="nim.section.xml" />

View File

@ -6,16 +6,16 @@ This contains instructions on how to package javascript applications.
The various tools available will be listed in the [tools-overview](#javascript-tools-overview). Some general principles for packaging will follow. Finally, some tool-specific instructions will be given.
## Getting unstuck / finding code examples
## Getting unstuck / finding code examples {#javascript-finding-examples}
If you find you are lacking inspiration for packaging javascript applications, the links below might prove useful. Searching online for prior art can be helpful if you are running into solved problems.
### Github
### Github {#javascript-finding-examples-github}
- Searching Nix files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+language%3ANix&type=code>
- Searching just `flake.nix` files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+filename%3Aflake.nix&type=code>
### Gitlab
### Gitlab {#javascript-finding-examples-gitlab}
- Searching Nix files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+extension%3Anix>
- Searching just `flake.nix` files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+filename%3Aflake.nix>
@ -105,7 +105,7 @@ After you have identified the correct system, you need to override your package
});
```
### Adding and Updating Javascript packages in nixpkgs
### Adding and Updating Javascript packages in nixpkgs {#javascript-adding-or-updating-packages}
To add a package from NPM to nixpkgs:
@ -140,7 +140,7 @@ To update NPM packages in nixpkgs, run the same `generate.sh` script:
./pkgs/development/node-packages/generate.sh
```
#### Git protocol error
#### Git protocol error {#javascript-git-error}
Some packages may have Git dependencies from GitHub specified with `git://`.
GitHub has [disabled unencrypted Git connections](https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git), so you may see the following error when running the generate script:
@ -288,7 +288,7 @@ configurePhase = ''
This will generate a derivation including the `node_modules` directory.
If you have to build a derivation for an integrated web framework (rails, phoenix, ...), this is probably the easiest way.
#### Overriding dependency behavior
#### Overriding dependency behavior {#javascript-mkYarnPackage-overriding-dependencies}
In the `mkYarnPackage` record the property `pkgConfig` can be used to override packages when you encounter problems building.

View File

@ -0,0 +1,304 @@
# lisp-modules {#lisp}
This document describes the Nixpkgs infrastructure for building Common Lisp
libraries that use ASDF (Another System Definition Facility). It lives in
`pkgs/development/lisp-modules`.
## Overview {#lisp-overview}
The main entry point of the API are the Common Lisp implementation packages
(e.g. `abcl`, `ccl`, `clasp-common-lisp`, `clisp`, `ecl`, `sbcl`)
themselves. They have the `pkgs` and `withPackages` attributes, which can be
used to discover available packages and to build wrappers, respectively.
The `pkgs` attribute set contains packages that were automatically imported from
Quicklisp, and any other manually defined ones. Not every package works for all
the CL implementations (e.g. `nyxt` only makes sense for `sbcl`).
The `withPackages` function is of primary utility. It is used to build runnable
wrappers, with a pinned and pre-built ASDF FASL available in the `ASDF`
environment variable, and `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS`
configured to find the desired systems on runtime.
With a few exceptions, the primary thing that the infrastructure does is to run
`asdf:load-system` for each system specified in the `systems` argument to
`build-asdf-system`, and save the FASLs to the Nix store. Then, it makes these
FASLs available to wrappers. Any other use-cases, such as producing SBCL
executables with `sb-ext:save-lisp-and-die`, are achieved via overriding the
`buildPhase` etc.
In addition, Lisps have the `withOverrides` function, which can be used to
substitute any package in the scope of their `pkgs`. This will be useful
together with `overrideLispAttrs` when dealing with slashy ASDF systems, because
they should stay in the main package and be built by specifying the `systems`
argument to `build-asdf-system`.
## The 90% use case example {#lisp-use-case-example}
The most common way to use the library is to run ad-hoc wrappers like this:
`nix-shell -p 'sbcl.withPackages (ps: with ps; [ alexandria ])'`
Then, in a shell:
```
$ result/bin/sbcl
* (load (sb-ext:posix-getenv "ASDF"))
* (asdf:load-system 'alexandria)
```
Also one can create a `pkgs.mkShell` environment in `shell.nix`/`flake.nix`:
```
let
sbcl' = sbcl.withPackages (ps: [ ps.alexandria ]);
in mkShell {
buildInputs = [ sbcl' ];
}
```
Such a Lisp can now be used e.g. to compile your sources:
```
buildPhase = ''
${sbcl'}/bin/sbcl --load my-build-file.lisp
''
```
## Importing packages from Quicklisp {#lisp-importing-packages-from-quicklisp}
The library is able to very quickly import all the packages distributed by
Quicklisp by parsing its `releases.txt` and `systems.txt` files. These files are
available from <http://beta.quicklisp.org/dist/quicklisp.txt>.
The import process is implemented in the `import` directory as Common Lisp
functions in the `org.lispbuilds.nix` ASDF system. To run the script, one can
execute `ql-import.lisp`:
```
nix-shell --run 'sbcl --script ql-import.lisp'
```
The script will:
1. Download the latest Quicklisp `systems.txt` and `releases.txt` files
2. Generate an SQLite database of all QL systems in `packages.sqlite`
3. Generate an `imported.nix` file from the database
The maintainer's job there is to:
1. Re-run the `ql-import.lisp` script
2. Add missing native dependencies in `ql.nix`
3. For packages that still don't build, package them manually in `packages.nix`
Also, the `imported.nix` file **must not be edited manually**! It should only be
generated as described in this section.
### Adding native dependencies {#lisp-quicklisp-adding-native-dependencies}
The Quicklisp files contain ASDF dependency data, but don't include native
library (CFFI) dependencies, and, in the case of ABCL, Java dependencies.
The `ql.nix` file contains a long list of overrides, where these dependencies
can be added.
Packages defined in `packages.nix` contain these dependencies naturally.
### Trusting `systems.txt` and `releases.txt` {#lisp-quicklisp-trusting}
The previous implementation of `lisp-modules` didn't fully trust the Quicklisp
data, because there were times where the dependencies specified were not
complete, and caused broken builds. It instead used a `nix-shell` environment to
discover real dependencies by using the ASDF APIs.
The current implementation has chosen to trust this data, because it's faster to
parse a text file than to build each system to generate its Nix file, and
because that way packages can be mass-imported. Because of that, there may come
a day where some packages will break, due to bugs in Quicklisp. In that case,
the fix could be a manual override in `packages.nix` and `ql.nix`.
A known fact is that Quicklisp doesn't include dependencies on slashy systems in
its data. This is an example of a situation where such fixes were used, e.g. to
replace the `systems` attribute of the affected packages. (See the definition of
`iolib`).
### Quirks {#lisp-quicklisp-quirks}
During Quicklisp import:
- `+` in names are converted to `_plus{_,}`: `cl+ssl`->`cl_plus_ssl`, `alexandria+`->`alexandria_plus`
- `.` to `_dot_`: `iolib.base`->`iolib_dot_base`
- names starting with a number have a `_` prepended (`3d-vectors`->`_3d-vectors`)
- `_` in names is converted to `__` for reversibility
## Defining packages manually inside Nixpkgs {#lisp-defining-packages-inside}
New packages that for some reason are not in Quicklisp, and so cannot be
auto-imported, can be written in the `packages.nix` file.
In that file, use the `build-asdf-system` function, which is a wrapper around
`mkDerivation` for building ASDF systems. Various other hacks are present, such
as `build-with-compile-into-pwd` for systems which create files during
compilation.
The `build-asdf-system` function is documented with comments in
`nix-cl.nix`. Also, `packages.nix` is full of examples of how to use it.
## Defining packages manually outside Nixpkgs {#lisp-defining-packages-outside}
Lisp derivations (`abcl`, `sbcl` etc.) also export the `buildASDFSystem`
function, which is the same as `build-asdf-system`, except for the `lisp`
argument which is set to the given CL implementation.
It can be used to define packages outside Nixpkgs, and, for example, add them
into the package scope with `withOverrides` which will be discussed later on.
### Including an external package in scope {#lisp-including-external-pkg-in-scope}
A package defined outside Nixpkgs using `buildASDFSystem` can be woven into the
Nixpkgs-provided scope like this:
```
let
alexandria = sbcl.buildASDFSystem rec {
pname = "alexandria";
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
};
sbcl' = sbcl.withOverrides (self: super: {
inherit alexandria;
});
in sbcl'.pkgs.alexandria
```
## Overriding package attributes {#lisp-overriding-package-attributes}
Packages export the `overrideLispAttrs` function, which can be used to build a
new package with different parameters.
Example of overriding `alexandria`:
```
sbcl.pkgs.alexandria.overrideLispAttrs (oldAttrs: rec {
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
})
```
## Overriding packages in scope {#lisp-overriding-packages-in-scope}
Packages can be woven into a new scope by using `withOverrides`:
```
let
sbcl' = sbcl.withOverrides (self: super: {
alexandria = super.alexandria.overrideLispAttrs (oldAttrs: rec {
pname = "alexandria";
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
});
});
in builtins.elemAt sbcl'.pkgs.bordeaux-threads.lispLibs 0
```
### Dealing with slashy systems {#lisp-dealing-with-slashy-systems}
Slashy (secondary) systems should not exist in their own packages! Instead, they
should be included in the parent package as an extra entry in the `systems`
argument to the `build-asdf-system`/`buildASDFSystem` functions.
The reason is that ASDF searches for a secondary system in the `.asd` of the
parent package. Thus, having them separate would cause either one of them not to
load cleanly, because one will contain FASLs of itself but not the other, and
vice versa.
To package slashy systems, use `overrideLispAttrs`, like so:
```
ecl.pkgs.alexandria.overrideLispAttrs (oldAttrs: {
systems = oldAttrs.systems ++ [ "alexandria/tests" ];
lispLibs = oldAttrs.lispLibs ++ [ ecl.pkgs.rt ];
})
```
See the respective section on using `withOverrides` for how to weave it back
into `ecl.pkgs`.
Note that sometimes the slashy systems might not only have more dependencies
than the main one, but create a circular dependency between `.asd`
files. Unfortunately, in this case an ad hoc solution becomes necessary.
## Building Wrappers {#lisp-building-wrappers}
Wrappers can be built using the `withPackages` function of Common Lisp
implementations (`abcl`, `ecl`, `sbcl` etc.):
```
sbcl.withPackages (ps: [ ps.alexandria ps.bordeaux-threads ])
```
Such a wrapper can then be executed like this:
```
result/bin/sbcl
```
### Loading ASDF {#lisp-loading-asdf}
For best results, avoid calling `(require 'asdf)` when using the
library-generated wrappers.
Use `(load (ext:getenv "ASDF"))` instead, supplying your implementation's way of
getting an environment variable for `ext:getenv`. This will load the
(pre-compiled to FASL) Nixpkgs-provided version of ASDF.
### Loading systems {#lisp-loading-systems}
There, you can simply use `asdf:load-system`. This works by setting the right
values for the `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS` environment
variables, so that systems are found in the Nix store and pre-compiled FASLs are
loaded.
## Adding a new Lisp {#lisp-adding-a-new-lisp}
The function `wrapLisp` is used to wrap Common Lisp implementations. It adds the
`pkgs`, `withPackages`, `withOverrides` and `buildASDFSystem` attributes to the
derivation.
`wrapLisp` takes these arguments:
- `pkg`: the Lisp package
- `faslExt`: Implementation-specific extension for FASL files
- `program`: The name of the executable file in `${pkg}/bin/` (Default: `pkg.pname`)
- `flags`: A list of flags to always pass to `program` (Default: `[]`)
- `asdf`: The ASDF version to use (Default: `pkgs.asdf_3_3`)
- `packageOverrides`: Package overrides config (Default: `(self: super: {})`)
This example wraps CLISP:
```
wrapLisp {
pkg = clisp;
faslExt = "fas";
flags = ["-E" "UTF8"];
}
```

View File

@ -4,7 +4,7 @@
Nixpkgs provides a couple of facilities for working with this tool.
## Writing packages providing pkg-config modules
## Writing packages providing pkg-config modules {#pkg-config-writing-packages}
Packages should set `meta.pkgConfigModules` with the list of package config modules they provide.
They should also use `testers.testMetaPkgConfig` to check that the final built package matches that list.
@ -29,9 +29,9 @@ stdenv.mkDerivation (finalAttrs: {
})
```
## Accessing packages via pkg-config module name
## Accessing packages via pkg-config module name {#sec-pkg-config-usage}
### Within Nixpkgs
### Within Nixpkgs {#sec-pkg-config-usage-internal}
A [setup hook](#setup-hook-pkg-config) is bundled in the `pkg-config` package to bring a derivation's declared build inputs into the environment.
This will populate environment variables like `PKG_CONFIG_PATH`, `PKG_CONFIG_PATH_FOR_BUILD`, and `PKG_CONFIG_PATH_HOST` based on:
@ -44,7 +44,7 @@ For more details see the section on [specifying dependencies in general](#ssec-s
Normal pkg-config commands to look up dependencies by name will then work with those environment variables defined by the hook.
### Externally
### Externally {#sec-pkg-config-usage-external}
The `defaultPkgConfigPackages` package set is a set of aliases, named after the modules they provide.
This is meant to be used by language-to-nix integrations.
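As an illustrative sketch, resolving a module name to the package that provides it might look like:
```nix
with import <nixpkgs> {};

# "zlib" is the pkg-config module name, which need not match
# the nixpkgs attribute name of the providing package
defaultPkgConfigPackages."zlib"
```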

View File

@ -1117,7 +1117,7 @@ with import <nixpkgs> {};
in python.withPackages(ps: [ps.blaze])).env
```
#### Optional extra dependencies
#### Optional extra dependencies {#python-optional-dependencies}
Some packages define optional dependencies for additional features. With
`setuptools` this is called `extras_require` and `flit` calls it
@ -1801,14 +1801,14 @@ The following rules are desired to be respected:
* Attribute names in `python-packages.nix` should be sorted alphanumerically to
avoid merge conflicts and ease locating attributes.
## Package set maintenance
## Package set maintenance {#python-package-set-maintenance}
The whole Python package set has a lot of packages that do not see regular
updates, because they either are a very fragile component in the Python
ecosystem, like for example the `hypothesis` package, or packages that have
no maintainer, so maintenance falls back to the package set maintainers.
### Updating packages in bulk
### Updating packages in bulk {#python-package-bulk-updates}
There is a tool to update a lot of Python libraries in bulk; it lives at
`maintainers/scripts/update-python-libraries` within this repository.
@ -1836,7 +1836,7 @@ would be:
$ maintainers/scripts/update-python-libraries --target minor --commit --use-pkgs-prefix pkgs/development/python-modules/**/default.nix
```
## CPython Update Schedule
## CPython Update Schedule {#python-cpython-update-schedule}
With [PEP 602](https://www.python.org/dev/peps/pep-0602/), CPython now
follows a yearly release cadence. In nixpkgs, all supported interpreters

View File

@ -201,7 +201,7 @@ $ nix-shell --run 'ruby -rpg -e "puts PG.library_version"'
Of course for this use-case one could also use overlays since the configuration for `pg` depends on the `postgresql` alias, but for demonstration purposes this has to suffice.
### Platform-specific gems
### Platform-specific gems {#ruby-platform-specif-gems}
Right now, bundix has some issues with pre-built, platform-specific gems: [bundix PR #68](https://github.com/nix-community/bundix/pull/68).
Until this is solved, you can tell bundler to not use platform-specific gems and instead build them from source each time:

View File

@ -50,6 +50,11 @@ package. `cargoSha256` is used for traditional Nix SHA-256 hashes,
such as the one in the example above. `cargoHash` should instead be
used for [SRI](https://www.w3.org/TR/SRI/) hashes. For example:
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
section.
```nix
cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
```
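For the `git`-dependency exception noted above, a sketch of the corresponding `cargoLock` options (the crate name and hash are illustrative placeholders):
```nix
cargoLock = {
  lockFile = ./Cargo.lock;
  # per-crate hashes for dependencies fetched from git
  outputHashes = {
    "some-git-crate-0.1.0" = "sha256-BZqI7r0MNP29yGH5+yW2tjU9OOpOCEvwWKrWCv5CQ0I=";
  };
};
```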
@ -157,7 +162,7 @@ required to build a rust package. A simple fix is to use:
```nix
postPatch = ''
cp ${./Cargo.lock} Cargo.lock
ln -s ${./Cargo.lock} Cargo.lock
'';
```
@ -411,13 +416,13 @@ rustPlatform.buildRustPackage rec {
}
```
## Compiling non-Rust packages that include Rust code {#compiling-non-rust-packages-that-include-rust-code}
### Compiling non-Rust packages that include Rust code {#compiling-non-rust-packages-that-include-rust-code}
Several non-Rust packages incorporate Rust code for performance- or
security-sensitive parts. `rustPlatform` exposes several functions and
hooks that can be used to integrate Cargo in non-Rust packages.
### Vendoring of dependencies {#vendoring-of-dependencies}
#### Vendoring of dependencies {#vendoring-of-dependencies}
Since network access is not allowed in sandboxed builds, Rust crate
dependencies need to be retrieved using a fetcher. `rustPlatform`
@ -477,7 +482,7 @@ added. To find the correct hash, you can first use `lib.fakeSha256` or
`lib.fakeHash` as a stub hash. Building `cargoDeps` will then inform
you of the correct hash.
### Hooks {#hooks}
#### Hooks {#hooks}
`rustPlatform` provides the following hooks to automate Cargo builds:
@ -513,7 +518,7 @@ you of the correct hash.
* `bindgenHook`: for crates which use `bindgen` as a build dependency, lets
`bindgen` find `libclang` and `libclang` find the libraries in `buildInputs`.
### Examples {#examples}
#### Examples {#examples}
#### Python package using `setuptools-rust` {#python-package-using-setuptools-rust}
@ -642,7 +647,127 @@ buildPythonPackage rec {
}
```
## Setting Up `nix-shell` {#setting-up-nix-shell}
## `buildRustCrate`: Compiling Rust crates using Nix instead of Cargo {#compiling-rust-crates-using-nix-instead-of-cargo}
### Simple operation {#simple-operation}
When run, `cargo build` produces a file called `Cargo.lock`,
containing pinned versions of all dependencies. Nixpkgs contains a
tool called `crate2nix` (`nix-shell -p crate2nix`), which can be
used to turn a `Cargo.lock` into a Nix expression. That Nix
expression calls `rustc` directly (hence bypassing Cargo), and can
be used to compile a crate and all its dependencies.
See [`crate2nix`'s documentation](https://github.com/kolloch/crate2nix#known-restrictions)
for instructions on how to use it.
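Assuming the generated expression was written to `Cargo.nix`, building the top-level crate might look like this sketch (based on `crate2nix`'s README; verify against the generated file):
```nix
# rootCrate.build is the derivation for the crate whose Cargo.lock was converted
(import ./Cargo.nix { pkgs = import <nixpkgs> {}; }).rootCrate.build
```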
### Handling external dependencies {#handling-external-dependencies}
Some crates require external libraries. For crates from
[crates.io](https://crates.io), such libraries can be specified in
`defaultCrateOverrides` package in nixpkgs itself.
Starting from that file, one can add more overrides, to add features
or build inputs by overriding the hello crate in a separate file.
```nix
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: { buildInputs = [ openssl ]; };
};
}
```
Here, `crateOverrides` is expected to be an attribute set, where the
key is the crate name without the version number and the value is a function.
The function gets all attributes passed to `buildRustCrate` as its first
argument and returns a set that contains all attributes that should be
overwritten.
For more complicated cases, such as when parts of the crate's
derivation depend on the crate's version, the `attrs` argument of
the override above can be read, as in the following example, which
patches the derivation:
```nix
with import <nixpkgs> {};
((import ./hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") {
postPatch = ''
substituteInPlace lib/zoneinfo.rs \
--replace "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
'';
};
};
}
```
Another situation is when we want to override a nested
dependency. This actually works in the exact same way, since the
`crateOverrides` parameter is forwarded to the crate's
dependencies. For instance, to override the build inputs for crate
`libc` in the example above, where `libc` is a dependency of the main
crate, we could do:
```nix
with import <nixpkgs> {};
((import hello.nix).hello {}).override {
crateOverrides = defaultCrateOverrides // {
libc = attrs: { buildInputs = []; };
};
}
```
### Options and phases configuration {#options-and-phases-configuration}
Actually, the overrides introduced in the previous section are more
general. A number of other parameters can be overridden:
- The version of `rustc` used to compile the crate:
```nix
(hello {}).override { rust = pkgs.rust; };
```
- Whether to build in release mode or debug mode (release mode by
default):
```nix
(hello {}).override { release = false; };
```
- Whether to print the commands sent to `rustc` when building
(equivalent to `--verbose` in cargo):
```nix
(hello {}).override { verbose = false; };
```
- Extra arguments to be passed to `rustc`:
```nix
(hello {}).override { extraRustcOpts = "-Z debuginfo=2"; };
```
- Phases, just like in any other derivation, can be specified using
the following attributes: `preUnpack`, `postUnpack`, `prePatch`,
`patches`, `postPatch`, `preConfigure` (in the case of a Rust crate,
this is run before calling the "build" script), `postConfigure`
(after the "build" script),`preBuild`, `postBuild`, `preInstall` and
`postInstall`. As an example, here is how to create a new module
before running the build script:
```nix
(hello {}).override {
preConfigure = ''
echo "pub const PATH=\"${hi.out}\";" >> src/path.rs"
'';
};
```
### Setting Up `nix-shell` {#setting-up-nix-shell}
Oftentimes you want to develop code from within `nix-shell`. Unfortunately
`buildRustCrate` does not support common `nix-shell` operations directly

View File

@ -166,8 +166,8 @@ in
If your package requires building specific parts, use instead `pkgs.vimUtils.buildVimPlugin`.
### Specificities for some plugins
#### Treesitter
### Specificities for some plugins {#vim-plugin-specificities}
#### Treesitter {#vim-plugin-treesitter}
By default `nvim-treesitter` encourages you to download, compile and install
the required Treesitter grammars at run time with `:TSInstall`. This works
@ -244,7 +244,7 @@ Alternatively, set the number of processes to a lower count to avoid rate-limiti
./pkgs/applications/editors/vim/plugins/update.py --proc 1
```
## How to maintain an out-of-tree overlay of vim plugins ?
## How to maintain an out-of-tree overlay of vim plugins? {#vim-out-of-tree-overlays}
You can use the updater script to generate basic packages out of a custom vim
plugin list:

View File

@ -1,19 +1,20 @@
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude">
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="nixpkgs-manual">
<info>
<title>Nixpkgs Manual</title>
<subtitle>Version <xi:include href=".version" parse="text" />
</subtitle>
</info>
<xi:include href="preface.chapter.xml" />
<part>
<part xml:id="part-using">
<title>Using Nixpkgs</title>
<xi:include href="using/configuration.chapter.xml" />
<xi:include href="using/overlays.chapter.xml" />
<xi:include href="using/overrides.chapter.xml" />
<xi:include href="functions.xml" />
</part>
<part>
<part xml:id="part-stdenv">
<title>Standard environment</title>
<xi:include href="stdenv/stdenv.chapter.xml" />
<xi:include href="stdenv/meta.chapter.xml" />
@ -21,7 +22,7 @@
<xi:include href="stdenv/cross-compilation.chapter.xml" />
<xi:include href="stdenv/platform-notes.chapter.xml" />
</part>
<part>
<part xml:id="part-builders">
<title>Builders</title>
<xi:include href="builders/fetchers.chapter.xml" />
<xi:include href="builders/trivial-builders.chapter.xml" />
@ -32,7 +33,7 @@
<xi:include href="languages-frameworks/index.xml" />
<xi:include href="builders/packages/index.xml" />
</part>
<part>
<part xml:id="part-contributing">
<title>Contributing to Nixpkgs</title>
<xi:include href="contributing/quick-start.chapter.xml" />
<xi:include href="contributing/coding-conventions.chapter.xml" />

View File

@ -101,7 +101,7 @@ $ cd path/to/nixpkgs
$ nix-build -A your-package.tests
```
#### Package tests
#### Package tests {#var-meta-tests-packages}
Tests that are part of the source package are often executed in the `installCheckPhase`.
@ -113,7 +113,7 @@ Prefer `passthru.tests` for tests that are introduced in nixpkgs because:
For more on how to write and run package tests, see <xref linkend="sec-package-tests"/>.
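A hedged sketch of declaring such a test (the package name is illustrative; `testers.testVersion` is one of the available helpers):
```nix
passthru.tests = {
  # fails if `my-package --version` does not mention the packaged version
  version = testers.testVersion { package = my-package; };
};
```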
#### NixOS tests
#### NixOS tests {#var-meta-tests-nixos}
The NixOS tests are available as `nixosTests` in parameters of derivations. For instance, the OpenSMTPD derivation includes lines similar to:
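A sketch of the kind of snippet referred to, reconstructed rather than quoted from the actual derivation:
```nix
passthru.tests = {
  basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
};
```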

View File

@ -101,11 +101,11 @@ To build a `stdenv` package in a [`nix-shell`](https://nixos.org/manual/nix/unst
```bash
nix-shell '<nixpkgs>' -A some_package
eval ${unpackPhase:-unpackPhase}
eval "${unpackPhase:-unpackPhase}"
cd $sourceRoot
eval ${patchPhase:-patchPhase}
eval ${configurePhase:-configurePhase}
eval ${buildPhase:-buildPhase}
eval "${patchPhase:-patchPhase}"
eval "${configurePhase:-configurePhase}"
eval "${buildPhase:-buildPhase}"
```
To modify a [phase](#sec-stdenv-phases), first print it with
@ -380,39 +380,107 @@ Values inside it are not passed to the builder, so you can change them without t
#### `passthru.updateScript` {#var-passthru-updateScript}
A script to be run by `maintainers/scripts/update.nix` when the package is matched. It needs to be an executable file, either on the file system:
A script to be run by `maintainers/scripts/update.nix` when the package is matched. The attribute can contain one of the following:
```nix
passthru.updateScript = ./update.sh;
```
- []{#var-passthru-updateScript-command} an executable file, either on the file system:
or inside the expression itself:
```nix
passthru.updateScript = ./update.sh;
```
```nix
passthru.updateScript = writeScript "update-zoom-us" ''
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p curl pcre common-updater-scripts
or inside the expression itself:
set -eu -o pipefail
```nix
passthru.updateScript = writeScript "update-zoom-us" ''
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p curl pcre common-updater-scripts
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
update-source-version zoom-us "$version"
'';
```
set -eu -o pipefail
The attribute can also contain a list, a script followed by arguments to be passed to it:
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
update-source-version zoom-us "$version"
'';
```
```nix
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
```
- a list, a script followed by arguments to be passed to it:
The script will be run with the `UPDATE_NIX_NAME`, `UPDATE_NIX_PNAME`, `UPDATE_NIX_OLD_VERSION` and `UPDATE_NIX_ATTR_PATH` environment variables set respectively to the name, pname, old version and attribute path of the package it is supposed to update.
```nix
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
```
- an attribute set containing:
- [`command`]{#var-passthru-updateScript-set-command} a string or list in the [format expected by `passthru.updateScript`](#var-passthru-updateScript-command).
- [`attrPath`]{#var-passthru-updateScript-set-attrPath} (optional) a string containing the canonical attribute path for the package. If present, it will be passed to the update script instead of the attribute path on which the package was discovered during Nixpkgs traversal.
- [`supportedFeatures`]{#var-passthru-updateScript-set-supportedFeatures} (optional) a list of the [extra features](#var-passthru-updateScript-supported-features) the script supports.
```nix
passthru.updateScript = {
command = [ ../../update.sh pname ];
attrPath = pname;
supportedFeatures = [ … ];
};
```
##### How are update scripts executed? {#var-passthru-updateScript-execution}
Update scripts are invoked by the `maintainers/scripts/update.nix` script. You can run `nix-shell maintainers/scripts/update.nix` in the root of the Nixpkgs repository for information on how to use it. `update.nix` offers several modes for selecting packages to update (e.g. select by attribute path, traverse Nixpkgs and filter by maintainer, etc.), and it will execute update scripts for all matched packages that have an `updateScript` attribute.
Each update script will be passed the following environment variables:
- [`UPDATE_NIX_NAME`]{#var-passthru-updateScript-env-UPDATE_NIX_NAME} content of the `name` attribute of the updated package.
- [`UPDATE_NIX_PNAME`]{#var-passthru-updateScript-env-UPDATE_NIX_PNAME} content of the `pname` attribute of the updated package.
- [`UPDATE_NIX_OLD_VERSION`]{#var-passthru-updateScript-env-UPDATE_NIX_OLD_VERSION} content of the `version` attribute of the updated package.
- [`UPDATE_NIX_ATTR_PATH`]{#var-passthru-updateScript-env-UPDATE_NIX_ATTR_PATH} attribute path the `update.nix` discovered the package on (or the [canonical `attrPath`](#var-passthru-updateScript-set-attrPath) when available). Example: `pantheon.elementary-terminal`
::: {.note}
The script will be usually run from the root of the Nixpkgs repository but you should not rely on that. Also note that the update scripts will be run in parallel by default; you should avoid running `git commit` or any other commands that cannot handle that.
An update script will usually be run from the root of the Nixpkgs repository, but you should not rely on that. Also note that `update.nix` executes update scripts in parallel by default, so you should avoid running `git commit` or any other commands that cannot handle that.
:::
For information about how to run the updates, execute `nix-shell maintainers/scripts/update.nix`.
::: {.tip}
While update scripts should not create commits themselves, `maintainers/scripts/update.nix` supports automatically creating commits when run with `--argstr commit true`. If you need to customize the commit message, you can have the update script implement the [`commit`](#var-passthru-updateScript-commit) feature.
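For example, running `nix-shell maintainers/scripts/update.nix --argstr path <attrPath> --argstr commit true` updates the selected package and creates a commit for the change (see the script's usage output for the full set of arguments).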
:::
##### Supported features {#var-passthru-updateScript-supported-features}
###### `commit` {#var-passthru-updateScript-commit}
This feature allows update scripts to *ask* `update.nix` to create Git commits.
When support for this feature is declared, the update script is expected to print a JSON list to standard output whenever it exits with a `0` return status, containing one object (described below) for each updated attribute.
When `update.nix` is run with `--argstr commit true`, it will create a separate commit for each of the objects. An empty list can be returned when the script did not update any files, for example when the package is already at the latest version.
The commit object contains the following values:
- [`attrPath`]{#var-passthru-updateScript-commit-attrPath} a string containing the attribute path.
- [`oldVersion`]{#var-passthru-updateScript-commit-oldVersion} a string containing the old version.
- [`newVersion`]{#var-passthru-updateScript-commit-newVersion} a string containing the new version.
- [`files`]{#var-passthru-updateScript-commit-files} a non-empty list of file paths (as strings) to add to the commit.
- [`commitBody`]{#var-passthru-updateScript-commit-commitBody} (optional) a string with extra content to be appended to the default commit message (useful for adding changelog links).
- [`commitMessage`]{#var-passthru-updateScript-commit-commitMessage} (optional) a string to use instead of the default commit message.
If the returned array contains exactly one object (e.g. `[{}]`), all values are optional and will be determined automatically.
```{=docbook}
<example>
<title>Standard output of an update script using commit feature</title>
```
```json
[
{
"attrPath": "volume_key",
"oldVersion": "0.3.11",
"newVersion": "0.3.12",
"files": [
"/path/to/nixpkgs/pkgs/development/libraries/volume-key/default.nix"
]
}
]
```
```{=docbook}
</example>
```
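Tying the above together, here is a sketch of an update script declaring the feature; `../../update.sh` is a hypothetical helper that performs the update and prints a JSON list like the one shown above:
```nix
passthru.updateScript = {
  command = [ ../../update.sh pname ];
  # declare that the script prints commit objects on stdout
  supportedFeatures = [ "commit" ];
};
```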
### Recursive attributes in `mkDerivation` {#mkderivation-recursive-attributes}
@ -635,7 +703,7 @@ The prefix under which the package must be installed, passed via the `--prefix`
The key to use when specifying the prefix. By default, this is set to `--prefix=` as that is used by the majority of packages.
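For example, for a hypothetical configure script that expects `-p <path>` rather than `--prefix=<path>`, one might set (note the trailing space, since the key is concatenated with the prefix):
```nix
prefixKey = "-p ";
```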
##### `dontAddStaticConfigureFlags`
##### `dontAddStaticConfigureFlags` {#var-stdenv-dontAddStaticConfigureFlags}
By default, when building statically, stdenv will try to add build system appropriate configure flags to try to enable static builds.
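A minimal sketch of opting out, for a package whose configure script rejects these flags:
```nix
dontAddStaticConfigureFlags = true;
```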
@ -1027,15 +1095,15 @@ postInstall = ''
Performs string substitution on the contents of \<infile\>, writing the result to \<outfile\>. The substitutions in \<subs\> are of the following form:
#### `--replace` \<s1\> \<s2\>
#### `--replace` \<s1\> \<s2\> {#fun-substitute-replace}
Replace every occurrence of the string \<s1\> by \<s2\>.
#### `--subst-var` \<varName\>
#### `--subst-var` \<varName\> {#fun-substitute-subst-var}
Replace every occurrence of `@varName@` by the contents of the environment variable \<varName\>. This is useful for generating files from templates, using `@...@` in the template as placeholders.
#### `--subst-var-by` \<varName\> \<s\>
#### `--subst-var-by` \<varName\> \<s\> {#fun-substitute-subst-var-by}
Replace every occurrence of `@varName@` by the string \<s\>.
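As a sketch combining these forms (the file names and values are illustrative, and `version` is assumed to be in scope):
```nix
postPatch = ''
  substitute ./config.h.in ./config.h \
    --replace "/usr/local" "$out" \
    --subst-var-by version "${version}"
'';
```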
@ -1176,7 +1244,7 @@ Multiple paths can be specified.
patchShebangs [--build | --host] PATH...
```
##### Flags
##### Flags {#patch-shebangs.sh-invocation-flags}
`--build`
: Look up commands available at build time
@ -1184,7 +1252,7 @@ patchShebangs [--build | --host] PATH...
`--host`
: Look up commands available at run time
##### Examples
##### Examples {#patch-shebangs.sh-invocation-examples}
```sh
patchShebangs --host /nix/store/<hash>-hello-1.0/bin
@ -1271,7 +1339,7 @@ Similarly, the CC Wrapper follows the Bintools Wrapper in defining standard envi
Here are some more packages that provide a setup hook. Since the list of hooks is extensible, this is not an exhaustive list; but as the mechanism is only to be used as a last resort, it should cover most uses.
### Other hooks
### Other hooks {#stdenv-other-hooks}
Many other packages provide hooks that are not part of `stdenv`. You can find these in the [Hooks Reference](#chap-hooks).

View File

@ -176,7 +176,7 @@ rec {
# Only show the error for the first missing argument
error = errorForArg (lib.head missingArgs);
in if missingArgs == [] then makeOverridable f allArgs else throw error;
in if missingArgs == [] then makeOverridable f allArgs else abort error;
/* Like callPackage, but for a function that returns an attribute

View File

@ -422,7 +422,7 @@ ${expr "" v}
(if v then "True" else "False")
else if isFunction v then
abort "generators.toDhall: cannot convert a function to Dhall"
else if isNull v then
else if v == null then
abort "generators.toDhall: cannot convert a null to Dhall"
else
builtins.toJSON v;

View File

@ -909,6 +909,13 @@ in mkLicense lset) ({
url = "https://github.com/thestk/stk/blob/master/LICENSE";
};
tsl = {
shortName = "TSL";
fullName = "Timescale License Agreement";
url = "https://github.com/timescale/timescaledb/blob/main/tsl/LICENSE-TIMESCALE";
unfree = true;
};
tcltk = {
spdxId = "TCL";
fullName = "TCL/TK License";

View File

@ -567,15 +567,19 @@ rec {
zipAttrsWith (n: concatLists)
(map (module: let subtree = module.${attr}; in
if !(builtins.isAttrs subtree) then
throw ''
You're trying to declare a value of type `${builtins.typeOf subtree}'
rather than an attribute-set for the option
throw (if attr == "config" then ''
You're trying to define a value of type `${builtins.typeOf subtree}'
rather than an attribute set for the option
`${builtins.concatStringsSep "." prefix}'!
This usually happens if `${builtins.concatStringsSep "." prefix}' has option
definitions inside that are not matched. Please check how to properly define
this option by e.g. referring to `man 5 configuration.nix'!
''
'' else ''
An option declaration for `${builtins.concatStringsSep "." prefix}' has type
`${builtins.typeOf subtree}' rather than an attribute set.
Did you mean to define this outside of `options'?
'')
else
mapAttrs (n: f module) subtree
) modules);

View File

@ -2,7 +2,9 @@
{ lib }:
let
inherit (builtins) length;
inherit (builtins) length;
inherit (lib.trivial) warnIf;
asciiTable = import ./ascii-table.nix;
@ -207,7 +209,20 @@ rec {
normalizePath "/a//b///c/"
=> "/a/b/c/"
*/
normalizePath = s: (builtins.foldl' (x: y: if y == "/" && hasSuffix "/" x then x else x+y) "" (stringToCharacters s));
normalizePath = s:
warnIf
(isPath s)
''
lib.strings.normalizePath: The argument (${toString s}) is a path value, but only strings are supported.
Path values are always normalised in Nix, so there's no need to call this function on them.
This function also copies the path to the Nix store and returns the store path, the same as "''${path}" will, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(
builtins.foldl'
(x: y: if y == "/" && hasSuffix "/" x then x else x+y)
""
(stringToCharacters s)
);
/* Depending on the boolean `cond', return either the given string
or the empty string. Useful to concatenate against a bigger string.
@ -240,7 +255,17 @@ rec {
# Prefix to check for
pref:
# Input string
str: substring 0 (stringLength pref) str == pref;
str:
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath pref)
''
lib.strings.hasPrefix: The first argument (${toString pref}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(substring 0 (stringLength pref) str == pref);
/* Determine whether a string has given suffix.
@ -260,8 +285,20 @@ rec {
let
lenContent = stringLength content;
lenSuffix = stringLength suffix;
in lenContent >= lenSuffix &&
substring (lenContent - lenSuffix) lenContent content == suffix;
in
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath suffix)
''
lib.strings.hasSuffix: The first argument (${toString suffix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(
lenContent >= lenSuffix
&& substring (lenContent - lenSuffix) lenContent content == suffix
);
/* Determine whether a string contains the given infix
@ -278,7 +315,16 @@ rec {
=> false
*/
hasInfix = infix: content:
builtins.match ".*${escapeRegex infix}.*" "${content}" != null;
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath infix)
''
lib.strings.hasInfix: The first argument (${toString infix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(builtins.match ".*${escapeRegex infix}.*" "${content}" != null);
/* Convert a string to a list of characters (i.e. singleton strings).
This allows you to, e.g., map a function over each character. However,
@ -570,14 +616,23 @@ rec {
prefix:
# Input string
str:
let
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath prefix)
''
lib.strings.removePrefix: The first argument (${toString prefix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function never removes any prefix in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(let
preLen = stringLength prefix;
sLen = stringLength str;
in
if hasPrefix prefix str then
if substring 0 preLen str == prefix then
substring preLen (sLen - preLen) str
else
str;
str);
/* Return a string without the specified suffix, if the suffix matches.
@ -594,14 +649,23 @@ rec {
suffix:
# Input string
str:
let
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath suffix)
''
lib.strings.removeSuffix: The first argument (${toString suffix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function never removes any suffix in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(let
sufLen = stringLength suffix;
sLen = stringLength str;
in
if sufLen <= sLen && suffix == substring (sLen - sufLen) sufLen str then
substring 0 (sLen - sufLen) str
else
str;
str);
/* Return true if string v1 denotes a version older than v2.

View File

@ -189,7 +189,7 @@ checkConfigOutput '^"foo"$' config.submodule.foo ./declare-submoduleWith-special
## shorthandOnlyDefines config behaves as expected
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError 'is not of type `boolean' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigError "You're trying to declare a value of type \`bool'\n\s*rather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError "You're trying to define a value of type \`bool'\n\s*rather than an attribute set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
## submoduleWith should merge all modules in one swoop

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,57 @@
let
lib = import ../../lib;
inherit (lib)
add attrNames elemAt foldl' genList length replaceStrings sort toLower trace;
maintainers = import ../maintainer-list.nix;
simplify = replaceStrings [ "-" "_" ] [ "" "" ];
compare = a: b: simplify (toLower a) < simplify (toLower b);
namesSorted =
sort
(a: b: a.key < b.key)
(map
(n: let pos = builtins.unsafeGetAttrPos n maintainers;
in assert pos == null -> throw "maintainers entry ${n} is malformed";
{ name = n; line = pos.line; key = toLower (simplify n); })
(attrNames maintainers));
before = { name, line, key }:
foldl'
(acc: n: if n.key < key && (acc == null || n.key > acc.key) then n else acc)
null
namesSorted;
errors = foldl' add 0
(map
(i: let a = elemAt namesSorted i;
b = elemAt namesSorted (i + 1);
lim = let t = before a; in if t == null then "the initial {" else t.name;
in if a.line >= b.line
then trace
("maintainer ${a.name} (line ${toString a.line}) should be listed "
+ "after ${lim}, not after ${b.name} (line ${toString b.line})")
1
else 0)
(genList (i: i) (length namesSorted - 1)));
in
assert errors == 0; "all good!"
# generate edit commands to sort the list.
# may put everything following the last current entry (closing } ff) in the wrong place
# with lib;
# concatStringsSep
# "\n"
# (let first = foldl' (acc: n: if n.line < acc then n.line else acc) 999999999 namesSorted;
# commands = map
# (i: let e = elemAt namesSorted i;
# begin = foldl'
# (acc: n: if n.line < e.line && n.line > acc then n.line else acc)
# 1
# namesSorted;
# end =
# foldl' (acc: n: if n.line > e.line && n.line < acc then n.line else acc)
# 999999999
# namesSorted;
# in "${toString e.line},${toString (end - 1)} p")
# (genList (i: i) (length namesSorted));
# in map
# (c: "sed -ne '${c}' maintainers/maintainer-list.nix")
# ([ "1,${toString (first - 1)} p" ] ++ commands))

View File

@ -0,0 +1,4 @@
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=. -i bash -p "import ./maintainers/scripts/convert-to-import-cargo-lock" nix-prefetch-git
convert-to-import-cargo-lock "$@"

View File

@ -0,0 +1 @@
/target

View File

@ -0,0 +1,106 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "anyhow"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
[[package]]
name = "basic-toml"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e819b667739967cd44d308b8c7b71305d8bb0729ac44a248aa08f33d01950b4"
dependencies = [
"serde",
]
[[package]]
name = "convert-to-import-cargo-lock"
version = "0.1.0"
dependencies = [
"anyhow",
"basic-toml",
"serde",
"serde_json",
]
[[package]]
name = "itoa"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
[[package]]
name = "proc-macro2"
version = "1.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
dependencies = [
"proc-macro2",
]
[[package]]
name = "ryu"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
[[package]]
name = "serde"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "syn"
version = "1.0.107"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "unicode-ident"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"

View File

@ -0,0 +1,12 @@
[package]
name = "convert-to-import-cargo-lock"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = { version = "1.0.69" }
basic-toml = "0.1.1"
serde = { version = "1.0.152", features = ["derive"] }
serde_json = "1.0.93"

View File

@ -0,0 +1,16 @@
with import ../../../. { };
rustPlatform.buildRustPackage {
name = "convert-to-import-cargo-lock";
src = lib.cleanSourceWith {
src = ./.;
filter = name: type:
let
name' = builtins.baseNameOf name;
in
name' != "default.nix" && name' != "target";
};
cargoLock.lockFile = ./Cargo.lock;
}

View File

@ -0,0 +1,5 @@
with import ../../../. { };
mkShell {
packages = [ rustc cargo clippy rustfmt ] ++ lib.optional stdenv.isDarwin libiconv;
}

View File

@ -0,0 +1,241 @@
#![warn(clippy::pedantic)]
#![allow(clippy::too_many_lines)]
use anyhow::anyhow;
use serde::Deserialize;
use std::{collections::HashMap, env, fs, path::PathBuf, process::Command};
#[derive(Deserialize)]
struct CargoLock<'a> {
#[serde(rename = "package", borrow)]
packages: Vec<Package<'a>>,
metadata: Option<HashMap<&'a str, &'a str>>,
}
#[derive(Deserialize)]
struct Package<'a> {
name: &'a str,
version: &'a str,
source: Option<&'a str>,
checksum: Option<&'a str>,
}
#[derive(Deserialize)]
struct PrefetchOutput {
sha256: String,
}
fn main() -> anyhow::Result<()> {
let mut hashes = HashMap::new();
let attr_count = env::args().len() - 1;
for (i, attr) in env::args().skip(1).enumerate() {
println!("converting {attr} ({}/{attr_count})", i + 1);
convert(&attr, &mut hashes)?;
}
Ok(())
}
fn convert(attr: &str, hashes: &mut HashMap<String, String>) -> anyhow::Result<()> {
let package_path = nix_eval(format!("{attr}.meta.position"))?
.and_then(|p| p.split_once(':').map(|(f, _)| PathBuf::from(f)));
if package_path.is_none() {
eprintln!("can't automatically convert {attr}: doesn't exist");
return Ok(());
}
let package_path = package_path.unwrap();
if package_path.with_file_name("Cargo.lock").exists() {
eprintln!("skipping {attr}: already has a vendored Cargo.lock");
return Ok(());
}
let mut src = PathBuf::from(
String::from_utf8(
Command::new("nix-build")
.arg("-A")
.arg(format!("{attr}.src"))
.output()?
.stdout,
)?
.trim(),
);
if !src.exists() {
eprintln!("can't automatically convert {attr}: src doesn't exist (bad attr?)");
return Ok(());
} else if !src.metadata()?.is_dir() {
eprintln!("can't automatically convert {attr}: src isn't a directory");
return Ok(());
}
if let Some(mut source_root) = nix_eval(format!("{attr}.sourceRoot"))?.map(PathBuf::from) {
source_root = source_root.components().skip(1).collect();
src.push(source_root);
}
let cargo_lock_path = src.join("Cargo.lock");
if !cargo_lock_path.exists() {
eprintln!("can't automatically convert {attr}: src doesn't contain Cargo.lock");
return Ok(());
}
let cargo_lock_content = fs::read_to_string(cargo_lock_path)?;
let cargo_lock: CargoLock = basic_toml::from_str(&cargo_lock_content)?;
let mut git_dependencies = Vec::new();
for package in cargo_lock.packages.iter().filter(|p| {
p.source.is_some()
&& p.checksum
.or_else(|| {
cargo_lock
.metadata
.as_ref()?
.get(
format!("checksum {} {} ({})", p.name, p.version, p.source.unwrap())
.as_str(),
)
.copied()
})
.is_none()
}) {
let (typ, original_url) = package
.source
.unwrap()
.split_once('+')
.expect("dependency should have well-formed source url");
if let Some(hash) = hashes.get(original_url) {
continue;
}
assert_eq!(
typ, "git",
"packages without checksums should be git dependencies"
);
let (mut url, rev) = original_url
.split_once('#')
.expect("git dependency should have commit");
// TODO: improve
if let Some((u, _)) = url.split_once('?') {
url = u;
}
let prefetch_output: PrefetchOutput = serde_json::from_slice(
&Command::new("nix-prefetch-git")
.args(["--url", url, "--rev", rev, "--quiet", "--fetch-submodules"])
.output()?
.stdout,
)?;
let output_hash = String::from_utf8(
Command::new("nix")
.args([
"--extra-experimental-features",
"nix-command",
"hash",
"to-sri",
"--type",
"sha256",
&prefetch_output.sha256,
])
.output()?
.stdout,
)?;
let hash = output_hash.trim().to_string();
git_dependencies.push((
format!("{}-{}", package.name, package.version),
output_hash.trim().to_string().clone(),
));
hashes.insert(original_url.to_string(), hash);
}
fs::write(
package_path.with_file_name("Cargo.lock"),
cargo_lock_content,
)?;
let mut package_lines: Vec<_> = fs::read_to_string(&package_path)?
.lines()
.map(String::from)
.collect();
let (cargo_deps_line_index, cargo_deps_line) = package_lines
.iter_mut()
.enumerate()
.find(|(_, l)| {
l.trim_start().starts_with("cargoHash") || l.trim_start().starts_with("cargoSha256")
})
.expect("package should contain cargoHash/cargoSha256");
let spaces = " ".repeat(cargo_deps_line.len() - cargo_deps_line.trim_start().len());
if git_dependencies.is_empty() {
*cargo_deps_line = format!("{spaces}cargoLock.lockFile = ./Cargo.lock;");
} else {
*cargo_deps_line = format!("{spaces}cargoLock = {{");
let mut index_iter = cargo_deps_line_index + 1..;
package_lines.insert(
index_iter.next().unwrap(),
format!("{spaces} lockFile = ./Cargo.lock;"),
);
package_lines.insert(
index_iter.next().unwrap(),
format!("{spaces} outputHashes = {{"),
);
for ((dep, hash), index) in git_dependencies.drain(..).zip(&mut index_iter) {
package_lines.insert(index, format!("{spaces} {dep:?} = {hash:?};"));
}
package_lines.insert(index_iter.next().unwrap(), format!("{spaces} }};"));
package_lines.insert(index_iter.next().unwrap(), format!("{spaces}}};"));
}
if package_lines.last().map(String::as_str) != Some("") {
package_lines.push(String::new());
}
fs::write(package_path, package_lines.join("\n"))?;
Ok(())
}
fn nix_eval(attr: impl AsRef<str>) -> anyhow::Result<Option<String>> {
let output = String::from_utf8(
Command::new("nix-instantiate")
.args(["--eval", "-A", attr.as_ref()])
.output()?
.stdout,
)?;
let trimmed = output.trim();
if trimmed.is_empty() || trimmed == "null" {
Ok(None)
} else {
Ok(Some(
trimmed
.strip_prefix('"')
.and_then(|p| p.strip_suffix('"'))
.ok_or_else(|| anyhow!("couldn't parse nix-instantiate output: {output:?}"))?
.to_string(),
))
}
}

View File

@ -127,8 +127,7 @@ echo "$urllist" | xargs wget $wgetargs -nH -r -c --no-parent && {
# TODO fetch only missing tar.xz files
echo "fetching $filecount tar.xz files ..."
urllist="$(echo "$filelist" | while read file; do echo "$BASE_URL/$file"; done)"
echo "$urllist" | xargs wget $wgetargs -nH -r -c --no-parent
echo "$filelist" | xargs wget $wgetargs -nH -r -c --no-parent
echo "generating sha256 files ..."
find . -type f -name '*.tar.xz' | while read src; do

View File

@ -3,7 +3,7 @@ let
pkgs = import ../../.. {};
inherit (pkgs) lib;
getDeps = _: pkg: {
deps = builtins.filter (x: !isNull x) (map (x: x.pname or null) (pkg.propagatedBuildInputs or []));
deps = builtins.filter (x: x != null) (map (x: x.pname or null) (pkg.propagatedBuildInputs or []));
broken = (pkg.meta.hydraPlatforms or [null]) == [];
};
in

View File

@ -19,6 +19,8 @@ Because step 1) is quite expensive and takes roughly ~5 minutes the result is ca
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
@ -32,6 +34,7 @@ import Control.Monad (forM_, (<=<))
import Control.Monad.Trans (MonadIO (liftIO))
import Data.Aeson (
FromJSON,
FromJSONKey,
ToJSON,
decodeFileStrict',
eitherDecodeStrict',
@ -51,6 +54,7 @@ import qualified Data.Set as Set
import Data.Text (Text)
import qualified Data.Text as Text
import Data.Text.Encoding (encodeUtf8)
import qualified Data.Text.IO as Text
import Data.Time (defaultTimeLocale, formatTime, getCurrentTime)
import Data.Time.Clock (UTCTime)
import GHC.Generics (Generic)
@ -90,13 +94,16 @@ import Distribution.Simple.Utils (safeLast, fromUTF8BS)
newtype JobsetEvals = JobsetEvals
{ evals :: Seq Eval
}
deriving (Generic, ToJSON, FromJSON, Show)
deriving stock (Generic, Show)
deriving anyclass (ToJSON, FromJSON)
newtype Nixpkgs = Nixpkgs {revision :: Text}
deriving (Generic, ToJSON, FromJSON, Show)
deriving stock (Generic, Show)
deriving anyclass (ToJSON, FromJSON)
newtype JobsetEvalInputs = JobsetEvalInputs {nixpkgs :: Nixpkgs}
deriving (Generic, ToJSON, FromJSON, Show)
deriving stock (Generic, Show)
deriving anyclass (ToJSON, FromJSON)
data Eval = Eval
{ id :: Int
@ -104,13 +111,42 @@ data Eval = Eval
}
deriving (Generic, ToJSON, FromJSON, Show)
-- | Hydra job name.
--
-- Examples:
-- - @"haskellPackages.lens.x86_64-linux"@
-- - @"haskell.packages.ghc925.cabal-install.aarch64-darwin"@
-- - @"pkgsMusl.haskell.compiler.ghc90.x86_64-linux"@
-- - @"arion.aarch64-linux"@
newtype JobName = JobName { unJobName :: Text }
deriving stock (Generic, Show)
deriving newtype (Eq, FromJSONKey, FromJSON, Ord, ToJSON)
-- | Datatype representing the result of querying the build evals of the
-- haskell-updates Hydra jobset.
--
-- The URL <https://hydra.nixos.org/eval/EVAL_ID/builds> (where @EVAL_ID@ is a
-- value like 1792418) returns a list of 'Build'.
data Build = Build
{ job :: Text
{ job :: JobName
, buildstatus :: Maybe Int
-- ^ Status of the build. See 'getBuildState' for the meaning of each state.
, finished :: Int
-- ^ Whether or not the build is finished. @0@ if finished, non-zero otherwise.
, id :: Int
, nixname :: Text
-- ^ Nix name of the derivation.
--
-- Examples:
-- - @"lens-5.2.1"@
-- - @"cabal-install-3.8.0.1"@
-- - @"lens-static-x86_64-unknown-linux-musl-5.1.1"@
, system :: Text
-- ^ System
--
-- Examples:
-- - @"x86_64-linux"@
-- - @"aarch64-darwin"@
, jobsetevals :: Seq Int
}
deriving (Generic, ToJSON, FromJSON, Show)
@ -196,7 +232,7 @@ newtype Maintainers = Maintainers { maintainers :: Maybe Text }
--
-- Note that Hydra jobs without maintainers will have an empty string for the
-- maintainer list.
type HydraJobs = Map Text Maintainers
type HydraJobs = Map JobName Maintainers
-- | Map of email addresses to GitHub handles.
-- This is built from the file @../../maintainer-list.nix@.
@ -221,12 +257,12 @@ type EmailToGitHubHandles = Map Text Text
-- , ("conduit.x86_64-darwin", ["snoyb", "webber"])
-- ]
-- @@
type MaintainerMap = Map Text (NonEmpty Text)
type MaintainerMap = Map JobName (NonEmpty Text)
-- | Information about a package which lists its dependencies and whether the
-- package is marked broken.
data DepInfo = DepInfo {
deps :: Set Text,
deps :: Set PkgName,
broken :: Bool
}
deriving stock (Generic, Show)
@ -234,23 +270,37 @@ data DepInfo = DepInfo {
-- | Map from package names to their DepInfo. This is the data we get out of a
-- nix call.
type DependencyMap = Map Text DepInfo
type DependencyMap = Map PkgName DepInfo
-- | Map from package names to its broken state, number of reverse dependencies (fst) and
-- unbroken reverse dependencies (snd).
type ReverseDependencyMap = Map Text (Int, Int)
type ReverseDependencyMap = Map PkgName (Int, Int)
-- | Calculate the (unbroken) reverse dependencies of a package by transitively
-- going through all packages if its a dependency of them.
calculateReverseDependencies :: DependencyMap -> ReverseDependencyMap
calculateReverseDependencies depMap = Map.fromDistinctAscList $ zip keys (zip (rdepMap False) (rdepMap True))
calculateReverseDependencies depMap =
Map.fromDistinctAscList $ zip keys (zip (rdepMap False) (rdepMap True))
where
-- This code tries to efficiently invert the dependency map and calculate
-- its transitive closure by internally identifying every pkg with its index
-- in the package list and then using memoization.
keys :: [PkgName]
keys = Map.keys depMap
pkgToIndexMap :: Map PkgName Int
pkgToIndexMap = Map.fromDistinctAscList (zip keys [0..])
intDeps = zip [0..] $ (\DepInfo{broken,deps} -> (broken,mapMaybe (`Map.lookup` pkgToIndexMap) $ Set.toList deps)) <$> Map.elems depMap
depInfos :: [DepInfo]
depInfos = Map.elems depMap
depInfoToIdx :: DepInfo -> (Bool, [Int])
depInfoToIdx DepInfo{broken,deps} =
(broken, mapMaybe (`Map.lookup` pkgToIndexMap) $ Set.toList deps)
intDeps :: [(Int, (Bool, [Int]))]
intDeps = zip [0..] (fmap depInfoToIdx depInfos)
rdepMap onlyUnbroken = IntSet.size <$> resultList
where
resultList = go <$> [0..]
@ -267,7 +317,7 @@ getMaintainerMap = do
handlesMap :: EmailToGitHubHandles <-
readJSONProcess nixExprCommand ("maintainers/scripts/haskell/maintainer-handles.nix":nixExprParams) "Failed to decode nix output for lookup of github handles: "
pure $ Map.mapMaybe (splitMaintainersToGitHubHandles handlesMap) hydraJobs
where
where
-- Split a comma-separated string of Maintainers into a NonEmpty list of
-- GitHub handles.
splitMaintainersToGitHubHandles
@ -279,7 +329,10 @@ getMaintainerMap = do
-- script ./dependencies.nix.
getDependencyMap :: IO DependencyMap
getDependencyMap =
readJSONProcess nixExprCommand ("maintainers/scripts/haskell/dependencies.nix":nixExprParams) "Failed to decode nix output for lookup of dependencies: "
readJSONProcess
nixExprCommand
("maintainers/scripts/haskell/dependencies.nix" : nixExprParams)
"Failed to decode nix output for lookup of dependencies: "
-- | Run a process that produces JSON on stdout and decode the JSON to a
-- data type.
@ -331,16 +384,77 @@ platformIcon (Platform x) = case x of
"aarch64-darwin" -> ":green_apple:"
_ -> x
platformIsOS :: OS -> Platform -> Bool
platformIsOS os (Platform x) = case (os, x) of
(Linux, "x86_64-linux") -> True
(Linux, "aarch64-linux") -> True
(Darwin, "x86_64-darwin") -> True
(Darwin, "aarch64-darwin") -> True
_ -> False
-- | A package name. This is parsed from a 'JobName'.
--
-- Examples:
--
-- - The 'JobName' @"haskellPackages.lens.x86_64-linux"@ produces the 'PkgName'
-- @"lens"@.
-- - The 'JobName' @"haskell.packages.ghc925.cabal-install.aarch64-darwin"@
-- produces the 'PkgName' @"cabal-install"@.
-- - The 'JobName' @"pkgsMusl.haskell.compiler.ghc90.x86_64-linux"@ produces
-- the 'PkgName' @"ghc90"@.
-- - The 'JobName' @"arion.aarch64-linux"@ produces the 'PkgName' @"arion"@.
--
-- 'PkgName' is also used as a key in 'DependencyMap' and 'ReverseDependencyMap'.
-- In this case, 'PkgName' originally comes from attribute names in @haskellPackages@
-- in Nixpkgs.
newtype PkgName = PkgName Text
deriving stock (Generic, Show)
deriving newtype (Eq, FromJSON, FromJSONKey, Ord, ToJSON)
-- | A package set name. This is parsed from a 'JobName'.
--
-- Examples:
--
-- - The 'JobName' @"haskellPackages.lens.x86_64-linux"@ produces the 'PkgSet'
-- @"haskellPackages"@.
-- - The 'JobName' @"haskell.packages.ghc925.cabal-install.aarch64-darwin"@
-- produces the 'PkgSet' @"haskell.packages.ghc925"@.
-- - The 'JobName' @"pkgsMusl.haskell.compiler.ghc90.x86_64-linux"@ produces
-- the 'PkgSet' @"pkgsMusl.haskell.compiler"@.
-- - The 'JobName' @"arion.aarch64-linux"@ produces the 'PkgSet' @""@.
--
-- As you can see from the last example, 'PkgSet' can be empty (@""@) for
-- top-level jobs.
newtype PkgSet = PkgSet Text
deriving stock (Generic, Show)
deriving newtype (Eq, FromJSON, FromJSONKey, Ord, ToJSON)
data BuildResult = BuildResult {state :: BuildState, id :: Int} deriving (Show, Eq, Ord)
newtype Platform = Platform {platform :: Text} deriving (Show, Eq, Ord)
newtype Table row col a = Table (Map (row, col) a)
data SummaryEntry = SummaryEntry {
summaryBuilds :: Table Text Platform BuildResult,
summaryBuilds :: Table PkgSet Platform BuildResult,
summaryMaintainers :: Set Text,
summaryReverseDeps :: Int,
summaryUnbrokenReverseDeps :: Int
}
type StatusSummary = Map Text SummaryEntry
type StatusSummary = Map PkgName SummaryEntry
data OS = Linux | Darwin
newtype Table row col a = Table (Map (row, col) a)
singletonTable :: row -> col -> a -> Table row col a
singletonTable row col a = Table $ Map.singleton (row, col) a
unionTable :: (Ord row, Ord col) => Table row col a -> Table row col a -> Table row col a
unionTable (Table l) (Table r) = Table $ Map.union l r
filterWithKeyTable :: (row -> col -> a -> Bool) -> Table row col a -> Table row col a
filterWithKeyTable f (Table t) = Table $ Map.filterWithKey (\(r,c) a -> f r c a) t
nullTable :: Table row col a -> Bool
nullTable (Table t) = Map.null t
instance (Ord row, Ord col, Semigroup a) => Semigroup (Table row col a) where
Table l <> Table r = Table (Map.unionWith (<>) l r)
@ -363,18 +477,45 @@ getBuildState Build{finished, buildstatus} = case (finished, buildstatus) of
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i
buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary
buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary
combineStatusSummaries :: Seq StatusSummary -> StatusSummary
combineStatusSummaries = foldl (Map.unionWith unionSummary) Map.empty
where
unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru)
toSummary build@Build{job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult (getBuildState build) id))) maintainers reverseDeps unbrokenReverseDeps)
where
packageName = fromMaybe job (Text.stripSuffix ("." <> system) job)
splitted = nonEmpty $ Text.splitOn "." packageName
name = maybe packageName NonEmpty.last splitted
set = maybe "" (Text.intercalate "." . NonEmpty.init) splitted
maintainers = maybe mempty (Set.fromList . toList) (Map.lookup job maintainerMap)
(reverseDeps, unbrokenReverseDeps) = Map.findWithDefault (0,0) name reverseDependencyMap
unionSummary :: SummaryEntry -> SummaryEntry -> SummaryEntry
unionSummary (SummaryEntry lb lm lr lu) (SummaryEntry rb rm rr ru) =
SummaryEntry (unionTable lb rb) (lm <> rm) (max lr rr) (max lu ru)
buildToPkgNameAndSet :: Build -> (PkgName, PkgSet)
buildToPkgNameAndSet Build{job = JobName jobName, system} = (name, set)
where
packageName :: Text
packageName = fromMaybe jobName (Text.stripSuffix ("." <> system) jobName)
splitted :: Maybe (NonEmpty Text)
splitted = nonEmpty $ Text.splitOn "." packageName
name :: PkgName
name = PkgName $ maybe packageName NonEmpty.last splitted
set :: PkgSet
set = PkgSet $ maybe "" (Text.intercalate "." . NonEmpty.init) splitted
buildToStatusSummary :: MaintainerMap -> ReverseDependencyMap -> Build -> StatusSummary
buildToStatusSummary maintainerMap reverseDependencyMap build@Build{job, id, system} =
Map.singleton pkgName summaryEntry
where
(pkgName, pkgSet) = buildToPkgNameAndSet build
maintainers :: Set Text
maintainers = maybe mempty (Set.fromList . toList) (Map.lookup job maintainerMap)
(reverseDeps, unbrokenReverseDeps) =
Map.findWithDefault (0,0) pkgName reverseDependencyMap
buildTable :: Table PkgSet Platform BuildResult
buildTable =
singletonTable pkgSet (Platform system) (BuildResult (getBuildState build) id)
summaryEntry = SummaryEntry buildTable maintainers reverseDeps unbrokenReverseDeps
readBuildReports :: IO (Eval, UTCTime, Seq Build)
readBuildReports = do
@ -396,19 +537,36 @@ printTable name showR showC showE (Table mapping) = joinTable <$> (name : map sh
rows = toList $ Set.fromList (fst <$> Map.keys mapping)
cols = toList $ Set.fromList (snd <$> Map.keys mapping)
printJob :: Int -> Text -> (Table Text Platform BuildResult, Text) -> [Text]
printJob evalId name (Table mapping, maintainers) =
printJob :: Int -> PkgName -> (Table PkgSet Platform BuildResult, Text) -> [Text]
printJob evalId (PkgName name) (Table mapping, maintainers) =
if length sets <= 1
then map printSingleRow sets
else ["- [ ] " <> makeJobSearchLink "" name <> " " <> maintainers] <> map printRow sets
else ["- [ ] " <> makeJobSearchLink (PkgSet "") name <> " " <> maintainers] <> map printRow sets
where
printRow set = " - " <> printState set <> " " <> makeJobSearchLink set (if Text.null set then "toplevel" else set)
printSingleRow set = "- [ ] " <> printState set <> " " <> makeJobSearchLink set (makePkgName set) <> " " <> maintainers
makePkgName set = (if Text.null set then "" else set <> ".") <> name
printState set = Text.intercalate " " $ map (\pf -> maybe "" (label pf) $ Map.lookup (set, pf) mapping) platforms
makeJobSearchLink set linkLabel= makeSearchLink evalId linkLabel (makePkgName set)
printRow :: PkgSet -> Text
printRow (PkgSet set) =
" - " <> printState (PkgSet set) <> " " <>
makeJobSearchLink (PkgSet set) (if Text.null set then "toplevel" else set)
printSingleRow set =
"- [ ] " <> printState set <> " " <>
makeJobSearchLink set (makePkgName set) <> " " <> maintainers
makePkgName :: PkgSet -> Text
makePkgName (PkgSet set) = (if Text.null set then "" else set <> ".") <> name
printState set =
Text.intercalate " " $ map (\pf -> maybe "" (label pf) $ Map.lookup (set, pf) mapping) platforms
makeJobSearchLink :: PkgSet -> Text -> Text
makeJobSearchLink set linkLabel = makeSearchLink evalId linkLabel (makePkgName set)
sets :: [PkgSet]
sets = toList $ Set.fromList (fst <$> Map.keys mapping)
platforms :: [Platform]
platforms = toList $ Set.fromList (snd <$> Map.keys mapping)
label pf (BuildResult s i) = "[[" <> platformIcon pf <> icon s <> "]](https://hydra.nixos.org/build/" <> showT i <> ")"
makeSearchLink :: Int -> Text -> Text -> Text
@ -425,76 +583,177 @@ details summary content = ["<details><summary>" <> summary <> " </summary>", ""]
evalLine :: Eval -> UTCTime -> Text
evalLine Eval{id, jobsetevalinputs = JobsetEvalInputs{nixpkgs = Nixpkgs{revision}}} fetchTime =
"*evaluation ["
<> showT id
<> "](https://hydra.nixos.org/eval/"
<> showT id
<> ") of nixpkgs commit ["
<> Text.take 7 revision
<> "](https://github.com/NixOS/nixpkgs/commits/"
<> revision
<> ") as of "
<> Text.pack (formatTime defaultTimeLocale "%Y-%m-%d %H:%M UTC" fetchTime)
<> "*"
"*evaluation ["
<> showT id
<> "](https://hydra.nixos.org/eval/"
<> showT id
<> ") of nixpkgs commit ["
<> Text.take 7 revision
<> "](https://github.com/NixOS/nixpkgs/commits/"
<> revision
<> ") as of "
<> Text.pack (formatTime defaultTimeLocale "%Y-%m-%d %H:%M UTC" fetchTime)
<> "*"
printBuildSummary :: Eval -> UTCTime -> StatusSummary -> [(Text, Int)] -> Text
printBuildSummary :: Eval -> UTCTime -> StatusSummary -> [(PkgName, Int)] -> Text
printBuildSummary eval@Eval{id} fetchTime summary topBrokenRdeps =
Text.unlines $
headline <> [""] <> tldr <> ((" * "<>) <$> (errors <> warnings)) <> [""]
<> totals
<> optionalList "#### Maintained packages with build failure" (maintainedList fails)
<> optionalList "#### Maintained packages with failed dependency" (maintainedList failedDeps)
<> optionalList "#### Maintained packages with unknown error" (maintainedList unknownErr)
<> optionalHideableList "#### Unmaintained packages with build failure" (unmaintainedList fails)
<> optionalHideableList "#### Unmaintained packages with failed dependency" (unmaintainedList failedDeps)
<> optionalHideableList "#### Unmaintained packages with unknown error" (unmaintainedList unknownErr)
<> optionalHideableList "#### Top 50 broken packages, sorted by number of reverse dependencies" (brokenLine <$> topBrokenRdeps)
<> ["","*:arrow_heading_up:: The number of packages that depend (directly or indirectly) on this package (if any). If two numbers are shown the first (lower) number considers only packages which currently have enabled hydra jobs, i.e. are not marked broken. The second (higher) number considers all packages.*",""]
<> footer
where
footer = ["*Report generated with [maintainers/scripts/haskell/hydra-report.hs](https://github.com/NixOS/nixpkgs/blob/haskell-updates/maintainers/scripts/haskell/hydra-report.hs)*"]
headline =
[ "### [haskell-updates build report from hydra](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates)"
, evalLine eval fetchTime ]
totals =
[ "#### Build summary"
, ""
]
<> printTable "Platform" (\x -> makeSearchLink id (platform x <> " " <> platformIcon x) ("." <> platform x)) (\x -> showT x <> " " <> icon x) showT numSummary
brokenLine (name, rdeps) = "[" <> name <> "](https://packdeps.haskellers.com/reverse/" <> name <> ") :arrow_heading_up: " <> Text.pack (show rdeps) <> " "
numSummary = statusToNumSummary summary
jobsByState predicate = Map.filter (predicate . worstState) summary
worstState = foldl' min Success . fmap state . summaryBuilds
fails = jobsByState (== Failed)
failedDeps = jobsByState (== DependencyFailed)
unknownErr = jobsByState (\x -> x > DependencyFailed && x < TimedOut)
withMaintainer = Map.mapMaybe (\e -> (summaryBuilds e,) <$> nonEmpty (Set.toList (summaryMaintainers e)))
withoutMaintainer = Map.mapMaybe (\e -> if Set.null (summaryMaintainers e) then Just e else Nothing)
optionalList heading list = if null list then mempty else [heading] <> list
optionalHideableList heading list = if null list then mempty else [heading] <> details (showT (length list) <> " job(s)") list
maintainedList = showMaintainedBuild <=< Map.toList . withMaintainer
unmaintainedList = showBuild <=< sortOn (\(snd -> x) -> (negate (summaryUnbrokenReverseDeps x), negate (summaryReverseDeps x))) . Map.toList . withoutMaintainer
showBuild (name, entry) = printJob id name (summaryBuilds entry, Text.pack (if summaryReverseDeps entry > 0 then " :arrow_heading_up: " <> show (summaryUnbrokenReverseDeps entry) <>" | "<> show (summaryReverseDeps entry) else ""))
showMaintainedBuild (name, (table, maintainers)) = printJob id name (table, Text.intercalate " " (fmap ("@" <>) (toList maintainers)))
tldr = case (errors, warnings) of
([],[]) -> [":green_circle: **Ready to merge** (if there are no [evaluation errors](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates))"]
([],_) -> [":yellow_circle: **Potential issues** (and possibly [evaluation errors](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates))"]
_ -> [":red_circle: **Branch not mergeable**"]
warnings =
if' (Unfinished > maybe Success worstState maintainedJob) "`maintained` jobset failed." <>
if' (Unfinished == maybe Success worstState mergeableJob) "`mergeable` jobset is not finished." <>
if' (Unfinished == maybe Success worstState maintainedJob) "`maintained` jobset is not finished."
errors =
if' (isNothing mergeableJob) "No `mergeable` job found." <>
if' (isNothing maintainedJob) "No `maintained` job found." <>
if' (Unfinished > maybe Success worstState mergeableJob) "`mergeable` jobset failed." <>
if' (outstandingJobs (Platform "x86_64-linux") > 100) "Too many outstanding jobs on x86_64-linux." <>
if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux." <>
if' (outstandingJobs (Platform "aarch64-darwin") > 100) "Too many outstanding jobs on aarch64-darwin."
if' p e = if p then [e] else mempty
outstandingJobs platform | Table m <- numSummary = Map.findWithDefault 0 (platform, Unfinished) m
maintainedJob = Map.lookup "maintained" summary
mergeableJob = Map.lookup "mergeable" summary
Text.unlines $
headline <> [""] <> tldr <> ((" * "<>) <$> (errors <> warnings)) <> [""]
<> totals
<> optionalList "#### Maintained Linux packages with build failure" (maintainedList (fails summaryLinux))
<> optionalList "#### Maintained Linux packages with failed dependency" (maintainedList (failedDeps summaryLinux))
<> optionalList "#### Maintained Linux packages with unknown error" (maintainedList (unknownErr summaryLinux))
<> optionalHideableList "#### Maintained Darwin packages with build failure" (maintainedList (fails summaryDarwin))
<> optionalHideableList "#### Maintained Darwin packages with failed dependency" (maintainedList (failedDeps summaryDarwin))
<> optionalHideableList "#### Maintained Darwin packages with unknown error" (maintainedList (unknownErr summaryDarwin))
<> optionalHideableList "#### Unmaintained packages with build failure" (unmaintainedList (fails summary))
<> optionalHideableList "#### Unmaintained packages with failed dependency" (unmaintainedList (failedDeps summary))
<> optionalHideableList "#### Unmaintained packages with unknown error" (unmaintainedList (unknownErr summary))
<> optionalHideableList "#### Top 50 broken packages, sorted by number of reverse dependencies" (brokenLine <$> topBrokenRdeps)
<> ["","*:arrow_heading_up:: The number of packages that depend (directly or indirectly) on this package (if any). If two numbers are shown the first (lower) number considers only packages which currently have enabled hydra jobs, i.e. are not marked broken. The second (higher) number considers all packages.*",""]
<> footer
where
footer = ["*Report generated with [maintainers/scripts/haskell/hydra-report.hs](https://github.com/NixOS/nixpkgs/blob/haskell-updates/maintainers/scripts/haskell/hydra-report.hs)*"]
headline =
[ "### [haskell-updates build report from hydra](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates)"
, evalLine eval fetchTime
]
totals :: [Text]
totals =
[ "#### Build summary"
, ""
] <>
printTable
"Platform"
(\x -> makeSearchLink id (platform x <> " " <> platformIcon x) ("." <> platform x))
(\x -> showT x <> " " <> icon x)
showT
numSummary
brokenLine :: (PkgName, Int) -> Text
brokenLine (PkgName name, rdeps) =
"[" <> name <> "](https://packdeps.haskellers.com/reverse/" <> name <>
") :arrow_heading_up: " <> Text.pack (show rdeps) <> " "
numSummary = statusToNumSummary summary
summaryLinux :: StatusSummary
summaryLinux = withOS Linux summary
summaryDarwin :: StatusSummary
summaryDarwin = withOS Darwin summary
-- Remove all BuildResult from the Table that have Platform that isn't for
-- the given OS.
tableForOS :: OS -> Table PkgSet Platform BuildResult -> Table PkgSet Platform BuildResult
tableForOS os = filterWithKeyTable (\_ platform _ -> platformIsOS os platform)
-- Remove all BuildResult from the StatusSummary that have a Platform that
-- isn't for the given OS. Completely remove all PkgName from StatusSummary
-- that end up with no BuildResults.
withOS
:: OS
-> StatusSummary
-> StatusSummary
withOS os =
Map.mapMaybe
(\e@SummaryEntry{summaryBuilds} ->
let buildsForOS = tableForOS os summaryBuilds
in if nullTable buildsForOS then Nothing else Just e { summaryBuilds = buildsForOS }
)
jobsByState :: (BuildState -> Bool) -> StatusSummary -> StatusSummary
jobsByState predicate = Map.filter (predicate . worstState)
worstState :: SummaryEntry -> BuildState
worstState = foldl' min Success . fmap state . summaryBuilds
fails :: StatusSummary -> StatusSummary
fails = jobsByState (== Failed)
failedDeps :: StatusSummary -> StatusSummary
failedDeps = jobsByState (== DependencyFailed)
unknownErr :: StatusSummary -> StatusSummary
unknownErr = jobsByState (\x -> x > DependencyFailed && x < TimedOut)
withMaintainer :: StatusSummary -> Map PkgName (Table PkgSet Platform BuildResult, NonEmpty Text)
withMaintainer =
Map.mapMaybe
(\e -> (summaryBuilds e,) <$> nonEmpty (Set.toList (summaryMaintainers e)))
withoutMaintainer :: StatusSummary -> StatusSummary
withoutMaintainer = Map.mapMaybe (\e -> if Set.null (summaryMaintainers e) then Just e else Nothing)
optionalList :: Text -> [Text] -> [Text]
optionalList heading list = if null list then mempty else [heading] <> list
optionalHideableList :: Text -> [Text] -> [Text]
optionalHideableList heading list = if null list then mempty else [heading] <> details (showT (length list) <> " job(s)") list
maintainedList :: StatusSummary -> [Text]
maintainedList = showMaintainedBuild <=< Map.toList . withMaintainer
summaryEntryGetReverseDeps :: SummaryEntry -> (Int, Int)
summaryEntryGetReverseDeps sumEntry =
( negate $ summaryUnbrokenReverseDeps sumEntry
, negate $ summaryReverseDeps sumEntry
)
sortOnReverseDeps :: [(PkgName, SummaryEntry)] -> [(PkgName, SummaryEntry)]
sortOnReverseDeps = sortOn (\(_, sumEntry) -> summaryEntryGetReverseDeps sumEntry)
unmaintainedList :: StatusSummary -> [Text]
unmaintainedList = showBuild <=< sortOnReverseDeps . Map.toList . withoutMaintainer
showBuild :: (PkgName, SummaryEntry) -> [Text]
showBuild (name, entry) =
printJob
id
name
( summaryBuilds entry
, Text.pack
( if summaryReverseDeps entry > 0
then
" :arrow_heading_up: " <> show (summaryUnbrokenReverseDeps entry) <>
" | " <> show (summaryReverseDeps entry)
else ""
)
)
showMaintainedBuild
:: (PkgName, (Table PkgSet Platform BuildResult, NonEmpty Text)) -> [Text]
showMaintainedBuild (name, (table, maintainers)) =
printJob
id
name
( table
, Text.intercalate " " (fmap ("@" <>) (toList maintainers))
)
tldr = case (errors, warnings) of
([],[]) -> [":green_circle: **Ready to merge** (if there are no [evaluation errors](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates))"]
([],_) -> [":yellow_circle: **Potential issues** (and possibly [evaluation errors](https://hydra.nixos.org/jobset/nixpkgs/haskell-updates))"]
_ -> [":red_circle: **Branch not mergeable**"]
warnings =
if' (Unfinished > maybe Success worstState maintainedJob) "`maintained` jobset failed." <>
if' (Unfinished == maybe Success worstState mergeableJob) "`mergeable` jobset is not finished." <>
if' (Unfinished == maybe Success worstState maintainedJob) "`maintained` jobset is not finished."
errors =
if' (isNothing mergeableJob) "No `mergeable` job found." <>
if' (isNothing maintainedJob) "No `maintained` job found." <>
if' (Unfinished > maybe Success worstState mergeableJob) "`mergeable` jobset failed." <>
if' (outstandingJobs (Platform "x86_64-linux") > 100) "Too many outstanding jobs on x86_64-linux." <>
if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux."
if' p e = if p then [e] else mempty
outstandingJobs platform | Table m <- numSummary = Map.findWithDefault 0 (platform, Unfinished) m
maintainedJob = Map.lookup (PkgName "maintained") summary
mergeableJob = Map.lookup (PkgName "mergeable") summary
printEvalInfo :: IO ()
printEvalInfo = do
@ -509,13 +768,18 @@ printMaintainerPing = do
let tops = take 50 . sortOn (negate . snd) . fmap (second fst) . filter (\x -> maybe False broken $ Map.lookup (fst x) depMap) . Map.toList $ rdepMap
pure (rdepMap, tops)
(eval, fetchTime, buildReport) <- readBuildReports
putStrLn (Text.unpack (printBuildSummary eval fetchTime (buildSummary maintainerMap reverseDependencyMap buildReport) topBrokenRdeps))
let statusSummaries =
fmap (buildToStatusSummary maintainerMap reverseDependencyMap) buildReport
buildSum :: StatusSummary
buildSum = combineStatusSummaries statusSummaries
textBuildSummary = printBuildSummary eval fetchTime buildSum topBrokenRdeps
Text.putStrLn textBuildSummary
printMarkBrokenList :: IO ()
printMarkBrokenList = do
(_, fetchTime, buildReport) <- readBuildReports
runReq defaultHttpConfig $ forM_ buildReport \build@Build{job, id} ->
case (getBuildState build, Text.splitOn "." job) of
case (getBuildState build, Text.splitOn "." $ unJobName job) of
(Failed, ["haskellPackages", name, "x86_64-linux"]) -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]

View File

@ -0,0 +1,468 @@
#!/usr/bin/env nix-shell
#!nix-shell update-octave-shell.nix -i python3
"""
Update an Octave package expression by passing in the `.nix` file, or the directory containing it.
You can pass in multiple files or paths.
You'll likely want to use
``
$ ./update-octave-libraries ../../pkgs/development/octave-modules/**/default.nix
``
to update all non-pinned libraries in that folder.
"""
import argparse
import os
import pathlib
import re
import requests
import yaml
from concurrent.futures import ThreadPoolExecutor as Pool
from packaging.version import Version as _Version
from packaging.version import InvalidVersion
from packaging.specifiers import SpecifierSet
import collections
import subprocess
import tempfile
INDEX = "https://raw.githubusercontent.com/gnu-octave/packages/main/packages"
"""url of Octave packages' source on GitHub"""
EXTENSIONS = ['tar.gz', 'tar.bz2', 'tar', 'zip']
"""Permitted file extensions. These are evaluated from left to right and the first occurance is returned."""
PRERELEASES = False
GIT = "git"
NIXPGKS_ROOT = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode('utf-8').strip()
import logging
logging.basicConfig(level=logging.INFO)
class Version(_Version, collections.abc.Sequence):
def __init__(self, version):
super().__init__(version)
# We cannot use `str(Version(0.04.21))` because that becomes `0.4.21`
# https://github.com/avian2/unidecode/issues/13#issuecomment-354538882
self.raw_version = version
def __getitem__(self, i):
return self._version.release[i]
def __len__(self):
return len(self._version.release)
def __iter__(self):
yield from self._version.release
def _get_values(attribute, text):
"""Match attribute in text and return all matches.
:returns: List of matches.
"""
regex = '{}\s+=\s+"(.*)";'.format(attribute)
regex = re.compile(regex)
values = regex.findall(text)
return values
def _get_unique_value(attribute, text):
"""Match attribute in text and return unique match.
:returns: Single match.
"""
values = _get_values(attribute, text)
n = len(values)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return values[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _get_line_and_value(attribute, text):
"""Match attribute in text. Return the line and the value of the attribute."""
regex = '({}\s+=\s+"(.*)";)'.format(attribute)
regex = re.compile(regex)
value = regex.findall(text)
n = len(value)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return value[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _replace_value(attribute, value, text):
"""Search and replace value of attribute in text."""
old_line, old_value = _get_line_and_value(attribute, text)
new_line = old_line.replace(old_value, value)
new_text = text.replace(old_line, new_line)
return new_text
def _fetch_page(url):
r = requests.get(url)
if r.status_code == requests.codes.ok:
return list(yaml.safe_load_all(r.content))[0]
else:
raise ValueError("request for {} failed".format(url))
def _fetch_github(url):
headers = {}
token = os.environ.get('GITHUB_API_TOKEN')
if token:
headers["Authorization"] = f"token {token}"
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
else:
raise ValueError("request for {} failed".format(url))
SEMVER = {
'major' : 0,
'minor' : 1,
'patch' : 2,
}
def _determine_latest_version(current_version, target, versions):
"""Determine latest version, given `target`, returning the more recent version.
"""
current_version = Version(current_version)
def _parse_versions(versions):
for v in versions:
try:
yield Version(v)
except InvalidVersion:
pass
versions = _parse_versions(versions)
index = SEMVER[target]
ceiling = list(current_version[0:index])
if len(ceiling) == 0:
ceiling = None
else:
ceiling[-1]+=1
ceiling = Version(".".join(map(str, ceiling)))
# We do not want prereleases
versions = SpecifierSet(prereleases=PRERELEASES).filter(versions)
if ceiling is not None:
versions = SpecifierSet(f"<{ceiling}").filter(versions)
return (max(sorted(versions))).raw_version
def _get_latest_version_octave_packages(package, extension, current_version, target):
"""Get latest version and hash from Octave Packages."""
url = "{}/{}.yaml".format(INDEX, package)
yaml = _fetch_page(url)
versions = list(map(lambda pv: pv['id'], yaml['versions']))
version = _determine_latest_version(current_version, target, versions)
try:
releases = [v for v in yaml['versions'] if v['id'] == version]
except KeyError as e:
raise KeyError('Could not find version {} for {}'.format(version, package)) from e
for release in releases:
if release['url'].endswith(extension):
sha256 = release['sha256']
break
else:
sha256 = None
return version, sha256, None
def _get_latest_version_github(package, extension, current_version, target):
def strip_prefix(tag):
return re.sub("^[^0-9]*", "", tag)
def get_prefix(string):
matches = re.findall(r"^([^0-9]*)", string)
return next(iter(matches), "")
# when invoked as an updateScript, UPDATE_NIX_ATTR_PATH will be set
# this allows us to work with packages which live outside of octave-modules
attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH", f"octavePackages.{package}")
try:
homepage = subprocess.check_output(
["nix", "eval", "-f", f"{NIXPGKS_ROOT}/default.nix", "--raw", f"{attr_path}.src.meta.homepage"])\
.decode('utf-8')
except Exception as e:
raise ValueError(f"Unable to determine homepage: {e}")
owner_repo = homepage[len("https://github.com/"):] # remove prefix
owner, repo = owner_repo.split("/")
url = f"https://api.github.com/repos/{owner}/{repo}/releases"
all_releases = _fetch_github(url)
releases = list(filter(lambda x: not x['prerelease'], all_releases))
if len(releases) == 0:
raise ValueError(f"{homepage} does not contain any stable releases")
versions = map(lambda x: strip_prefix(x['tag_name']), releases)
version = _determine_latest_version(current_version, target, versions)
release = next(filter(lambda x: strip_prefix(x['tag_name']) == version, releases))
prefix = get_prefix(release['tag_name'])
try:
sha256 = subprocess.check_output(["nix-prefetch-url", "--type", "sha256", "--unpack", f"{release['tarball_url']}"], stderr=subprocess.DEVNULL)\
.decode('utf-8').strip()
except subprocess.CalledProcessError:
# this may fail if they have both a branch and a tag of the same name; attempt the tag name
tag_url = str(release['tarball_url']).replace("tarball","tarball/refs/tags")
sha256 = subprocess.check_output(["nix-prefetch-url", "--type", "sha256", "--unpack", tag_url], stderr=subprocess.DEVNULL)\
.decode('utf-8').strip()
return version, sha256, prefix
def _get_latest_version_git(package, extension, current_version, target):
"""NOTE: Unimplemented!"""
# attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH", f"octavePackages.{package}")
# try:
# download_url = subprocess.check_output(
# ["nix", "--extra-experimental-features", "nix-command", "eval", "-f", f"{NIXPGKS_ROOT}/default.nix", "--raw", f"{attr_path}.src.url"])\
# .decode('utf-8')
# except Exception as e:
# raise ValueError(f"Unable to determine download link: {e}")
# with tempfile.TemporaryDirectory(prefix=attr_path) as new_clone_location:
# subprocess.run(["git", "clone", download_url, new_clone_location])
# newest_commit = subprocess.check_output(
# ["git" "rev-parse" "$(git branch -r)" "|" "tail" "-n" "1"]).decode('utf-8')
pass
FETCHERS = {
'fetchFromGitHub' : _get_latest_version_github,
'fetchurl' : _get_latest_version_octave_packages,
'fetchgit' : _get_latest_version_git,
}
DEFAULT_SETUPTOOLS_EXTENSION = 'tar.gz'
FORMATS = {
'setuptools' : DEFAULT_SETUPTOOLS_EXTENSION,
}
def _determine_fetcher(text):
# Count occurrences of fetchers.
nfetchers = sum(text.count('src = {}'.format(fetcher)) for fetcher in FETCHERS.keys())
if nfetchers == 0:
raise ValueError("no fetcher.")
elif nfetchers > 1:
raise ValueError("multiple fetchers.")
else:
# Then we check which fetcher to use.
for fetcher in FETCHERS.keys():
if 'src = {}'.format(fetcher) in text:
return fetcher
def _determine_extension(text, fetcher):
"""Determine what extension is used in the expression.
If we use:
- fetchurl, we determine the extension from the url.
- fetchFromGitHub or fetchgit, we simply use `.tar.gz`.
"""
if fetcher == 'fetchurl':
url = _get_unique_value('url', text)
extension = os.path.splitext(url)[1]
elif fetcher == 'fetchFromGitHub' or fetcher == 'fetchgit':
if "fetchSubmodules" in text:
raise ValueError("fetchFromGitHub fetcher doesn't support submodules")
extension = "tar.gz"
return extension
def _update_package(path, target):
# Read the expression
with open(path, 'r') as f:
text = f.read()
# Determine pname. Many files have more than one pname
pnames = _get_values('pname', text)
# Determine version.
version = _get_unique_value('version', text)
# First we check how many fetchers are mentioned.
fetcher = _determine_fetcher(text)
extension = _determine_extension(text, fetcher)
# Attempt a fetch using each pname, e.g. backports-zoneinfo vs backports.zoneinfo
successful_fetch = False
for pname in pnames:
if fetcher == "fetchgit":
logging.warning(f"You must update {pname} MANUALLY!")
return { 'path': path, 'target': target, 'pname': pname,
'old_version': version, 'new_version': version }
try:
new_version, new_sha256, prefix = FETCHERS[fetcher](pname, extension, version, target)
successful_fetch = True
break
except ValueError:
continue
if not successful_fetch:
raise ValueError(f"Unable to find correct package using these pnames: {pnames}")
if new_version == version:
logging.info("Path {}: no update available for {}.".format(path, pname))
return False
elif Version(new_version) <= Version(version):
raise ValueError("downgrade for {}.".format(pname))
if not new_sha256:
raise ValueError("no file available for {}.".format(pname))
text = _replace_value('version', new_version, text)
# upstream hashes are base16-encoded sha256 digests; normalize to SRI to avoid merge conflicts
# SRI hashes have been the default format since Nix 2.4
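# e.g. a 64-character base16 digest becomes "sha256-" followed by the
# 44-character base64 encoding of the same 32 bytes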
sri_hash = subprocess.check_output(["nix", "--extra-experimental-features", "nix-command", "hash", "to-sri", "--type", "sha256", new_sha256]).decode('utf-8').strip()
# fetchers can specify a sha256, or a sri hash
try:
text = _replace_value('sha256', sri_hash, text)
except ValueError:
text = _replace_value('hash', sri_hash, text)
if fetcher == 'fetchFromGitHub':
# in the case of fetchFromGitHub, it's common to see `rev = version;` or `rev = "v${version}";`
# in which no string value is meant to be substituted. However, we can just overwrite the previous value.
regex = r'(rev\s+=\s+[^;]*;)'
regex = re.compile(regex)
matches = regex.findall(text)
n = len(matches)
if n == 0:
raise ValueError("Unable to find rev value for {}.".format(pname))
else:
# forcefully rewrite rev, in case tagging conventions changed for a release
match = matches[0]
text = text.replace(match, f'rev = "refs/tags/{prefix}${{version}}";')
# in case there's no prefix, just rewrite without interpolation
text = text.replace('"${version}";', 'version;')
with open(path, 'w') as f:
f.write(text)
logging.info("Path {}: updated {} from {} to {}".format(path, pname, version, new_version))
result = {
'path' : path,
'target': target,
'pname': pname,
'old_version' : version,
'new_version' : new_version,
#'fetcher' : fetcher,
}
return result
def _update(path, target):
# We need to read and modify a Nix expression.
if os.path.isdir(path):
path = os.path.join(path, 'default.nix')
# If a default.nix does not exist, we quit.
if not os.path.isfile(path):
logging.info("Path {}: does not exist.".format(path))
return False
# If file is not a Nix expression, we quit.
if not path.endswith(".nix"):
logging.info("Path {}: does not end with `.nix`.".format(path))
return False
try:
return _update_package(path, target)
except ValueError as e:
logging.warning("Path {}: {}".format(path, e))
return False
def _commit(path, pname, old_version, new_version, pkgs_prefix="octave: ", **kwargs):
"""Commit result.
"""
msg = f'{pkgs_prefix}{pname}: {old_version} -> {new_version}'
try:
subprocess.check_call([GIT, 'add', path])
subprocess.check_call([GIT, 'commit', '-m', msg])
except subprocess.CalledProcessError as e:
subprocess.check_call([GIT, 'checkout', path])
raise RuntimeError(f'Could not commit {path}') from e
return True
def main():
epilog = """
environment variables:
GITHUB_API_TOKEN\tGitHub API token used when updating github packages
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog)
parser.add_argument('package', type=str, nargs='+')
parser.add_argument('--target', type=str, choices=SEMVER.keys(), default='major')
parser.add_argument('--commit', action='store_true', help='Create a commit for each package update')
parser.add_argument('--use-pkgs-prefix', action='store_true', help='Use octavePackages.${pname}: instead of octave: ${pname}: when making commits')
args = parser.parse_args()
target = args.target
packages = list(map(os.path.abspath, args.package))
logging.info("Updating packages...")
# Use threads to update packages concurrently
with Pool() as p:
results = list(filter(bool, p.map(lambda pkg: _update(pkg, target), packages)))
logging.info("Finished updating packages.")
commit_options = {}
if args.use_pkgs_prefix:
logging.info("Using octavePackages. prefix for commits")
commit_options["pkgs_prefix"] = "octavePackages."
# Commits are created sequentially.
if args.commit:
logging.info("Committing updates...")
# list forces evaluation
list(map(lambda x: _commit(**x, **commit_options), results))
logging.info("Finished committing updates")
count = len(results)
logging.info("{} package(s) updated".format(count))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,12 @@
{ nixpkgs ? import ../.. { }
}:
with nixpkgs;
let
pyEnv = python3.withPackages(ps: with ps; [ packaging requests toolz pyyaml ]);
in
mkShell {
packages = [
pyEnv
nix-prefetch-scripts
];
}

View File

@ -100,7 +100,7 @@ async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, bra
# Git can only handle a single index operation at a time
async with merge_lock:
await check_subprocess('git', 'add', *change['files'], cwd=worktree)
commit_message = '{attrPath}: {oldVersion} {newVersion}'.format(**change)
commit_message = '{attrPath}: {oldVersion} -> {newVersion}'.format(**change)
if 'commitMessage' in change:
commit_message = change['commitMessage']
elif 'commitBody' in change:

View File

@ -221,6 +221,7 @@ with lib.maintainers; {
docs = {
members = [
asymmetric
ryantm
];
scope = "Maintain nixpkgs/NixOS documentation and tools for building it.";
@ -293,6 +294,9 @@ with lib.maintainers; {
zowoq
qbit
];
githubTeams = [
"golang"
];
scope = "Maintain Golang compilers.";
shortName = "Go";
enableFeatureFreezePing = true;
@ -422,13 +426,31 @@ with lib.maintainers; {
shortName = "Linux Kernel";
};
lisp = {
members = [
raskin
lukego
nagy
uthar
];
githubTeams = [
"lisp"
];
scope = "Maintain the Lisp ecosystem.";
shortName = "lisp";
enableFeatureFreezePing = true;
};
llvm = {
members = [
ericson2314
sternenseemann
lovek323
dtzWill
ericson2314
lovek323
primeos
qyliss
raitobezarius
rrbutani
sternenseemann
];
scope = "Maintain LLVM package sets and related packages";
shortName = "LLVM";
@ -714,12 +736,14 @@ with lib.maintainers; {
rust = {
members = [
figsoda
lnl7
mic92
tjni
winter
zowoq
];
githubTeams = [
"rust"
];
scope = "Maintain the Rust compiler toolchain and nixpkgs integration.";
shortName = "Rust";
enableFeatureFreezePing = true;

View File

@ -166,7 +166,11 @@ let
--manpage-urls ${manpageUrls} \
--revision ${lib.escapeShellArg revision} \
./manual.md \
./manual-combined.xml
./manual-combined-pre.xml
xsltproc \
-o manual-combined.xml ${./../../lib/make-options-doc/postprocess-option-descriptions.xsl} \
manual-combined-pre.xml
${linterFunctions}

View File

@ -65,7 +65,7 @@ In addition to numerous new and upgraded packages, this release has the followin
- [ArchiSteamFarm](https://github.com/JustArchiNET/ArchiSteamFarm), a C# application with primary purpose of idling Steam cards from multiple accounts simultaneously. Available as [services.archisteamfarm](#opt-services.archisteamfarm.enable).
- [BaGet](https://loic-sharma.github.io/BaGet/), a lightweight NuGet and symbol server. Available at [services.baget](#opt-services.baget.enable).
- [BaGet](https://loic-sharma.github.io/BaGet/), a lightweight NuGet and symbol server. Available at services.baget.
- [bird-lg](https://github.com/xddxdd/bird-lg-go), a BGP looking glass for Bird Routing. Available as [services.bird-lg](#opt-services.bird-lg.package).

View File

@ -12,12 +12,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- default linux: 5.15 -\> 6.1, all supported kernels available
- systemd has been updated to v253.1, see [the pull request](https://github.com/NixOS/nixpkgs/pull/216826) for more info.
It's recommended to use `nixos-rebuild boot` and `reboot` rather than `nixos-rebuild switch`, since in some rare cases
the switch of a live system might fail.
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what has changed.
- KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what has changed.
- `nixos-rebuild` now supports an extra `--specialisation` option that can be used to change specialisation for `switch` and `test` commands.
- `libxcrypt`, the library providing the `crypt(3)` password hashing function, is now built without support for algorithms not flagged [`strong`](https://github.com/besser82/libxcrypt/blob/v4.4.33/lib/hashes.conf#L48). This affects the availability of password hashing algorithms used for system login (`login(1)`, `passwd(1)`), but also Apache2 Basic-Auth, Samba, OpenLDAP, Dovecot, and [many other packages](https://github.com/search?q=repo%3ANixOS%2Fnixpkgs%20libxcrypt&type=code).
## New Services {#sec-release-23.05-new-services}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -41,12 +47,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- [stevenblack-blocklist](https://github.com/StevenBlack/hosts), a unified hosts file with base extensions for blocking unwanted websites. Available as [networking.stevenblack](options.html#opt-networking.stevenblack.enable).
- [Budgie Desktop](https://github.com/BuddiesOfBudgie/budgie-desktop), a familiar, modern desktop environment. Available as [services.xserver.desktopManager.budgie](options.html#opt-services.xserver.desktopManager.budgie).
- [imaginary](https://github.com/h2non/imaginary), a microservice for high-level image processing that Nextcloud can use to generate previews. Available as [services.imaginary](#opt-services.imaginary.enable).
- [opensearch](https://opensearch.org), a search server alternative to Elasticsearch. Available as [services.opensearch](options.html#opt-services.opensearch.enable).
- [authelia](https://www.authelia.com/), an open-source authentication and authorization server. Available under [services.authelia](options.html#opt-services.authelia.enable).
- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).
- [alertmanager-irc-relay](https://github.com/google/alertmanager-irc-relay), a Prometheus Alertmanager IRC Relay. Available as [services.prometheus.alertmanagerIrcRelay](options.html#opt-services.prometheus.alertmanagerIrcRelay.enable).
- [tts](https://github.com/coqui-ai/TTS), a battle-tested deep learning toolkit for Text-to-Speech. Multiple servers may be configured under [services.tts.servers](#opt-services.tts.servers).
- [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable).
@ -57,6 +69,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [QDMR](https://dm3mat.darc.de/qdmr/), a GUI application and command line tool for programming DMR radios. Available as [programs.qdmr](#opt-programs.qdmr.enable).
- [keyd](https://github.com/rvaiya/keyd), a key remapping daemon for Linux. Available as [services.keyd](#opt-services.keyd.enable).
- [v2rayA](https://v2raya.org), a Linux web GUI client of Project V which supports V2Ray, Xray, SS, SSR, Trojan and Pingtunnel. Available as [services.v2raya](options.html#opt-services.v2raya.enable).
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
@ -65,16 +79,20 @@ In addition to numerous new and upgraded packages, this release has the followin
- [photoprism](https://photoprism.app/), an AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
- [peroxide](https://github.com/ljanyst/peroxide), a fork of the official [ProtonMail bridge](https://github.com/ProtonMail/proton-bridge) that aims to be similar to [Hydroxide](https://github.com/emersion/hydroxide). Available as [services.peroxide](#opt-services.peroxide.enable).
- [autosuspend](https://github.com/languitar/autosuspend), a Python daemon that suspends a system if certain conditions are met (or not met).
- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
- [nimdow](https://github.com/avahe-kellenberger/nimdow), a window manager written in Nim, inspired by dwm.
- [woodpecker-agent](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-agent](#opt-services.woodpecker-agent.enable).
- [woodpecker-agents](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-agents](#opt-services.woodpecker-agents.agents._name_.enable).
- [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable).
- [ReGreet](https://github.com/rharish101/ReGreet), a clean and customizable greeter for greetd. Available as [programs.regreet](#opt-programs.regreet.enable).
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -93,6 +111,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
- `nushell` has been updated to at least version 0.77.0, which includes potential breaking changes in aliases. The old aliases are now available as `old-alias` but it is recommended you migrate to the new format. See [Reworked aliases](https://www.nushell.sh/blog/2023-03-14-nushell_0_77.html#reworked-aliases-breaking-changes-kubouch).
- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.
- The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably `/var/lib/ipfs/config`) and compare after the update.
@ -110,34 +130,46 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
- The option `i18n.inputMethod.fcitx5.enableRimeData` has been removed. Default RIME data is now included in `fcitx5-rime` by default, and can be customized using `fcitx5-rime.override { rimeDataPkgs = [ pkgs.rime-data package2 ... ]; }`
- Kime has been updated from 2.5.6 to 3.0.2 and the `i18n.inputMethod.kime.config` option has been removed. Users should use `daemonModules`, `iconColor`, and `extraConfig` options under `i18n.inputMethod.kime` instead.
- `tut` has been updated from 1.0.34 to 2.0.0, and now uses the TOML format for the configuration file instead of INI. Additional information can be found [here](https://github.com/RasmusLindroth/tut/releases/tag/2.0.0).
- `i3status-rust` has been updated from 0.22.0 to 0.30.5, and this brings many changes to its configuration format. Additional information can be found [here](https://github.com/greshake/i3status-rust/blob/v0.30.0/NEWS.md).
- The `wordpress` derivation no longer contains any builtin plugins or themes. If you need them you have to add them back to prevent your site from breaking. You can find them in `wordpressPackages.{plugins,themes}`.
- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
- `services.xserver.desktopManager.plasma5.excludePackages` has been moved to `environment.plasma5.excludePackages`, for consistency with other Desktop Environments
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
- `fail2ban` has been updated to 1.0.2, which has a few breaking changes compared to 0.11.2 ([changelog for 1.0.1](https://github.com/fail2ban/fail2ban/blob/1.0.1/ChangeLog), [changelog for 1.0.2](https://github.com/fail2ban/fail2ban/blob/1.0.2/ChangeLog))
- Calling `makeSetupHook` without passing a `name` argument is deprecated.
- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.
- The old unsupported version 6.x of the ELK-stack and Elastic beats have been removed. Use OpenSearch instead.
- The `cosmoc` package has been removed. The upstream scripts in `cosmocc` should be used instead.
- Qt 5.12 and 5.14 have been removed, as the corresponding branches have been EOL upstream for a long time. This affected under 10 packages in nixpkgs, largely unmaintained upstream as well; out-of-tree package expressions may need to be updated manually.
- The [services.wordpress.sites.&lt;name&gt;.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.&lt;name&gt;.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name.
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
- `protonmail-bridge` package has been updated to major version 3.
- Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`.
- The `i18n.inputMethod.fcitx` option has been replaced with `i18n.inputMethod.fcitx5` because fcitx 4 `pkgs.fcitx` has been removed.
- In `mastodon` it is now necessary to specify the location of the file containing the `PostgreSQL` database password. The default value of the `services.mastodon.database.passwordFile` parameter has been changed from `/var/lib/mastodon/secrets/db-password` to `null`.
- The `--target-host` and `--build-host` options of `nixos-rebuild` no longer treat the `localhost` value specially; to build on or deploy to the local machine, omit the relevant flag.
@ -146,7 +178,13 @@ In addition to numerous new and upgraded packages, this release has the followin
- Deprecated `xlibsWrapper` transitional package has been removed in favour of direct use of its constituents: `xorg.libX11`, `freetype` and others.
- .NET 5.0 was removed due to being end-of-life, use a newer, supported .NET version - https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core
- The latest available version of Nextcloud is v26 (available as `pkgs.nextcloud26`) which uses PHP 8.2 as interpreter by default. The installation logic is as follows:
- If `system.stateVersion` is >=23.05, `pkgs.nextcloud26` will be installed by default.
- Otherwise, if `system.stateVersion` is >=22.11, `pkgs.nextcloud25` will be installed by default.
- Please note that an upgrade from v24 (or older) to v26 directly is not possible. Please upgrade to `nextcloud25` (or earlier) first. Nextcloud prohibits skipping major versions while upgrading. You can upgrade by declaring [`services.nextcloud.package = pkgs.nextcloud25;`](options.html#opt-services.nextcloud.package).
- It's recommended to use the latest version available (i.e. v26) and to specify that using `services.nextcloud.package`.
- .NET 5.0 and .NET 3.1 were removed due to being end-of-life, use a newer, supported .NET version - https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core
- The iputils package, which is installed by default, no longer provides the
`ninfod`, `rarpd` and `rdisc` tools. See
@ -155,8 +193,24 @@ In addition to numerous new and upgraded packages, this release has the followin
- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use.
- To enable the HTTP3 (QUIC) protocol for a nginx virtual host, set the `quic` attribute on it to true, e.g. `services.nginx.virtualHosts.<name>.quic = true;`.
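  A minimal sketch of such a virtual host (the host name is hypothetical):

  ```nix
  {
    services.nginx.virtualHosts."example.org" = {
      # enable HTTP3 (QUIC) for this virtual host
      quic = true;
    };
  }
  ```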
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
- The `services.pipewire.config` options have been removed, as they have basically never worked correctly. All behavior defined by the default configuration can be overridden with drop-in files as necessary - see [below](#sec-release-23.05-migration-pipewire) for details.
- The catch-all `hardware.video.hidpi.enable` option was removed. Users on high density displays may want to:
- Set `services.xserver.upscaleDefaultCursor` to upscale the default X11 cursor for higher resolutions
- Adjust settings under `fonts.fontconfig` according to preference
- Adjust `console.font` according to preference, though the kernel will generally choose a reasonably sized font
- `services.pipewire.media-session` and the `pipewire-media-session` package have been removed, as they are no longer supported upstream. Users are encouraged to use `services.pipewire.wireplumber` instead.
- The `baget` package and module was removed due to being unmaintained.
- `go-ethereum` package has been updated to v1.11.5 and the `puppeth` command is no longer available as of v1.11.0.
## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -181,6 +235,10 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.openssh.ciphers` to `services.openssh.settings.Ciphers`
- `services.openssh.gatewayPorts` to `services.openssh.settings.GatewayPorts`
- `netbox` was updated to 3.4. NixOS' `services.netbox.package` still defaults to 3.3 if `stateVersion` is earlier than 23.05. Please review upstream's [breaking changes](https://github.com/netbox-community/netbox/releases/tag/v3.4.0), and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.
- `services.netbox` now supports RFC42-style options, through `services.netbox.settings`.
- `services.mastodon` gained a tootctl wrapper named `mastodon-tootctl`, similar to `nextcloud-occ`, which can be executed by any user; it switches to the configured Mastodon user via sudo and sources the required environment variables.
- DocBook option documentation, which has been deprecated since 22.11, will now cause a warning when documentation is built. Out-of-tree modules should migrate to using CommonMark documentation as outlined in [](#sec-option-declarations) to silence this warning.
@ -200,6 +258,8 @@ In addition to numerous new and upgraded packages, this release has the followin
The `{aclUse,superUser,disableActions}` attributes have been renamed; `pluginsConfig` now also accepts an attribute set of booleans; passing plain PHP is deprecated.
The same applies to `acl`, which now also accepts structured settings.
- The `zsh` package changes the way to set environment variables on NixOS systems where `programs.zsh.enable` equals `false`. It now sources `/etc/set-environment` when reading the system-level `zshenv` file. Before, it sourced `/etc/profile` when reading the system-level `zprofile` file.
- The `wordpress` service now takes configuration via the `services.wordpress.sites.<name>.settings` attribute set, `extraConfig` is still available to append additional text to `wp-config.php`.
- To reduce closure size, the `nixos/modules/profiles/minimal.nix` profile disables the installation of documentation and manuals, as well as the `logrotate` and `udisks2` services.
@ -213,8 +273,12 @@ In addition to numerous new and upgraded packages, this release has the followin
If undesired, the old behavior can be restored by overriding the builders with
`{ installDocumentation = false; }`.
- The new option `networking.nftables.checkRuleset` controls whether the ruleset is checked for syntax or not during build. It is `true` by default. The check might fail because it is in a sandbox environment. To circumvent this, the ruleset file can be edited using the `networking.nftables.preCheckRuleset` option.
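  A sketch of the two options together; the `sed` rewrite is hypothetical and depends on what your ruleset references:

  ```nix
  {
    networking.nftables.checkRuleset = true;
    # rewrite entries the build sandbox cannot resolve before the check runs (hypothetical example)
    networking.nftables.preCheckRuleset = ''
      sed -i 's/skgid mygroup/skgid nogroup/g' ruleset.conf
    '';
  }
  ```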
- `mastodon` now supports connection to a remote `PostgreSQL` database.
- `nextcloud` has an option to enable SSE-C in S3.
- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`.
Before upgrading, read the release notes for PeerTube:
- [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0)
@ -223,7 +287,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.chronyd` is now started with additional systemd sandbox/hardening options for better security.
- PostgreSQL has opt-in support for [JIT compilation](https://www.postgresql.org/docs/current/jit-reason.html). It can be enabled like this:
```nix
{
services.postgresql = {
enable = true;
enableJIT = true;
};
}
```
- The `services.dhcpcd` service no longer solicits or accepts IPv6 Router Advertisements on interfaces that use static IPv6 addresses.
If your network uses both IPv6 unique local addresses (ULA) and global IPv6 address auto-configuration with SLAAC, you must add the parameter `networking.dhcpcd.IPv6rs = true;`.
- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
@ -286,16 +361,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `unifi-poller` package and corresponding NixOS module have been renamed to `unpoller` to match upstream.
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
- The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, you can set this setting to `server`, otherwise if you wish to use an exit node you can set this setting to `client`. The strict RPF warning has been removed as the RPF will be loosened automatically based on the value of this setting.
- `openjdk` from version 11 and above is no longer built with `openjfx` (i.e. JavaFX) support by default. You can re-enable it by overriding, e.g.: `openjdk11.override { enableJavaFX = true; };`.
- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
- `tvbrowser-bin` was removed, and now `tvbrowser` is built from source.
- `nixos-version` now accepts `--configuration-revision` to display more information about the current generation revision
- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.
@ -303,3 +374,89 @@ In addition to numerous new and upgraded packages, this release has the followin
- The option `services.prometheus.exporters.pihole.interval` does not exist anymore and has been removed.
- `k3s` can now be configured with an EnvironmentFile for its systemd service, allowing secrets to be provided without ending up in the Nix Store.
- `boot.initrd.luks.devices.<name>` has a new `tryEmptyPassphrase` option; this is useful for OEMs who need to install an encrypted disk whose passphrase can be set later.
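  A minimal sketch (device name and path are hypothetical):

  ```nix
  {
    boot.initrd.luks.devices."crypted" = {
      device = "/dev/sda2";      # hypothetical device path
      tryEmptyPassphrase = true; # try an empty passphrase before prompting
    };
  }
  ```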
- Lisp gained a [manual section](https://nixos.org/manual/nixpkgs/stable/#lisp), documenting a new and backwards incompatible interface. The previous interface will be removed in a future release.
## Detailed migration information {#sec-release-23.05-migration}
### Pipewire configuration overrides {#sec-release-23.05-migration-pipewire}
#### Why this change? {#sec-release-23.05-migration-pipewire-why}
The Pipewire config semantics don't really match the NixOS module semantics, so it's extremely awkward to override the default config, especially when lists are involved. Vendoring the configuration files in nixpkgs also creates unnecessary maintenance overhead.
Also, upstream added a lot of accommodations to allow doing most of the things you'd want to do with a config edit in better ways.
#### Migrating your configuration {#sec-release-23.05-migration-pipewire-how}
Compare your settings to [the defaults](https://gitlab.freedesktop.org/pipewire/pipewire/-/tree/master/src/daemon) and identify where your configuration differs from them.
Then, create a drop-in JSON file in `/etc/pipewire/<config file name>.d/99-custom.conf` (the actual filename can be anything) and migrate your changes to it according to the following sections.
Repeat for every file you've modified, changing the directory name accordingly.
#### Things you can just copy over {#sec-release-23.05-migration-pipewire-simple}
If you are:
- setting properties via `*.properties`
- loading a new module to `context.modules`
- creating new objects with `context.objects`
- declaring SPA libraries with `context.spa-libs`
- running custom commands with `context.exec`
- adding new rules with `*.rules`
- running custom PulseAudio commands with `pulse.cmd`
Simply move the definitions into the drop-in.
Note that the use of `context.exec` is not recommended and other methods of running your program are likely a better option.
```json
{
"context.properties": {
"your.property.name": "your.property.value"
},
"context.modules": [
{ "name": "libpipewire-module-my-cool-thing" }
],
"context.objects": [
{ "factory": { ... } }
],
"alsa.rules": [
{ "matches: { ... }, "actions": { ... } }
]
}
```
#### Removing a module from `context.modules` {#sec-release-23.05-migration-pipewire-removing-modules}
Look for an option to disable it via `context.properties` (`"module.x11.bell": "false"` is likely the most common use case here).
If one is not available, proceed to [Nuclear option](#sec-release-23.05-migration-pipewire-nuclear).
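For example, a drop-in that disables the X11 bell this way might look as follows (file name arbitrary, as above):

```json
{
  "context.properties": {
    "module.x11.bell": "false"
  }
}
```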
#### Modifying a module's parameters in `context.modules` {#sec-release-23.05-migration-pipewire-modifying-modules}
For most modules (e.g. `libpipewire-module-rt`) it's enough to load the module again with the new arguments, e.g.:
```json
{
"context.modules": [
{
"name": "libpipewire-module-rt",
"args": {
"rt.prio": 90
}
}
]
}
```
Note that `module-rt` specifically will generally use the highest values available by default, so setting limits on the `pipewire` systemd service is preferable to reloading.
If reloading the module is not an option, proceed to [Nuclear option](#sec-release-23.05-migration-pipewire-nuclear).
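As a sketch of the limits approach mentioned above (assuming PipeWire runs as the `pipewire` user unit; the value is illustrative):

```nix
{
  systemd.user.services.pipewire.serviceConfig = {
    # cap realtime priority instead of reloading libpipewire-module-rt
    LimitRTPRIO = 50;
  };
}
```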
#### Nuclear option {#sec-release-23.05-migration-pipewire-nuclear}
If all else fails, you can still manually copy the contents of the default configuration file
from `${pkgs.pipewire.lib}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
However, this should be done only as a last resort. Please talk to the Pipewire maintainers if you ever need to do this.

View File

@ -24,7 +24,7 @@ in rec {
}
''
name=${shellEscape name}
mkdir -p "$out/$(dirname "$name")"
mkdir -p "$out/$(dirname -- "$name")"
echo -n "$text" > "$out/$name"
''
else

View File

@ -179,7 +179,6 @@ class Driver:
start_command=cmd,
name=name,
keep_vm_state=args.get("keep_vm_state", False),
allow_reboot=args.get("allow_reboot", False),
)
def serial_stdout_on(self) -> None:

View File

@ -144,7 +144,7 @@ class StartCommand:
self,
monitor_socket_path: Path,
shell_socket_path: Path,
allow_reboot: bool = False, # TODO: unused, legacy?
allow_reboot: bool = False,
) -> str:
display_opts = ""
display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
@ -152,16 +152,14 @@ class StartCommand:
display_opts += " -nographic"
# qemu options
qemu_opts = ""
qemu_opts += (
""
if allow_reboot
else " -no-reboot"
qemu_opts = (
" -device virtio-serial"
" -device virtconsole,chardev=shell"
" -device virtio-rng-pci"
" -serial stdio"
)
if not allow_reboot:
qemu_opts += " -no-reboot"
# TODO: qemu script already captures this env variable, legacy?
qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
@ -195,9 +193,10 @@ class StartCommand:
shared_dir: Path,
monitor_socket_path: Path,
shell_socket_path: Path,
allow_reboot: bool,
) -> subprocess.Popen:
return subprocess.Popen(
self.cmd(monitor_socket_path, shell_socket_path),
self.cmd(monitor_socket_path, shell_socket_path, allow_reboot),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
@ -312,7 +311,6 @@ class Machine:
start_command: StartCommand
keep_vm_state: bool
allow_reboot: bool
process: Optional[subprocess.Popen]
pid: Optional[int]
@ -337,13 +335,11 @@ class Machine:
start_command: StartCommand,
name: str = "machine",
keep_vm_state: bool = False,
allow_reboot: bool = False,
callbacks: Optional[List[Callable]] = None,
) -> None:
self.out_dir = out_dir
self.tmp_dir = tmp_dir
self.keep_vm_state = keep_vm_state
self.allow_reboot = allow_reboot
self.name = name
self.start_command = start_command
self.callbacks = callbacks if callbacks is not None else []
@ -741,9 +737,10 @@ class Machine:
self.connected = True
def screenshot(self, filename: str) -> None:
word_pattern = re.compile(r"^\w+$")
if word_pattern.match(filename):
filename = os.path.join(self.out_dir, f"{filename}.png")
if "." not in filename:
filename += ".png"
if "/" not in filename:
filename = os.path.join(self.out_dir, filename)
tmp = f"{filename}.ppm"
with self.nested(
@ -874,7 +871,7 @@ class Machine:
self.process.stdin.write(chars.encode())
self.process.stdin.flush()
def start(self) -> None:
def start(self, allow_reboot: bool = False) -> None:
if self.booted:
return
@ -898,6 +895,7 @@ class Machine:
self.shared_dir,
self.monitor_path,
self.shell_path,
allow_reboot,
)
self.monitor, _ = monitor_socket.accept()
self.shell, _ = shell_socket.accept()
@ -946,6 +944,15 @@ class Machine:
self.send_monitor_command("quit")
self.wait_for_shutdown()
def reboot(self) -> None:
"""Press Ctrl+Alt+Delete in the guest.
Prepares the machine to be reconnected, which is useful if the
machine was started with `allow_reboot = True`.
"""
self.send_key("ctrl-alt-delete")
self.connected = False
def wait_for_x(self) -> None:
"""Wait until it is possible to connect to the X server. Note that
testing the existence of /tmp/.X11-unix/X0 is insufficient.

View File

@ -21,7 +21,7 @@ let
# Sadly, systemd-vconsole-setup doesn't support binary keymaps.
vconsoleConf = pkgs.writeText "vconsole.conf" ''
KEYMAP=${cfg.keyMap}
FONT=${cfg.font}
${optionalString (cfg.font != null) "FONT=${cfg.font}"}
'';
consoleEnv = kbd: pkgs.buildEnv {
@ -45,14 +45,19 @@ in
};
font = mkOption {
type = with types; either str path;
default = "Lat2-Terminus16";
type = with types; nullOr (either str path);
default = null;
example = "LatArCyrHeb-16";
description = mdDoc ''
The font used for the virtual consoles. Leave empty to use
whatever the {command}`setfont` program considers the
default font.
Can be either a font name or a path to a PSF font file.
The font used for the virtual consoles.
Can be `null`, a font name, or a path to a PSF font file.
Use `null` to let the kernel choose a built-in font.
The default is 8x16, and, as of Linux 5.3, Terminus 32 bold for display
resolutions of 2560x1080 and higher.
These fonts cover the [IBM437][] character set.
[IBM437]: https://en.wikipedia.org/wiki/Code_page_437
'';
};
@ -151,7 +156,7 @@ in
printf "\033%%${if isUnicode then "G" else "@"}" >> /dev/console
loadkmap < ${optimizedKeymap}
${optionalString cfg.earlySetup ''
${optionalString (cfg.earlySetup && cfg.font != null) ''
setfont -C /dev/console $extraUtils/share/consolefonts/font.psf
''}
'');
@ -168,7 +173,7 @@ in
"${config.boot.initrd.systemd.package.kbd}/bin/setfont"
"${config.boot.initrd.systemd.package.kbd}/bin/loadkeys"
"${config.boot.initrd.systemd.package.kbd.gzip}/bin/gzip" # Fonts and keyboard layouts are compressed
] ++ optionals (hasPrefix builtins.storeDir cfg.font) [
] ++ optionals (cfg.font != null && hasPrefix builtins.storeDir cfg.font) [
"${cfg.font}"
] ++ optionals (hasPrefix builtins.storeDir cfg.keyMap) [
"${cfg.keyMap}"
@ -195,7 +200,7 @@ in
];
})
(mkIf (cfg.earlySetup && !config.boot.initrd.systemd.enable) {
(mkIf (cfg.earlySetup && cfg.font != null && !config.boot.initrd.systemd.enable) {
boot.initrd.extraUtilsCommands = ''
mkdir -p $out/share/consolefonts
${if substring 0 1 cfg.font == "/" then ''

View File

@ -7,6 +7,19 @@ This module generates a package containing configuration files and link it in /e
Fontconfig reads files in folder name / file name order, so the number prepended to the configuration file name decide the order of parsing.
Low number means high priority.
NOTE: Please take extreme care when adjusting the default settings of this module.
People care a lot, and I mean A LOT, about their font rendering, and you will be
The Person That Broke It if it changes in a way people don't like.
See prior art:
- https://github.com/NixOS/nixpkgs/pull/194594
- https://github.com/NixOS/nixpkgs/pull/222236
- https://github.com/NixOS/nixpkgs/pull/222689
And do not repeat our mistakes.
- @K900, March 2023
*/
{ config, pkgs, lib, ... }:
@ -218,6 +231,8 @@ let
paths = cfg.confPackages;
ignoreCollisions = true;
};
fontconfigNote = "Consider manually configuring fonts.fontconfig according to personal preference.";
in
{
imports = [
@ -229,6 +244,8 @@ in
(mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options")
(mkRemovedOptionModule [ "hardware" "video" "hidpi" "enable" ] fontconfigNote)
(mkRemovedOptionModule [ "fonts" "optimizeForVeryHighDPI" ] fontconfigNote)
] ++ lib.forEach [ "enable" "substitutions" "preset" ]
(opt: lib.mkRemovedOptionModule [ "fonts" "fontconfig" "ultimate" "${opt}" ] ''
The fonts.fontconfig.ultimate module and configuration is obsolete.

View File

@ -3,29 +3,7 @@
with lib;
let
# A scalable variant of the X11 "core" cursor
#
# If not running a fancy desktop environment, the cursor is likely set to
# the default `cursor.pcf` bitmap font. This is 17px wide, so it's very
# small and almost invisible on 4K displays.
fontcursormisc_hidpi = pkgs.xorg.fontxfree86type1.overrideAttrs (old:
let
# The scaling constant is 230/96: the scalable `left_ptr` glyph at
# about 23 points is rendered as 17px, on a 96dpi display.
# Note: the XLFD font size is in decipoints.
size = 2.39583 * config.services.xserver.dpi;
sizeString = builtins.head (builtins.split "\\." (toString size));
in
{
postInstall = ''
alias='cursor -xfree86-cursor-medium-r-normal--0-${sizeString}-0-0-p-0-adobe-fontspecific'
echo "$alias" > $out/lib/X11/fonts/Type1/fonts.alias
'';
});
hasHidpi =
config.hardware.video.hidpi.enable &&
config.services.xserver.dpi != null;
cfg = config.fonts;
defaultFonts =
[ pkgs.dejavu_fonts
@ -35,14 +13,7 @@ let
pkgs.unifont
pkgs.noto-fonts-emoji
];
defaultXFonts =
[ (if hasHidpi then fontcursormisc_hidpi else pkgs.xorg.fontcursormisc)
pkgs.xorg.fontmiscmisc
];
in
{
imports = [
(mkRemovedOptionModule [ "fonts" "enableCoreFonts" ] "Use fonts.fonts = [ pkgs.corefonts ]; instead.")
@ -68,14 +39,9 @@ in
and families and reasonable coverage of Unicode.
'';
};
};
};
config = mkMerge [
{ fonts.fonts = mkIf config.fonts.enableDefaultFonts defaultFonts; }
{ fonts.fonts = mkIf config.services.xserver.enable defaultXFonts; }
];
config = { fonts.fonts = mkIf cfg.enableDefaultFonts defaultFonts; };
}

View File

@ -97,6 +97,7 @@ in
};
config = mkIf (cfg.provider != "libc") {
boot.kernel.sysctl."vm.max_map_count" = mkIf (cfg.provider == "graphene-hardened") (mkDefault 1048576);
environment.etc."ld-nix.so.preload".text = ''
${providerLibPath}
'';

View File

@ -132,13 +132,13 @@ in
exit 1
''
else configText;
environment.systemPackages = [ cfg.package ];
}
(mkIf cfg.enable {
networking.resolvconf.package = pkgs.openresolv;
environment.systemPackages = [ cfg.package ];
systemd.services.resolvconf = {
description = "resolvconf update";

View File

@ -539,7 +539,9 @@ in {
###### implementation
config = {
config = let
cryptSchemeIdPatternGroup = "(${lib.concatStringsSep "|" pkgs.libxcrypt.enabledCryptSchemeIds})";
in {
users.users = {
root = {
@ -601,15 +603,16 @@ in {
text = ''
users=()
while IFS=: read -r user hash tail; do
if [[ "$hash" = "$"* && ! "$hash" =~ ^\$(y|gy|7|2b|2y|2a|6)\$ ]]; then
if [[ "$hash" = "$"* && ! "$hash" =~ ^\''$${cryptSchemeIdPatternGroup}\$ ]]; then
users+=("$user")
fi
done </etc/shadow
if (( "''${#users[@]}" )); then
echo "
WARNING: The following user accounts rely on password hashes that will
be removed in NixOS 23.05. They should be renewed as soon as possible."
WARNING: The following user accounts rely on password hashing algorithms
that have been removed. They need to be renewed as soon as possible, as
they do prevent their users from logging in."
printf ' - %s\n' "''${users[@]}"
fi
'';
@ -699,7 +702,20 @@ in {
users.groups.${user.name} = {};
'';
}
]
] ++ (map (shell: {
assertion = (user.shell == pkgs.${shell}) -> (config.programs.${shell}.enable == true);
message = ''
users.users.${user.name}.shell is set to ${shell}, but
programs.${shell}.enable is not true. This will cause the ${shell}
shell to lack the basic nix directories in its PATH and might make
logging in as that user impossible. You can fix it with:
programs.${shell}.enable = true;
'';
}) [
"fish"
"xonsh"
"zsh"
])
));
warnings =
@ -716,9 +732,10 @@ in {
let
sep = "\\$";
base64 = "[a-zA-Z0-9./]+";
id = "[a-z0-9-]+";
id = cryptSchemeIdPatternGroup;
name = "[a-z0-9-]+";
value = "[a-zA-Z0-9/+.-]+";
options = "${id}(=${value})?(,${id}=${value})*";
options = "${name}(=${value})?(,${name}=${value})*";
scheme = "${id}(${sep}${options})?";
content = "${base64}${sep}${base64}(${sep}${base64})?";
mcf = "^${sep}${scheme}${sep}${content}$";

View File

@ -82,12 +82,30 @@ in
{command}`cat /sys/class/block/zram*/comp_algorithm`
'';
};
writebackDevice = lib.mkOption {
default = null;
example = "/dev/zvol/tarta-zoot/swap-writeback";
type = lib.types.nullOr lib.types.path;
description = lib.mdDoc ''
Write incompressible pages to this device,
as there's no gain from keeping them in RAM.
'';
};
};
};
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = cfg.writebackDevice == null || cfg.swapDevices <= 1;
message = "A single writeback device cannot be shared among multiple zram devices";
}
];
system.requiredKernelConfig = with config.lib.kernelConfig; [
(isModule "ZRAM")
];
@ -112,6 +130,8 @@ in
zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
compression-algorithm = cfg.algorithm;
swap-priority = cfg.priority;
} // lib.optionalAttrs (cfg.writebackDevice != null) {
writeback-device = cfg.writebackDevice;
};
})
devices));

View File

@ -65,7 +65,7 @@ let
};
};
filterDTBs = src: if isNull cfg.filter
filterDTBs = src: if cfg.filter == null
then "${src}/dtbs"
else
pkgs.runCommand "dtbs-filtered" {} ''
@ -93,8 +93,8 @@ let
# Fill in `dtboFile` for each overlay if not set already.
# Existence of one of these is guarded by assertion below
withDTBOs = xs: flip map xs (o: o // { dtboFile =
if isNull o.dtboFile then
if !isNull o.dtsFile then compileDTS o.name o.dtsFile
if o.dtboFile == null then
if o.dtsFile != null then compileDTS o.name o.dtsFile
else compileDTS o.name (pkgs.writeText "dts" o.dtsText)
else o.dtboFile; } );
@ -181,7 +181,7 @@ in
config = mkIf (cfg.enable) {
assertions = let
invalidOverlay = o: isNull o.dtsFile && isNull o.dtsText && isNull o.dtboFile;
invalidOverlay = o: (o.dtsFile == null) && (o.dtsText == null) && (o.dtboFile == null);
in lib.singleton {
assertion = lib.all (o: !invalidOverlay o) cfg.overlays;
message = ''

View File

@ -22,6 +22,6 @@ in
};
config = mkIf cfg.enable {
services.udev.packages = [ pkgs.nitrokey-udev-rules ];
services.udev.packages = [ pkgs.libnitrokey ];
};
}

View File

@ -69,21 +69,50 @@ in
package = mkOption {
type = types.package;
internal = true;
default = cfg.mesaPackage;
description = lib.mdDoc ''
The package that provides the OpenGL implementation.
The default is Mesa's drivers which should cover all OpenGL-capable
hardware. If you want to use another Mesa version, adjust
{option}`mesaPackage`.
'';
};
package32 = mkOption {
type = types.package;
internal = true;
default = cfg.mesaPackage32;
description = lib.mdDoc ''
The package that provides the 32-bit OpenGL implementation on
64-bit systems. Used when {option}`driSupport32Bit` is
set.
Same as {option}`package` but for the 32-bit OpenGL implementation on
64-bit systems. Used when {option}`driSupport32Bit` is set.
'';
};
mesaPackage = mkOption {
type = types.package;
default = pkgs.mesa;
defaultText = literalExpression "pkgs.mesa";
example = literalExpression "pkgs.mesa_22";
description = lib.mdDoc ''
The Mesa driver package used for rendering support on the system.
You should only need to adjust this if you require a newer Mesa
version for your hardware or because you need to patch a bug.
'';
apply = mesa: mesa.drivers or (throw "`mesa` package must have a `drivers` output.");
};
mesaPackage32 = mkOption {
type = types.package;
default = pkgs.pkgsi686Linux.mesa;
defaultText = literalExpression "pkgs.pkgsi686Linux.mesa";
example = literalExpression "pkgs.pkgsi686Linux.mesa_22";
description = lib.mdDoc ''
Same as {option}`mesaPackage` but for the 32-bit Mesa on 64-bit
systems. Used when {option}`driSupport32Bit` is set.
'';
apply = mesa: mesa.drivers or (throw "`mesa` package must have a `drivers` output.");
};
extraPackages = mkOption {
type = types.listOf types.package;
default = [];
@ -97,7 +126,6 @@ in
:::
'';
};
extraPackages32 = mkOption {
type = types.listOf types.package;
default = [];
@ -153,9 +181,6 @@ in
environment.sessionVariables.LD_LIBRARY_PATH = mkIf cfg.setLdLibraryPath
([ "/run/opengl-driver/lib" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/lib");
hardware.opengl.package = mkDefault pkgs.mesa.drivers;
hardware.opengl.package32 = mkDefault pkgs.pkgsi686Linux.mesa.drivers;
boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
};
}

View File

@ -1,24 +0,0 @@
{ lib, pkgs, config, ...}:
with lib;
{
options.hardware.video.hidpi.enable = mkEnableOption (lib.mdDoc "Font/DPI configuration optimized for HiDPI displays");
config = mkIf config.hardware.video.hidpi.enable {
console.font = lib.mkDefault "${pkgs.terminus_font}/share/consolefonts/ter-v32n.psf.gz";
# Needed when typing in passwords for full disk encryption
console.earlySetup = mkDefault true;
boot.loader.systemd-boot.consoleMode = mkDefault "1";
# Grayscale anti-aliasing for fonts
fonts.fontconfig.antialias = mkDefault true;
fonts.fontconfig.subpixel = {
rgba = mkDefault "none";
lcdfilter = mkDefault "none";
};
# TODO Find reasonable defaults X11 & wayland
};
}

View File

@ -26,6 +26,8 @@ let
nvidiaPersistencedEnabled = cfg.nvidiaPersistenced;
nvidiaSettings = cfg.nvidiaSettings;
busIDType = types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
in
{
@ -462,8 +464,7 @@ in
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
# proprietary driver is not compiled with support for X86_KERNEL_IBT
++ optional (!cfg.open && config.boot.kernelPackages.kernel.kernelAtLeast "6.2") "ibt=off";
++ optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
services.udev.extraRules =
''

View File

@ -9,7 +9,7 @@ than there are keys on the keyboard.
The following input methods are available in NixOS:
- IBus: The intelligent input bus.
- Fcitx: A customizable lightweight input method.
- Fcitx5: The next generation of fcitx, addons (including engines, dictionaries, skins) can be added using `i18n.inputMethod.fcitx5.addons`.
- Nabi: A Korean input method based on XIM.
- Uim: The universal input method, a library with an XIM bridge.
- Hime: An extremely easy-to-use input method framework.
@ -67,38 +67,40 @@ application in the Nix store. The `glib` packages must
match exactly. If they do not, uninstalling and reinstalling the
application is a likely fix.
## Fcitx {#module-services-input-methods-fcitx}
## Fcitx5 {#module-services-input-methods-fcitx}
Fcitx is an input method framework with extension support. It has three
Fcitx5 is an input method framework with extension support. It has three
built-in input method engines: Pinyin, QuWei and table-based input methods.
The following snippet can be used to configure Fcitx:
```
i18n.inputMethod = {
enabled = "fcitx";
fcitx.engines = with pkgs.fcitx-engines; [ mozc hangul m17n ];
enabled = "fcitx5";
fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
};
```
`i18n.inputMethod.fcitx.engines` is optional and can be
used to add extra Fcitx engines.
`i18n.inputMethod.fcitx5.addons` is optional and can be
used to add extra Fcitx5 addons.
Available extra Fcitx engines are:
Available extra Fcitx5 addons are:
- Anthy (`fcitx-engines.anthy`): Anthy is a system for
- Anthy (`fcitx5-anthy`): Anthy is a system for
Japanese input method. It converts Hiragana text to Kana Kanji mixed text.
- Chewing (`fcitx-engines.chewing`): Chewing is an
- Chewing (`fcitx5-chewing`): Chewing is an
intelligent Zhuyin input method. It is one of the most popular input
methods among Traditional Chinese Unix users.
- Hangul (`fcitx-engines.hangul`): Korean input method.
- Unikey (`fcitx-engines.unikey`): Vietnamese input method.
- m17n (`fcitx-engines.m17n`): m17n is an input method that
- Hangul (`fcitx5-hangul`): Korean input method.
- Unikey (`fcitx5-unikey`): Vietnamese input method.
- m17n (`fcitx5-m17n`): m17n is an input method that
uses input methods and corresponding icons in the m17n database.
- mozc (`fcitx-engines.mozc`): A Japanese input method from
- mozc (`fcitx5-mozc`): A Japanese input method from
Google.
- table-others (`fcitx-engines.table-others`): Various
- table-others (`fcitx5-table-other`): Various
table-based input methods.
- chinese-addons (`fcitx5-chinese-addons`): Various Chinese input methods.
- rime (`fcitx5-rime`): RIME support for fcitx5.
## Nabi {#module-services-input-methods-nabi}

View File

@ -29,9 +29,9 @@ in
options.i18n = {
inputMethod = {
enabled = mkOption {
type = types.nullOr (types.enum [ "ibus" "fcitx" "fcitx5" "nabi" "uim" "hime" "kime" ]);
type = types.nullOr (types.enum [ "ibus" "fcitx5" "nabi" "uim" "hime" "kime" ]);
default = null;
example = "fcitx";
example = "fcitx5";
description = lib.mdDoc ''
Select the enabled input method. An input method is software for entering symbols that are not available on standard input devices.
@ -40,7 +40,6 @@ in
Currently the following input methods are available in NixOS:
- ibus: The intelligent input bus; extra input engines can be added using `i18n.inputMethod.ibus.engines`.
- fcitx: A customizable lightweight input method, extra input engines can be added using `i18n.inputMethod.fcitx.engines`.
- fcitx5: The next generation of fcitx; addons (including engines, dictionaries, and skins) can be added using `i18n.inputMethod.fcitx5.addons`.
- nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.
- uim: The universal input method, a library with an XIM bridge. uim mainly supports Chinese, Japanese, and Korean.

View File

@ -1,46 +0,0 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.i18n.inputMethod.fcitx;
fcitxPackage = pkgs.fcitx.override { plugins = cfg.engines; };
fcitxEngine = types.package // {
name = "fcitx-engine";
check = x: (lib.types.package.check x) && (attrByPath ["meta" "isFcitxEngine"] false x);
};
in
{
options = {
i18n.inputMethod.fcitx = {
engines = mkOption {
type = with types; listOf fcitxEngine;
default = [];
example = literalExpression "with pkgs.fcitx-engines; [ mozc hangul ]";
description =
let
enginesDrv = filterAttrs (const isDerivation) pkgs.fcitx-engines;
engines = concatStringsSep ", "
(map (name: "`${name}`") (attrNames enginesDrv));
in
lib.mdDoc "Enabled Fcitx engines. Available engines are: ${engines}.";
};
};
};
config = mkIf (config.i18n.inputMethod.enabled == "fcitx") {
i18n.inputMethod.package = fcitxPackage;
environment.variables = {
GTK_IM_MODULE = "fcitx";
QT_IM_MODULE = "fcitx";
XMODIFIERS = "@im=fcitx";
};
services.xserver.displayManager.sessionCommands = "${fcitxPackage}/bin/fcitx";
};
# uses attributes of the linked package
meta.buildDocsInSandbox = false;
}

View File

@ -5,10 +5,9 @@ with lib;
let
im = config.i18n.inputMethod;
cfg = im.fcitx5;
addons = cfg.addons ++ optional cfg.enableRimeData pkgs.rime-data;
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit addons; };
whetherRimeDataDir = any (p: p.pname == "fcitx5-rime") cfg.addons;
in {
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit (cfg) addons; };
in
{
options = {
i18n.inputMethod.fcitx5 = {
addons = mkOption {
@ -19,30 +18,23 @@ in {
Enabled Fcitx5 addons.
'';
};
enableRimeData = mkEnableOption (lib.mdDoc "default rime-data with fcitx5-rime");
};
};
imports = [
(mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx5" "enableRimeData" ] ''
RIME data is now included in `fcitx5-rime` by default, and can be customized using `fcitx5-rime.override { rimeDataPkgs = ...; }`
'')
];
config = mkIf (im.enabled == "fcitx5") {
i18n.inputMethod.package = fcitx5Package;
environment = mkMerge [{
variables = {
GTK_IM_MODULE = "fcitx";
QT_IM_MODULE = "fcitx";
XMODIFIERS = "@im=fcitx";
QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
};
}
(mkIf whetherRimeDataDir {
pathsToLink = [
"/share/rime-data"
];
variables = {
NIX_RIME_DATA_DIR = "/run/current-system/sw/share/rime-data";
};
})];
environment.variables = {
GTK_IM_MODULE = "fcitx";
QT_IM_MODULE = "fcitx";
XMODIFIERS = "@im=fcitx";
QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
};
};
}
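
With `enableRimeData` removed, RIME data ships with `fcitx5-rime` itself and is customized by overriding the addon. A sketch following the removal message above; using `rime-data` as the replacement package is an assumption here:

```
i18n.inputMethod = {
  enabled = "fcitx5";
  fcitx5.addons = with pkgs; [
    (fcitx5-rime.override { rimeDataPkgs = [ rime-data ]; })
  ];
};
```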

View File

@ -21,6 +21,9 @@ with lib;
# ISO naming.
isoImage.isoName = "${config.isoImage.isoBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.iso";
# BIOS booting
isoImage.makeBiosBootable = true;
# EFI booting
isoImage.makeEfiBootable = true;

View File

@ -535,10 +535,17 @@ in
'';
};
isoImage.makeBiosBootable = mkOption {
default = false;
description = lib.mdDoc ''
Whether the ISO image should be a BIOS-bootable disk.
'';
};
isoImage.makeEfiBootable = mkOption {
default = false;
description = lib.mdDoc ''
Whether the ISO image should be an efi-bootable volume.
Whether the ISO image should be an EFI-bootable volume.
'';
};
@ -693,7 +700,7 @@ in
boot.loader.grub.enable = false;
environment.systemPackages = [ grubPkgs.grub2 grubPkgs.grub2_efi ]
++ optional canx86BiosBoot pkgs.syslinux
++ optional (config.isoImage.makeBiosBootable && canx86BiosBoot) pkgs.syslinux
;
# In stage 1 of the boot, mount the CD as the root FS by label so
@ -744,7 +751,7 @@ in
{ source = pkgs.writeText "version" config.system.nixos.label;
target = "/version.txt";
}
] ++ optionals canx86BiosBoot [
] ++ optionals (config.isoImage.makeBiosBootable && canx86BiosBoot) [
{ source = config.isoImage.splashImage;
target = "/isolinux/background.png";
}
@ -771,7 +778,7 @@ in
{ source = config.isoImage.efiSplashImage;
target = "/EFI/boot/efi-background.png";
}
] ++ optionals (config.boot.loader.grub.memtest86.enable && canx86BiosBoot) [
] ++ optionals (config.boot.loader.grub.memtest86.enable && config.isoImage.makeBiosBootable && canx86BiosBoot) [
{ source = "${pkgs.memtest86plus}/memtest.bin";
target = "/boot/memtest.bin";
}
@ -786,10 +793,10 @@ in
# Create the ISO image.
system.build.isoImage = pkgs.callPackage ../../../lib/make-iso9660-image.nix ({
inherit (config.isoImage) isoName compressImage volumeID contents;
bootable = canx86BiosBoot;
bootable = config.isoImage.makeBiosBootable && canx86BiosBoot;
bootImage = "/isolinux/isolinux.bin";
syslinux = if canx86BiosBoot then pkgs.syslinux else null;
} // optionalAttrs (config.isoImage.makeUsbBootable && canx86BiosBoot) {
syslinux = if config.isoImage.makeBiosBootable && canx86BiosBoot then pkgs.syslinux else null;
} // optionalAttrs (config.isoImage.makeUsbBootable && config.isoImage.makeBiosBootable && canx86BiosBoot) {
usbBootable = true;
isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
} // optionalAttrs config.isoImage.makeEfiBootable {
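
Since both new flags default to `false`, image definitions other than the installation CD (which sets them above) must opt in explicitly. A minimal sketch of an ISO configuration that keeps BIOS, EFI, and USB booting enabled; the import path follows nixpkgs' installer tree:

```
{
  imports = [ <nixpkgs/nixos/modules/installer/cd-dvd/iso-image.nix> ];

  isoImage.makeBiosBootable = true; # syslinux/isolinux boot path
  isoImage.makeEfiBootable = true;  # EFI-bootable volume
  isoImage.makeUsbBootable = true;  # isohybrid MBR; needs BIOS boot support
}
```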

View File

@ -518,21 +518,6 @@ EOF
}
}
# For lack of a better way to determine it, guess whether we should use a
# bigger font for the console from the display mode on the first
# framebuffer. A way based on the physical size/actual DPI reported by
# the monitor would be nice, but I don't know how to do this without X :)
my $fb_modes_file = "/sys/class/graphics/fb0/modes";
if (-f $fb_modes_file && -r $fb_modes_file) {
my $modes = read_file($fb_modes_file);
$modes =~ m/([0-9]+)x([0-9]+)/;
my $console_width = $1, my $console_height = $2;
if ($console_width > 1920) {
push @attrs, "# high-resolution display";
push @attrs, 'hardware.video.hidpi.enable = lib.mkDefault true;';
}
}
# Generate the hardware configuration file.

View File

@ -159,10 +159,7 @@ in
$desktopConfiguration
# Configure keymap in X11
# services.xserver.layout = "us";
# services.xserver.xkbOptions = {
# "eurosign:e";
# "caps:escape" # map caps to escape.
# };
# services.xserver.xkbOptions = "eurosign:e,caps:escape";
# Enable CUPS to print documents.
# services.printing.enable = true;

View File

@ -131,7 +131,8 @@ let
desktopItem = pkgs.makeDesktopItem {
name = "nixos-manual";
desktopName = "NixOS Manual";
genericName = "View NixOS documentation in a web browser";
genericName = "System Manual";
comment = "View NixOS documentation in a web browser";
icon = "nix-snowflake";
exec = "nixos-help";
categories = ["System"];

View File

@ -338,7 +338,7 @@ in
lidarr = 306;
slurm = 307;
kapacitor = 308;
solr = 309;
# solr = 309; removed 2023-03-16
alerta = 310;
minetest = 311;
rss2email = 312;
@ -648,7 +648,7 @@ in
lidarr = 306;
slurm = 307;
kapacitor = 308;
solr = 309;
# solr = 309; removed 2023-03-16
alerta = 310;
minetest = 311;
rss2email = 312;

View File

@ -95,7 +95,6 @@
./hardware/video/bumblebee.nix
./hardware/video/capture/mwprocapture.nix
./hardware/video/displaylink.nix
./hardware/video/hidpi.nix
./hardware/video/nvidia.nix
./hardware/video/switcheroo-control.nix
./hardware/video/uvcvideo/default.nix
@ -104,7 +103,6 @@
./hardware/xone.nix
./hardware/xpadneo.nix
./i18n/input-method/default.nix
./i18n/input-method/fcitx.nix
./i18n/input-method/fcitx5.nix
./i18n/input-method/hime.nix
./i18n/input-method/ibus.nix
@ -220,6 +218,7 @@
./programs/proxychains.nix
./programs/qdmr.nix
./programs/qt5ct.nix
./programs/regreet.nix
./programs/rog-control-center.nix
./programs/rust-motd.nix
./programs/screen.nix
@ -377,7 +376,7 @@
./services/continuous-integration/jenkins/default.nix
./services/continuous-integration/jenkins/job-builder.nix
./services/continuous-integration/jenkins/slave.nix
./services/continuous-integration/woodpecker/agent.nix
./services/continuous-integration/woodpecker/agents.nix
./services/continuous-integration/woodpecker/server.nix
./services/databases/aerospike.nix
./services/databases/cassandra.nix
@ -431,7 +430,6 @@
./services/desktops/gvfs.nix
./services/desktops/malcontent.nix
./services/desktops/neard.nix
./services/desktops/pipewire/pipewire-media-session.nix
./services/desktops/pipewire/pipewire.nix
./services/desktops/pipewire/wireplumber.nix
./services/desktops/profile-sync-daemon.nix
@ -511,6 +509,7 @@
./services/hardware/usbmuxd.nix
./services/hardware/usbrelayd.nix
./services/hardware/vdr.nix
./services/hardware/keyd.nix
./services/home-automation/evcc.nix
./services/home-automation/home-assistant.nix
./services/home-automation/zigbee2mqtt.nix
@ -563,6 +562,7 @@
./services/mail/schleuder.nix
./services/mail/spamassassin.nix
./services/mail/sympa.nix
./services/mail/zeyple.nix
./services/matrix/appservice-discord.nix
./services/matrix/appservice-irc.nix
./services/matrix/conduit.nix
@ -734,6 +734,7 @@
./services/monitoring/nagios.nix
./services/monitoring/netdata.nix
./services/monitoring/parsedmarc.nix
./services/monitoring/prometheus/alertmanager-irc-relay.nix
./services/monitoring/prometheus/alertmanager.nix
./services/monitoring/prometheus/default.nix
./services/monitoring/prometheus/exporters.nix
@ -950,6 +951,7 @@
./services/networking/owamp.nix
./services/networking/pdns-recursor.nix
./services/networking/pdnsd.nix
./services/networking/peroxide.nix
./services/networking/pixiecore.nix
./services/networking/pleroma.nix
./services/networking/polipo.nix
@ -1061,8 +1063,8 @@
./services/search/meilisearch.nix
./services/search/opensearch.nix
./services/search/qdrant.nix
./services/search/solr.nix
./services/security/aesmd.nix
./services/security/authelia.nix
./services/security/certmgr.nix
./services/security/cfssl.nix
./services/security/clamav.nix
@ -1132,11 +1134,11 @@
./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix
./services/web-apps/baget.nix
./services/web-apps/bookstack.nix
./services/web-apps/calibre-web.nix
./services/web-apps/coder.nix
./services/web-apps/changedetection-io.nix
./services/web-apps/chatgpt-retrieval-plugin.nix
./services/web-apps/cloudlog.nix
./services/web-apps/code-server.nix
./services/web-apps/convos.nix

View File

@ -17,7 +17,7 @@ in {
type = types.listOf types.str;
description = lib.mdDoc "Nix top-level packages to be compiled using CCache";
default = [];
example = [ "wxGTK30" "ffmpeg" "libav_all" ];
example = [ "wxGTK32" "ffmpeg" "libav_all" ];
};
};

View File

@ -51,6 +51,10 @@ in {
enable = mkEnableOption (lib.mdDoc "installing proxychains configuration");
package = mkPackageOptionMD pkgs "proxychains" {
example = "pkgs.proxychains-ng";
};
chain = {
type = mkOption {
type = types.enum [ "dynamic" "strict" "random" ];
@ -159,7 +163,7 @@ in {
};
environment.etc."proxychains.conf".text = configFile;
environment.systemPackages = [ pkgs.proxychains ];
environment.systemPackages = [ cfg.package ];
};
}
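
The module now installs `cfg.package` instead of a hard-coded `pkgs.proxychains`, so the implementation can be swapped out. A sketch using the `proxychains-ng` fork named in the option's example; the `proxies` entry is illustrative and relies on the module's proxy options not shown in this hunk:

```
programs.proxychains = {
  enable = true;
  package = pkgs.proxychains-ng;
  proxies.tor = {      # hypothetical entry; any attribute name works
    type = "socks5";
    host = "127.0.0.1";
    port = 9050;
  };
};
```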

View File

@ -0,0 +1,75 @@
{ lib
, pkgs
, config
, ...
}:
let
cfg = config.programs.regreet;
settingsFormat = pkgs.formats.toml { };
in
{
options.programs.regreet = {
enable = lib.mkEnableOption null // {
description = lib.mdDoc ''
Enable ReGreet, a clean and customizable greeter for greetd.
To use ReGreet, {option}`services.greetd` has to be enabled and
{option}`services.greetd.settings.default_session` should contain the
appropriate configuration to launch
{option}`config.programs.regreet.package`. For examples, see the
[ReGreet Readme](https://github.com/rharish101/ReGreet#set-as-default-session).
A minimal configuration that launches ReGreet in {command}`cage` is
enabled by this module by default.
'';
};
package = lib.mkPackageOptionMD pkgs [ "greetd" "regreet" ] { };
settings = lib.mkOption {
type = lib.types.either lib.types.path settingsFormat.type;
default = { };
description = lib.mdDoc ''
ReGreet configuration file. Refer to
<https://github.com/rharish101/ReGreet/blob/main/regreet.sample.toml>
for the available options.
'';
};
extraCss = lib.mkOption {
type = lib.types.either lib.types.path lib.types.lines;
default = "";
description = lib.mdDoc ''
Extra CSS rules to apply on top of the GTK theme. Refer to
[GTK CSS Properties](https://docs.gtk.org/gtk4/css-properties.html) for
modifiable properties.
'';
};
};
config = lib.mkIf cfg.enable {
services.greetd = {
enable = lib.mkDefault true;
settings.default_session.command = lib.mkDefault "${lib.getExe pkgs.cage} -s -- ${lib.getExe cfg.package}";
};
environment.etc = {
"greetd/regreet.css" =
if lib.isPath cfg.extraCss
then {source = cfg.extraCss;}
else {text = cfg.extraCss;};
"greetd/regreet.toml".source =
if lib.isPath cfg.settings
then cfg.settings
else settingsFormat.generate "regreet.toml" cfg.settings;
};
systemd.tmpfiles.rules = let
user = config.services.greetd.settings.default_session.user;
in [
"d /var/log/regreet 0755 greeter ${user} - -"
"d /var/cache/regreet 0755 greeter ${user} - -"
];
};
}
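
A minimal sketch of enabling the new module; `services.greetd` and its cage session are wired up by the defaults above, so only the toggle plus optional settings are needed. The `background` keys mirror the upstream sample config and are assumptions here:

```
programs.regreet = {
  enable = true;
  settings = {
    background = {
      path = "/etc/wallpaper.png"; # hypothetical path
      fit = "Cover";
    };
  };
  extraCss = ''
    window { font-size: 1.2em; }
  '';
};
```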

View File

@ -240,7 +240,7 @@ in
[
./known_hosts
(writeText "github.keys" '''
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
''')

View File

@ -9,10 +9,27 @@ let
settingsFile = settingsFormat.generate "starship.toml" cfg.settings;
in {
initOption =
if cfg.interactiveOnly then
"promptInit"
else
"shellInit";
in
{
options.programs.starship = {
enable = mkEnableOption (lib.mdDoc "the Starship shell prompt");
interactiveOnly = mkOption {
default = true;
example = false;
type = types.bool;
description = lib.mdDoc ''
Whether to enable starship only when the shell is interactive.
Some plugins require this to be set to false to function correctly.
'';
};
settings = mkOption {
inherit (settingsFormat) type;
default = { };
@ -25,21 +42,21 @@ in {
};
config = mkIf cfg.enable {
programs.bash.promptInit = ''
programs.bash.${initOption} = ''
if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
export STARSHIP_CONFIG=${settingsFile}
eval "$(${pkgs.starship}/bin/starship init bash)"
fi
'';
programs.fish.promptInit = ''
programs.fish.${initOption} = ''
if test "$TERM" != "dumb" -a \( -z "$INSIDE_EMACS" -o "$INSIDE_EMACS" = "vterm" \)
set -x STARSHIP_CONFIG ${settingsFile}
eval (${pkgs.starship}/bin/starship init fish)
end
'';
programs.zsh.promptInit = ''
programs.zsh.${initOption} = ''
if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
export STARSHIP_CONFIG=${settingsFile}
eval "$(${pkgs.starship}/bin/starship init zsh)"

View File

@ -9,23 +9,36 @@ in {
enable = mkEnableOption (lib.mdDoc "steam");
package = mkOption {
type = types.package;
default = pkgs.steam.override {
extraLibraries = pkgs: with config.hardware.opengl;
if pkgs.stdenv.hostPlatform.is64bit
then [ package ] ++ extraPackages
else [ package32 ] ++ extraPackages32;
};
defaultText = literalExpression ''
pkgs.steam.override {
extraLibraries = pkgs: with config.hardware.opengl;
type = types.package;
default = pkgs.steam;
defaultText = literalExpression "pkgs.steam";
example = literalExpression ''
pkgs.steam-small.override {
extraEnv = {
MANGOHUD = true;
OBS_VKCAPTURE = true;
RADV_TEX_ANISO = 16;
};
extraLibraries = p: with p; [
atk
];
}
'';
apply = steam: steam.override (prev: {
extraLibraries = pkgs: let
prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
additionalLibs = with config.hardware.opengl;
if pkgs.stdenv.hostPlatform.is64bit
then [ package ] ++ extraPackages
else [ package32 ] ++ extraPackages32;
}
'';
in prevLibs ++ additionalLibs;
});
description = lib.mdDoc ''
steam package to use.
The Steam package to use. Additional libraries are added from the system
configuration to ensure graphics work properly.
Use this option to customise the Steam package rather than adding your
custom Steam to {option}`environment.systemPackages` yourself.
'';
};

View File

@ -33,7 +33,7 @@ in {
security.wrappers.dumpcap = {
source = "${wireshark}/bin/dumpcap";
capabilities = "cap_net_raw+p";
capabilities = "cap_net_raw,cap_net_admin+eip";
owner = "root";
group = "wireshark";
permissions = "u+rx,g+x";

View File

@ -44,6 +44,7 @@ with lib;
The hidepid module was removed, since the underlying machinery
is broken when using cgroups-v2.
'')
(mkRemovedOptionModule [ "services" "baget" "enable" ] "The baget module was removed due to the upstream package being unmaintained.")
(mkRemovedOptionModule [ "services" "beegfs" ] "The BeeGFS module has been removed")
(mkRemovedOptionModule [ "services" "beegfsEnable" ] "The BeeGFS module has been removed")
(mkRemovedOptionModule [ "services" "cgmanager" "enable"] "cgmanager was deprecated by lxc and therefore removed from nixpkgs.")
@ -106,6 +107,8 @@ with lib;
(mkRemovedOptionModule [ "services" "riak" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "cryptpad" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx" ] "The fcitx module has been removed. Plesae use fcitx5 instead")
# Do NOT add any option renames here, see top of the file
];
}

View File

@ -19,7 +19,7 @@ let
];
mkArgs = rule:
if (isNull rule.args) then ""
if (rule.args == null) then ""
else if (length rule.args == 0) then "args"
else "args ${concatStringsSep " " rule.args}";
@ -27,9 +27,9 @@ let
let
opts = mkOpts rule;
as = optionalString (!isNull rule.runAs) "as ${rule.runAs}";
as = optionalString (rule.runAs != null) "as ${rule.runAs}";
cmd = optionalString (!isNull rule.cmd) "cmd ${rule.cmd}";
cmd = optionalString (rule.cmd != null) "cmd ${rule.cmd}";
args = mkArgs rule;
in
@ -75,7 +75,9 @@ in
{file}`/etc/doas.conf` file. More specific rules should
come after more general ones in order to yield the expected behavior.
You can use `mkBefore` and/or `mkAfter` to ensure
this is the case when configuration options are merged.
this is the case when configuration options are merged. Be aware that
this option cannot be used to override the behaviour allowing
passwordless operation for root.
'';
example = literalExpression ''
[
@ -224,7 +226,9 @@ in
type = with types; lines;
default = "";
description = lib.mdDoc ''
Extra configuration text appended to {file}`doas.conf`.
Extra configuration text appended to {file}`doas.conf`. Be aware that
this option cannot be used to override the behaviour allowing
passwordless operation for root.
'';
};
};
@ -266,14 +270,14 @@ in
# completely replace the contents of this file, use
# `environment.etc."doas.conf"`.
# "root" is allowed to do anything.
permit nopass keepenv root
# extraRules
${concatStringsSep "\n" (lists.flatten (map mkRule cfg.extraRules))}
# extraConfig
${cfg.extraConfig}
# "root" is allowed to do anything.
permit nopass keepenv root
'';
preferLocalBuild = true;
}
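
Because the root rule is now emitted after `extraRules` and `extraConfig`, user rules can no longer shadow it. A sketch of a typical rule set, assuming the rule fields (`users`, `keepEnv`, `persist`) documented elsewhere in this module:

```
security.doas = {
  enable = true;
  extraRules = [
    {
      users = [ "alice" ];
      keepEnv = true;
      persist = true; # cache authentication for a short period
    }
  ];
};
```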

View File

@ -620,7 +620,7 @@ let
optionalString config.services.homed.enable ''
password sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so
'' + ''
password sufficient pam_unix.so nullok sha512
password sufficient pam_unix.so nullok yescrypt
'' +
optionalString config.security.pam.enableEcryptfs ''
password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so
@ -793,7 +793,7 @@ let
};
}));
motd = if isNull config.users.motdFile
motd = if config.users.motdFile == null
then pkgs.writeText "motd" config.users.motd
else config.users.motdFile;
@ -1233,7 +1233,7 @@ in
config = {
assertions = [
{
assertion = isNull config.users.motd || isNull config.users.motdFile;
assertion = config.users.motd == null || config.users.motdFile == null;
message = ''
Only one of users.motd and users.motdFile can be set.
'';

View File

@ -303,8 +303,8 @@ in
then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
else "--files-from ${filesFromTmpFile}";
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
(resticCmd + " forget --prune --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.checkOpts))
(resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check " + (concatStringsSep " " backup.checkOpts))
];
# Helper functions for rclone remotes
rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1;
@ -314,6 +314,7 @@ in
in
nameValuePair "restic-backups-${name}" ({
environment = {
RESTIC_CACHE_DIR = "%C/restic-backups-${name}";
RESTIC_PASSWORD_FILE = backup.passwordFile;
RESTIC_REPOSITORY = backup.repository;
RESTIC_REPOSITORY_FILE = backup.repositoryFile;
@ -332,7 +333,7 @@ in
restartIfChanged = false;
serviceConfig = {
Type = "oneshot";
ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
++ pruneCmd;
User = backup.user;
RuntimeDirectory = "restic-backups-${name}";
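
The cache directory is now supplied once through `RESTIC_CACHE_DIR` instead of a repeated `--cache-dir` flag, so the backup, forget, and check invocations all share it. A sketch of a backup definition exercising the options referenced in this hunk; repository and paths are placeholders:

```
services.restic.backups.home = {
  user = "root";
  repository = "rclone:remote:backups"; # placeholder rclone remote
  passwordFile = "/etc/nixos/secrets/restic-password";
  paths = [ "/home" ];
  pruneOpts = [ "--keep-daily 7" "--keep-weekly 5" ]; # appended to `forget --prune`
  checkOpts = [ "--with-cache" ];                     # appended to `check`
};
```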

View File

@ -114,6 +114,8 @@ in
options.services.sanoid = {
enable = mkEnableOption (lib.mdDoc "Sanoid ZFS snapshotting service");
package = lib.mkPackageOptionMD pkgs "sanoid" {};
interval = mkOption {
type = types.str;
default = "hourly";
@ -181,7 +183,7 @@ in
ExecStartPre = (map (buildAllowCommand "allow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStopPost = (map (buildAllowCommand "unallow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStart = lib.escapeShellArgs ([
"${pkgs.sanoid}/bin/sanoid"
"${cfg.package}/bin/sanoid"
"--cron"
"--configdir"
(pkgs.writeTextDir "sanoid.conf" configFile)

View File

@ -87,6 +87,8 @@ in
options.services.syncoid = {
enable = mkEnableOption (lib.mdDoc "Syncoid ZFS synchronization service");
package = lib.mkPackageOptionMD pkgs "sanoid" {};
interval = mkOption {
type = types.str;
default = "hourly";
@ -331,7 +333,7 @@ in
ExecStopPost =
(map (buildUnallowCommand c.localSourceAllow) (localDatasetName c.source)) ++
(map (buildUnallowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStart = lib.escapeShellArgs ([ "${pkgs.sanoid}/bin/syncoid" ]
ExecStart = lib.escapeShellArgs ([ "${cfg.package}/bin/syncoid" ]
++ optionals c.useCommonArgs cfg.commonArgs
++ optional c.recursive "-r"
++ optionals (c.sshKey != null) [ "--sshkey" c.sshKey ]
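
Both services gained a `package` option defaulting to `pkgs.sanoid`, which provides the `sanoid` and `syncoid` binaries. A sketch of substituting a patched build for both; the patch file is purely illustrative:

```
services.sanoid.package = pkgs.sanoid.overrideAttrs (old: {
  # hypothetical local patch; any derivation with bin/sanoid and bin/syncoid works
  patches = (old.patches or [ ]) ++ [ ./sanoid-fix.patch ];
});
services.syncoid.package = config.services.sanoid.package;
```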

View File

@ -5,11 +5,95 @@ let
cfg = config.services.hadoop;
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
mkIfNotNull = x: mkIf (x != null) x;
# generic hbase role options
hbaseRoleOption = name: extraOpts: {
enable = mkEnableOption (mdDoc "HBase ${name}");
openFirewall = mkOption {
type = types.bool;
default = false;
description = mdDoc "Open firewall ports for HBase ${name}.";
};
restartIfChanged = mkOption {
type = types.bool;
default = false;
description = mdDoc "Restart ${name} con config change.";
};
extraFlags = mkOption {
type = with types; listOf str;
default = [];
example = literalExpression ''[ "--backup" ]'';
description = mdDoc "Extra flags for the ${name} service.";
};
environment = mkOption {
type = with types; attrsOf str;
default = {};
example = literalExpression ''
{
HBASE_MASTER_OPTS = "-Dcom.sun.management.jmxremote.ssl=true";
}
'';
description = mdDoc "Environment variables passed to ${name}.";
};
} // extraOpts;
# generic hbase role configs
hbaseRoleConfig = name: ports: (mkIf cfg.hbase."${name}".enable {
services.hadoop.gatewayRole = {
enable = true;
enableHbaseCli = mkDefault true;
};
systemd.services."hbase-${toLower name}" = {
description = "HBase ${name}";
wantedBy = [ "multi-user.target" ];
path = with cfg; [ hbase.package ] ++ optional
(with cfg.hbase.master; enable && initHDFS) package;
preStart = mkIf (with cfg.hbase.master; enable && initHDFS)
(concatStringsSep "\n" (
map (x: "HADOOP_USER_NAME=hdfs hdfs --config /etc/hadoop-conf ${x}")[
"dfsadmin -safemode wait"
"dfs -mkdir -p ${cfg.hbase.rootdir}"
"dfs -chown hbase ${cfg.hbase.rootdir}"
]
));
inherit (cfg.hbase."${name}") environment;
script = concatStringsSep " " (
[
"hbase --config /etc/hadoop-conf/"
"${toLower name} start"
]
++ cfg.hbase."${name}".extraFlags
++ map (x: "--${toLower x} ${toString cfg.hbase.${name}.${x}}")
(filter (x: hasAttr x cfg.hbase.${name}) ["port" "infoPort"])
);
serviceConfig = {
User = "hbase";
SyslogIdentifier = "hbase-${toLower name}";
Restart = "always";
};
};
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
networking = {
firewall.allowedTCPPorts = mkIf cfg.hbase."${name}".openFirewall ports;
hosts = mkIf (with cfg.hbase.regionServer; enable && overrideHosts) {
"127.0.0.2" = mkForce [ ];
"::1" = mkForce [ ];
};
};
});
in
{
options.services.hadoop = {
gatewayRole.enableHbaseCli = mkEnableOption (lib.mdDoc "HBase CLI tools");
gatewayRole.enableHbaseCli = mkEnableOption (mdDoc "HBase CLI tools");
hbaseSiteDefault = mkOption {
default = {
@ -21,7 +105,7 @@ in
"hbase.cluster.distributed" = "true";
};
type = types.attrsOf types.anything;
description = lib.mdDoc ''
description = mdDoc ''
Default options for hbase-site.xml
'';
};
@ -29,8 +113,12 @@ in
default = {};
type = with types; attrsOf anything;
example = literalExpression ''
{
"hbase.hregion.max.filesize" = 20*1024*1024*1024;
"hbase.table.normalization.enabled" = "true";
}
'';
description = lib.mdDoc ''
description = mdDoc ''
Additional options and overrides for hbase-site.xml
<https://github.com/apache/hbase/blob/rel/2.4.11/hbase-common/src/main/resources/hbase-default.xml>
'';
@ -39,7 +127,7 @@ in
default = {};
type = with types; attrsOf anything;
internal = true;
description = lib.mdDoc ''
description = mdDoc ''
Internal option to add configs to hbase-site.xml based on module options
'';
};
@ -50,11 +138,11 @@ in
type = types.package;
default = pkgs.hbase;
defaultText = literalExpression "pkgs.hbase";
description = lib.mdDoc "HBase package";
description = mdDoc "HBase package";
};
rootdir = mkOption {
description = lib.mdDoc ''
description = mdDoc ''
This option will set "hbase.rootdir" in hbase-site.xml and determine
the directory shared by region servers and into which HBase persists.
The URL should be 'fully-qualified' to include the filesystem scheme.
@ -68,7 +156,7 @@ in
default = "/hbase";
};
zookeeperQuorum = mkOption {
description = lib.mdDoc ''
description = mdDoc ''
This option will set "hbase.zookeeper.quorum" in hbase-site.xml.
Comma separated list of servers in the ZooKeeper ensemble.
'';
@ -76,107 +164,36 @@ in
example = "zk1.internal,zk2.internal,zk3.internal";
default = null;
};
master = {
enable = mkEnableOption (lib.mdDoc "HBase Master");
initHDFS = mkEnableOption (lib.mdDoc "initialization of the hbase directory on HDFS");
openFirewall = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Open firewall ports for HBase master.
'';
} // (let
ports = port: infoPort: {
port = mkOption {
type = types.int;
default = port;
description = mdDoc "RPC port";
};
infoPort = mkOption {
type = types.int;
default = infoPort;
description = mdDoc "web UI port";
};
};
regionServer = {
enable = mkEnableOption (lib.mdDoc "HBase RegionServer");
overrideHosts = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Remove /etc/hosts entries for "127.0.0.2" and "::1" defined in nixos/modules/config/networking.nix
Regionservers must be able to resolve their hostnames to their IP addresses, through PTR records
or /etc/hosts entries.
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Open firewall ports for HBase master.
'';
};
in mapAttrs hbaseRoleOption {
master.initHDFS = mkEnableOption (mdDoc "initialization of the hbase directory on HDFS");
regionServer.overrideHosts = mkOption {
type = types.bool;
default = true;
description = mdDoc ''
Remove /etc/hosts entries for "127.0.0.2" and "::1" defined in nixos/modules/config/networking.nix.
Regionservers must be able to resolve their hostnames to their IP addresses, through PTR records
or /etc/hosts entries.
'';
};
};
thrift = ports 9090 9095;
rest = ports 8080 8085;
});
};
config = mkMerge [
(mkIf cfg.hbase.master.enable {
services.hadoop.gatewayRole = {
enable = true;
enableHbaseCli = mkDefault true;
};
systemd.services.hbase-master = {
description = "HBase master";
wantedBy = [ "multi-user.target" ];
preStart = mkIf cfg.hbase.master.initHDFS ''
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfsadmin -safemode wait
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfs -mkdir -p ${cfg.hbase.rootdir}
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfs -chown hbase ${cfg.hbase.rootdir}
'';
serviceConfig = {
User = "hbase";
SyslogIdentifier = "hbase-master";
ExecStart = "${cfg.hbase.package}/bin/hbase --config ${hadoopConf} " +
"master start";
Restart = "always";
};
};
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
networking.firewall.allowedTCPPorts = mkIf cfg.hbase.master.openFirewall [
16000 16010
];
})
(mkIf cfg.hbase.regionServer.enable {
services.hadoop.gatewayRole = {
enable = true;
enableHbaseCli = mkDefault true;
};
systemd.services.hbase-regionserver = {
description = "HBase RegionServer";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
User = "hbase";
SyslogIdentifier = "hbase-regionserver";
ExecStart = "${cfg.hbase.package}/bin/hbase --config /etc/hadoop-conf/ " +
"regionserver start";
Restart = "always";
};
};
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
networking = {
firewall.allowedTCPPorts = mkIf cfg.hbase.regionServer.openFirewall [
16020 16030
];
hosts = mkIf cfg.hbase.regionServer.overrideHosts {
"127.0.0.2" = mkForce [ ];
"::1" = mkForce [ ];
};
};
})
config = mkMerge ([
(mkIf cfg.gatewayRole.enable {
@ -192,5 +209,10 @@ in
isSystemUser = true;
};
})
];
] ++ (mapAttrsToList hbaseRoleConfig {
master = [ 16000 16010 ];
regionServer = [ 16020 16030 ];
thrift = with cfg.hbase.thrift; [ port infoPort ];
rest = with cfg.hbase.rest; [ port infoPort ];
}));
}
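
With the roles factored through `hbaseRoleOption`/`hbaseRoleConfig`, the new thrift and rest servers are configured exactly like master and regionServer. A sketch of a single-node setup, assuming HDFS and ZooKeeper are configured elsewhere:

```
services.hadoop.hbase = {
  rootdir = "hdfs://namenode:8020/hbase"; # placeholder namenode
  zookeeperQuorum = "zk1.internal";
  master = {
    enable = true;
    initHDFS = true;     # create and chown the rootdir on first start
    openFirewall = true; # 16000, 16010
  };
  regionServer.enable = true;
  thrift.enable = true;  # RPC on 9090, web UI on 9095 by default
  rest.enable = true;    # RPC on 8080, web UI on 8085 by default
};
```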

View File

@ -147,8 +147,8 @@ in
systemd.services.k3s = {
description = "k3s service";
after = [ "network.service" "firewall.service" ];
wants = [ "network.service" "firewall.service" ];
after = [ "firewall.service" "network-online.target" ];
wants = [ "firewall.service" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = optional config.boot.zfs.enabled config.boot.zfs.package;
serviceConfig = {

View File

@ -270,7 +270,7 @@ in
'';
})]);
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
clusterAdminKubeconfig;
environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [

View File

@ -1,99 +0,0 @@
{ config
, lib
, pkgs
, ...
}:
let
cfg = config.services.woodpecker-agent;
in
{
meta.maintainers = [ lib.maintainers.janik ];
options = {
services.woodpecker-agent = {
enable = lib.mkEnableOption (lib.mdDoc "the Woodpecker-Agent, Agents execute tasks generated by a Server, every install will need one server and at least one agent");
package = lib.mkPackageOptionMD pkgs "woodpecker-agent" { };
environment = lib.mkOption {
default = { };
type = lib.types.attrsOf lib.types.str;
example = lib.literalExpression ''
{
WOODPECKER_SERVER = "localhost:9000";
WOODPECKER_BACKEND = "docker";
DOCKER_HOST = "unix:///run/podman/podman.sock";
}
'';
description = lib.mdDoc "woodpecker-agent config envrionment variables, for other options read the [documentation](https://woodpecker-ci.org/docs/administration/agent-config)";
};
extraGroups = lib.mkOption {
default = null;
type = lib.types.nullOr (lib.types.listOf lib.types.str);
example = [ "podman" ];
description = lib.mdDoc ''
Additional groups for the systemd service.
'';
};
environmentFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/root/woodpecker-agent.env";
description = lib.mdDoc ''
File to load environment variables
from. This is helpful for specifying secrets.
Example content of environmentFile:
```
WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
```
'';
};
};
};
config = lib.mkIf cfg.enable {
systemd.services = {
woodpecker-agent = {
description = "Woodpecker-Agent Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
serviceConfig = {
DynamicUser = true;
SupplementaryGroups = lib.optionals (cfg.extraGroups != null) cfg.extraGroups;
EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
ExecStart = "${cfg.package}/bin/woodpecker-agent";
Restart = "on-failure";
RestartSec = 15;
CapabilityBoundingSet = "";
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
};
inherit (cfg) environment;
};
};
};
}

View File

@ -0,0 +1,144 @@
{ config
, lib
, pkgs
, ...
}:
let
cfg = config.services.woodpecker-agents;
agentModule = lib.types.submodule {
options = {
enable = lib.mkEnableOption (lib.mdDoc "this Woodpecker-Agent. Agents execute tasks generated by a Server, every install will need one server and at least one agent");
package = lib.mkPackageOptionMD pkgs "woodpecker-agent" { };
environment = lib.mkOption {
default = { };
type = lib.types.attrsOf lib.types.str;
example = lib.literalExpression ''
{
WOODPECKER_SERVER = "localhost:9000";
WOODPECKER_BACKEND = "docker";
DOCKER_HOST = "unix:///run/podman/podman.sock";
}
'';
description = lib.mdDoc "woodpecker-agent config envrionment variables, for other options read the [documentation](https://woodpecker-ci.org/docs/administration/agent-config)";
};
extraGroups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "podman" ];
description = lib.mdDoc ''
Additional groups for the systemd service.
'';
};
environmentFile = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [ ];
example = [ "/var/secrets/woodpecker-agent.env" ];
description = lib.mdDoc ''
Files to load environment variables
from. This is helpful for specifying secrets.
Example content of environmentFile:
```
WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
```
'';
};
};
};
mkAgentService = name: agentCfg: {
name = "woodpecker-agent-${name}";
value = {
description = "Woodpecker-Agent Service - ${name}";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
serviceConfig = {
DynamicUser = true;
SupplementaryGroups = agentCfg.extraGroups;
EnvironmentFile = agentCfg.environmentFile;
ExecStart = lib.getExe agentCfg.package;
Restart = "on-failure";
RestartSec = 15;
CapabilityBoundingSet = "";
NoNewPrivileges = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
SystemCallArchitectures = "native";
SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
BindReadOnlyPaths = [
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/ssl/certs"
"-/etc/static/ssl/certs"
"-/etc/hosts"
"-/etc/localtime"
];
};
inherit (agentCfg) environment;
};
};
in
{
meta.maintainers = with lib.maintainers; [ janik ambroisie ];
options = {
services.woodpecker-agents = {
agents = lib.mkOption {
default = { };
type = lib.types.attrsOf agentModule;
example = {
docker = {
environment = {
WOODPECKER_SERVER = "localhost:9000";
WOODPECKER_BACKEND = "docker";
DOCKER_HOST = "unix:///run/podman/podman.sock";
};
extraGroups = [ "docker" ];
environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
};
exec = {
environment = {
WOODPECKER_SERVER = "localhost:9000";
WOODPECKER_BACKEND = "exec";
};
environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
};
};
description = lib.mdDoc "woodpecker-agents configurations";
};
};
};
config = {
systemd.services =
let
mkServices = lib.mapAttrs' mkAgentService;
enabledAgents = lib.filterAttrs (_: agent: agent.enable) cfg.agents;
in
mkServices enabledAgents;
};
}

View File

@ -8,7 +8,7 @@ let
cfg = config.services.woodpecker-server;
in
{
meta.maintainers = [ lib.maintainers.janik ];
meta.maintainers = with lib.maintainers; [ janik ambroisie ];
options = {

Some files were not shown because too many files have changed in this diff Show More