Merge remote-tracking branch 'upstream/master'

commit 4007aa201b
1574 changed files with 34933 additions and 29657 deletions

.github/PULL_REQUEST_TEMPLATE.md (vendored) | 21
@@ -1,13 +1,3 @@
-<!--
-To help with the large amounts of pull requests, we would appreciate your
-reviews of other pull requests, especially simple package updates. Just leave a
-comment describing what you have tested in the relevant package/service.
-Reviewing helps to reduce the average time-to-merge for everyone.
-Thanks a lot if you do!
-List of open PRs: https://github.com/NixOS/nixpkgs/pulls
-Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-contributions
--->
-
 ###### Description of changes
 
 <!--
@@ -38,3 +28,14 @@ For new packages please briefly describe the package or provide a link to its ho
 - [ ] (Module addition) Added a release notes entry if adding a new NixOS module
 - [ ] (Release notes changes) Ran `nixos/doc/manual/md-to-db.sh` to update generated release notes
 - [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+<!--
+To help with the large amounts of pull requests, we would appreciate your
+reviews of other pull requests, especially simple package updates. Just leave a
+comment describing what you have tested in the relevant package/service.
+Reviewing helps to reduce the average time-to-merge for everyone.
+Thanks a lot if you do!
+
+List of open PRs: https://github.com/NixOS/nixpkgs/pulls
+Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-contributions
+-->

.github/labeler.yml (vendored) | 4
@@ -5,10 +5,6 @@
   - pkgs/development/libraries/agda/**/*
   - pkgs/top-level/agda-packages.nix
 
-"6.topic: bsd":
-  - pkgs/os-specific/bsd/**/*
-  - pkgs/stdenv/freebsd/**/*
-
 "6.topic: cinnamon":
   - pkgs/desktops/cinnamon/**/*
 

.github/workflows/backport.yml (vendored) | 6
@@ -2,6 +2,12 @@ name: Backport
 on:
   pull_request_target:
     types: [closed, labeled]
+
+# WARNING:
+# When extending this action, be aware that $GITHUB_TOKEN allows write access to
+# the GitHub repository. This means that it should not evaluate user input in a
+# way that allows code injection.
+
 jobs:
   backport:
     name: Backport Pull Request

.github/workflows/basic-eval.yml (vendored) | 5
@@ -16,5 +16,10 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - uses: cachix/install-nix-action@v16
+    - uses: cachix/cachix-action@v10
+      with:
+        # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
+        name: nixpkgs-ci
+        signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
     # explicit list of supportedSystems is needed until aarch64-darwin becomes part of the trunk jobset
     - run: nix-build pkgs/top-level/release.nix -A tarball.nixpkgs-basic-release-checks --arg supportedSystems '[ "aarch64-darwin" "aarch64-linux" "x86_64-linux" "x86_64-darwin" ]'

.github/workflows/labels.yml (vendored) | 5
@@ -4,6 +4,11 @@ on:
   pull_request_target:
     types: [edited, opened, synchronize, reopened]
 
+# WARNING:
+# When extending this action, be aware that $GITHUB_TOKEN allows some write
+# access to the GitHub API. This means that it should not evaluate user input in
+# a way that allows code injection.
+
 permissions:
   contents: read
   pull-requests: write

.github/workflows/manual-nixos.yml (vendored) | 2
@@ -24,7 +24,7 @@ jobs:
         extra_nix_config: sandbox = true
     - uses: cachix/cachix-action@v10
       with:
-        # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
+        # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
         name: nixpkgs-ci
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
     - name: Building NixOS manual

.github/workflows/manual-nixpkgs.yml (vendored) | 2
@@ -24,7 +24,7 @@ jobs:
         extra_nix_config: sandbox = true
     - uses: cachix/cachix-action@v10
       with:
-        # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
+        # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
         name: nixpkgs-ci
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
     - name: Building Nixpkgs manual

.github/workflows/pending-set.yml (vendored) | 5
@@ -3,6 +3,11 @@ name: "set pending status"
 on:
   pull_request_target:
 
+# WARNING:
+# When extending this action, be aware that $GITHUB_TOKEN allows write access to
+# the GitHub repository. This means that it should not evaluate user input in a
+# way that allows code injection.
+
 jobs:
   action:
     runs-on: ubuntu-latest

README.md

@@ -1,5 +1,10 @@
 <p align="center">
-  <a href="https://nixos.org/nixos"><img src="https://nixos.org/logo/nixos-hires.png" width="500px" alt="NixOS logo" /></a>
+  <a href="https://nixos.org#gh-light-mode-only">
+    <img src="https://raw.githubusercontent.com/NixOS/nixos-homepage/master/logo/nixos-hires.png" width="500px" alt="NixOS logo"/>
+  </a>
+  <a href="https://nixos.org#gh-dark-mode-only">
+    <img src="https://raw.githubusercontent.com/NixOS/nixos-artwork/master/logo/nixos-white.png" width="500px" alt="NixOS logo"/>
+  </a>
 </p>
 
 <p align="center">

doc/builders/special/fhs.section.md

@@ -45,3 +45,5 @@ One can create a simple environment using a `shell.nix` like that:
 ```
 
 Running `nix-shell` would then drop you into a shell with these libraries and binaries available. You can use this to run closed-source applications which expect FHS structure without hassles: simply change `runScript` to the application path, e.g. `./bin/start.sh` -- relative paths are supported.
+
+Additionally, the FHS builder links all relocated gsettings-schemas (the glib setup-hook moves them to `share/gsettings-schemas/${name}/glib-2.0/schemas`) to their standard FHS location. This means you don't need to wrap binaries with `wrapGAppsHook`.
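
A minimal `shell.nix` sketch of the pattern this section describes; the package selection and script path are illustrative, not part of this commit:

```nix
{ pkgs ? import <nixpkgs> {} }:

# buildFHSUserEnv assembles an FHS-like root from targetPkgs and runs
# runScript inside it; `.env` makes it usable with nix-shell.
(pkgs.buildFHSUserEnv {
  name = "fhs-run-app";
  # Libraries/binaries the closed-source application expects at FHS paths.
  targetPkgs = pkgs: with pkgs; [ zlib openssl ];
  # Relative paths are supported, as noted above.
  runScript = "./bin/start.sh";
}).env
```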

doc/contributing/reviewing-contributions.chapter.md

@@ -125,7 +125,7 @@ Reviewing process:
   - Type should be appropriate (string related types differs in their merging capabilities, `optionSet` and `string` types are deprecated).
   - Description, default and example should be provided.
 - Ensure that option changes are backward compatible.
-  - `mkRenamedOptionModule` and `mkAliasOptionModule` functions provide way to make option changes backward compatible.
+  - `mkRenamedOptionModuleWith` provides a way to make option changes backward compatible.
 - Ensure that removed options are declared with `mkRemovedOptionModule`
 - Ensure that changes that are not backward compatible are mentioned in release notes.
 - Ensure that documentations affected by the change is updated.

doc/functions/library/attrsets.xml

@@ -1474,7 +1474,7 @@ lib.attrsets.zipAttrsWith
 <section xml:id="function-library-lib.attrsets.zipAttrs">
   <title><function>lib.attrsets.zipAttrs</function></title>
 
-  <subtitle><literal>zipAttrsWith :: [ AttrSet ] -> AttrSet</literal>
+  <subtitle><literal>zipAttrs :: [ AttrSet ] -> AttrSet</literal>
   </subtitle>
 
   <xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrs" />

doc/languages-frameworks/ocaml.section.md

@@ -38,8 +38,8 @@ Here is a simple package example.
 
 - It uses the `fetchFromGitHub` fetcher to get its source.
 
-- `useDune2 = true` ensures that the latest version of Dune is used for the
-  build (this may become the default value in a future release).
+- `useDune2 = true` ensures that Dune version 2 is used for the
+  build (this is the default; set to `false` to use Dune version 1).
 
 - It sets the optional `doCheck` attribute such that tests will be run with
   `dune runtest -p angstrom` after the build (`dune build -p angstrom`) is
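
A hedged sketch of the package shape these bullets describe, using the `angstrom` example the hunk refers to; the version and hash are placeholders:

```nix
{ lib, buildDunePackage, fetchFromGitHub }:

buildDunePackage rec {
  pname = "angstrom";
  version = "0.15.0";         # illustrative version
  useDune2 = true;            # Dune 2 is the default; set to false for Dune 1

  src = fetchFromGitHub {
    owner = "inhabitedtype";
    repo = pname;
    rev = version;
    sha256 = lib.fakeSha256;  # placeholder hash, replace after first build
  };

  doCheck = true;             # runs `dune runtest -p angstrom` after the build
}
```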

doc/using/overlays.chapter.md

@@ -77,7 +77,7 @@ In Nixpkgs, we have multiple implementations of the BLAS/LAPACK numerical linear
 
 The Nixpkgs attribute is `openblas` for ILP64 (integer width = 64 bits) and `openblasCompat` for LP64 (integer width = 32 bits). `openblasCompat` is the default.
 
-- [LAPACK reference](http://www.netlib.org/lapack/) (also provides BLAS)
+- [LAPACK reference](http://www.netlib.org/lapack/) (also provides BLAS and CBLAS)
 
   The Nixpkgs attribute is `lapack-reference`.
 
@@ -117,7 +117,23 @@ $ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH n
 
 Intel MKL requires an `openmp` implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra is not building and distributing pre-compiled binaries using it.
 
-For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
+To override `blas` and `lapack` with its reference implementations (i.e. for development purposes), one can use the following overlay:
+
+```nix
+self: super:
+
+{
+  blas = super.blas.override {
+    blasProvider = self.lapack-reference;
+  };
+
+  lapack = super.lapack.override {
+    lapackProvider = self.lapack-reference;
+  };
+}
+```
+
+For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). The attributes `blas` and `lapack` are `LP64` by default. Their `ILP64` version are provided through the attributes `blas-ilp64` and `lapack-ilp64`. Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
 
 ```nix
 { stdenv, blas, lapack, ... }:
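
The assertion the text introduces is cut off by the hunk boundary; a plausible completion, assuming only the `isILP64` attributes described above:

```nix
{ stdenv, blas, lapack, ... }:

# Refuse to evaluate if an ILP64 BLAS/LAPACK has been selected.
assert (!blas.isILP64) && (!lapack.isILP64);

stdenv.mkDerivation {
  name = "lp64-only-package";  # illustrative
  # ...
}
```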

lib/attrsets.nix | 117
@@ -4,8 +4,8 @@
 let
   inherit (builtins) head tail length;
   inherit (lib.trivial) id;
-  inherit (lib.strings) concatStringsSep sanitizeDerivationName;
-  inherit (lib.lists) foldr foldl' concatMap concatLists elemAt all;
+  inherit (lib.strings) concatStringsSep concatMapStringsSep escapeNixIdentifier sanitizeDerivationName;
+  inherit (lib.lists) foldr foldl' concatMap concatLists elemAt all partition groupBy take foldl;
 in
 
 rec {
@@ -78,6 +78,103 @@ rec {
   in attrByPath attrPath (abort errorMsg);
 
 
+  /* Update or set specific paths of an attribute set.
+
+     Takes a list of updates to apply and an attribute set to apply them to,
+     and returns the attribute set with the updates applied. Updates are
+     represented as { path = ...; update = ...; } values, where `path` is a
+     list of strings representing the attribute path that should be updated,
+     and `update` is a function that takes the old value at that attribute path
+     as an argument and returns the new
+     value it should be.
+
+     Properties:
+     - Updates to deeper attribute paths are applied before updates to more
+       shallow attribute paths
+     - Multiple updates to the same attribute path are applied in the order
+       they appear in the update list
+     - If any but the last `path` element leads into a value that is not an
+       attribute set, an error is thrown
+     - If there is an update for an attribute path that doesn't exist,
+       accessing the argument in the update function causes an error, but
+       intermediate attribute sets are implicitly created as needed
+
+     Example:
+       updateManyAttrsByPath [
+         {
+           path = [ "a" "b" ];
+           update = old: { d = old.c; };
+         }
+         {
+           path = [ "a" "b" "c" ];
+           update = old: old + 1;
+         }
+         {
+           path = [ "x" "y" ];
+           update = old: "xy";
+         }
+       ] { a.b.c = 0; }
+       => { a = { b = { d = 1; }; }; x = { y = "xy"; }; }
+  */
+  updateManyAttrsByPath = let
+    # When recursing into attributes, instead of updating the `path` of each
+    # update using `tail`, which needs to allocate an entirely new list,
+    # we just pass a prefix length to use and make sure to only look at the
+    # path without the prefix length, so that we can reuse the original list
+    # entries.
+    go = prefixLength: hasValue: value: updates:
+      let
+        # Splits updates into ones on this level (split.right)
+        # And ones on levels further down (split.wrong)
+        split = partition (el: length el.path == prefixLength) updates;
+
+        # Groups updates on further down levels into the attributes they modify
+        nested = groupBy (el: elemAt el.path prefixLength) split.wrong;
+
+        # Applies only nested modification to the input value
+        withNestedMods =
+          # Return the value directly if we don't have any nested modifications
+          if split.wrong == [] then
+            if hasValue then value
+            else
+              # Throw an error if there is no value. This `head` call here is
+              # safe, but only in this branch since `go` could only be called
+              # with `hasValue == false` for nested updates, in which case
+              # it's also always called with at least one update
+              let updatePath = (head split.right).path; in
+              throw
+              ( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' does "
+              + "not exist in the given value, but the first update to this "
+              + "path tries to access the existing value.")
+          else
+            # If there are nested modifications, try to apply them to the value
+            if ! hasValue then
+              # But if we don't have a value, just use an empty attribute set
+              # as the value, but simplify the code a bit
+              mapAttrs (name: go (prefixLength + 1) false null) nested
+            else if isAttrs value then
+              # If we do have a value and it's an attribute set, override it
+              # with the nested modifications
+              value //
+              mapAttrs (name: go (prefixLength + 1) (value ? ${name}) value.${name}) nested
+            else
+              # However if it's not an attribute set, we can't apply the nested
+              # modifications, throw an error
+              let updatePath = (head split.wrong).path; in
+              throw
+              ( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' needs to "
+              + "be updated, but path '${showAttrPath (take prefixLength updatePath)}' "
+              + "of the given value is not an attribute set, so we can't "
+              + "update an attribute inside of it.");
+
+        # We get the final result by applying all the updates on this level
+        # after having applied all the nested updates
+        # We use foldl instead of foldl' so that in case of multiple updates,
+        # intermediate values aren't evaluated if not needed
+      in foldl (acc: el: el.update acc) withNestedMods split.right;
+
+  in updates: value: go 0 true value updates;
+
   /* Return the specified attributes from a set.
 
      Example:
@@ -327,7 +424,7 @@ rec {
        isDerivation "foobar"
        => false
   */
-  isDerivation = x: isAttrs x && x ? type && x.type == "derivation";
+  isDerivation = x: x.type or null == "derivation";
 
   /* Converts a store path to a fake derivation. */
   toDerivation = path:
@@ -477,6 +574,20 @@ rec {
   overrideExisting = old: new:
     mapAttrs (name: value: new.${name} or value) old;
 
+  /* Turns a list of strings into a human-readable description of those
+     strings represented as an attribute path. The result of this function is
+     not intended to be machine-readable.
+
+     Example:
+       showAttrPath [ "foo" "10" "bar" ]
+       => "foo.\"10\".bar"
+       showAttrPath []
+       => "<root attribute path>"
+  */
+  showAttrPath = path:
+    if path == [] then "<root attribute path>"
+    else concatMapStringsSep "." escapeNixIdentifier path;
+
   /* Get a package output.
      If no output is found, fallback to `.out` and then to the default.
 

lib/default.nix

@@ -67,7 +67,7 @@ let
     inherit (self.trivial) id const pipe concat or and bitAnd bitOr bitXor
       bitNot boolToString mergeAttrs flip mapNullable inNixShell isFloat min max
       importJSON importTOML warn warnIf throwIfNot checkListOfEnum
-      info showWarnings nixpkgsVersion version
+      info showWarnings nixpkgsVersion version isInOldestRelease
       mod compare splitByAndCompare functionArgs setFunctionArgs isFunction
       toHexString toBaseDigits;
     inherit (self.fixedPoints) fix fix' converge extends composeExtensions
@@ -78,9 +78,10 @@ let
       mapAttrs' mapAttrsToList mapAttrsRecursive mapAttrsRecursiveCond
       genAttrs isDerivation toDerivation optionalAttrs
       zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
-      recursiveUpdate matchAttrs overrideExisting getOutput getBin
+      recursiveUpdate matchAttrs overrideExisting showAttrPath getOutput getBin
       getLib getDev getMan chooseDevOutputs zipWithNames zip
-      recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets;
+      recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets
+      updateManyAttrsByPath;
     inherit (self.lists) singleton forEach foldr fold foldl foldl' imap0 imap1
       concatMap flatten remove findSingle findFirst any all count
       optional optionals toList range partition zipListsWith zipLists
@@ -119,7 +120,8 @@ let
       mkOptionDefault mkDefault mkImageMediaOverride mkForce mkVMOverride
       mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
       mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
-      mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
+      mkRenamedOptionModule mkRenamedOptionModuleWith
+      mkMergedOptionModule mkChangedOptionModule
       mkAliasOptionModule mkDerivedConfig doRename;
     inherit (self.options) isOption mkEnableOption mkSinkUndeclaredOptions
       mergeDefaultOption mergeOneOption mergeEqualOption mergeUniqueOption

lib/lists.nix

@@ -4,6 +4,7 @@
 let
   inherit (lib.strings) toInt;
   inherit (lib.trivial) compare min;
+  inherit (lib.attrsets) mapAttrs;
 in
 rec {
 
@@ -340,15 +341,15 @@ rec {
        groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ]
        => { true = 12; false = 3; }
   */
-  groupBy' = op: nul: pred: lst:
-    foldl' (r: e:
-      let
-        key = pred e;
-      in
-        r // { ${key} = op (r.${key} or nul) e; }
-    ) {} lst;
+  groupBy' = op: nul: pred: lst: mapAttrs (name: foldl op nul) (groupBy pred lst);
 
-  groupBy = groupBy' (sum: e: sum ++ [e]) [];
+  groupBy = builtins.groupBy or (
+    pred: foldl' (r: e:
+      let
+        key = pred e;
+      in
+        r // { ${key} = (r.${key} or []) ++ [e]; }
+    ) {});
 
   /* Merges two lists of the same size together. If the sizes aren't the same
      the merging stops at the shortest. How both lists are merged is defined
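
The rewrite keeps the observable semantics of both functions; a quick sketch of what they compute (the second line mirrors the doc example above):

```nix
# groupBy buckets list elements by a key function:
lib.groupBy (x: if x > 2 then "big" else "small") [ 5 1 2 3 4 ]
# => { big = [ 5 3 4 ]; small = [ 1 2 ]; }

# groupBy' additionally folds each bucket with op/nul:
lib.groupBy' builtins.add 0 (x: lib.boolToString (x > 2)) [ 5 1 2 3 4 ]
# => { true = 12; false = 3; }
```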

lib/modules.nix | 104
@@ -9,7 +9,7 @@ let
     catAttrs
     concatLists
     concatMap
-    count
+    concatStringsSep
     elem
     filter
     findFirst
@@ -47,6 +47,20 @@ let
     showOption
     unknownModule
     ;
+
+  showDeclPrefix = loc: decl: prefix:
+    " - option(s) with prefix `${showOption (loc ++ [prefix])}' in module `${decl._file}'";
+  showRawDecls = loc: decls:
+    concatStringsSep "\n"
+      (sort (a: b: a < b)
+        (concatMap
+          (decl: map
+            (showDeclPrefix loc decl)
+            (attrNames decl.options)
+          )
+          decls
+      ));
+
 in
 
 rec {
@@ -474,26 +488,61 @@ rec {
           [{ inherit (module) file; inherit value; }]
         ) configs;
 
+      # Convert an option tree decl to a submodule option decl
+      optionTreeToOption = decl:
+        if isOption decl.options
+        then decl
+        else decl // {
+            options = mkOption {
+              type = types.submoduleWith {
+                modules = [ { options = decl.options; } ];
+                # `null` is not intended for use by modules. It is an internal
+                # value that means "whatever the user has declared elsewhere".
+                # This might become obsolete with https://github.com/NixOS/nixpkgs/issues/162398
+                shorthandOnlyDefinesConfig = null;
+              };
+            };
+          };
+
       resultsByName = mapAttrs (name: decls:
         # We're descending into attribute ‘name’.
         let
           loc = prefix ++ [name];
           defns = defnsByName.${name} or [];
           defns' = defnsByName'.${name} or [];
-          nrOptions = count (m: isOption m.options) decls;
+          optionDecls = filter (m: isOption m.options) decls;
         in
-          if nrOptions == length decls then
+          if length optionDecls == length decls then
            let opt = fixupOptionType loc (mergeOptionDecls loc decls);
            in {
              matchedOptions = evalOptionValue loc opt defns';
              unmatchedDefns = [];
            }
-          else if nrOptions != 0 then
-            let
-              firstOption = findFirst (m: isOption m.options) "" decls;
-              firstNonOption = findFirst (m: !isOption m.options) "" decls;
-            in
-              throw "The option `${showOption loc}' in `${firstOption._file}' is a prefix of options in `${firstNonOption._file}'."
+          else if optionDecls != [] then
+              if all (x: x.options.type.name == "submodule") optionDecls
+              # Raw options can only be merged into submodules. Merging into
+              # attrsets might be nice, but ambiguous. Suppose we have
+              # attrset as a `attrsOf submodule`. User declares option
+              # attrset.foo.bar, this could mean:
+              # a. option `bar` is only available in `attrset.foo`
+              # b. option `foo.bar` is available in all `attrset.*`
+              # c. reject and require "<name>" as a reminder that it behaves like (b).
+              # d. magically combine (a) and (c).
+              # All of the above are merely syntax sugar though.
+              then
+                let opt = fixupOptionType loc (mergeOptionDecls loc (map optionTreeToOption decls));
+                in {
+                  matchedOptions = evalOptionValue loc opt defns';
+                  unmatchedDefns = [];
+                }
+              else
+                let
+                  firstNonOption = findFirst (m: !isOption m.options) "" decls;
+                  nonOptions = filter (m: !isOption m.options) decls;
+                in
+                  throw "The option `${showOption loc}' in module `${(lib.head optionDecls)._file}' would be a parent of the following options, but its type `${(lib.head optionDecls).options.type.description or "<no description>"}' does not support nested options.\n${
+                    showRawDecls loc nonOptions
+                  }"
           else
             mergeModules' loc decls defns) declsByName;
 
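
What `optionTreeToOption` enables, sketched with illustrative names: a module may now declare options one attribute below an existing `submoduleWith` option without wrapping them in another `mkOption`/`types.submodule` itself (compare the `declare-bare-submodule*` test files added later in this commit):

```nix
{ lib, ... }:
{
  # Declared elsewhere:
  # options.bare-submodule = lib.mkOption {
  #   type = lib.types.submoduleWith { modules = [ ]; };
  #   default = {};
  # };

  # This "raw" option tree is merged into that submodule's options:
  options.bare-submodule.nested = lib.mkOption {
    type = lib.types.int;
    default = 1;
  };
}
```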
@@ -753,21 +802,22 @@ rec {
           compare = a: b: (a.priority or 1000) < (b.priority or 1000);
       in sort compare defs';
 
-  /* Hack for backward compatibility: convert options of type
-     optionSet to options of type submodule. FIXME: remove
-     eventually. */
   fixupOptionType = loc: opt:
     let
       options = opt.options or
         (throw "Option `${showOption loc}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
+
+      # Hack for backward compatibility: convert options of type
+      # optionSet to options of type submodule. FIXME: remove
+      # eventually.
       f = tp:
-        let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
-        in
        if tp.name == "option set" || tp.name == "submodule" then
           throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
-        else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
-        else if optionSetIn "listOf" then types.listOf (types.submodule options)
-        else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
+        else if (tp.functor.wrapped.name or null) == "optionSet" then
+          if tp.name == "attrsOf" then types.attrsOf (types.submodule options)
+          else if tp.name == "listOf" then types.listOf (types.submodule options)
+          else if tp.name == "nullOr" then types.nullOr (types.submodule options)
+          else tp
         else tp;
     in
       if opt.type.getSubModules or null == null
@@ -904,6 +954,26 @@ rec {
       use = builtins.trace "Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
     };
 
+  mkRenamedOptionModuleWith = {
+    /* Old option path as list of strings. */
+    from,
+    /* New option path as list of strings. */
+    to,
+
+    /*
+      Release number of the first release that contains the rename, ignoring backports.
+      Set it to the upcoming release, matching the nixpkgs/.version file.
+    */
+    sinceRelease,
+
+  }: doRename {
+    inherit from to;
+    visible = false;
+    warn = lib.isInOldestRelease sinceRelease;
+    use = lib.warnIf (lib.isInOldestRelease sinceRelease)
+      "Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
+  };
+
   /* Return a module that causes a warning to be shown if any of the "from"
      option is defined; the defined values can be used in the "mergeFn" to set
     the "to" value.
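
A usage sketch with illustrative option paths; `sinceRelease` uses the integer release format defined by `isInOldestRelease` in the `lib/trivial.nix` hunk below:

```nix
{ lib, ... }:
{
  imports = [
    (lib.mkRenamedOptionModuleWith {
      from = [ "services" "foo" "enable" ];  # illustrative old path
      to = [ "services" "bar" "enable" ];    # illustrative new path
      sinceRelease = 2205;                   # first release containing the rename
    })
  ];
}
```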

lib/options.nix

@@ -231,7 +231,7 @@ rec {
       then true
       else opt.visible or true;
     readOnly = opt.readOnly or false;
-    type = opt.type.description or null;
+    type = opt.type.description or "unspecified";
   }
   // optionalAttrs (opt ? example) { example = scrubOptionValue opt.example; }
   // optionalAttrs (opt ? default) { default = scrubOptionValue opt.default; }
|

lib/systems/default.nix

@@ -105,7 +105,8 @@ rec {
       else if final.isAarch64 then "arm64"
       else if final.isx86_32 then "i386"
       else if final.isx86_64 then "x86_64"
-      else if final.isMips then "mips"
+      else if final.isMips32 then "mips"
+      else if final.isMips64 then "mips" # linux kernel does not distinguish mips32/mips64
       else if final.isPower then "powerpc"
       else if final.isRiscV then "riscv"
       else if final.isS390 then "s390"

lib/systems/doubles.nix

@@ -26,7 +26,7 @@ let
 
   # Linux
   "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux"
-  "armv7l-linux" "i686-linux" "m68k-linux" "mipsel-linux"
+  "armv7l-linux" "i686-linux" "m68k-linux" "mipsel-linux" "mips64el-linux"
   "powerpc64-linux" "powerpc64le-linux" "riscv32-linux"
   "riscv64-linux" "s390-linux" "s390x-linux" "x86_64-linux"
 
@@ -87,7 +87,11 @@ in {
   darwin = filterDoubles predicates.isDarwin;
   freebsd = filterDoubles predicates.isFreeBSD;
   # Should be better, but MinGW is unclear.
-  gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
+  gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; })
+    ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; })
+    ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; })
+    ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnuabin32; })
+    ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnuabi64; });
   illumos = filterDoubles predicates.isSunOS;
   linux = filterDoubles predicates.isLinux;
   netbsd = filterDoubles predicates.isNetBSD;

lib/systems/examples.nix

@@ -93,6 +93,26 @@ rec {
     config = "mipsel-unknown-linux-gnu";
   } // platforms.fuloong2f_n32;
 
+  # MIPS ABI table transcribed from here: https://wiki.debian.org/Multiarch/Tuples
+
+  # can execute on 32bit chip
+  mips-linux-gnu = { config = "mips-linux-gnu"; } // platforms.gcc_mips32r2_o32;
+  mipsel-linux-gnu = { config = "mipsel-linux-gnu"; } // platforms.gcc_mips32r2_o32;
+  mipsisa32r6-linux-gnu = { config = "mipsisa32r6-linux-gnu"; } // platforms.gcc_mips32r6_o32;
+  mipsisa32r6el-linux-gnu = { config = "mipsisa32r6el-linux-gnu"; } // platforms.gcc_mips32r6_o32;
+
+  # require 64bit chip (for more registers, 64-bit floating point, 64-bit "long long") but use 32bit pointers
+  mips64-linux-gnuabin32 = { config = "mips64-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
+  mips64el-linux-gnuabin32 = { config = "mips64el-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
+  mipsisa64r6-linux-gnuabin32 = { config = "mipsisa64r6-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
+  mipsisa64r6el-linux-gnuabin32 = { config = "mipsisa64r6el-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
+
+  # 64bit pointers
+  mips64-linux-gnuabi64 = { config = "mips64-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
+  mips64el-linux-gnuabi64 = { config = "mips64el-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
+  mipsisa64r6-linux-gnuabi64 = { config = "mipsisa64r6-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
+  mipsisa64r6el-linux-gnuabi64 = { config = "mipsisa64r6el-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
+
   muslpi = raspberryPi // {
     config = "armv6l-unknown-linux-musleabihf";
   };
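
These example platforms are what `pkgsCross` exposes, so the new MIPS tuples become usable roughly like this (a sketch; an actual build also needs the cross toolchain to bootstrap):

```nix
# nix-build -A pkgsCross.mips64el-linux-gnuabi64.hello
{ pkgs ? import <nixpkgs> {} }:
pkgs.pkgsCross.mips64el-linux-gnuabi64.hello
```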

lib/systems/inspect.nix

@@ -17,6 +17,10 @@ rec {
     isAarch32     = { cpu = { family = "arm"; bits = 32; }; };
     isAarch64     = { cpu = { family = "arm"; bits = 64; }; };
     isMips        = { cpu = { family = "mips"; }; };
+    isMips32      = { cpu = { family = "mips"; bits = 32; }; };
+    isMips64      = { cpu = { family = "mips"; bits = 64; }; };
+    isMips64n32   = { cpu = { family = "mips"; bits = 64; }; abi = { abi = "n32"; }; };
+    isMips64n64   = { cpu = { family = "mips"; bits = 64; }; abi = { abi = "64"; }; };
     isMmix        = { cpu = { family = "mmix"; }; };
     isRiscV       = { cpu = { family = "riscv"; }; };
     isSparc       = { cpu = { family = "sparc"; }; };
@@ -57,7 +61,7 @@ rec {
 
   isAndroid = [ { abi = abis.android; } { abi = abis.androideabi; } ];
   isGnu = with abis; map (a: { abi = a; }) [ gnuabi64 gnu gnueabi gnueabihf ];
-  isMusl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf ];
+  isMusl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf muslabin32 muslabi64 ];
   isUClibc = with abis; map (a: { abi = a; }) [ uclibc uclibceabi uclibceabihf ];
 
   isEfi = map (family: { cpu.family = family; })

lib/systems/parse.nix

@@ -359,6 +359,13 @@ rec {
       ];
     };
     gnuabi64 = { abi = "64"; };
+    muslabi64 = { abi = "64"; };
+
+    # NOTE: abi=n32 requires a 64-bit MIPS chip! That is not a typo.
+    # It is basically the 64-bit abi with 32-bit pointers. Details:
+    # https://www.linux-mips.org/pub/linux/mips/doc/ABI/MIPS-N32-ABI-Handbook.pdf
+    gnuabin32 = { abi = "n32"; };
+    muslabin32 = { abi = "n32"; };
 
     musleabi = { float = "soft"; };
     musleabihf = { float = "hard"; };

lib/systems/platforms.nix

@@ -1,3 +1,10 @@
+# Note: lib/systems/default.nix takes care of producing valid,
+# fully-formed "platform" values (e.g. hostPlatform, buildPlatform,
+# targetPlatform, etc) containing at least the minimal set of attrs
+# required (see types.parsedPlatform in lib/systems/parse.nix). This
+# file takes an already-valid platform and further elaborates it with
+# optional fields such as linux-kernel, gcc, etc.
+
 { lib }:
 rec {
   pc = {
@@ -482,6 +489,43 @@ rec {
     };
   };
 
+  # can execute on 32bit chip
+  gcc_mips32r2_o32 = { gcc = { arch = "mips32r2"; abi = "o32"; }; };
+  gcc_mips32r6_o32 = { gcc = { arch = "mips32r6"; abi = "o32"; }; };
+  gcc_mips64r2_n32 = { gcc = { arch = "mips64r2"; abi = "n32"; }; };
+  gcc_mips64r6_n32 = { gcc = { arch = "mips64r6"; abi = "n32"; }; };
+  gcc_mips64r2_64 = { gcc = { arch = "mips64r2"; abi = "64"; }; };
+  gcc_mips64r6_64 = { gcc = { arch = "mips64r6"; abi = "64"; }; };
+
+  # based on:
+  #   https://www.mail-archive.com/qemu-discuss@nongnu.org/msg05179.html
+  #   https://gmplib.org/~tege/qemu.html#mips64-debian
+  mips64el-qemu-linux-gnuabi64 = (import ./examples).mips64el-linux-gnuabi64 // {
+    linux-kernel = {
+      name = "mips64el";
+      baseConfig = "64r2el_defconfig";
+      target = "vmlinuz";
+      autoModules = false;
+      DTB = true;
+      # for qemu 9p passthrough filesystem
+      extraConfig = ''
+        MIPS_MALTA y
+        PAGE_SIZE_4KB y
+        CPU_LITTLE_ENDIAN y
+        CPU_MIPS64_R2 y
+        64BIT y
+        CPU_MIPS64_R2 y
+
+        NET_9P y
+        NET_9P_VIRTIO y
+        9P_FS y
+        9P_FS_POSIX_ACL y
+        PCI y
+        VIRTIO_PCI y
+      '';
+    };
+  };
+
   ##
   ## Other
   ##
@@ -499,6 +543,9 @@ rec {
     };
   };
 
+  # This function takes a minimally-valid "platform" and returns an
+  # attrset containing zero or more additional attrs which should be
+  # included in the platform in order to further elaborate it.
   select = platform:
     # x86
     /**/ if platform.isx86 then pc

lib/tests/misc.nix

@@ -761,4 +761,156 @@ runTests {
       { a = 3; b = 30; c = 300; }
     ];
   };
+
+  # The example from the showAttrPath documentation
+  testShowAttrPathExample = {
+    expr = showAttrPath [ "foo" "10" "bar" ];
+    expected = "foo.\"10\".bar";
+  };
+
+  testShowAttrPathEmpty = {
+    expr = showAttrPath [];
+    expected = "<root attribute path>";
+  };
+
+  testShowAttrPathVarious = {
+    expr = showAttrPath [
+      "."
+      "foo"
+      "2"
+      "a2-b"
+      "_bc'de"
+    ];
+    expected = ''".".foo."2".a2-b._bc'de'';
+  };
+
+  testGroupBy = {
+    expr = groupBy (n: toString (mod n 5)) (range 0 16);
+    expected = {
+      "0" = [ 0 5 10 15 ];
+      "1" = [ 1 6 11 16 ];
+      "2" = [ 2 7 12 ];
+      "3" = [ 3 8 13 ];
+      "4" = [ 4 9 14 ];
+    };
+  };
+
+  testGroupBy' = {
+    expr = groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ];
+    expected = { false = 3; true = 12; };
+  };
+
+  # The example from the updateManyAttrsByPath documentation
+  testUpdateManyAttrsByPathExample = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ "a" "b" ];
+        update = old: { d = old.c; };
+      }
+      {
+        path = [ "a" "b" "c" ];
+        update = old: old + 1;
+      }
+      {
+        path = [ "x" "y" ];
+        update = old: "xy";
+      }
+    ] { a.b.c = 0; };
+    expected = { a = { b = { d = 1; }; }; x = { y = "xy"; }; };
+  };
+
+  # If there are no updates, the value is passed through
+  testUpdateManyAttrsByPathNone = {
+    expr = updateManyAttrsByPath [] "something";
+    expected = "something";
+  };
+
+  # A single update to the root path is just like applying the function directly
+  testUpdateManyAttrsByPathSingleIncrement = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ ];
+        update = old: old + 1;
+      }
+    ] 0;
+    expected = 1;
+  };
+
+  # Multiple updates can be applied are done in order
+  testUpdateManyAttrsByPathMultipleIncrements = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ ];
+        update = old: old + "a";
+      }
+      {
+        path = [ ];
+        update = old: old + "b";
+      }
+      {
+        path = [ ];
+        update = old: old + "c";
+      }
+    ] "";
+    expected = "abc";
+  };
+
+  # If an update doesn't use the value, all previous updates are not evaluated
+  testUpdateManyAttrsByPathLazy = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ ];
+        update = old: old + throw "nope";
+      }
+      {
+        path = [ ];
+        update = old: "untainted";
+      }
+    ] (throw "start");
+    expected = "untainted";
+  };
+
+  # Deeply nested attributes can be updated without affecting others
+  testUpdateManyAttrsByPathDeep = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ "a" "b" "c" ];
+        update = old: old + 1;
+      }
+    ] {
+      a.b.c = 0;
+
+      a.b.z = 0;
+      a.y.z = 0;
+      x.y.z = 0;
+    };
+    expected = {
+      a.b.c = 1;
+
+      a.b.z = 0;
+      a.y.z = 0;
+      x.y.z = 0;
+    };
+  };
+
+  # Nested attributes are updated first
+  testUpdateManyAttrsByPathNestedBeforehand = {
+    expr = updateManyAttrsByPath [
+      {
+        path = [ "a" ];
+        update = old: old // { x = old.b; };
+      }
+      {
+        path = [ "a" "b" ];
+        update = old: old + 1;
+      }
+    ] {
+      a.b = 0;
+    };
+    expected = {
+      a.b = 1;
+      a.x = 1;
+    };
+  };
+
 }

lib/tests/modules.sh

@@ -62,6 +62,13 @@ checkConfigError() {
 checkConfigOutput '^false$' config.enable ./declare-enable.nix
 checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./define-enable.nix
 
+checkConfigOutput '^1$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix
+checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix
+checkConfigOutput '^42$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
+checkConfigOutput '^420$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
+checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./define-shorthandOnlyDefinesConfig-true.nix
+checkConfigError 'The option .bare-submodule.deep. in .*/declare-bare-submodule-deep-option.nix. is already declared in .*/declare-bare-submodule-deep-option-duplicate.nix' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./declare-bare-submodule-deep-option-duplicate.nix
+
 # Check integer types.
 # unsigned
 checkConfigOutput '^42$' config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
@@ -304,6 +311,12 @@ checkConfigOutput "10" config.processedToplevel ./raw.nix
 checkConfigError "The option .multiple. is defined multiple times" config.multiple ./raw.nix
 checkConfigOutput "bar" config.priorities ./raw.nix
 
+## Option collision
+checkConfigError \
+  'The option .set. in module .*/declare-set.nix. would be a parent of the following options, but its type .attribute set of signed integers. does not support nested options.\n\s*- option[(]s[)] with prefix .set.enable. in module .*/declare-enable-nested.nix.' \
+  config.set \
+  ./declare-set.nix ./declare-enable-nested.nix
+
 # Test that types.optionType merges types correctly
 checkConfigOutput '^10$' config.theOption.int ./optionTypeMerging.nix
 checkConfigOutput '^"hello"$' config.theOption.str ./optionTypeMerging.nix
@@ -311,6 +324,9 @@ checkConfigOutput '^"hello"$' config.theOption.str ./optionTypeMerging.nix
 # Test that types.optionType correctly annotates option locations
 checkConfigError 'The option .theOption.nested. in .other.nix. is already declared in .optionTypeFile.nix.' config.theOption.nested ./optionTypeFile.nix
 
+# Test that types.optionType leaves types untouched as long as they don't need to be merged
+checkConfigOutput 'ok' config.freeformItems.foo.bar ./adhoc-freeformType-survives-type-merge.nix
+
 cat <<EOF
 ====== module tests ======
 $pass Pass

lib/tests/modules/adhoc-freeformType-survives-type-merge.nix (new file) | 14
@@ -0,0 +1,14 @@
+{ lib, ... }: {
+  options.dummy = lib.mkOption { type = lib.types.anything; default = {}; };
+  freeformType =
+    let
+      a = lib.types.attrsOf (lib.types.submodule { options.bar = lib.mkOption { }; });
+    in
+    # modifying types like this breaks type merging.
+    # This test makes sure that type merging is not performed when only a single declaration exists.
+    # Don't modify types in practice!
+    a // {
+      merge = loc: defs: { freeformItems = a.merge loc defs; };
+    };
+  config.foo.bar = "ok";
+}

lib/tests/modules/declare-bare-submodule-deep-option-duplicate.nix (new file) | 10

@@ -0,0 +1,10 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options.bare-submodule.deep = mkOption {
+    type = types.int;
+    default = 2;
+  };
+}

lib/tests/modules/declare-bare-submodule-deep-option.nix (new file) | 10
@@ -0,0 +1,10 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options.bare-submodule.deep = mkOption {
+    type = types.int;
+    default = 2;
+  };
+}

lib/tests/modules/declare-bare-submodule-nested-option.nix (new file) | 19
@@ -0,0 +1,19 @@
+{ config, lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options.bare-submodule = mkOption {
+    type = types.submoduleWith {
+      shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
+      modules = [
+        {
+          options.nested = mkOption {
+            type = types.int;
+            default = 1;
+          };
+        }
+      ];
+    };
+  };
+}

lib/tests/modules/declare-bare-submodule.nix (new file) | 18
@@ -0,0 +1,18 @@
+{ config, lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options.bare-submodule = mkOption {
+    type = types.submoduleWith {
+      modules = [ ];
+      shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
+    };
+    default = {};
+  };
+
+  # config-dependent options: won't recommend, but useful for making this test parameterized
+  options.shorthandOnlyDefinesConfig = mkOption {
+    default = false;
+  };
+}

lib/tests/modules/declare-set.nix (new file) | 12
@@ -0,0 +1,12 @@
+{ lib, ... }:
+
+{
+  options.set = lib.mkOption {
+    default = { };
+    example = { a = 1; };
+    type = lib.types.attrsOf lib.types.int;
+    description = ''
+      Some descriptive text
+    '';
+  };
+}

lib/tests/modules/define-bare-submodule-values.nix (new file) | 4
@@ -0,0 +1,4 @@
+{
+  bare-submodule.nested = 42;
+  bare-submodule.deep = 420;
+}

lib/tests/modules/define-shorthandOnlyDefinesConfig-true.nix (new file) | 1

@@ -0,0 +1 @@
+{ shorthandOnlyDefinesConfig = true; }

lib/tests/systems.nix

@@ -17,7 +17,7 @@ with lib.systems.doubles; lib.runTests {
 
   testarm = mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv6l-netbsd" "armv6l-none" "armv7a-linux" "armv7a-netbsd" "armv7l-linux" "armv7l-netbsd" "arm-none" "armv7a-darwin" ];
   testi686 = mseteq i686 [ "i686-linux" "i686-freebsd" "i686-genode" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" "i686-none" "i686-darwin" ];
-  testmips = mseteq mips [ "mipsel-linux" "mipsel-netbsd" ];
+  testmips = mseteq mips [ "mips64el-linux" "mipsel-linux" "mipsel-netbsd" ];
   testmmix = mseteq mmix [ "mmix-mmixware" ];
   testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd" "x86_64-genode" "x86_64-redox" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" "x86_64-none" ];
 
@@ -28,7 +28,7 @@ with lib.systems.doubles; lib.runTests {
   testredox = mseteq redox [ "x86_64-redox" ];
   testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
   testillumos = mseteq illumos [ "x86_64-solaris" ];
-  testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" "m68k-linux" "s390-linux" "s390x-linux" ];
+  testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mips64el-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" "m68k-linux" "s390-linux" "s390x-linux" ];
   testnetbsd = mseteq netbsd [ "aarch64-netbsd" "armv6l-netbsd" "armv7a-netbsd" "armv7l-netbsd" "i686-netbsd" "m68k-netbsd" "mipsel-netbsd" "powerpc-netbsd" "riscv32-netbsd" "riscv64-netbsd" "x86_64-netbsd" ];
   testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
   testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];

lib/trivial.nix

@@ -166,6 +166,30 @@ rec {
   /* Returns the current nixpkgs release number as string. */
   release = lib.strings.fileContents ../.version;
 
+  /* The latest release that is supported, at the time of release branch-off,
+     if applicable.
+
+     Ideally, out-of-tree modules should be able to evaluate cleanly with all
+     supported Nixpkgs versions (master, release and old release until EOL).
+     So if possible, deprecation warnings should take effect only when all
+     out-of-tree expressions/libs/modules can upgrade to the new way without
+     losing support for supported Nixpkgs versions.
+
+     This release number allows deprecation warnings to be implemented such that
+     they take effect as soon as the oldest release reaches end of life. */
+  oldestSupportedRelease =
+    # Update on master only. Do not backport.
+    2111;
+
+  /* Whether a feature is supported in all supported releases (at the time of
+     release branch-off, if applicable). See `oldestSupportedRelease`. */
+  isInOldestRelease =
+    /* Release number of feature introduction as an integer, e.g. 2111 for 21.11.
+       Set it to the upcoming release, matching the nixpkgs/.version file.
+    */
+    release:
+      release <= lib.trivial.oldestSupportedRelease;
+
   /* Returns the current nixpkgs release code name.
 
      On each release the first letter is bumped and a new animal is chosen
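
A quick sketch of the gating behavior with `oldestSupportedRelease = 2111`:

```nix
lib.isInOldestRelease 2111  # => true:  a 21.11-era rename may warn unconditionally
lib.isInOldestRelease 2205  # => false: a 22.05-era rename stays quiet for now
```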

lib/types.nix

@@ -368,13 +368,21 @@ rec {
       emptyValue = { value = {}; };
     };
 
-    # derivation is a reserved keyword.
+    # A package is a top-level store path (/nix/store/hash-name). This includes:
+    # - derivations
+    # - more generally, attribute sets with an `outPath` or `__toString` attribute
+    #   pointing to a store path, e.g. flake inputs
+    # - strings with context, e.g. "${pkgs.foo}" or (toString pkgs.foo)
+    # - hardcoded store path literals (/nix/store/hash-foo) or strings without context
+    #   ("/nix/store/hash-foo"). These get a context added to them using builtins.storePath.
     package = mkOptionType {
       name = "package";
       check = x: isDerivation x || isStorePath x;
       merge = loc: defs:
         let res = mergeOneOption loc defs;
-        in if isDerivation res then res else toDerivation res;
+        in if builtins.isPath res || (builtins.isString res && ! builtins.hasContext res)
+          then toDerivation res
+          else res;
     };
 
     shellPackage = package // {
@@ -535,7 +543,9 @@ rec {
       description = "optionType";
       check = value: value._type or null == "option-type";
       merge = loc: defs:
-        let
+        if length defs == 1
+        then (head defs).value
+        else let
           # Prepares the type definitions for mergeOptionDecls, which
           # annotates submodules types with file locations
           optionModules = map ({ value, file }:
@@ -562,14 +572,18 @@ rec {
         let
           inherit (lib.modules) evalModules;
 
-          coerce = unify: value: if isFunction value
-            then setFunctionArgs (args: unify (value args)) (functionArgs value)
-            else unify (if shorthandOnlyDefinesConfig then { config = value; } else value);
+          shorthandToModule = if shorthandOnlyDefinesConfig == false
+            then value: value
+            else value: { config = value; };
 
           allModules = defs: imap1 (n: { value, file }:
-            if isAttrs value || isFunction value then
-              # Annotate the value with the location of its definition for better error messages
-              coerce (lib.modules.unifyModuleSyntax file "${toString file}-${toString n}") value
+            if isFunction value
+            then setFunctionArgs
+              (args: lib.modules.unifyModuleSyntax file "${toString file}-${toString n}" (value args))
+              (functionArgs value)
+            else if isAttrs value
+            then
+              lib.modules.unifyModuleSyntax file "${toString file}-${toString n}" (shorthandToModule value)
             else value
           ) defs;
 
@@ -637,7 +651,11 @@ rec {
             then lhs.specialArgs // rhs.specialArgs
             else throw "A submoduleWith option is declared multiple times with the same specialArgs \"${toString (attrNames intersecting)}\"";
           shorthandOnlyDefinesConfig =
-            if lhs.shorthandOnlyDefinesConfig == rhs.shorthandOnlyDefinesConfig
+            if lhs.shorthandOnlyDefinesConfig == null
+            then rhs.shorthandOnlyDefinesConfig
+            else if rhs.shorthandOnlyDefinesConfig == null
+            then lhs.shorthandOnlyDefinesConfig
+            else if lhs.shorthandOnlyDefinesConfig == rhs.shorthandOnlyDefinesConfig
             then lhs.shorthandOnlyDefinesConfig
             else throw "A submoduleWith option is declared multiple times with conflicting shorthandOnlyDefinesConfig values";
         };

maintainers/maintainer-list.nix

@@ -1301,6 +1301,12 @@
     githubId = 75235;
     name = "Michael Walker";
   };
+  bartsch = {
+    email = "consume.noise@gmail.com";
+    github = "bartsch";
+    githubId = 3390885;
+    name = "Daniel Martin";
+  };
   bartuka = {
     email = "wand@hey.com";
     github = "wandersoncferreira";
@@ -1682,6 +1688,12 @@
     githubId = 355401;
     name = "Brian Hicks";
   };
+  brianmcgee = {
+    name = "Brian McGee";
+    email = "brian@41north.dev";
+    github = "brianmcgee";
+    githubId = 1173648;
+  };
   Br1ght0ne = {
     email = "brightone@protonmail.com";
     github = "Br1ght0ne";
@@ -1889,6 +1901,12 @@
     githubId = 82591;
     name = "Carl Sverre";
   };
+  carpinchomug = {
+    email = "aki.suda@protonmail.com";
+    github = "carpinchomug";
+    githubId = 101536256;
+    name = "Akiyoshi Suda";
+  };
   cartr = {
     email = "carter.sande@duodecima.technology";
     github = "cartr";
@@ -2175,6 +2193,12 @@
     githubId = 42220376;
     name = "Charlotte Van Petegem";
   };
+  cigrainger = {
+    name = "Christopher Grainger";
+    email = "chris@amplified.ai";
+    github = "cigrainger";
+    githubId = 3984794;
+  };
   ciil = {
     email = "simon@lackerbauer.com";
     github = "ciil";
@@ -2218,7 +2242,7 @@
   ckie = {
     email = "nixpkgs-0efe364@ckie.dev";
     github = "ckiee";
-    githubId = 2526321;
+    githubId = 25263210;
     keys = [{
       longkeyid = "rsa4096/0x13E79449C0525215";
       fingerprint = "539F 0655 4D35 38A5 429A E253 13E7 9449 C052 5215";
@@ -5619,6 +5643,12 @@
     github = "jduan";
     githubId = 452450;
   };
+  jdupak = {
+    name = "Jakub Dupak";
+    email = "dev@jakubdupak.com";
+    github = "jdupak";
+    githubId = 22683640;
+  };
   jecaro = {
     email = "jeancharles.quillet@gmail.com";
     github = "jecaro";
@@ -7211,6 +7241,18 @@
     githubId = 1267527;
     name = "Daniel Firth";
   };
+  lodi = {
+    email = "anthony.lodi@gmail.com";
+    github = "lodi";
+    githubId = 918448;
+    name = "Anthony Lodi";
+  };
+  loicreynier = {
+    email = "loic@loireynier.fr";
+    github = "loicreynier";
+    githubId = 88983487;
+    name = "Loïc Reynier";
+  };
   lopsided98 = {
     email = "benwolsieffer@gmail.com";
     github = "lopsided98";
@@ -7701,6 +7743,16 @@
     githubId = 279868;
     name = "Matti Kariluoma";
   };
+  matthewpi = {
+    email = "me+nix@matthewp.io";
+    github = "matthewpi";
+    githubId = 26559841;
+    name = "Matthew Penner";
+    keys = [{
+      longkeyid = "ed25519/0x31311906AD4CF6D6";
+      fingerprint = "5118 F1CC B7B0 6C17 4DD1 5267 3131 1906 AD4C F6D6";
+    }];
+  };
   maurer = {
     email = "matthew.r.maurer+nix@gmail.com";
     github = "maurer";
@@ -8406,6 +8458,17 @@
     githubId = 3073833;
     name = "Massimo Redaelli";
   };
+  mrhedgehog = {
+    name = "Mr Hedgehog";
+    email = "hedgehog@mrhedgehog.xyz";
+    matrix = "@mrhedgehog:jupiterbroadcasting.com";
+    github = "ModdedGamers";
+    githubId = 35778371;
+    keys = [{
+      longkeyid = "rsa4096/0x7D5107866B1C6752";
+      fingerprint = "38A0 29B0 4A7E 4C13 A4BB 86C8 7D51 0786 6B1C 6752";
+    }];
+  };
   mrkkrp = {
     email = "markkarpov92@gmail.com";
     github = "mrkkrp";
@@ -8659,6 +8722,12 @@
       fingerprint = "4BFF 0614 03A2 47F0 AA0B 4BC4 916D 8B67 2418 92AE";
     }];
   };
+  nbr = {
+    email = "nbr@users.noreply.github.com";
+    github = "nbr";
+    githubId = 3819225;
+    name = "Nick Braga";
+  };
   nbren12 = {
     email = "nbren12@gmail.com";
     github = "nbren12";
@@ -9233,6 +9302,12 @@
     githubId = 23431373;
     name = "Christoph Neidahl";
   };
+  opeik = {
+    email = "sandro@stikic.com";
+    github = "opeik";
+    githubId = 11566773;
+    name = "Sandro Stikić";
+  };
   orbekk = {
     email = "kjetil.orbekk@gmail.com";
     github = "orbekk";
@@ -9391,6 +9466,13 @@
     githubId = 71795;
     name = "Mica Semrick";
   };
+  papojari = {
+    email = "papojari-git.ovoid@aleeas.com";
+    matrix = "@papojari:artemislena.eu";
+    github = "papojari";
+    githubId = 81317317;
+    name = "papojari";
+  };
   paraseba = {
     email = "paraseba@gmail.com";
     github = "paraseba";
@@ -10310,6 +10392,12 @@
     githubId = 22803888;
     name = "Lu Hongxu";
   };
+  rgnns = {
+    email = "jglievano@gmail.com";
+    github = "rgnns";
+    githubId = 811827;
+    name = "Gabriel Lievano";
+  };
   rgrunbla = {
     email = "remy@grunblatt.org";
     github = "rgrunbla";
@ -11636,6 +11724,17 @@
    githubId = 55607356;
    name = "Stephan Heßelmann";
  };
  steinybot = {
    name = "Jason Pickens";
    email = "jasonpickensnz@gmail.com";
    matrix = "@steinybot:matrix.org";
    github = "steinybot";
    githubId = 4659562;
    keys = [{
      longkeyid = "ed25519/0x21DE1CAE59762A0F";
      fingerprint = "2709 1DEC CC42 4635 4299 569C 21DE 1CAE 5976 2A0F";
    }];
  };
  stelcodes = {
    email = "stel@stel.codes";
    github = "stelcodes";
@ -14031,6 +14130,12 @@
    github = "jpagex";
    githubId = 635768;
  };
  vbrandl = {
    name = "Valentin Brandl";
    email = "mail+nixpkgs@vbrandl.net";
    github = "vbrandl";
    githubId = 20639051;
  };
  portothree = {
    name = "Gustavo Porto";
    email = "gustavoporto@ya.ru";

@ -20,7 +20,7 @@ HACKAGE2NIX="${HACKAGE2NIX:-hackage2nix}"
# See: https://github.com/NixOS/nixpkgs/pull/122023
export LC_ALL=C.UTF-8

extraction_derivation='with import ./. {}; runCommand "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"'
extraction_derivation='with import ./. {}; runCommandLocal "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"'
unpacked_hackage="$(nix-build -E "$extraction_derivation" --no-out-link)"
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix
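The hunk above swaps `runCommand` for `runCommandLocal`, which forces this cheap tar-extraction step to build on the local machine rather than being offloaded to remote builders or fetched from a binary cache. A minimal sketch of the resulting expression (the `all-cabal-hashes` reference is the tarball from the surrounding script):

```nix
with import ./. { };
# runCommandLocal behaves like runCommand but sets preferLocalBuild = true
# and allowSubstitutes = false, so the unpack always happens locally.
runCommandLocal "unpacked-cabal-hashes" { } ''
  tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out
''
```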
@ -27,9 +27,10 @@ The function `mkOption` accepts the following arguments.

`type`

: The type of the option (see [](#sec-option-types)). It may be
  omitted, but that's not advisable since it may lead to errors that
  are hard to diagnose.
: The type of the option (see [](#sec-option-types)). This
  argument is mandatory for nixpkgs modules. Setting this is highly
  recommended for the sake of documentation and type checking. In case it is
  not set, a fallback type with unspecified behavior is used.

`default`

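A minimal sketch of a declaration that satisfies the new requirement; `services.myService.port` is a hypothetical option name:

```nix
{ lib, ... }:
{
  options.services.myService.port = lib.mkOption {
    type = lib.types.port;  # explicit type: documented and checked
    default = 8080;
    description = "Port for the service to listen on.";
  };
}
```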
@ -22,7 +22,8 @@ merging is handled.

`types.package`

: A derivation or a store path.
: A top-level store path. This can be an attribute set pointing
  to a store path, like a derivation or a flake input.

`types.anything`

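A short sketch of how `types.package` is typically used; any attribute set carrying a top-level store path, such as a derivation or a flake input, is accepted (`services.myService.package` is a hypothetical option):

```nix
{ lib, pkgs, ... }:
{
  options.services.myService.package = lib.mkOption {
    type = lib.types.package;
    default = pkgs.hello;  # a derivation is a valid value
    description = "Package providing the service binary.";
  };
}
```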
@ -17,7 +17,8 @@ checks:
  them and comparing their contents. If they are different but only
  `X-Reload-Triggers` in the `[Unit]` section is changed, **reload** the unit.
  The NixOS module system allows setting these triggers with the option
  [systemd.services.\<name\>.reloadTriggers](#opt-systemd.services). If the
  [systemd.services.\<name\>.reloadTriggers](#opt-systemd.services). There are
  some additional keys in the `[Unit]` section that are ignored as well. If the
  unit files differ in any way, the following actions are performed:

- `.path` and `.slice` units are ignored. There is no need to restart them

@ -33,6 +34,9 @@ checks:
- The rest of the units (mostly `.service` units) are then **reload**ed if
  `X-ReloadIfChanged` in the `[Service]` section is set to `true` (exposed
  via [systemd.services.\<name\>.reloadIfChanged](#opt-systemd.services)).
  A small exception is made for units that were deactivated in the meantime,
  for example because they require a unit that got stopped before. These
  are **start**ed instead of reloaded.

- If the reload flag is not set, some more flags decide if the unit is
  skipped. These flags are `X-RestartIfChanged` in the `[Service]` section
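A hedged sketch of the `reloadTriggers` option described above (`my-daemon` and the config path are hypothetical):

```nix
{
  # When any value in this list changes between generations, the unit is
  # reloaded instead of restarted on activation.
  systemd.services.my-daemon.reloadTriggers = [
    "/etc/my-daemon/config.toml"
  ];
}
```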
@ -90,6 +90,17 @@ modules: `systemd.services` (the set of all systemd services) and
`systemd.timers` (the list of commands to be executed periodically by
`systemd`).

Care must be taken when writing systemd services using `Exec*` directives. By
default systemd performs substitution on `%<char>` specifiers in these
directives, expands environment variables from `$FOO` and `${FOO}`, splits
arguments on whitespace, and splits commands on `;`. All of these must be escaped
to avoid unexpected substitution or splitting when interpolating into an `Exec*`
directive, e.g. when using an `extraArgs` option to pass additional arguments to
the service. The functions `utils.escapeSystemdExecArg` and
`utils.escapeSystemdExecArgs` are provided for this, see [Example: Escaping in
Exec directives](#exec-escaping-example) for an example. When using these
functions, system environment substitution should *not* be disabled explicitly.

::: {#locate-example .example}
::: {.title}
**Example: NixOS Module for the "locate" Service**
@ -153,6 +164,37 @@ in {
```
:::

::: {#exec-escaping-example .example}
::: {.title}
**Example: Escaping in Exec directives**
:::
```nix
{ config, lib, pkgs, utils, ... }:

with lib;

let
  cfg = config.services.echo;
  echoAll = pkgs.writeScript "echo-all" ''
    #! ${pkgs.runtimeShell}
    for s in "$@"; do
      printf '%s\n' "$s"
    done
  '';
  args = [ "a%Nything" "lang=\${LANG}" ";" "/bin/sh -c date" ];
in {
  systemd.services.echo =
    { description = "Echo to the journal";
      wantedBy = [ "multi-user.target" ];
      serviceConfig.Type = "oneshot";
      serviceConfig.ExecStart = ''
        ${echoAll} ${utils.escapeSystemdExecArgs args}
      '';
    };
}
```
:::

```{=docbook}
<xi:include href="option-declarations.section.xml" />
<xi:include href="option-types.section.xml" />
@ -38,9 +38,11 @@ options = {
        <listitem>
          <para>
            The type of the option (see
            <xref linkend="sec-option-types" />). It may be omitted, but
            that’s not advisable since it may lead to errors that are hard
            to diagnose.
            <xref linkend="sec-option-types" />). This argument is
            mandatory for nixpkgs modules. Setting this is highly
            recommended for the sake of documentation and type checking.
            In case it is not set, a fallback type with unspecified
            behavior is used.
          </para>
        </listitem>
      </varlistentry>
@ -43,7 +43,9 @@
        </term>
        <listitem>
          <para>
            A derivation or a store path.
            A top-level store path. This can be an attribute set
            pointing to a store path, like a derivation or a flake
            input.
          </para>
        </listitem>
      </varlistentry>
@ -38,8 +38,9 @@
            <emphasis role="strong">reload</emphasis> the unit. The NixOS
            module system allows setting these triggers with the option
            <link linkend="opt-systemd.services">systemd.services.&lt;name&gt;.reloadTriggers</link>.
            If the unit files differ in any way, the following actions are
            performed:
            There are some additional keys in the <literal>[Unit]</literal>
            section that are ignored as well. If the unit files differ in
            any way, the following actions are performed:
          </para>
          <itemizedlist>
            <listitem>

@ -71,6 +72,11 @@
              <literal>[Service]</literal> section is set to
              <literal>true</literal> (exposed via
              <link linkend="opt-systemd.services">systemd.services.&lt;name&gt;.reloadIfChanged</link>).
              A small exception is made for units that were deactivated
              in the meantime, for example because they require a unit
              that got stopped before. These are
              <emphasis role="strong">start</emphasis>ed instead of
              reloaded.
            </para>
          </listitem>
          <listitem>
@ -122,6 +122,25 @@
          services) and <literal>systemd.timers</literal> (the list of
          commands to be executed periodically by <literal>systemd</literal>).
        </para>
        <para>
          Care must be taken when writing systemd services using
          <literal>Exec*</literal> directives. By default systemd performs
          substitution on <literal>%&lt;char&gt;</literal> specifiers in these
          directives, expands environment variables from
          <literal>$FOO</literal> and <literal>${FOO}</literal>, splits
          arguments on whitespace, and splits commands on
          <literal>;</literal>. All of these must be escaped to avoid
          unexpected substitution or splitting when interpolating into an
          <literal>Exec*</literal> directive, e.g. when using an
          <literal>extraArgs</literal> option to pass additional arguments to
          the service. The functions
          <literal>utils.escapeSystemdExecArg</literal> and
          <literal>utils.escapeSystemdExecArgs</literal> are provided for
          this, see <link linkend="exec-escaping-example">Example: Escaping in
          Exec directives</link> for an example. When using these functions,
          system environment substitution should <emphasis>not</emphasis> be
          disabled explicitly.
        </para>
        <anchor xml:id="locate-example" />
        <para>
          <emphasis role="strong">Example: NixOS Module for the
@ -183,6 +202,36 @@ in {
    };
  };
}
</programlisting>
<anchor xml:id="exec-escaping-example" />
<para>
  <emphasis role="strong">Example: Escaping in Exec
  directives</emphasis>
</para>
<programlisting language="bash">
{ config, lib, pkgs, utils, ... }:

with lib;

let
  cfg = config.services.echo;
  echoAll = pkgs.writeScript "echo-all" ''
    #! ${pkgs.runtimeShell}
    for s in "$@"; do
      printf '%s\n' "$s"
    done
  '';
  args = [ "a%Nything" "lang=\${LANG}" ";" "/bin/sh -c date" ];
in {
  systemd.services.echo =
    { description = "Echo to the journal";
      wantedBy = [ "multi-user.target" ];
      serviceConfig.Type = "oneshot";
      serviceConfig.ExecStart = ''
        ${echoAll} ${utils.escapeSystemdExecArgs args}
      '';
    };
}
</programlisting>
<xi:include href="option-declarations.section.xml" />
<xi:include href="option-types.section.xml" />
@ -35,7 +35,17 @@
          This means, <literal>ip[6]tables</literal>,
          <literal>arptables</literal> and <literal>ebtables</literal>
          commands will actually show rules from some specific tables in
          the <literal>nf_tables</literal> kernel subsystem.
          the <literal>nf_tables</literal> kernel subsystem. In case
          you’re migrating from an older release without rebooting,
          there might be cases where you end up with iptables rules
          configured both in the legacy <literal>iptables</literal>
          kernel backend, as well as in the <literal>nf_tables</literal>
          backend. This can lead to confusing firewall behaviour. An
          <literal>iptables-save</literal> after switching will complain
          about <quote>iptables-legacy tables present</quote>. It’s
          probably best to reboot after the upgrade, or manually
          remove all legacy iptables rules (via the
          <literal>iptables-legacy</literal> package).
        </para>
      </listitem>
      <listitem>
@ -62,6 +62,14 @@
          notes</link> for details.
        </para>
      </listitem>
      <listitem>
        <para>
          Module authors can use
          <literal>mkRenamedOptionModuleWith</literal> to automate the
          deprecation cycle without annoying out-of-tree module authors
          and their users.
        </para>
      </listitem>
    </itemizedlist>
  </section>
  <section xml:id="sec-release-22.05-new-services">
@ -187,6 +195,14 @@
          <link xlink:href="options.html#opt-services.mtr-exporter.enable">services.mtr-exporter</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://github.com/prometheus-pve/prometheus-pve-exporter">prometheus-pve-exporter</link>,
          a tool that exposes information from the Proxmox VE API for
          use by Prometheus. Available as
          <link xlink:href="options.html#opt-services.prometheus.exporters.pve">services.prometheus.exporters.pve</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://tetrd.app">tetrd</link>, share your
@ -241,6 +257,17 @@
          <link linkend="opt-services.prosody-filer.enable">services.prosody-filer</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://github.com/rfjakob/systembus-notify">systembus-notify</link>,
          allowing system level notifications to reach the users.
          Available as
          <link xlink:href="options.html#opt-services.systembus-notify.enable">services.systembus-notify</link>.
          Please keep in mind that this service should only be enabled
          on machines with fully trusted users, as any local user is
          able to DoS user sessions by spamming notifications.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://github.com/audreyt/ethercalc">ethercalc</link>,
@ -248,6 +275,13 @@
          <link xlink:href="options.html#opt-services.ethercalc.enable">services.ethercalc</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://nbd.sourceforge.io/">nbd</link>, a
          Network Block Device server. Available as
          <link xlink:href="options.html#opt-services.nbd.server.enable">services.nbd</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://timetagger.app">timetagger</link>,
@ -280,6 +314,12 @@
          with many features.
        </para>
      </listitem>
      <listitem>
        <para>
          <link xlink:href="https://clusterlabs.org/pacemaker/">pacemaker</link>
          cluster resource manager
        </para>
      </listitem>
    </itemizedlist>
  </section>
  <section xml:id="sec-release-22.05-incompatibilities">
@ -378,6 +418,24 @@
          in your configuration.
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>fonts.fonts</literal> no longer includes ancient
          bitmap fonts when both
          <literal>config.services.xserver.enable</literal> and
          <literal>config.nixpkgs.config.allowUnfree</literal> are
          enabled. If you still want these fonts, use:
        </para>
        <programlisting language="bash">
{
  fonts.fonts = [
    pkgs.xorg.fontbhlucidatypewriter100dpi
    pkgs.xorg.fontbhlucidatypewriter75dpi
    pkgs.xorg.fontbh100dpi
  ];
}
</programlisting>
      </listitem>
      <listitem>
        <para>
          The DHCP server (<literal>services.dhcpd4</literal>,
@ -417,6 +475,12 @@
          still supported, because you can set arbitrary values in this
          freeform type.
        </para>
        <para>
          The <literal>listeners.*.bind_address</literal> option was
          renamed to <literal>bind_addresses</literal> in order to match
          the upstream <literal>homeserver.yaml</literal> option name.
          It is now also a list of strings instead of a string.
        </para>
        <para>
          An example to make the required migration clearer:
        </para>
@ -478,7 +542,7 @@

  listeners = [ {
    port = 8448;
    bind_address = [
    bind_addresses = [
      "::"
      "0.0.0.0"
    ];
@ -509,7 +573,14 @@
          Additionally, a few option defaults have been synced up with
          upstream default values, for example the
          <literal>max_upload_size</literal> grew from
          <literal>10M</literal> to <literal>50M</literal>.
          <literal>10M</literal> to <literal>50M</literal>. For the same
          reason, the default <literal>media_store_path</literal> was
          changed from <literal>${dataDir}/media</literal> to
          <literal>${dataDir}/media_store</literal> if
          <literal>system.stateVersion</literal> is at least
          <literal>22.05</literal>. Files will need to be manually moved
          to the new location if the <literal>stateVersion</literal> is
          updated.
        </para>
      </listitem>
      <listitem>
@ -519,6 +590,25 @@
          because Python 2 is being retired from nixpkgs.
        </para>
      </listitem>
      <listitem>
        <para>
          Services in the <literal>hadoop</literal> module previously
          set <literal>openFirewall</literal> to true by default. This
          has now been changed to false. Node definitions for multi-node
          clusters would need <literal>openFirewall = true;</literal> to
          be added to hadoop services when upgrading from NixOS
          21.11.
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>services.hadoop.yarn.nodemanager</literal> now uses
          cgroup-based CPU limit enforcement by default. Additionally,
          the option <literal>useCGroups</literal> was added to
          nodemanagers as an easy way to switch back to the old
          behavior.
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>wafHook</literal> hook now honors
@ -691,6 +781,12 @@
          unmaintained
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>pkgs._7zz</literal> is now correctly licensed as
          LGPL3+ and BSD3 with optional unfree unRAR licensed code
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>tilp2</literal> was removed together with its module
@ -748,6 +844,16 @@
          <literal>systemd.nspawn.&lt;name&gt;.execConfig.PrivateUsers = false</literal>
        </para>
      </listitem>
      <listitem>
        <para>
          The Tor SOCKS proxy is now actually disabled if
          <literal>services.tor.client.enable</literal> is set to
          <literal>false</literal> (the default). If you are using this
          functionality but didn’t change the setting or set it to
          <literal>false</literal>, you now need to set it to
          <literal>true</literal>.
        </para>
      </listitem>
      <listitem>
        <para>
          The terraform 0.12 compatibility has been removed and the
@ -811,6 +917,58 @@
          include serif fonts.
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>pkgs.epgstation</literal> has been upgraded from v1
          to v2, resulting in incompatible changes in the database
          scheme and configuration format.
        </para>
      </listitem>
      <listitem>
        <para>
          Some top-level settings under
          <link linkend="opt-services.epgstation.enable">services.epgstation</link>
          are now deprecated because they were redundant due to the same
          options being present in
          <link linkend="opt-services.epgstation.settings">services.epgstation.settings</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          The option <literal>services.epgstation.basicAuth</literal>
          was removed because basic authentication support was dropped
          by upstream.
        </para>
      </listitem>
      <listitem>
        <para>
          The option
          <link linkend="opt-services.epgstation.database.passwordFile">services.epgstation.database.passwordFile</link>
          no longer has a default value. Make sure to set this option
          explicitly before upgrading. Change the database password if
          necessary.
        </para>
      </listitem>
      <listitem>
        <para>
          The
          <link linkend="opt-services.epgstation.settings">services.epgstation.settings</link>
          option now expects options for <literal>config.yml</literal>
          in EPGStation v2.
        </para>
      </listitem>
      <listitem>
        <para>
          Existing data for the
          <link linkend="opt-services.epgstation.enable">services.epgstation</link>
          module must be backed up prior to the upgrade. To
          back up existing data to
          <literal>/tmp/epgstation.bak</literal>, run
          <literal>sudo -u epgstation epgstation run backup /tmp/epgstation.bak</literal>.
          To import that data after the upgrade, run
          <literal>sudo -u epgstation epgstation run v1migrate /tmp/epgstation.bak</literal>
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>switch-to-configuration</literal> (the script that is
@ -1050,7 +1208,8 @@
              Legacy options have been mapped to the corresponding
              options under
              <link xlink:href="options.html#opt-nix.settings">nix.settings</link>
              but may be deprecated in the future.
              and will be deprecated when NixOS 21.11 reaches end of
              life.
            </para>
          </listitem>
          <listitem>
@ -1071,6 +1230,33 @@
          using the PyPy interpreter were added.
        </para>
      </listitem>
      <listitem>
        <para>
          Some improvements have been made to the
          <literal>hadoop</literal> module:
        </para>
        <itemizedlist spacing="compact">
          <listitem>
            <para>
              A <literal>gatewayRole</literal> option has been added,
              for deploying hadoop cluster configuration files to a node
              that does not have any active services
            </para>
          </listitem>
          <listitem>
            <para>
              Support for older versions of hadoop has been added to
              the module
            </para>
          </listitem>
          <listitem>
            <para>
              Overriding and extending site XML files has been made
              easier
            </para>
          </listitem>
        </itemizedlist>
      </listitem>
      <listitem>
        <para>
          If you are using Wayland you can choose to use the Ozone
@ -1094,6 +1280,14 @@
          compatibility, but will be removed at a later date.
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>unifi</literal> package was switched from
          <literal>unifi6</literal> to <literal>unifi7</literal>. Direct
          downgrades from Unifi 7 to Unifi 6 are not possible and
          require restoring from a backup made by Unifi 6.
        </para>
      </listitem>
      <listitem>
        <para>
          <literal>programs.zsh.autosuggestions.strategy</literal> now
@ -1171,10 +1365,10 @@
      </listitem>
      <listitem>
        <para>
          A new option
          <literal>boot.initrd.extraModprobeConfig</literal> has been
          added which can be used to configure kernel modules that are
          loaded in the initrd.
          The options <literal>boot.extraModprobeConfig</literal> and
          <literal>boot.blacklistedKernelModules</literal> now also take
          effect in the initrd by copying the file
          <literal>/etc/modprobe.d/nixos.conf</literal> into the initrd.
        </para>
      </listitem>
      <listitem>
@ -1184,6 +1378,52 @@
          instead of <literal>configuration.nix</literal>.
        </para>
      </listitem>
      <listitem>
        <para>
          ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which
          introduces some breaking changes:
        </para>
        <itemizedlist spacing="compact">
          <listitem>
            <para>
              If you are relying on the SQLite images, update your
              Docker Pull commands as follows:
            </para>
            <itemizedlist spacing="compact">
              <listitem>
                <para>
                  <literal>docker pull oryd/kratos:{version}</literal>
                </para>
              </listitem>
            </itemizedlist>
          </listitem>
          <listitem>
            <para>
              Additionally, all passwords now have to be at least 8
              characters long.
            </para>
          </listitem>
          <listitem>
            <para>
              For more details, see:
            </para>
            <itemizedlist spacing="compact">
              <listitem>
                <para>
                  <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1">Release
                  Notes for v0.8.1-alpha-1</link>
                </para>
              </listitem>
              <listitem>
                <para>
                  <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1">Release
                  Notes for v0.8.2-alpha-1</link>
                </para>
              </listitem>
            </itemizedlist>
          </listitem>
        </itemizedlist>
      </listitem>
      <listitem>
        <para>
          <literal>fetchFromSourcehut</literal> now allows fetching
@ -1212,6 +1452,15 @@
          <literal>pkgs.theLoungePlugins.themes</literal>.
        </para>
      </listitem>
      <listitem>
        <para>
          The option
          <literal>services.xserver.videoDriver = [ "nvidia" ];</literal>
          will now also install
          <link xlink:href="https://github.com/elFarto/nvidia-vaapi-driver">nvidia
          VA-API drivers</link> by default.
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>firmwareLinuxNonfree</literal> package has been
@ -1307,6 +1556,16 @@
          warning.
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>pomerium-cli</literal> command has been moved out
          of the <literal>pomerium</literal> package into the
          <literal>pomerium-cli</literal> package, following upstream’s
          repository split. If you are using the
          <literal>pomerium-cli</literal> command, you should now
          install the <literal>pomerium-cli</literal> package.
        </para>
      </listitem>
      <listitem>
        <para>
          The option
@ -1335,6 +1594,46 @@
          desktop environments as needed.
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>hadoop</literal> package has added support for
          <literal>aarch64-linux</literal> and
          <literal>aarch64-darwin</literal> as of 3.3.1
          (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/158613">#158613</link>).
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>R</literal> package now builds again on
          <literal>aarch64-darwin</literal>
          (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/158992">#158992</link>).
        </para>
      </listitem>
      <listitem>
        <para>
          The <literal>spark3</literal> package has been updated from
          3.1.2 to 3.2.1
          (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/160075">#160075</link>):
        </para>
        <itemizedlist spacing="compact">
          <listitem>
            <para>
              Testing has been enabled for
              <literal>aarch64-linux</literal> in addition to
              <literal>x86_64-linux</literal>.
            </para>
          </listitem>
          <listitem>
            <para>
              The <literal>spark3</literal> package is now usable on
              <literal>aarch64-darwin</literal> as a result of
              <link xlink:href="https://github.com/NixOS/nixpkgs/pull/158613">#158613</link>
              and
              <link xlink:href="https://github.com/NixOS/nixpkgs/pull/158992">#158992</link>.
            </para>
          </listitem>
        </itemizedlist>
      </listitem>
    </itemizedlist>
  </section>
</section>
@ -13,6 +13,13 @@ In addition to numerous new and upgraded packages, this release has the followin
  [Fedora](https://fedoraproject.org/wiki/Changes/iptables-nft-default).
  This means, `ip[6]tables`, `arptables` and `ebtables` commands will actually
  show rules from some specific tables in the `nf_tables` kernel subsystem.
  In case you're migrating from an older release without rebooting, there might
  be cases where you end up with iptables rules configured both in the legacy
  `iptables` kernel backend, as well as in the `nf_tables` backend.
  This can lead to confusing firewall behaviour. An `iptables-save` after
  switching will complain about "iptables-legacy tables present".
  It's probably best to reboot after the upgrade, or manually remove all
  legacy iptables rules (via the `iptables-legacy` package).

- systemd got an `nftables` backend, and configures (networkd) rules in their
  own `io.systemd.*` tables. Check `nft list ruleset` to see these rules, not
@ -21,6 +21,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- [`kops`](https://kops.sigs.k8s.io) defaults to 1.22.4, which will enable [Instance Metadata Service Version 2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html) and require tokens on new clusters with Kubernetes 1.22. This will increase security by default, but may break some types of workloads. See the [release notes](https://kops.sigs.k8s.io/releases/1.22-notes/) for details.

- Module authors can use `mkRenamedOptionModuleWith` to automate the deprecation cycle without annoying out-of-tree module authors and their users.
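A minimal sketch of the new helper, assuming a `{ sinceRelease, from, to }` argument set and hypothetical option paths:

```nix
{ lib, ... }:
{
  imports = [
    (lib.mkRenamedOptionModuleWith {
      sinceRelease = 2205;  # start warning with the 22.05 release
      from = [ "services" "foo" "oldOption" ];
      to = [ "services" "foo" "newOption" ];
    })
  ];
}
```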

## New Services {#sec-release-22.05-new-services}

- [aesmd](https://github.com/intel/linux-sgx#install-the-intelr-sgx-psw), the Intel SGX Architectural Enclave Service Manager. Available as [services.aesmd](#opt-services.aesmd.enable).
@ -55,6 +57,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- [mtr-exporter](https://github.com/mgumz/mtr-exporter), a Prometheus exporter for mtr metrics. Available as [services.mtr-exporter](options.html#opt-services.mtr-exporter.enable).

- [prometheus-pve-exporter](https://github.com/prometheus-pve/prometheus-pve-exporter), a tool that exposes information from the Proxmox VE API for use by Prometheus. Available as [services.prometheus.exporters.pve](options.html#opt-services.prometheus.exporters.pve).

- [tetrd](https://tetrd.app), share your internet connection from your device to your PC and vice versa through a USB cable. Available at [services.tetrd](#opt-services.tetrd.enable).

- [agate](https://github.com/mbrubeck/agate), a very simple server for the Gemini hypertext protocol. Available as [services.agate](options.html#opt-services.agate.enable).
@ -70,9 +74,13 @@ In addition to numerous new and upgraded packages, this release has the followin

- [prosody-filer](https://github.com/ThomasLeister/prosody-filer), a server for handling XMPP HTTP Upload requests. Available at [services.prosody-filer](#opt-services.prosody-filer.enable).

- [systembus-notify](https://github.com/rfjakob/systembus-notify), allowing system level notifications to reach the users. Available as [services.systembus-notify](#opt-services.systembus-notify.enable). Please keep in mind that this service should only be enabled on machines with fully trusted users, as any local user is able to DoS user sessions by spamming notifications.

- [ethercalc](https://github.com/audreyt/ethercalc), an online collaborative
  spreadsheet. Available as [services.ethercalc](options.html#opt-services.ethercalc.enable).

- [nbd](https://nbd.sourceforge.io/), a Network Block Device server. Available as [services.nbd](options.html#opt-services.nbd.server.enable).

- [timetagger](https://timetagger.app), an open source time-tracker with an intuitive user experience and powerful reporting. [services.timetagger](options.html#opt-services.timetagger.enable).

- [rstudio-server](https://www.rstudio.com/products/rstudio/#rstudio-server), a browser-based version of the RStudio IDE for the R programming language. Available as [services.rstudio-server](options.html#opt-services.rstudio-server.enable).
@ -81,6 +89,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- [blocky](https://0xerr0r.github.io/blocky/), fast and lightweight DNS proxy as ad-blocker for local network with many features.

- [pacemaker](https://clusterlabs.org/pacemaker/) cluster resource manager

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->

## Backward Incompatibilities {#sec-release-22.05-incompatibilities}
@ -125,6 +135,19 @@ In addition to numerous new and upgraded packages, this release has the followin
  This change may require a reboot to take effect, and k3s may not be able to run if the boot cgroup hierarchy does not match its configuration.
  The previous behavior may be retained by explicitly setting `systemd.enableUnifiedCgroupHierarchy = false` in your configuration.

- `fonts.fonts` no longer includes ancient bitmap fonts when both `config.services.xserver.enable` and `config.nixpkgs.config.allowUnfree` are enabled.
  If you still want these fonts, use:

  ```nix
  {
    fonts.fonts = [
      pkgs.xorg.fontbhlucidatypewriter100dpi
      pkgs.xorg.fontbhlucidatypewriter75dpi
      pkgs.xorg.fontbh100dpi
    ];
  }
  ```

- The DHCP server (`services.dhcpd4`, `services.dhcpd6`) has been hardened.
  The service now uses systemd's `DynamicUser` mechanism to run as an unprivileged dynamically-allocated user with limited capabilities.
  The dhcpd state files are now always stored in `/var/lib/dhcpd{4,6}` and the `services.dhcpd4.stateDir` and `service.dhcpd6.stateDir` options have been removed.
@ -137,6 +160,9 @@ In addition to numerous new and upgraded packages, this release has the followin
  module (`services.matrix-synapse`) now need to be moved into `services.matrix-synapse.settings`. And while not all options you
  may use are defined in there, they are still supported, because you can set arbitrary values in this freeform type.

  The `listeners.*.bind_address` option was renamed to `bind_addresses` in order to match the upstream `homeserver.yaml` option
  name. It is now also a list of strings instead of a string.

  An example to make the required migration clearer:

  Before:
@ -194,7 +220,7 @@ In addition to numerous new and upgraded packages, this release has the followin

      listeners = [ {
        port = 8448;
        bind_address = [
        bind_addresses = [
          "::"
          "0.0.0.0"
        ];
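The same rename in isolation, as a hedged sketch (port and addresses are illustrative only):

```nix
{
  services.matrix-synapse.settings.listeners = [{
    port = 8448;
    # previously: bind_address, a single string
    bind_addresses = [ "::" "0.0.0.0" ];  # now a list of strings
  }];
}
```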
@ -219,10 +245,20 @@ In addition to numerous new and upgraded packages, this release has the followin

  The secrets in your original config should be migrated into a YAML file that is included via `extraConfigFiles`.

  Additionally, a few option defaults have been synced up with upstream default values, for example the `max_upload_size` grew from `10M` to `50M`.
  Additionally, a few option defaults have been synced up with upstream default values, for example the `max_upload_size` grew from `10M` to `50M`. For the same reason, the default
  `media_store_path` was changed from `${dataDir}/media` to `${dataDir}/media_store` if `system.stateVersion` is at least `22.05`. Files will need to be manually moved to the new
  location if the `stateVersion` is updated.

- The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.

- Services in the `hadoop` module previously set `openFirewall` to true by default.
  This has now been changed to false. Node definitions for multi-node clusters would need
  `openFirewall = true;` to be added to hadoop services when upgrading from NixOS 21.11, as sketched below.

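For multi-node setups this amounts to re-enabling the old default per service; a hedged sketch with a hypothetical single service:

```nix
{
  # Restore the pre-22.05 behaviour for a namenode reachable by other nodes.
  services.hadoop.hdfs.namenode.openFirewall = true;
}
```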
- `services.hadoop.yarn.nodemanager` now uses cgroup-based CPU limit enforcement by default.
  Additionally, the option `useCGroups` was added to nodemanagers as an easy way to switch
  back to the old behavior.

- The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.

- `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
@ -275,6 +311,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- `pkgs.docbookrx` was removed since it's unmaintained

- `pkgs._7zz` is now correctly licensed as LGPL3+ and BSD3 with optional unfree unRAR licensed code

- `tilp2` was removed together with its module

- The F-PROT antivirus (`fprot` package) and its service module were removed because it
@ -288,6 +326,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- `systemd-nspawn@.service` settings have been reverted to the default systemd behaviour. User namespaces are now activated by default. If you want to keep running nspawn containers without user namespaces you need to set `systemd.nspawn.<name>.execConfig.PrivateUsers = false`

- The Tor SOCKS proxy is now actually disabled if `services.tor.client.enable` is set to `false` (the default). If you are using this functionality but didn't change the setting or set it to `false`, you now need to set it to `true`.

- The terraform 0.12 compatibility has been removed and the `terraform.withPlugins` and `terraform-providers.mkProvider` implementations simplified. Providers now need to be stored under
  `$out/libexec/terraform-providers/<registry>/<owner>/<name>/<version>/<os>_<arch>/terraform-provider-<name>_v<version>` (which mkProvider does).
@ -317,6 +357,30 @@ In addition to numerous new and upgraded packages, this release has the followin
  `pkgs.noto-fonts-cjk` is currently an alias of `pkgs.noto-fonts-cjk-sans` and
  doesn't include serif fonts.

- `pkgs.epgstation` has been upgraded from v1 to v2, resulting in incompatible
  changes in the database scheme and configuration format.

- Some top-level settings under [services.epgstation](#opt-services.epgstation.enable)
  are now deprecated because they were redundant due to the same options being
  present in [services.epgstation.settings](#opt-services.epgstation.settings).

- The option `services.epgstation.basicAuth` was removed because basic
  authentication support was dropped by upstream.

- The option [services.epgstation.database.passwordFile](#opt-services.epgstation.database.passwordFile)
  no longer has a default value. Make sure to set this option explicitly before
  upgrading. Change the database password if necessary.

- The [services.epgstation.settings](#opt-services.epgstation.settings)
  option now expects options for `config.yml` in EPGStation v2.

- Existing data for the [services.epgstation](#opt-services.epgstation.enable)
  module must be backed up prior to the upgrade. To back up existing
  data to `/tmp/epgstation.bak`, run
  `sudo -u epgstation epgstation run backup /tmp/epgstation.bak`.
  To import that data after the upgrade, run
  `sudo -u epgstation epgstation run v1migrate /tmp/epgstation.bak`

- `switch-to-configuration` (the script that is run when running `nixos-rebuild switch` for example) has been reworked
  * The interface that allows activation scripts to restart units has been streamlined. Restarting and reloading is now done by a single file `/run/nixos/activation-restart-list` that honors `restartIfChanged` and `reloadIfChanged` of the units.
  * Preferring to reload instead of restarting can still be achieved using `/run/nixos/activation-reload-list`.
@ -386,11 +450,16 @@ In addition to numerous new and upgraded packages, this release has the followin
  Similarly [virtualisation.vmVariantWithBootloader](#opt-virtualisation.vmVariantWithBootLoader) was added.

- The configuration portion of the `nix-daemon` module has been reworked and exposed as [nix.settings](options.html#opt-nix-settings):
  * Legacy options have been mapped to the corresponding options under [nix.settings](options.html#opt-nix.settings) but may be deprecated in the future.
  * Legacy options have been mapped to the corresponding options under [nix.settings](options.html#opt-nix.settings) and will be deprecated when NixOS 21.11 reaches end of life.
  * [nix.buildMachines.publicHostKey](options.html#opt-nix.buildMachines.publicHostKey) has been added.

- The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.

- Some improvements have been made to the `hadoop` module:
  - A `gatewayRole` option has been added, for deploying hadoop cluster configuration files to a node that does not have any active services
  - Support for older versions of hadoop has been added to the module
  - Overriding and extending site XML files has been made easier

- If you are using Wayland you can choose to use the Ozone Wayland support
  in Chrome and several Electron apps by setting the environment variable
  `NIXOS_OZONE_WL=1` (for example via
@ -404,6 +473,9 @@ In addition to numerous new and upgraded packages, this release has the followin
  combined `influxdb2` package is still provided in this release for
  backwards compatibility, but will be removed at a later date.

- The `unifi` package was switched from `unifi6` to `unifi7`.
  Direct downgrades from Unifi 7 to Unifi 6 are not possible and require restoring from a backup made by Unifi 6.

- `programs.zsh.autosuggestions.strategy` now takes a list of strings instead of a string.

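A sketch of the new list form (the values shown are the common upstream strategies):

```nix
{
  programs.zsh.autosuggestions.strategy = [ "history" "completion" ];
}
```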
- The `services.unifi.openPorts` option default value of `true` is now deprecated and will be changed to `false` in 22.11.
@ -433,10 +505,18 @@ In addition to numerous new and upgraded packages, this release has the followin

- The option `services.duplicati.dataDir` has been added to allow changing the location of duplicati's files.

- A new option `boot.initrd.extraModprobeConfig` has been added which can be used to configure kernel modules that are loaded in the initrd.
- The options `boot.extraModprobeConfig` and `boot.blacklistedKernelModules` now also take effect in the initrd by copying the file `/etc/modprobe.d/nixos.conf` into the initrd.

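A sketch of the two options now propagated into the initrd (module names are illustrative):

```nix
{
  boot.extraModprobeConfig = ''
    options kvm_intel nested=1
  '';
  boot.blacklistedKernelModules = [ "pcspkr" ];
}
```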
- `nixos-generate-config` now puts the dhcp configuration in `hardware-configuration.nix` instead of `configuration.nix`.

- ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which introduces some breaking changes:
  - If you are relying on the SQLite images, update your Docker Pull commands as follows:
    - `docker pull oryd/kratos:{version}`
  - Additionally, all passwords now have to be at least 8 characters long.
  - For more details, see:
    - [Release Notes for v0.8.1-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1)
    - [Release Notes for v0.8.2-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1)

- `fetchFromSourcehut` now allows fetching repositories recursively
  using `fetchgit` or `fetchhg` if the argument `fetchSubmodules`
  is set to `true`.
@ -447,6 +527,8 @@ In addition to numerous new and upgraded packages, this release has the followin

- The option `services.thelounge.plugins` has been added to allow installing plugins for The Lounge. Plugins can be found in `pkgs.theLoungePlugins.plugins` and `pkgs.theLoungePlugins.themes`.

- The option `services.xserver.videoDriver = [ "nvidia" ];` will now also install [nvidia VA-API drivers](https://github.com/elFarto/nvidia-vaapi-driver) by default.

- The `firmwareLinuxNonfree` package has been renamed to `linux-firmware`.

- It is now possible to specify wordlists to include as handy-to-access environment variables using the `config.environment.wordlist` configuration options.
@ -475,6 +557,11 @@ In addition to numerous new and upgraded packages, this release has the followin
  The reason is that the old name has been deprecated upstream.
  Using the old option name will still work, but produce a warning.

- The `pomerium-cli` command has been moved out of the `pomerium` package into
  the `pomerium-cli` package, following upstream's repository split. If you are
  using the `pomerium-cli` command, you should now install the `pomerium-cli`
  package.

- The option
  [services.networking.networkmanager.enableFccUnlock](#opt-networking.networkmanager.enableFccUnlock)
  was added to support FCC unlock procedures. Since release 1.18.4, the ModemManager
@ -485,4 +572,13 @@ In addition to numerous new and upgraded packages, this release has the followin

- The polkit service, available at `security.polkit.enable`, is now disabled by default. It will automatically be enabled through services and desktop environments as needed.

- The `hadoop` package has added support for `aarch64-linux` and `aarch64-darwin` as of 3.3.1 ([#158613](https://github.com/NixOS/nixpkgs/pull/158613)).

- The `R` package now builds again on `aarch64-darwin` ([#158992](https://github.com/NixOS/nixpkgs/pull/158992)).

- The `spark3` package has been updated from 3.1.2 to 3.2.1 ([#160075](https://github.com/NixOS/nixpkgs/pull/160075)):

  - Testing has been enabled for `aarch64-linux` in addition to `x86_64-linux`.
  - The `spark3` package is now usable on `aarch64-darwin` as a result of [#158613](https://github.com/NixOS/nixpkgs/pull/158613) and [#158992](https://github.com/NixOS/nixpkgs/pull/158992).

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -66,14 +66,21 @@ for (k, v) in overrides.items():
    elif ov is not None or cur.get(ok, None) is None:
        cur[ok] = ov

severity = "error" if warningsAreErrors else "warning"

# check that every option has a description
hasWarnings = False
for (k, v) in options.items():
    if v.value.get('description', None) is None:
        severity = "error" if warningsAreErrors else "warning"
        hasWarnings = True
        print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr)
        v.value['description'] = "This option has no description."
    if v.value.get('type', "unspecified") == "unspecified":
        hasWarnings = True
        print(
            f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " +
            "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr)

if hasWarnings and warningsAreErrors:
    print(
        "\x1b[1;31m" +
@ -5,6 +5,7 @@ with lib;
let
  cfg = config.systemd;
  lndir = "${pkgs.buildPackages.xorg.lndir}/bin/lndir";
  systemd = cfg.package;
in rec {

  shellEscape = s: (replaceChars [ "\\" ] [ "\\\\" ] s);
@ -235,4 +236,205 @@ in rec {
    ''}
  ''; # */

  makeJobScript = name: text:
    let
      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
      out = (pkgs.writeShellScriptBin scriptName ''
        set -e
        ${text}
      '').overrideAttrs (_: {
        # The derivation name is different from the script file name
        # to keep the script file name short to avoid cluttering logs.
        name = "unit-script-${scriptName}";
      });
    in "${out}/bin/${scriptName}";

  unitConfig = { config, options, ... }: {
    config = {
      unitConfig =
        optionalAttrs (config.requires != [])
          { Requires = toString config.requires; }
        // optionalAttrs (config.wants != [])
          { Wants = toString config.wants; }
        // optionalAttrs (config.after != [])
          { After = toString config.after; }
        // optionalAttrs (config.before != [])
          { Before = toString config.before; }
        // optionalAttrs (config.bindsTo != [])
          { BindsTo = toString config.bindsTo; }
        // optionalAttrs (config.partOf != [])
          { PartOf = toString config.partOf; }
        // optionalAttrs (config.conflicts != [])
          { Conflicts = toString config.conflicts; }
        // optionalAttrs (config.requisite != [])
          { Requisite = toString config.requisite; }
        // optionalAttrs (config.restartTriggers != [])
          { X-Restart-Triggers = toString config.restartTriggers; }
        // optionalAttrs (config.reloadTriggers != [])
          { X-Reload-Triggers = toString config.reloadTriggers; }
        // optionalAttrs (config.description != "") {
          Description = config.description; }
        // optionalAttrs (config.documentation != []) {
          Documentation = toString config.documentation; }
        // optionalAttrs (config.onFailure != []) {
          OnFailure = toString config.onFailure; }
        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
          StartLimitIntervalSec = toString config.startLimitIntervalSec;
        } // optionalAttrs (options.startLimitBurst.isDefined) {
          StartLimitBurst = toString config.startLimitBurst;
        };
    };
  };

  serviceConfig = { name, config, ... }: {
    config = mkMerge
      [ { # Default path for systemd services. Should be quite minimal.
          path = mkAfter
            [ pkgs.coreutils
              pkgs.findutils
              pkgs.gnugrep
              pkgs.gnused
              systemd
            ];
          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
        }
        (mkIf (config.preStart != "")
          { serviceConfig.ExecStartPre =
              [ (makeJobScript "${name}-pre-start" config.preStart) ];
          })
        (mkIf (config.script != "")
          { serviceConfig.ExecStart =
              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
          })
        (mkIf (config.postStart != "")
          { serviceConfig.ExecStartPost =
              [ (makeJobScript "${name}-post-start" config.postStart) ];
          })
        (mkIf (config.reload != "")
          { serviceConfig.ExecReload =
              makeJobScript "${name}-reload" config.reload;
          })
        (mkIf (config.preStop != "")
          { serviceConfig.ExecStop =
              makeJobScript "${name}-pre-stop" config.preStop;
          })
        (mkIf (config.postStop != "")
          { serviceConfig.ExecStopPost =
              makeJobScript "${name}-post-stop" config.postStop;
          })
      ];
  };

  mountConfig = { config, ... }: {
    config = {
      mountConfig =
        { What = config.what;
          Where = config.where;
        } // optionalAttrs (config.type != "") {
          Type = config.type;
        } // optionalAttrs (config.options != "") {
          Options = config.options;
        };
    };
  };

  automountConfig = { config, ... }: {
    config = {
      automountConfig =
        { Where = config.where;
        };
    };
  };

  commonUnitText = def: ''
    [Unit]
    ${attrsToSection def.unitConfig}
  '';

  targetToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text =
        ''
          [Unit]
          ${attrsToSection def.unitConfig}
        '';
    };

  serviceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Service]
          ${let env = cfg.globalEnvironment // def.environment;
            in concatMapStrings (n:
              let s = optionalString (env.${n} != null)
                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
              # systemd max line length is now 1MiB
              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
          ${if def.reloadIfChanged then ''
            X-ReloadIfChanged=true
          '' else if !def.restartIfChanged then ''
            X-RestartIfChanged=false
          '' else ""}
          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
          ${attrsToSection def.serviceConfig}
        '';
    };

  socketToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Socket]
          ${attrsToSection def.socketConfig}
          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
        '';
    };

  timerToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Timer]
          ${attrsToSection def.timerConfig}
        '';
    };

  pathToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Path]
          ${attrsToSection def.pathConfig}
        '';
    };

  mountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Mount]
          ${attrsToSection def.mountConfig}
        '';
    };

  automountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Automount]
          ${attrsToSection def.automountConfig}
        '';
    };

  sliceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Slice]
          ${attrsToSection def.sliceConfig}
        '';
    };
}
@ -45,6 +45,26 @@ rec {
    replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"]
    (removePrefix "/" s);

  # Quotes an argument for use in Exec* service lines.
  # systemd accepts "-quoted strings with escape sequences, toJSON produces
  # a subset of these.
  # Additionally we escape % to disallow expansion of % specifiers. Any lone ;
  # in the input will be turned into ";" and thus lose its special meaning.
  # Every $ is escaped to $$, this makes it unnecessary to disable environment
  # substitution for the directive.
  escapeSystemdExecArg = arg:
    let
      s = if builtins.isPath arg then "${arg}"
        else if builtins.isString arg then arg
        else if builtins.isInt arg || builtins.isFloat arg then toString arg
        else throw "escapeSystemdExecArg only allows strings, paths and numbers";
    in
      replaceChars [ "%" "$" ] [ "%%" "$$" ] (builtins.toJSON s);

  # Quotes a list of arguments into a single string for use in an Exec*
  # line.
  escapeSystemdExecArgs = concatMapStringsSep " " escapeSystemdExecArg;

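A usage sketch for the helper above, interpolating a list of arguments into an `ExecStart` line; `my-daemon` is hypothetical, and `pkgs`/`utils` are assumed in scope as module arguments:

```nix
{
  systemd.services.my-daemon.serviceConfig.ExecStart =
    "${pkgs.my-daemon}/bin/my-daemon "
    # Each element is quoted; %, $ and lone ; survive literally.
    + utils.escapeSystemdExecArgs [ "--greeting" "100% $afe; really" ];
}
```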
  # Returns a system path for a given shell package
  toShellPath = shell:
    if types.shellPackage.check shell then
@ -39,11 +39,6 @@ let
  defaultXFonts =
    [ (if hasHidpi then fontcursormisc_hidpi else pkgs.xorg.fontcursormisc)
      pkgs.xorg.fontmiscmisc
    ] ++ optionals (config.nixpkgs.config.allowUnfree or false)
    [ # these are unfree, and will make usage with xserver fail
      pkgs.xorg.fontbhlucidatypewriter100dpi
      pkgs.xorg.fontbhlucidatypewriter75dpi
      pkgs.xorg.fontbh100dpi
    ];

in
@ -289,8 +289,14 @@ in
    environment.etc."egl/egl_external_platform.d".source =
      "/run/opengl-driver/share/egl/egl_external_platform.d/";

    hardware.opengl.extraPackages = [ nvidia_x11.out ];
    hardware.opengl.extraPackages32 = [ nvidia_x11.lib32 ];
    hardware.opengl.extraPackages = [
      nvidia_x11.out
      pkgs.nvidia-vaapi-driver
    ];
    hardware.opengl.extraPackages32 = [
      nvidia_x11.lib32
      pkgs.pkgsi686Linux.nvidia-vaapi-driver
    ];

    environment.systemPackages = [ nvidia_x11.bin ]
      ++ optionals cfg.nvidiaSettings [ nvidia_x11.settings ]
@@ -63,32 +63,32 @@ mount --rbind /sys "$mountPoint/sys"

# modified from https://github.com/archlinux/arch-install-scripts/blob/bb04ab435a5a89cd5e5ee821783477bc80db797f/arch-chroot.in#L26-L52
chroot_add_resolv_conf() {
    local chrootdir=$1 resolv_conf=$1/etc/resolv.conf
    local chrootDir="$1" resolvConf="$1/etc/resolv.conf"

    [[ -e /etc/resolv.conf ]] || return 0

    # Handle resolv.conf as a symlink to somewhere else.
    if [[ -L $chrootdir/etc/resolv.conf ]]; then
    if [[ -L "$resolvConf" ]]; then
      # readlink(1) should always give us *something* since we know at this point
      # it's a symlink. For simplicity, ignore the case of nested symlinks.
      # We also ignore the possibility if `../`s escaping the root.
      resolv_conf=$(readlink "$chrootdir/etc/resolv.conf")
      if [[ $resolv_conf = /* ]]; then
        resolv_conf=$chrootdir$resolv_conf
      # We also ignore the possibility of `../`s escaping the root.
      resolvConf="$(readlink "$resolvConf")"
      if [[ "$resolvConf" = /* ]]; then
        resolvConf="$chrootDir$resolvConf"
      else
        resolv_conf=$chrootdir/etc/$resolv_conf
        resolvConf="$chrootDir/etc/$resolvConf"
      fi
    fi

    # ensure file exists to bind mount over
    if [[ ! -f $resolv_conf ]]; then
      install -Dm644 /dev/null "$resolv_conf" || return 1
    if [[ ! -f "$resolvConf" ]]; then
      install -Dm644 /dev/null "$resolvConf" || return 1
    fi

    mount --bind /etc/resolv.conf "$resolv_conf"
    mount --bind /etc/resolv.conf "$resolvConf"
}

chroot_add_resolv_conf "$mountPoint" || print "ERROR: failed to set up resolv.conf"
chroot_add_resolv_conf "$mountPoint" || echo "$0: failed to set up resolv.conf" >&2

(
    # If silent, write both stdout and stderr of activation script to /dev/null
@@ -183,7 +183,11 @@ in

    pruneNames = mkOption {
      type = listOf str;
      default = [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
      default = lib.optionals (!isFindutils) [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
      defaultText = literalDocBook ''
        <literal>[ ".bzr" ".cache" ".git" ".hg" ".svn" ]</literal>, if
        supported by the locate implementation (i.e. mlocate or plocate).
      '';
      description = ''
        Directory components which should exclude paths containing them from indexing
      '';
@@ -8,8 +8,12 @@ let
    concatStringsSep mapAttrsToList toLower
    literalExpression mkRenamedOptionModule mkDefault mkOption trivial types;

  needsEscaping = s: null != builtins.match "[a-zA-Z0-9]+" s;
  escapeIfNeccessary = s: if needsEscaping s then s else ''"${lib.escape [ "\$" "\"" "\\" "\`" ] s}"'';
  attrsToText = attrs:
    concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}="${toString v}"'') attrs);
    concatStringsSep "\n" (
      mapAttrsToList (n: v: ''${n}=${escapeIfNeccessary (toString v)}'') attrs
    );

in
{
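To see what the new escaping changes, a hypothetical input (not taken from the diff): a purely alphanumeric value passes through bare, while anything else is double-quoted with `$`, `"`, `\` and backticks escaped, which matters for os-release fields containing spaces or parentheses:

# attrsToText { ID = "nixos"; PRETTY_NAME = "NixOS 22.05 (Quokka)"; }
# would now render as:
#   ID=nixos
#   PRETTY_NAME="NixOS 22.05 (Quokka)"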
@@ -180,6 +180,7 @@
    ./programs/msmtp.nix
    ./programs/mtr.nix
    ./programs/nano.nix
    ./programs/nbd.nix
    ./programs/neovim.nix
    ./programs/nm-applet.nix
    ./programs/npm.nix
@@ -301,6 +302,7 @@
    ./services/backup/znapzend.nix
    ./services/blockchain/ethereum/geth.nix
    ./services/backup/zrepl.nix
    ./services/cluster/corosync/default.nix
    ./services/cluster/hadoop/default.nix
    ./services/cluster/k3s/default.nix
    ./services/cluster/kubernetes/addons/dns.nix
@@ -313,6 +315,7 @@
    ./services/cluster/kubernetes/pki.nix
    ./services/cluster/kubernetes/proxy.nix
    ./services/cluster/kubernetes/scheduler.nix
    ./services/cluster/pacemaker/default.nix
    ./services/cluster/spark/default.nix
    ./services/computing/boinc/client.nix
    ./services/computing/foldingathome/client.nix
@@ -819,6 +822,7 @@
    ./services/networking/nar-serve.nix
    ./services/networking/nat.nix
    ./services/networking/nats.nix
    ./services/networking/nbd.nix
    ./services/networking/ndppd.nix
    ./services/networking/nebula.nix
    ./services/networking/networkmanager.nix
@@ -985,6 +989,7 @@
    ./services/system/nscd.nix
    ./services/system/saslauthd.nix
    ./services/system/self-deploy.nix
    ./services/system/systembus-notify.nix
    ./services/system/uptimed.nix
    ./services/torrent/deluge.nix
    ./services/torrent/flexget.nix
@@ -1163,7 +1168,12 @@
    ./system/boot/stage-1.nix
    ./system/boot/stage-2.nix
    ./system/boot/systemd.nix
    ./system/boot/systemd-nspawn.nix
    ./system/boot/systemd/coredump.nix
    ./system/boot/systemd/journald.nix
    ./system/boot/systemd/logind.nix
    ./system/boot/systemd/nspawn.nix
    ./system/boot/systemd/tmpfiles.nix
    ./system/boot/systemd/user.nix
    ./system/boot/timesyncd.nix
    ./system/boot/tmp.nix
    ./system/etc/etc-activation.nix
@@ -1,8 +1,12 @@
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.programs.captive-browser;

  inherit (lib)
    concatStringsSep escapeShellArgs optionalString
    literalExpression mkEnableOption mkIf mkOption mkOptionDefault types;

  browserDefault = chromium: concatStringsSep " " [
    ''env XDG_CONFIG_HOME="$PREV_CONFIG_HOME"''
    ''${chromium}/bin/chromium''
@@ -15,6 +19,15 @@ let
    ''-no-default-browser-check''
    ''http://cache.nixos.org/''
  ];

  desktopItem = pkgs.makeDesktopItem {
    name = "captive-browser";
    desktopName = "Captive Portal Browser";
    exec = "/run/wrappers/bin/captive-browser";
    icon = "nix-snowflake";
    categories = [ "Network" ];
  };

in
{
  ###### interface

@@ -84,6 +97,11 @@ in
  ###### implementation

  config = mkIf cfg.enable {
    environment.systemPackages = [
      (pkgs.runCommandNoCC "captive-browser-desktop-item" { } ''
        install -Dm444 -t $out/share/applications ${desktopItem}/share/applications/*.desktop
      '')
    ];

    programs.captive-browser.dhcp-dns =
      let
@@ -40,13 +40,15 @@ in
      KDEDIRS = [ "" ];
      QT_PLUGIN_PATH = [ "/lib/qt4/plugins" "/lib/kde4/plugins" ];
      QTWEBKIT_PLUGIN_PATH = [ "/lib/mozilla/plugins/" ];
      GTK_PATH = [ "/lib/gtk-2.0" "/lib/gtk-3.0" ];
      GTK_PATH = [ "/lib/gtk-2.0" "/lib/gtk-3.0" "/lib/gtk-4.0" ];
      XDG_CONFIG_DIRS = [ "/etc/xdg" ];
      XDG_DATA_DIRS = [ "/share" ];
      MOZ_PLUGIN_PATH = [ "/lib/mozilla/plugins" ];
      LIBEXEC_PATH = [ "/lib/libexec" ];
    };

    environment.pathsToLink = [ "/lib/gtk-2.0" "/lib/gtk-3.0" "/lib/gtk-4.0" ];

    environment.extraInit =
      ''
        unset ASPELL_CONF
19 nixos/modules/programs/nbd.nix Normal file
@@ -0,0 +1,19 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.programs.nbd;
in
{
  options = {
    programs.nbd = {
      enable = mkEnableOption "Network Block Device (nbd) support";
    };
  };

  config = mkIf cfg.enable {
    environment.systemPackages = with pkgs; [ nbd ];
    boot.kernelModules = [ "nbd" ];
  };
}
@@ -1,50 +1,46 @@
{ config, lib, pkgs, ... }:

with lib;

let
  inherit (lib) mkEnableOption mkIf mkOption optionalString types;

  dataDir = "/var/lib/squeezelite";
  cfg = config.services.squeezelite;
  pkg = if cfg.pulseAudio then pkgs.squeezelite-pulse else pkgs.squeezelite;
  bin = "${pkg}/bin/${pkg.pname}";

in {
in
{

  ###### interface

  options = {
  options.services.squeezelite = {
    enable = mkEnableOption "Squeezelite, a software Squeezebox emulator";

    services.squeezelite= {

      enable = mkEnableOption "Squeezelite, a software Squeezebox emulator";

    extraArguments = mkOption {
      default = "";
      type = types.str;
      description = ''
        Additional command line arguments to pass to Squeezelite.
      '';
    };
    pulseAudio = mkEnableOption "pulseaudio support";

      extraArguments = mkOption {
        default = "";
        type = types.str;
        description = ''
          Additional command line arguments to pass to Squeezelite.
        '';
      };

    };
  };


  ###### implementation

  config = mkIf cfg.enable {

    systemd.services.squeezelite= {
    systemd.services.squeezelite = {
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" "sound.target" ];
      description = "Software Squeezebox emulator";
      serviceConfig = {
        DynamicUser = true;
        ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${dataDir}/player-name ${cfg.extraArguments}";
        ExecStart = "${bin} -N ${dataDir}/player-name ${cfg.extraArguments}";
        StateDirectory = builtins.baseNameOf dataDir;
        SupplementaryGroups = "audio";
      };
    };

  };

}
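A minimal sketch of how the reworked module would be used. The `pulseAudio` switch and `extraArguments` option come straight from the diff above; the `-n kitchen` argument is hypothetical (squeezelite's player-name flag), shown only to illustrate that extra arguments are appended after the managed `-N` flag:

{
  services.squeezelite = {
    enable = true;
    pulseAudio = true;             # selects pkgs.squeezelite-pulse via the pkg binding
    extraArguments = "-n kitchen"; # passed verbatim to the daemon
  };
}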
112 nixos/modules/services/cluster/corosync/default.nix Normal file
@@ -0,0 +1,112 @@
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.services.corosync;
in
{
  # interface
  options.services.corosync = {
    enable = mkEnableOption "corosync";

    package = mkOption {
      type = types.package;
      default = pkgs.corosync;
      defaultText = literalExpression "pkgs.corosync";
      description = "Package that should be used for corosync.";
    };

    clusterName = mkOption {
      type = types.str;
      default = "nixcluster";
      description = "Name of the corosync cluster.";
    };

    extraOptions = mkOption {
      type = with types; listOf str;
      default = [];
      description = "Additional options with which to start corosync.";
    };

    nodelist = mkOption {
      description = "Corosync nodelist: all cluster members.";
      default = [];
      type = with types; listOf (submodule {
        options = {
          nodeid = mkOption {
            type = int;
            description = "Node ID number";
          };
          name = mkOption {
            type = str;
            description = "Node name";
          };
          ring_addrs = mkOption {
            type = listOf str;
            description = "List of addresses, one for each ring.";
          };
        };
      });
    };
  };

  # implementation
  config = mkIf cfg.enable {
    environment.systemPackages = [ cfg.package ];

    environment.etc."corosync/corosync.conf".text = ''
      totem {
        version: 2
        secauth: on
        cluster_name: ${cfg.clusterName}
        transport: knet
      }

      nodelist {
        ${concatMapStrings ({ nodeid, name, ring_addrs }: ''
          node {
            nodeid: ${toString nodeid}
            name: ${name}
            ${concatStrings (imap0 (i: addr: ''
              ring${toString i}_addr: ${addr}
            '') ring_addrs)}
          }
        '') cfg.nodelist}
      }

      quorum {
        # only corosync_votequorum is supported
        provider: corosync_votequorum
        wait_for_all: 0
        ${optionalString (builtins.length cfg.nodelist < 3) ''
          two_node: 1
        ''}
      }

      logging {
        to_syslog: yes
      }
    '';

    environment.etc."corosync/uidgid.d/root".text = ''
      # allow pacemaker connection by root
      uidgid {
        uid: 0
        gid: 0
      }
    '';

    systemd.packages = [ cfg.package ];
    systemd.services.corosync = {
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        StateDirectory = "corosync";
        StateDirectoryMode = "0700";
      };
    };

    environment.etc."sysconfig/corosync".text = lib.optionalString (cfg.extraOptions != []) ''
      COROSYNC_OPTIONS="${lib.escapeShellArgs cfg.extraOptions}"
    '';
  };
}
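A hypothetical two-node cluster built on the options above (node names and addresses are placeholders). With fewer than three entries in the nodelist, the generated quorum section sets two_node: 1 automatically, per the optionalString branch:

{
  services.corosync = {
    enable = true;
    clusterName = "demo";
    nodelist = [
      { nodeid = 1; name = "alpha"; ring_addrs = [ "192.168.1.1" ]; }
      { nodeid = 2; name = "beta";  ring_addrs = [ "192.168.1.2" ]; }
    ];
  };
}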
@@ -1,6 +1,6 @@
{ cfg, pkgs, lib }:
let
  propertyXml = name: value: ''
  propertyXml = name: value: lib.optionalString (value != null) ''
    <property>
      <name>${name}</name>
      <value>${builtins.toString value}</value>
@@ -29,16 +29,16 @@ let
  export HADOOP_LOG_DIR=/tmp/hadoop/$USER
'';
in
pkgs.runCommand "hadoop-conf" {} ''
pkgs.runCommand "hadoop-conf" {} (with cfg; ''
  mkdir -p $out/
  cp ${siteXml "core-site.xml" cfg.coreSite}/* $out/
  cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
  cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
  cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
  cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
  cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
  cp ${siteXml "core-site.xml" (coreSite // coreSiteInternal)}/* $out/
  cp ${siteXml "hdfs-site.xml" (hdfsSiteDefault // hdfsSite // hdfsSiteInternal)}/* $out/
  cp ${siteXml "mapred-site.xml" (mapredSiteDefault // mapredSite)}/* $out/
  cp ${siteXml "yarn-site.xml" (yarnSiteDefault // yarnSite // yarnSiteInternal)}/* $out/
  cp ${siteXml "httpfs-site.xml" httpfsSite}/* $out/
  cp ${cfgFile "container-executor.cfg" containerExecutorCfg}/* $out/
  cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
  cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
  cp ${cfg.log4jProperties} $out/log4j.properties
  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") cfg.extraConfDirs}
''
  cp ${log4jProperties} $out/log4j.properties
  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") extraConfDirs}
'')
@@ -21,24 +21,50 @@ with lib;
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
      '';
    };
    coreSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to core-site.xml based on module options
      '';
    };

    hdfsSite = mkOption {
    hdfsSiteDefault = mkOption {
      default = {
        "dfs.namenode.rpc-bind-host" = "0.0.0.0";
        "dfs.namenode.http-address" = "0.0.0.0:9870";
        "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
        "dfs.namenode.http-bind-host" = "0.0.0.0";
      };
      type = types.attrsOf types.anything;
      description = ''
        Default options for hdfs-site.xml
      '';
    };
    hdfsSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        {
          "dfs.nameservices" = "namenode1";
        }
      '';
      description = ''
        Hadoop hdfs-site.xml definition
        Additional options and overrides for hdfs-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
      '';
    };
    hdfsSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to hdfs-site.xml based on module options
      '';
    };

    mapredSite = mkOption {
    mapredSiteDefault = mkOption {
      default = {
        "mapreduce.framework.name" = "yarn";
        "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
@@ -54,18 +80,25 @@ with lib;
        }
      '';
      type = types.attrsOf types.anything;
      description = ''
        Default options for mapred-site.xml
      '';
    };
    mapredSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        options.services.hadoop.mapredSite.default // {
        {
          "mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
        }
      '';
      description = ''
        Hadoop mapred-site.xml definition
        Additional options and overrides for mapred-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
      '';
    };

    yarnSite = mkOption {
    yarnSiteDefault = mkOption {
      default = {
        "yarn.nodemanager.admin-env" = "PATH=$PATH";
        "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
@@ -77,19 +110,34 @@ with lib;
        "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
        "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
        "yarn.resourcemanager.bind-host" = "0.0.0.0";
        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler";
      };
      type = types.attrsOf types.anything;
      description = ''
        Default options for yarn-site.xml
      '';
    };
    yarnSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        options.services.hadoop.yarnSite.default // {
        {
          "yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
        }
      '';
      description = ''
        Hadoop yarn-site.xml definition
        Additional options and overrides for yarn-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
      '';
    };
    yarnSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to yarn-site.xml based on module options
      '';
    };

    httpfsSite = mkOption {
      default = { };
@@ -123,6 +171,7 @@ with lib;
        "yarn.nodemanager.linux-container-executor.group"="hadoop";
        "min.user.id"=1000;
        "feature.terminal.enabled"=1;
        "feature.mount-cgroup.enabled" = 1;
      };
      type = types.attrsOf types.anything;
      example = literalExpression ''
@@ -148,6 +197,8 @@ with lib;
      description = "Directories containing additional config files to be added to HADOOP_CONF_DIR";
    };

    gatewayRole.enable = mkEnableOption "gateway role for deploying hadoop configs";

    package = mkOption {
      type = types.package;
      default = pkgs.hadoop;
@@ -157,20 +208,16 @@ with lib;
  };


  config = mkMerge [
    (mkIf (builtins.hasAttr "yarn" config.users.users ||
           builtins.hasAttr "hdfs" config.users.users ||
           builtins.hasAttr "httpfs" config.users.users) {
      users.groups.hadoop = {
        gid = config.ids.gids.hadoop;
      };
      environment = {
        systemPackages = [ cfg.package ];
        etc."hadoop-conf".source = let
          hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
        in "${hadoopConf}";
      };
    })

  ];
  config = mkIf cfg.gatewayRole.enable {
    users.groups.hadoop = {
      gid = config.ids.gids.hadoop;
    };
    environment = {
      systemPackages = [ cfg.package ];
      etc."hadoop-conf".source = let
        hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
      in "${hadoopConf}";
      variables.HADOOP_CONF_DIR = "/etc/hadoop-conf/";
    };
  };
}
@@ -1,191 +1,191 @@
{ config, lib, pkgs, ...}:
{ config, lib, pkgs, ... }:
with lib;
let
  cfg = config.services.hadoop;

  # Config files for hadoop services
  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
  restartIfChanged = mkOption {
    type = types.bool;
    description = ''
      Automatically restart the service on config change.
      This can be set to false to defer restarts on clusters running critical applications.
      Please consider the security implications of inadvertently running an older version,
      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
    '';
    default = false;
  };

  # Generator for HDFS service options
  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
    enable = mkEnableOption serviceName;
    restartIfChanged = mkOption {
      type = types.bool;
      description = ''
        Automatically restart the service on config change.
        This can be set to false to defer restarts on clusters running critical applications.
        Please consider the security implications of inadvertently running an older version,
        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
      '';
      default = false;
    };
    extraFlags = mkOption{
      type = with types; listOf str;
      default = [];
      description = "Extra command line flags to pass to ${serviceName}";
      example = [
        "-Dcom.sun.management.jmxremote"
        "-Dcom.sun.management.jmxremote.port=8010"
      ];
    };
    extraEnv = mkOption{
      type = with types; attrsOf str;
      default = {};
      description = "Extra environment variables for ${serviceName}";
    };
  } // (optionalAttrs firewallOption {
    openFirewall = mkOption {
      type = types.bool;
      default = false;
      description = "Open firewall ports for ${serviceName}.";
    };
  }) // (optionalAttrs (extraOpts != null) extraOpts);

  # Generator for HDFS service configs
  hadoopServiceConfig =
    { name
    , serviceOptions ? cfg.hdfs."${toLower name}"
    , description ? "Hadoop HDFS ${name}"
    , User ? "hdfs"
    , allowedTCPPorts ? [ ]
    , preStart ? ""
    , environment ? { }
    , extraConfig ? { }
    }: (

      mkIf serviceOptions.enable ( mkMerge [{
        systemd.services."hdfs-${toLower name}" = {
          inherit description preStart;
          environment = environment // serviceOptions.extraEnv;
          wantedBy = [ "multi-user.target" ];
          inherit (serviceOptions) restartIfChanged;
          serviceConfig = {
            inherit User;
            SyslogIdentifier = "hdfs-${toLower name}";
            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
            Restart = "always";
          };
        };

        services.hadoop.gatewayRole.enable = true;

        networking.firewall.allowedTCPPorts = mkIf
          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
          allowedTCPPorts;
      } extraConfig])
    );

in
{
  options.services.hadoop.hdfs = {
    namenode = {
      enable = mkEnableOption "Whether to run the HDFS NameNode";

    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
      formatOnInit = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
          to initialize an HA cluster manually.
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for namenode
          Format HDFS namenode on first start. This is useful for quickly spinning up
          ephemeral HDFS clusters with a single namenode.
          For HA clusters, initialization involves multiple steps across multiple nodes.
          Follow this guide to initialize an HA cluster manually:
          <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
        '';
      };
    };
    datanode = {
      enable = mkEnableOption "Whether to run the HDFS DataNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for datanode
        '';

    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
      dataDirs = mkOption {
        default = null;
        description = "Tier and path definitions for datanode storage.";
        type = with types; nullOr (listOf (submodule {
          options = {
            type = mkOption {
              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
              description = ''
                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
              '';
            };
            path = mkOption {
              type = path;
              example = [ "/var/lib/hadoop/hdfs/dn" ];
              description = "Determines where on the local filesystem a data node should store its blocks.";
            };
          };
        }));
      };
    };
    journalnode = {
      enable = mkEnableOption "Whether to run the HDFS JournalNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for journalnode
        '';
      };

    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };

    zkfc = hadoopServiceOption {
      serviceName = "HDFS ZooKeeper failover controller";
      firewallOption = false;
    };
    zkfc = {
      enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
      inherit restartIfChanged;
    };
    httpfs = {
      enable = mkEnableOption "Whether to run the HDFS HTTPfs server";

    httpfs = hadoopServiceOption { serviceName = "HDFS JournalNode"; } // {
      tempPath = mkOption {
        type = types.path;
        default = "/tmp/hadoop/httpfs";
        description = ''
          HTTPFS_TEMP path used by HTTPFS
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for HTTPFS
        '';
        description = "HTTPFS_TEMP path used by HTTPFS";
      };
    };

  };

  config = mkMerge [
    (mkIf cfg.hdfs.namenode.enable {
      systemd.services.hdfs-namenode = {
        description = "Hadoop HDFS NameNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.namenode) restartIfChanged;

        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
        '');

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-namenode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
    (hadoopServiceConfig {
      name = "NameNode";
      allowedTCPPorts = [
        9870 # namenode.http-address
        8020 # namenode.rpc-address
        8022 # namenode. servicerpc-address
      ]);
        8022 # namenode.servicerpc-address
        8019 # dfs.ha.zkfc.port
      ];
      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
      );
    })
    (mkIf cfg.hdfs.datanode.enable {
      systemd.services.hdfs-datanode = {
        description = "Hadoop HDFS DataNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.datanode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-datanode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
    (hadoopServiceConfig {
      name = "DataNode";
      # port numbers for datanode changed between hadoop 2 and 3
      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
        9864 # datanode.http.address
        9866 # datanode.address
        9867 # datanode.ipc.address
      ]);
      ] else [
        50075 # datanode.http.address
        50010 # datanode.address
        50020 # datanode.ipc.address
      ];
      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = let d = cfg.hdfs.datanode.dataDirs; in
        if (d != null) then (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs) else d;
    })
    (mkIf cfg.hdfs.journalnode.enable {
      systemd.services.hdfs-journalnode = {
        description = "Hadoop HDFS JournalNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.journalnode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-journalnode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
    (hadoopServiceConfig {
      name = "JournalNode";
      allowedTCPPorts = [
        8480 # dfs.journalnode.http-address
        8485 # dfs.journalnode.rpc-address
      ]);
      ];
    })
    (mkIf cfg.hdfs.zkfc.enable {
      systemd.services.hdfs-zkfc = {
        description = "Hadoop HDFS ZooKeeper failover controller";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.zkfc) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-zkfc";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
          Restart = "always";
        };
      };
    (hadoopServiceConfig {
      name = "zkfc";
      description = "Hadoop HDFS ZooKeeper failover controller";
    })
    (mkIf cfg.hdfs.httpfs.enable {
      systemd.services.hdfs-httpfs = {
        description = "Hadoop httpfs";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.httpfs) restartIfChanged;

        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;

        preStart = ''
          mkdir -p $HTTPFS_TEMP
        '';

        serviceConfig = {
          User = "httpfs";
          SyslogIdentifier = "hdfs-httpfs";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
          Restart = "always";
        };
      };
      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
    (hadoopServiceConfig {
      name = "HTTPFS";
      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
      preStart = "mkdir -p $HTTPFS_TEMP";
      User = "httpfs";
      allowedTCPPorts = [
        14000 # httpfs.http.port
      ]);
      ];
    })
    (mkIf (
      cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
    ) {

    (mkIf cfg.gatewayRole.enable {
      users.users.hdfs = {
        description = "Hadoop HDFS user";
        group = "hadoop";
@@ -199,5 +199,6 @@ in
        isSystemUser = true;
      };
    })

  ];
}
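A sketch of the new dataDirs option (the paths are hypothetical). Per the hdfsSiteInternal wiring above, this would render dfs.datanode.data.dir in hdfs-site.xml as "[SSD]file:///var/lib/hadoop/hdfs/dn-ssd,[DISK]file:///mnt/bulk/hdfs/dn":

{
  services.hadoop.hdfs.datanode = {
    enable = true;
    dataDirs = [
      { type = "SSD";  path = "/var/lib/hadoop/hdfs/dn-ssd"; }  # hot tier
      { type = "DISK"; path = "/mnt/bulk/hdfs/dn"; }            # bulk tier
    ];
  };
}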
@@ -13,23 +13,77 @@ let
    '';
    default = false;
  };
  extraFlags = mkOption{
    type = with types; listOf str;
    default = [];
    description = "Extra command line flags to pass to the service";
    example = [
      "-Dcom.sun.management.jmxremote"
      "-Dcom.sun.management.jmxremote.port=8010"
    ];
  };
  extraEnv = mkOption{
    type = with types; attrsOf str;
    default = {};
    description = "Extra environment variables";
  };
in
{
  options.services.hadoop.yarn = {
    resourcemanager = {
      enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
      inherit restartIfChanged;
      enable = mkEnableOption "Hadoop YARN ResourceManager";
      inherit restartIfChanged extraFlags extraEnv;

      openFirewall = mkOption {
        type = types.bool;
        default = true;
        default = false;
        description = ''
          Open firewall ports for resourcemanager
        '';
      };
    };
    nodemanager = {
      enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
      inherit restartIfChanged;
      enable = mkEnableOption "Hadoop YARN NodeManager";
      inherit restartIfChanged extraFlags extraEnv;

      resource = {
        cpuVCores = mkOption {
          description = "Number of vcores that can be allocated for containers.";
          type = with types; nullOr ints.positive;
          default = null;
        };
        maximumAllocationVCores = mkOption {
          description = "The maximum virtual CPU cores any container can be allocated.";
          type = with types; nullOr ints.positive;
          default = null;
        };
        memoryMB = mkOption {
          description = "Amount of physical memory, in MB, that can be allocated for containers.";
          type = with types; nullOr ints.positive;
          default = null;
        };
        maximumAllocationMB = mkOption {
          description = "The maximum physical memory any container can be allocated.";
          type = with types; nullOr ints.positive;
          default = null;
        };
      };

      useCGroups = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Use cgroups to enforce resource limits on containers
        '';
      };

      localDir = mkOption {
        description = "List of directories to store localized files in.";
        type = with types; nullOr (listOf path);
        example = [ "/var/lib/hadoop/yarn/nm" ];
        default = null;
      };

      addBinBash = mkOption {
        type = types.bool;
        default = true;
@@ -39,7 +93,7 @@ in
      };
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        default = false;
        description = ''
          Open firewall ports for nodemanager.
          Because containers can listen on any ephemeral port, TCP ports 1024–65535 will be opened.
@@ -49,10 +103,7 @@ in
  };

  config = mkMerge [
    (mkIf (
      cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
    ) {

    (mkIf cfg.gatewayRole.enable {
      users.users.yarn = {
        description = "Hadoop YARN user";
        group = "hadoop";
@@ -65,15 +116,19 @@ in
        description = "Hadoop YARN ResourceManager";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.yarn.resourcemanager) restartIfChanged;
        environment = cfg.yarn.resourcemanager.extraEnv;

        serviceConfig = {
          User = "yarn";
          SyslogIdentifier = "yarn-resourcemanager";
          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
            " resourcemanager";
            " resourcemanager ${escapeShellArgs cfg.yarn.resourcemanager.extraFlags}";
          Restart = "always";
        };
      };

      services.hadoop.gatewayRole.enable = true;

      networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
        8088 # resourcemanager.webapp.address
        8030 # resourcemanager.scheduler.address
@@ -94,6 +149,7 @@ in
        description = "Hadoop YARN NodeManager";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.yarn.nodemanager) restartIfChanged;
        environment = cfg.yarn.nodemanager.extraEnv;

        preStart = ''
          # create log dir
@@ -101,8 +157,9 @@ in
          chown yarn:hadoop /var/log/hadoop/yarn/nodemanager

          # set up setuid container executor binary
          umount /run/wrappers/yarn-nodemanager/cgroup/cpu || true
          rm -rf /run/wrappers/yarn-nodemanager/ || true
          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop}
          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop,cgroup/cpu}
          cp ${cfg.package}/lib/${cfg.package.untarDir}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
          chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
          chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
@@ -114,11 +171,26 @@ in
          SyslogIdentifier = "yarn-nodemanager";
          PermissionsStartOnly = true;
          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
            " nodemanager";
            " nodemanager ${escapeShellArgs cfg.yarn.nodemanager.extraFlags}";
          Restart = "always";
        };
      };

      services.hadoop.gatewayRole.enable = true;

      services.hadoop.yarnSiteInternal = with cfg.yarn.nodemanager; {
        "yarn.nodemanager.local-dirs" = localDir;
        "yarn.scheduler.maximum-allocation-vcores" = resource.maximumAllocationVCores;
        "yarn.scheduler.maximum-allocation-mb" = resource.maximumAllocationMB;
        "yarn.nodemanager.resource.cpu-vcores" = resource.cpuVCores;
        "yarn.nodemanager.resource.memory-mb" = resource.memoryMB;
      } // mkIf useCGroups {
        "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" = "/hadoop-yarn";
        "yarn.nodemanager.linux-container-executor.resources-handler.class" = "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler";
        "yarn.nodemanager.linux-container-executor.cgroups.mount" = "true";
        "yarn.nodemanager.linux-container-executor.cgroups.mount-path" = "/run/wrappers/yarn-nodemanager/cgroup";
      };

      networking.firewall.allowedTCPPortRanges = [
        (mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
      ];
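A sketch of the new NodeManager sizing knobs (the values are hypothetical). These flow into yarn-site.xml through services.hadoop.yarnSiteInternal exactly as wired above:

{
  services.hadoop.yarn.nodemanager = {
    enable = true;
    resource = {
      cpuVCores = 8;       # yarn.nodemanager.resource.cpu-vcores
      memoryMB = 16384;    # yarn.nodemanager.resource.memory-mb
    };
    localDir = [ "/var/lib/hadoop/yarn/nm" ];
  };
}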
52 nixos/modules/services/cluster/pacemaker/default.nix Normal file
@@ -0,0 +1,52 @@
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.services.pacemaker;
in
{
  # interface
  options.services.pacemaker = {
    enable = mkEnableOption "pacemaker";

    package = mkOption {
      type = types.package;
      default = pkgs.pacemaker;
      defaultText = literalExpression "pkgs.pacemaker";
      description = "Package that should be used for pacemaker.";
    };
  };

  # implementation
  config = mkIf cfg.enable {
    assertions = [ {
      assertion = config.services.corosync.enable;
      message = ''
        Enabling services.pacemaker requires a services.corosync configuration.
      '';
    } ];

    environment.systemPackages = [ cfg.package ];

    # required by pacemaker
    users.users.hacluster = {
      isSystemUser = true;
      group = "pacemaker";
      home = "/var/lib/pacemaker";
    };
    users.groups.pacemaker = {};

    systemd.tmpfiles.rules = [
      "d /var/log/pacemaker 0700 hacluster pacemaker -"
    ];

    systemd.packages = [ cfg.package ];
    systemd.services.pacemaker = {
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        StateDirectory = "pacemaker";
        StateDirectoryMode = "0700";
      };
    };
  };
}
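Because of the assertion above, the minimal setup enables corosync alongside pacemaker (the corosync nodelist is omitted here; see the corosync example earlier):

{
  services.corosync.enable = true;   # pacemaker refuses to start without it
  services.pacemaker.enable = true;
}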
@@ -34,6 +34,14 @@ in
        Repository to add the runner to.

        Changing this option triggers a new runner registration.

        IMPORTANT: If your token is org-wide (not per repository), you need to
        provide a github org link, not a single repository, so do it like this
        <literal>https://github.com/nixos</literal>, not like this
        <literal>https://github.com/nixos/nixpkgs</literal>.
        Otherwise, you are going to get a <literal>404 NotFound</literal>
        from <literal>POST https://api.github.com/actions/runner-registration</literal>
        in the configure script.
      '';
      example = "https://github.com/nixos/nixpkgs";
    };
@@ -119,7 +119,7 @@ in {
    <link linkend="opt-services.matrix-synapse.settings.listeners">listeners</link> = [
      {
        <link linkend="opt-services.matrix-synapse.settings.listeners._.port">port</link> = 8008;
        <link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_address</link> = [ "::1" ];
        <link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_addresses</link> = [ "::1" ];
        <link linkend="opt-services.matrix-synapse.settings.listeners._.type">type</link> = "http";
        <link linkend="opt-services.matrix-synapse.settings.listeners._.tls">tls</link> = false;
        <link linkend="opt-services.matrix-synapse.settings.listeners._.x_forwarded">x_forwarded</link> = true;
@@ -152,10 +152,10 @@ in {

  <para>
    If you want to run a server with public registration by anybody, you can
    then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.enable_registration</link> =
    then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.settings.enable_registration</link> =
    true;</literal>. Otherwise, you can generate a registration secret with
    <command>pwgen -s 64 1</command> and set it with
    <option><link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">services.matrix-synapse.registration_shared_secret</link></option>.
    <option><link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">services.matrix-synapse.settings.registration_shared_secret</link></option>.
    To create a new user or admin, run the following after you have set the secret
    and have rebuilt NixOS:
    <screen>
@@ -70,10 +70,12 @@ in
        LockPersonality = true;

        PrivateTmp = true;
        PrivateDevices = true;
        # Disabled to allow Jellyfin to access hw accel devices endpoints
        # PrivateDevices = true;
        PrivateUsers = true;

        ProtectClock = true;
        # Disabled as it does not allow Jellyfin to interface with CUDA devices
        # ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
@@ -84,7 +86,7 @@ in

        RestrictNamespaces = true;
        # AF_NETLINK needed because Jellyfin monitors the network connection
        RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET" "AF_INET6" ];
        RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET" "AF_INET6" "AF_UNIX" ];
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
@@ -112,11 +112,11 @@ in

{
  imports = [
    (mkRenamedOptionModule [ "nix" "useChroot" ] [ "nix" "useSandbox" ])
    (mkRenamedOptionModule [ "nix" "chrootDirs" ] [ "nix" "sandboxPaths" ])
    (mkRenamedOptionModule [ "nix" "daemonIONiceLevel" ] [ "nix" "daemonIOSchedPriority" ])
    (mkRenamedOptionModuleWith { sinceRelease = 2003; from = [ "nix" "useChroot" ]; to = [ "nix" "useSandbox" ]; })
    (mkRenamedOptionModuleWith { sinceRelease = 2003; from = [ "nix" "chrootDirs" ]; to = [ "nix" "sandboxPaths" ]; })
    (mkRenamedOptionModuleWith { sinceRelease = 2205; from = [ "nix" "daemonIONiceLevel" ]; to = [ "nix" "daemonIOSchedPriority" ]; })
    (mkRemovedOptionModule [ "nix" "daemonNiceLevel" ] "Consider nix.daemonCPUSchedPolicy instead.")
  ] ++ mapAttrsToList (oldConf: newConf: mkRenamedOptionModule [ "nix" oldConf ] [ "nix" "settings" newConf ]) legacyConfMappings;
  ] ++ mapAttrsToList (oldConf: newConf: mkRenamedOptionModuleWith { sinceRelease = 2205; from = [ "nix" oldConf ]; to = [ "nix" "settings" newConf ]; }) legacyConfMappings;

  ###### interface

@@ -214,6 +214,8 @@ in
        User = cfg.user;
        ExecStart = "${cfg.package}/bin/paperless-ng qcluster";
        Restart = "on-failure";
        # The `mbind` syscall is needed for running the classifier.
        SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "mbind" ];
      };
      environment = env;
      wantedBy = [ "multi-user.target" ];
@@ -29,6 +29,7 @@ let
    "blackbox"
    "buildkite-agent"
    "collectd"
    "dmarc"
    "dnsmasq"
    "domain"
    "dovecot"
@@ -55,6 +56,7 @@ let
    "postfix"
    "postgres"
    "process"
    "pve"
    "py-air-control"
    "redis"
    "rspamd"
117 nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix Normal file
@@ -0,0 +1,117 @@
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.dmarc;

  json = builtins.toJSON {
    inherit (cfg) folders port;
    listen_addr = cfg.listenAddress;
    storage_path = "$STATE_DIRECTORY";
    imap = (builtins.removeAttrs cfg.imap [ "passwordFile" ]) // { password = "$IMAP_PASSWORD"; use_ssl = true; };
    poll_interval_seconds = cfg.pollIntervalSeconds;
    deduplication_max_seconds = cfg.deduplicationMaxSeconds;
    logging = {
      version = 1;
      disable_existing_loggers = false;
    };
  };
in {
  port = 9797;
  extraOpts = {
    imap = {
      host = mkOption {
        type = types.str;
        default = "localhost";
        description = ''
          Hostname of IMAP server to connect to.
        '';
      };
      port = mkOption {
        type = types.port;
        default = 993;
        description = ''
          Port of the IMAP server to connect to.
        '';
      };
      username = mkOption {
        type = types.str;
        example = "postmaster@example.org";
        description = ''
          Login username for the IMAP connection.
        '';
      };
      passwordFile = mkOption {
        type = types.str;
        example = "/run/secrets/dovecot_pw";
        description = ''
          File containing the login password for the IMAP connection.
        '';
      };
    };
    folders = {
      inbox = mkOption {
        type = types.str;
        default = "INBOX";
        description = ''
          IMAP mailbox that is checked for incoming DMARC aggregate reports
        '';
      };
      done = mkOption {
        type = types.str;
        default = "Archive";
        description = ''
          IMAP mailbox that successfully processed reports are moved to.
        '';
      };
      error = mkOption {
        type = types.str;
        default = "Invalid";
        description = ''
          IMAP mailbox that emails are moved to that could not be processed.
        '';
      };
    };
    pollIntervalSeconds = mkOption {
      type = types.ints.unsigned;
      default = 60;
      description = ''
        How often to poll the IMAP server in seconds.
      '';
    };
    deduplicationMaxSeconds = mkOption {
      type = types.ints.unsigned;
      default = 604800;
      defaultText = "7 days (in seconds)";
      description = ''
        How long individual report IDs will be remembered to avoid
        counting double delivered reports twice.
      '';
    };
    debug = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Whether to enable <literal>--debug</literal>.
      '';
    };
  };
  serviceOpts = {
    path = with pkgs; [ envsubst coreutils ];
    serviceConfig = {
      StateDirectory = "prometheus-dmarc-exporter";
      WorkingDirectory = "/var/lib/prometheus-dmarc-exporter";
      ExecStart = "${pkgs.writeShellScript "setup-cfg" ''
        export IMAP_PASSWORD="$(<${cfg.imap.passwordFile})"
        envsubst \
          -i ${pkgs.writeText "dmarc-exporter.json.template" json} \
          -o ''${STATE_DIRECTORY}/dmarc-exporter.json

        exec ${pkgs.prometheus-dmarc-exporter}/bin/prometheus-dmarc-exporter \
          --configuration /var/lib/prometheus-dmarc-exporter/dmarc-exporter.json \
          ${optionalString cfg.debug "--debug"}
      ''}";
    };
  };
}
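A hypothetical enablement of the exporter; the host, username and secret path are placeholders, while the option names come from the extraOpts block above (the password is read from a file at start-up and substituted into the generated JSON config):

{
  services.prometheus.exporters.dmarc = {
    enable = true;
    imap = {
      host = "mail.example.org";
      username = "postmaster@example.org";
      passwordFile = "/run/secrets/dmarc-imap";  # kept out of the Nix store
    };
  };
}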
118 nixos/modules/services/monitoring/prometheus/exporters/pve.nix Normal file
@@ -0,0 +1,118 @@
{ config, lib, pkgs, options }:

with lib;
let
  cfg = config.services.prometheus.exporters.pve;

  # pve exporter requires a config file so create an empty one if configFile is not provided
  emptyConfigFile = pkgs.writeTextFile {
    name = "pve.yml";
    text = "default:";
  };

  computedConfigFile = "${if cfg.configFile == null then emptyConfigFile else cfg.configFile}";
in
{
  port = 9221;
  extraOpts = {
    package = mkOption {
      type = types.package;
      default = pkgs.prometheus-pve-exporter;
      defaultText = literalExpression "pkgs.prometheus-pve-exporter";
      example = literalExpression "pkgs.prometheus-pve-exporter";
      description = ''
        The package to use for prometheus-pve-exporter
      '';
    };

    environmentFile = mkOption {
      type = with types; nullOr path;
      default = null;
      example = "/etc/prometheus-pve-exporter/pve.env";
      description = ''
        Path to the service's environment file. This path can either be a computed path in /nix/store or a path in the local filesystem.

        The environment file should NOT be stored in /nix/store as it contains passwords and/or keys in plain text.

        Environment reference: https://github.com/prometheus-pve/prometheus-pve-exporter#authentication
      '';
    };

    configFile = mkOption {
      type = with types; nullOr path;
      default = null;
      example = "/etc/prometheus-pve-exporter/pve.yml";
      description = ''
        Path to the service's config file. This path can either be a computed path in /nix/store or a path in the local filesystem.

        The config file should NOT be stored in /nix/store as it will contain passwords and/or keys in plain text.

        If both configFile and environmentFile are provided, the configFile option will be ignored.

        Configuration reference: https://github.com/prometheus-pve/prometheus-pve-exporter/#authentication
      '';
    };

    collectors = {
      status = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect Node/VM/CT status
        '';
      };
      version = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect PVE version info
        '';
      };
      node = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect PVE node info
        '';
      };
      cluster = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect PVE cluster info
        '';
      };
      resources = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect PVE resources info
        '';
      };
      config = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Collect PVE onboot status
        '';
      };
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${cfg.package}/bin/pve_exporter \
          --${if cfg.collectors.status == true then "" else "no-"}collector.status \
          --${if cfg.collectors.version == true then "" else "no-"}collector.version \
          --${if cfg.collectors.node == true then "" else "no-"}collector.node \
          --${if cfg.collectors.cluster == true then "" else "no-"}collector.cluster \
          --${if cfg.collectors.resources == true then "" else "no-"}collector.resources \
          --${if cfg.collectors.config == true then "" else "no-"}collector.config \
          ${computedConfigFile} \
          ${toString cfg.port} ${cfg.listenAddress}
      '';
    } // optionalAttrs (cfg.environmentFile != null) {
      EnvironmentFile = cfg.environmentFile;
    };
  };
}
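A sketch showing how the collector toggles map to flags: with collectors.config set to false, the ExecStart above emits --no-collector.config instead of --collector.config. The environment file path is a placeholder:

{
  services.prometheus.exporters.pve = {
    enable = true;
    environmentFile = "/etc/prometheus-pve-exporter/pve.env";  # holds PVE credentials
    collectors.config = false;  # skip onboot status collection
  };
}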
@@ -11,7 +11,7 @@ in {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-systemd-exporter}/bin/systemd_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port}
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
      '';
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
@@ -76,7 +76,7 @@ in

    script = ''
      ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${user} \
        -c 'HOME="${cfg.dataDir}" ${pkgs.amuleDaemon}/bin/amuled'
        -c 'HOME="${cfg.dataDir}" ${pkgs.amule-daemon}/bin/amuled'
    '';
  };
};
146 nixos/modules/services/networking/nbd.nix Normal file
@ -0,0 +1,146 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.nbd;
|
||||
configFormat = pkgs.formats.ini { };
|
||||
iniFields = with types; attrsOf (oneOf [ bool int float str ]);
|
||||
serverConfig = configFormat.generate "nbd-server-config"
|
||||
({
|
||||
generic =
|
||||
(cfg.server.extraOptions // {
|
||||
user = "root";
|
||||
group = "root";
|
||||
port = cfg.server.listenPort;
|
||||
} // (optionalAttrs (cfg.server.listenAddress != null) {
|
||||
listenaddr = cfg.server.listenAddress;
|
||||
}));
|
||||
}
|
||||
// (mapAttrs
|
||||
(_: { path, allowAddresses, extraOptions }:
|
||||
extraOptions // {
|
||||
exportname = path;
|
||||
} // (optionalAttrs (allowAddresses != null) {
|
||||
authfile = pkgs.writeText "authfile" (concatStringsSep "\n" allowAddresses);
|
||||
}))
|
||||
cfg.server.exports)
|
||||
);
|
||||
splitLists =
|
||||
partition
|
||||
(path: hasPrefix "/dev/" path)
|
||||
(mapAttrsToList (_: { path, ... }: path) cfg.server.exports);
|
||||
allowedDevices = splitLists.right;
|
||||
boundPaths = splitLists.wrong;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.nbd = {
|
||||
server = {
|
||||
enable = mkEnableOption "the Network Block Device (nbd) server";
|
||||
|
||||
listenPort = mkOption {
|
||||
type = types.port;
|
||||
default = 10809;
|
||||
description = "Port to listen on. The port is NOT automatically opened in the firewall.";
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
type = iniFields;
|
||||
default = {
|
||||
allowlist = false;
|
||||
};
|
||||
description = ''
|
||||
Extra options for the server. See
|
||||
<citerefentry><refentrytitle>nbd-server</refentrytitle>
|
||||
<manvolnum>5</manvolnum></citerefentry>.
|
||||
'';
|
||||
};
|
||||
|
||||
exports = mkOption {
|
||||
description = "Files or block devices to make available over the network.";
|
||||
default = { };
|
||||
type = with types; attrsOf
|
||||
(submodule {
|
||||
options = {
|
||||
path = mkOption {
|
||||
type = str;
|
||||
description = "File or block device to export.";
|
||||
example = "/dev/sdb1";
|
||||
};
|
||||
|
||||
allowAddresses = mkOption {
|
||||
type = nullOr (listOf str);
|
||||
default = null;
|
||||
example = [ "10.10.0.0/24" "127.0.0.1" ];
|
||||
description = "IPs and subnets that are authorized to connect for this device. If not specified, the server will allow all connections.";
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
type = iniFields;
|
||||
default = {
|
||||
flush = true;
|
||||
fua = true;
|
||||
};
|
||||
description = ''
|
||||
Extra options for this export. See
|
||||
<citerefentry><refentrytitle>nbd-server</refentrytitle>
|
||||
<manvolnum>5</manvolnum></citerefentry>.
|
||||
'';
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description = "Address to listen on. If not specified, the server will listen on all interfaces.";
|
||||
default = null;
|
||||
example = "10.10.0.1";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.server.enable {
|
||||
boot.kernelModules = [ "nbd" ];
|
||||
|
||||
systemd.services.nbd-server = {
|
||||
after = [ "network-online.target" ];
|
||||
before = [ "multi-user.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.nbd}/bin/nbd-server -C ${serverConfig}";
|
||||
Type = "forking";
|
||||
|
||||
DeviceAllow = map (path: "${path} rw") allowedDevices;
|
||||
BindPaths = boundPaths;
|
||||
|
||||
CapabilityBoundingSet = "";
|
||||
DevicePolicy = "closed";
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = false;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
PrivateUsers = true;
|
||||
ProcSubset = "pid";
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "noaccess";
|
||||
ProtectSystem = "strict";
|
||||
RestrictAddressFamilies = "AF_INET AF_INET6";
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
UMask = "0077";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
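A minimal usage sketch for the new module (export name, device path, and subnet are illustrative assumptions):

  services.nbd.server = {
    enable = true;
    exports.backup = {
      path = "/dev/sdb1";                   # block device to publish
      allowAddresses = [ "10.10.0.0/24" ];  # restrict who may connect
    };
  };
  # per the option description, the listen port is NOT opened automatically:
  networking.firewall.allowedTCPPorts = [ config.services.nbd.server.listenPort ];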
@@ -194,19 +194,8 @@ let
zone.children
);

# fighting infinite recursion
zoneOptions = zoneOptionsRaw // childConfig zoneOptions1 true;
zoneOptions1 = zoneOptionsRaw // childConfig zoneOptions2 false;
zoneOptions2 = zoneOptionsRaw // childConfig zoneOptions3 false;
zoneOptions3 = zoneOptionsRaw // childConfig zoneOptions4 false;
zoneOptions4 = zoneOptionsRaw // childConfig zoneOptions5 false;
zoneOptions5 = zoneOptionsRaw // childConfig zoneOptions6 false;
zoneOptions6 = zoneOptionsRaw // childConfig null false;

childConfig = x: v: { options.children = { type = types.attrsOf x; visible = v; }; };

# options are ordered alphanumerically
zoneOptionsRaw = types.submodule {
zoneOptions = types.submodule {
options = {

allowAXFRFallback = mkOption {

@@ -246,6 +235,13 @@ let
};

children = mkOption {
# TODO: This relies on the fact that `types.anything` doesn't set any
# values of its own to any defaults, because in the above zoneConfigs',
# values from children override ones from parents, but only if the
# attributes are defined. Because of this, we can't replace the element
# type here with `zoneConfigs`, since that would set all the attributes
# to default values, breaking the parent inheriting function.
type = types.attrsOf types.anything;
default = {};
description = ''
Children zones inherit all options of their parents. Attributes
@@ -1,6 +1,7 @@
{ config, options, lib, pkgs, stdenv, ... }:
let
cfg = config.services.pleroma;
cookieFile = "/var/lib/pleroma/.cookie";
in {
options = {
services.pleroma = with lib; {

@@ -8,7 +9,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.pleroma;
default = pkgs.pleroma.override { inherit cookieFile; };
defaultText = literalExpression "pkgs.pleroma";
description = "Pleroma package to use.";
};

@@ -100,7 +101,6 @@ in {
after = [ "network-online.target" "postgresql.service" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
environment.RELEASE_COOKIE = "/var/lib/pleroma/.cookie";
serviceConfig = {
User = cfg.user;
Group = cfg.group;

@@ -118,10 +118,10 @@ in {
# Better be safe than sorry migration-wise.
ExecStartPre =
let preScript = pkgs.writers.writeBashBin "pleromaStartPre" ''
if [ ! -f /var/lib/pleroma/.cookie ]
if [ ! -f "${cookieFile}" ] || [ ! -s "${cookieFile}" ]
then
echo "Creating cookie file"
dd if=/dev/urandom bs=1 count=16 | hexdump -e '16/1 "%02x"' > /var/lib/pleroma/.cookie
dd if=/dev/urandom bs=1 count=16 | ${pkgs.hexdump}/bin/hexdump -e '16/1 "%02x"' > "${cookieFile}"
fi
${cfg.package}/bin/pleroma_ctl migrate
'';
@@ -8,12 +8,7 @@ let
homeDir = "/var/lib/tox-node";

configFile = let
# fetchurl should be switched to getting this file from tox-node.src once
# the dpkg directory is in a release
src = pkgs.fetchurl {
url = "https://raw.githubusercontent.com/tox-rs/tox-node/master/dpkg/config.yml";
sha256 = "1431wzpzm786mcvyzk1rp7ar418n45dr75hdggxvlm7pkpam31xa";
};
src = "${pkg.src}/dpkg/config.yml";
confJSON = pkgs.writeText "config.json" (
builtins.toJSON {
log-type = cfg.logType;
@@ -62,6 +62,7 @@ in {
};

stateDir = mkOption {
type = types.path;
default = "/var/lib/unbound";
description = "Directory holding all state for unbound to run.";
};
@@ -153,6 +153,7 @@ in

userlist = mkOption {
default = [];
type = types.listOf types.str;
description = "See <option>userlistFile</option>.";
};
@@ -102,17 +102,19 @@ in
# Taken from: https://github.com/oauth2-proxy/oauth2-proxy/blob/master/providers/providers.go
provider = mkOption {
type = types.enum [
"google"
"adfs"
"azure"
"bitbucket"
"digitalocean"
"facebook"
"github"
"keycloak"
"gitlab"
"google"
"keycloak"
"keycloak-oidc"
"linkedin"
"login.gov"
"bitbucket"
"nextcloud"
"digitalocean"
"oidc"
];
default = "google";
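With the extended enum above, selecting the newly added Keycloak OIDC provider would look like this sketch (assuming the module's usual services.oauth2_proxy namespace):

  services.oauth2_proxy.provider = "keycloak-oidc";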
@@ -910,6 +910,11 @@ in
ORPort = mkForce [];
PublishServerDescriptor = mkForce false;
})
(mkIf (!cfg.client.enable) {
# Make sure application connections via SOCKS are disabled
# when services.tor.client.enable is false
SOCKSPort = mkForce [ 0 ];
})
(mkIf cfg.client.enable (
{ SOCKSPort = [ cfg.client.socksListenAddress ];
} // optionalAttrs cfg.client.transparentProxy.enable {

@@ -962,7 +967,7 @@ in
'') onion.authorizedClients ++
optional (onion.secretKey != null) ''
install -d -o tor -g tor -m 0700 ${escapeShellArg onion.path}
key="$(cut -f1 -d: ${escapeShellArg onion.secretKey})"
key="$(cut -f1 -d: ${escapeShellArg onion.secretKey} | head -1)"
case "$key" in
("== ed25519v"*"-secret")
install -o tor -g tor -m 0400 ${escapeShellArg onion.secretKey} ${escapeShellArg onion.path}/hs_ed25519_secret_key;;

@@ -1008,7 +1013,11 @@ in
#InaccessiblePaths = [ "-+${runDir}/root" ];
UMask = "0066";
BindPaths = [ stateDir ];
BindReadOnlyPaths = [ storeDir "/etc" ];
BindReadOnlyPaths = [ storeDir "/etc" ] ++
optionals config.services.resolved.enable [
"/run/systemd/resolve/stub-resolv.conf"
"/run/systemd/resolve/resolv.conf"
];
AmbientCapabilities = [""] ++ lib.optional bindsPrivilegedPort "CAP_NET_BIND_SERVICE";
CapabilityBoundingSet = [""] ++ lib.optional bindsPrivilegedPort "CAP_NET_BIND_SERVICE";
# ProtectClock= adds DeviceAllow=char-rtc r
@@ -1,81 +1,73 @@
{ config, lib, pkgs, ... }:

with lib;

let
ecfg = config.services.earlyoom;
cfg = config.services.earlyoom;

inherit (lib)
mkDefault mkEnableOption mkIf mkOption types
mkRemovedOptionModule
concatStringsSep optional;

in
{
options = {
services.earlyoom = {
options.services.earlyoom = {
enable = mkEnableOption "Early out of memory killing";

enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable early out of memory killing.
'';
};
freeMemThreshold = mkOption {
type = types.ints.between 1 100;
default = 10;
description = ''
Minimum of available memory (in percent).
If the free memory falls below this threshold and the analog is true for
<option>services.earlyoom.freeSwapThreshold</option>
the killing begins.
'';
};

freeMemThreshold = mkOption {
type = types.int;
default = 10;
description = ''
Minimum of available memory (in percent).
If the free memory falls below this threshold and the analog is true for
<option>services.earlyoom.freeSwapThreshold</option>
the killing begins.
'';
};
freeSwapThreshold = mkOption {
type = types.ints.between 1 100;
default = 10;
description = ''
Minimum of available swap space (in percent).
If the available swap space falls below this threshold and the analog
is true for <option>services.earlyoom.freeMemThreshold</option>
the killing begins.
'';
};

freeSwapThreshold = mkOption {
type = types.int;
default = 10;
description = ''
Minimum of available swap space (in percent).
If the available swap space falls below this threshold and the analog
is true for <option>services.earlyoom.freeMemThreshold</option>
the killing begins.
'';
};
# TODO: remove or warn after 1.7 (https://github.com/rfjakob/earlyoom/commit/7ebc4554)
ignoreOOMScoreAdjust = mkOption {
type = types.bool;
default = false;
description = ''
Ignore oom_score_adjust values of processes.
'';
};

# TODO: remove or warn after 1.7 (https://github.com/rfjakob/earlyoom/commit/7ebc4554)
ignoreOOMScoreAdjust = mkOption {
type = types.bool;
default = false;
description = ''
Ignore oom_score_adjust values of processes.
'';
};
enableDebugInfo = mkOption {
type = types.bool;
default = false;
description = ''
Enable debugging messages.
'';
};

enableDebugInfo = mkOption {
type = types.bool;
default = false;
description = ''
Enable debugging messages.
'';
};
enableNotifications = mkOption {
type = types.bool;
default = false;
description = ''
Send notifications about killed processes via the system d-bus.

notificationsCommand = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
This option is deprecated and ignored by earlyoom since 1.6.
Use <option>services.earlyoom.enableNotifications</option> instead.
'';
};
WARNING: enabling this option (while convenient) should *not* be done on a
machine where you do not trust the other users as it allows any other
local user to DoS your session by spamming notifications.

enableNotifications = mkOption {
type = types.bool;
default = false;
description = ''
Send notifications about killed processes via the system d-bus.
To actually see the notifications in your GUI session, you need to have
<literal>systembus-notify</literal> running as your user.
To actually see the notifications in your GUI session, you need to have
<literal>systembus-notify</literal> running as your user which this
option handles.

See <link xlink:href="https://github.com/rfjakob/earlyoom#notifications">README</link> for details.
'';
};
See <link xlink:href="https://github.com/rfjakob/earlyoom#notifications">README</link> for details.
'';
};
};

@@ -83,37 +75,30 @@ in
(mkRemovedOptionModule [ "services" "earlyoom" "useKernelOOMKiller" ] ''
This option is deprecated and ignored by earlyoom since 1.2.
'')
(mkRemovedOptionModule [ "services" "earlyoom" "notificationsCommand" ] ''
This option is deprecated and ignored by earlyoom since 1.6.
'')
];

config = mkIf ecfg.enable {
assertions = [
{ assertion = ecfg.freeMemThreshold > 0 && ecfg.freeMemThreshold <= 100;
message = "Needs to be a positive percentage"; }
{ assertion = ecfg.freeSwapThreshold > 0 && ecfg.freeSwapThreshold <= 100;
message = "Needs to be a positive percentage"; }
];

# TODO: reimplement this option as -N after 1.7 (https://github.com/rfjakob/earlyoom/commit/afe03606)
warnings = optional (ecfg.notificationsCommand != null)
"`services.earlyoom.notificationsCommand` is deprecated and ignored by earlyoom since 1.6.";
config = mkIf cfg.enable {
services.systembus-notify.enable = mkDefault cfg.enableNotifications;

systemd.services.earlyoom = {
description = "Early OOM Daemon for Linux";
wantedBy = [ "multi-user.target" ];
path = optional ecfg.enableNotifications pkgs.dbus;
path = optional cfg.enableNotifications pkgs.dbus;
serviceConfig = {
StandardOutput = "null";
StandardError = "journal";
ExecStart = concatStringsSep " " ([
"${pkgs.earlyoom}/bin/earlyoom"
"-m ${toString ecfg.freeMemThreshold}"
"-s ${toString ecfg.freeSwapThreshold}"
] ++ optional ecfg.ignoreOOMScoreAdjust "-i"
++ optional ecfg.enableDebugInfo "-d"
++ optional ecfg.enableNotifications "-n");
"-m ${toString cfg.freeMemThreshold}"
"-s ${toString cfg.freeSwapThreshold}"
]
++ optional cfg.ignoreOOMScoreAdjust "-i"
++ optional cfg.enableDebugInfo "-d"
++ optional cfg.enableNotifications "-n"
);
};
};

environment.systemPackages = optional ecfg.enableNotifications pkgs.systembus-notify;
};
}
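A usage sketch against the refactored options (threshold values are illustrative):

  services.earlyoom = {
    enable = true;
    freeMemThreshold = 5;        # now typed as ints.between 1 100
    freeSwapThreshold = 5;
    enableNotifications = true;  # also enables services.systembus-notify via mkDefault
  };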
nixos/modules/services/system/systembus-notify.nix (new file, 27 lines)
@ -0,0 +1,27 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.systembus-notify;
|
||||
|
||||
inherit (lib) mkEnableOption mkIf;
|
||||
|
||||
in
|
||||
{
|
||||
options.services.systembus-notify = {
|
||||
enable = mkEnableOption ''
|
||||
System bus notification support
|
||||
|
||||
WARNING: enabling this option (while convenient) should *not* be done on a
|
||||
machine where you do not trust the other users as it allows any other
|
||||
local user to DoS your session by spamming notifications.
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd = {
|
||||
packages = with pkgs; [ systembus-notify ];
|
||||
|
||||
user.services.systembus-notify.wantedBy = [ "graphical-session.target" ];
|
||||
};
|
||||
};
|
||||
}
|
|
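The module can also be enabled on its own; per the warning in its enable option, this is only advisable on single-user machines:

  services.systembus-notify.enable = true;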
@@ -1,30 +1,40 @@
{ config, lib, options, pkgs, ... }:

with lib;

let
cfg = config.services.epgstation;
opt = options.services.epgstation;

description = "EPGStation: DVR system for Mirakurun-managed TV tuners";

username = config.users.users.epgstation.name;
groupname = config.users.users.epgstation.group;
mirakurun = {
sock = config.services.mirakurun.unixSocket;
option = options.services.mirakurun.unixSocket;
};

settingsFmt = pkgs.formats.json {};
settingsTemplate = settingsFmt.generate "config.json" cfg.settings;
yaml = pkgs.formats.yaml { };
settingsTemplate = yaml.generate "config.yml" cfg.settings;
preStartScript = pkgs.writeScript "epgstation-prestart" ''
#!${pkgs.runtimeShell}

PASSWORD="$(head -n1 "${cfg.basicAuth.passwordFile}")"
DB_PASSWORD="$(head -n1 "${cfg.database.passwordFile}")"
DB_PASSWORD_FILE=${lib.escapeShellArg cfg.database.passwordFile}

if [[ ! -f "$DB_PASSWORD_FILE" ]]; then
printf "[FATAL] File containing the DB password was not found in '%s'. Double check the NixOS option '%s'." \
"$DB_PASSWORD_FILE" ${lib.escapeShellArg opt.database.passwordFile} >&2
exit 1
fi

DB_PASSWORD="$(head -n1 ${lib.escapeShellArg cfg.database.passwordFile})"

# setup configuration
touch /etc/epgstation/config.json
chmod 640 /etc/epgstation/config.json
touch /etc/epgstation/config.yml
chmod 640 /etc/epgstation/config.yml
sed \
-e "s,@password@,$PASSWORD,g" \
-e "s,@dbPassword@,$DB_PASSWORD,g" \
${settingsTemplate} > /etc/epgstation/config.json
chown "${username}:${groupname}" /etc/epgstation/config.json
${settingsTemplate} > /etc/epgstation/config.yml
chown "${username}:${groupname}" /etc/epgstation/config.yml

# NOTE: Use password authentication, since mysqljs does not yet support auth_socket
if [ ! -e /var/lib/epgstation/db-created ]; then

@@ -35,7 +45,7 @@ let
'';

streamingConfig = lib.importJSON ./streaming.json;
logConfig = {
logConfig = yaml.generate "logConfig.yml" {
appenders.stdout.type = "stdout";
categories = {
default = { appenders = [ "stdout" ]; level = "info"; };

@@ -45,53 +55,51 @@ let
};
};

defaultPassword = "INSECURE_GO_CHECK_CONFIGURATION_NIX\n";
# Deprecate top level options that are redundant.
deprecateTopLevelOption = config:
lib.mkRenamedOptionModule
([ "services" "epgstation" ] ++ config)
([ "services" "epgstation" "settings" ] ++ config);

removeOption = config: instruction:
lib.mkRemovedOptionModule
([ "services" "epgstation" ] ++ config)
instruction;
in
{
options.services.epgstation = {
enable = mkEnableOption "EPGStation: DTV Software in Japan";
meta.maintainers = with lib.maintainers; [ midchildan ];

usePreconfiguredStreaming = mkOption {
type = types.bool;
imports = [
(deprecateTopLevelOption [ "port" ])
(deprecateTopLevelOption [ "socketioPort" ])
(deprecateTopLevelOption [ "clientSocketioPort" ])
(removeOption [ "basicAuth" ]
"Use a TLS-terminated reverse proxy with authentication instead.")
];

options.services.epgstation = {
enable = lib.mkEnableOption description;

package = lib.mkOption {
default = pkgs.epgstation;
type = lib.types.package;
defaultText = lib.literalExpression "pkgs.epgstation";
description = "epgstation package to use";
};

usePreconfiguredStreaming = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Use preconfigured default streaming options.

Upstream defaults:
<link xlink:href="https://github.com/l3tnun/EPGStation/blob/master/config/config.sample.json"/>
<link xlink:href="https://github.com/l3tnun/EPGStation/blob/master/config/config.yml.template"/>
'';
};

port = mkOption {
type = types.port;
default = 20772;
description = ''
HTTP port for EPGStation to listen on.
'';
};

socketioPort = mkOption {
type = types.port;
default = cfg.port + 1;
defaultText = literalExpression "config.${opt.port} + 1";
description = ''
Socket.io port for EPGStation to listen on.
'';
};

clientSocketioPort = mkOption {
type = types.port;
default = cfg.socketioPort;
defaultText = literalExpression "config.${opt.socketioPort}";
description = ''
Socket.io port that the web client is going to connect to. This may be
different from <option>socketioPort</option> if EPGStation is hidden
behind a reverse proxy.
'';
};

openFirewall = mkOption {
type = types.bool;
openFirewall = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Open ports in the firewall for the EPGStation web interface.

@@ -106,50 +114,17 @@ in
'';
};

basicAuth = {
user = mkOption {
type = with types; nullOr str;
default = null;
example = "epgstation";
description = ''
Basic auth username for EPGStation. If <literal>null</literal>, basic
auth will be disabled.

<warning>
<para>
Basic authentication has known weaknesses, the most critical being
that it sends passwords over the network in clear text. Use this
feature to control access to EPGStation within your family and
friends, but don't rely on it for security.
</para>
</warning>
'';
};

passwordFile = mkOption {
type = types.path;
default = pkgs.writeText "epgstation-password" defaultPassword;
defaultText = literalDocBook ''a file containing <literal>${defaultPassword}</literal>'';
example = "/run/keys/epgstation-password";
description = ''
A file containing the password for <option>basicAuth.user</option>.
'';
};
};

database = {
name = mkOption {
type = types.str;
database = {
name = lib.mkOption {
type = lib.types.str;
default = "epgstation";
description = ''
Name of the MySQL database that holds EPGStation's data.
'';
};

passwordFile = mkOption {
type = types.path;
default = pkgs.writeText "epgstation-db-password" defaultPassword;
defaultText = literalDocBook ''a file containing <literal>${defaultPassword}</literal>'';
passwordFile = lib.mkOption {
type = lib.types.path;
example = "/run/keys/epgstation-db-password";
description = ''
A file containing the password for the database named

@@ -158,69 +133,106 @@ in
};
};

settings = mkOption {
# The defaults for some options come from the upstream template
# configuration, which is the one that users would get if they follow the
# upstream instructions. This is, in some cases, different from the
# application defaults. Some options like encodeProcessNum and
# concurrentEncodeNum don't have an optimal default value that works for
# all hardware setups and/or performance requirements. For those kinds of
# options, the application default wouldn't always result in the expected
# out-of-the-box behavior because it's the responsibility of the user to
# configure them according to their needs. In these cases, the value in the
# upstream template configuration should serve as a "good enough" default.
settings = lib.mkOption {
description = ''
Options to add to config.json.
Options to add to config.yml.

Documentation:
<link xlink:href="https://github.com/l3tnun/EPGStation/blob/master/doc/conf-manual.md"/>
'';

default = {};
default = { };
example = {
recPriority = 20;
conflictPriority = 10;
};

type = types.submodule {
freeformType = settingsFmt.type;
type = lib.types.submodule {
freeformType = yaml.type;

options.readOnlyOnce = mkOption {
type = types.bool;
default = false;
description = "Don't reload configuration files at runtime.";
options.port = lib.mkOption {
type = lib.types.port;
default = 20772;
description = ''
HTTP port for EPGStation to listen on.
'';
};

options.mirakurunPath = mkOption (let
sockPath = config.services.mirakurun.unixSocket;
in {
type = types.str;
default = "http+unix://${replaceStrings ["/"] ["%2F"] sockPath}";
defaultText = literalExpression ''
"http+unix://''${replaceStrings ["/"] ["%2F"] config.${options.services.mirakurun.unixSocket}}"
options.socketioPort = lib.mkOption {
type = lib.types.port;
default = cfg.settings.port + 1;
defaultText = lib.literalExpression "config.${opt.settings}.port + 1";
description = ''
Socket.io port for EPGStation to listen on. It is valid to share
ports with <option>${opt.settings}.port</option>.
'';
};

options.clientSocketioPort = lib.mkOption {
type = lib.types.port;
default = cfg.settings.socketioPort;
defaultText = lib.literalExpression "config.${opt.settings}.socketioPort";
description = ''
Socket.io port that the web client is going to connect to. This may
be different from <option>${opt.settings}.socketioPort</option> if
EPGStation is hidden behind a reverse proxy.
'';
};

options.mirakurunPath = with mirakurun; lib.mkOption {
type = lib.types.str;
default = "http+unix://${lib.replaceStrings ["/"] ["%2F"] sock}";
defaultText = lib.literalExpression ''
"http+unix://''${lib.replaceStrings ["/"] ["%2F"] config.${option}}"
'';
example = "http://localhost:40772";
description = "URL to connect to Mirakurun.";
});
};

options.encode = mkOption {
type = with types; listOf attrs;
options.encodeProcessNum = lib.mkOption {
type = lib.types.ints.positive;
default = 4;
description = ''
The maximum number of processes that EPGStation would allow to run
at the same time for encoding or streaming videos.
'';
};

options.concurrentEncodeNum = lib.mkOption {
type = lib.types.ints.positive;
default = 1;
description = ''
The maximum number of encoding jobs that EPGStation would run at the
same time.
'';
};

options.encode = lib.mkOption {
type = with lib.types; listOf attrs;
description = "Encoding presets for recorded videos.";
default = [
{
name = "H264";
cmd = "${pkgs.epgstation}/libexec/enc.sh main";
name = "H.264";
cmd = "%NODE% ${cfg.package}/libexec/enc.js";
suffix = ".mp4";
default = true;
}
{
name = "H264-sub";
cmd = "${pkgs.epgstation}/libexec/enc.sh sub";
suffix = "-sub.mp4";
}
];
defaultText = literalExpression ''
defaultText = lib.literalExpression ''
[
{
name = "H264";
cmd = "''${pkgs.epgstation}/libexec/enc.sh main";
name = "H.264";
cmd = "%NODE% config.${opt.package}/libexec/enc.js";
suffix = ".mp4";
default = true;
}
{
name = "H264-sub";
cmd = "''${pkgs.epgstation}/libexec/enc.sh sub";
suffix = "-sub.mp4";
}
]
'';

@@ -229,14 +241,25 @@ in
};
};

config = mkIf cfg.enable {
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = !(lib.hasAttr "readOnlyOnce" cfg.settings);
message = ''
The option config.${opt.settings}.readOnlyOnce can no longer be used
since it's been removed. No replacements are available.
'';
}
];

environment.etc = {
"epgstation/operatorLogConfig.json".text = builtins.toJSON logConfig;
"epgstation/serviceLogConfig.json".text = builtins.toJSON logConfig;
"epgstation/epgUpdaterLogConfig.yml".source = logConfig;
"epgstation/operatorLogConfig.yml".source = logConfig;
"epgstation/serviceLogConfig.yml".source = logConfig;
};

networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = with cfg; [ port socketioPort ];
networking.firewall = lib.mkIf cfg.openFirewall {
allowedTCPPorts = with cfg.settings; [ port socketioPort ];
};

users.users.epgstation = {

@@ -245,13 +268,13 @@ in
isSystemUser = true;
};

users.groups.epgstation = {};
users.groups.epgstation = { };

services.mirakurun.enable = mkDefault true;
services.mirakurun.enable = lib.mkDefault true;

services.mysql = {
enable = mkDefault true;
package = mkDefault pkgs.mariadb;
enable = lib.mkDefault true;
package = lib.mkDefault pkgs.mariadb;
ensureDatabases = [ cfg.database.name ];
# FIXME: enable once mysqljs supports auth_socket
# ensureUsers = [ {

@@ -260,39 +283,28 @@ in
# } ];
};

services.epgstation.settings = let
defaultSettings = {
serverPort = cfg.port;
socketioPort = cfg.socketioPort;
clientSocketioPort = cfg.clientSocketioPort;
services.epgstation.settings =
let
defaultSettings = {
dbtype = lib.mkDefault "mysql";
mysql = {
socketPath = lib.mkDefault "/run/mysqld/mysqld.sock";
user = username;
password = lib.mkDefault "@dbPassword@";
database = cfg.database.name;
};

dbType = mkDefault "mysql";
mysql = {
user = username;
database = cfg.database.name;
socketPath = mkDefault "/run/mysqld/mysqld.sock";
password = mkDefault "@dbPassword@";
connectTimeout = mkDefault 1000;
connectionLimit = mkDefault 10;
ffmpeg = lib.mkDefault "${pkgs.ffmpeg-full}/bin/ffmpeg";
ffprobe = lib.mkDefault "${pkgs.ffmpeg-full}/bin/ffprobe";

# for disambiguation with TypeScript files
recordedFileExtension = lib.mkDefault ".m2ts";
};

basicAuth = mkIf (cfg.basicAuth.user != null) {
user = mkDefault cfg.basicAuth.user;
password = mkDefault "@password@";
};

ffmpeg = mkDefault "${pkgs.ffmpeg-full}/bin/ffmpeg";
ffprobe = mkDefault "${pkgs.ffmpeg-full}/bin/ffprobe";

fileExtension = mkDefault ".m2ts";
maxEncode = mkDefault 2;
maxStreaming = mkDefault 2;
};
in
mkMerge [
defaultSettings
(mkIf cfg.usePreconfiguredStreaming streamingConfig)
];
in
lib.mkMerge [
defaultSettings
(lib.mkIf cfg.usePreconfiguredStreaming streamingConfig)
];

systemd.tmpfiles.rules = [
"d '/var/lib/epgstation/streamfiles' - ${username} ${groupname} - -"

@@ -301,15 +313,15 @@ in
];

systemd.services.epgstation = {
description = pkgs.epgstation.meta.description;
inherit description;

wantedBy = [ "multi-user.target" ];
after = [
"network.target"
] ++ optional config.services.mirakurun.enable "mirakurun.service"
++ optional config.services.mysql.enable "mysql.service";
after = [ "network.target" ]
++ lib.optional config.services.mirakurun.enable "mirakurun.service"
++ lib.optional config.services.mysql.enable "mysql.service";

serviceConfig = {
ExecStart = "${pkgs.epgstation}/bin/epgstation start";
ExecStart = "${cfg.package}/bin/epgstation start";
ExecStartPre = "+${preStartScript}";
User = username;
Group = groupname;
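A configuration sketch against the reworked module (the password file path is an illustrative assumption; database.passwordFile no longer ships an insecure default, so it must be set):

  services.epgstation = {
    enable = true;
    settings.port = 20772;
    database.passwordFile = "/run/keys/epgstation-db-password";
  };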
@@ -1,119 +1,140 @@
{
"liveHLS": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
"urlscheme": {
"m2ts": {
"ios": "vlc-x-callback://x-callback-url/stream?url=PROTOCOL://ADDRESS",
"android": "intent://ADDRESS#Intent;package=org.videolan.vlc;type=video;scheme=PROTOCOL;end"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
"video": {
"ios": "infuse://x-callback-url/play?url=PROTOCOL://ADDRESS",
"android": "intent://ADDRESS#Intent;package=com.mxtech.videoplayer.ad;type=video;scheme=PROTOCOL;end"
},
{
"name": "180p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -c:a aac -ar 48000 -b:a 48k -ac 2 -c:v libx264 -vf yadif,scale=-2:180 -b:v 100k -preset veryfast -maxrate 110k -bufsize 1000k -flags +loop-global_header %OUTPUT%"
"download": {
"ios": "vlc-x-callback://x-callback-url/download?url=PROTOCOL://ADDRESS&filename=FILENAME"
}
],
"liveMP4": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
}
],
"liveWebM": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 2 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
}
],
"mpegTsStreaming": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -y -f mpegts pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -y -f mpegts pipe:1"
},
{
"name": "Original"
}
],
"mpegTsViewer": {
"ios": "vlc-x-callback://x-callback-url/stream?url=http://ADDRESS",
"android": "intent://ADDRESS#Intent;package=com.mxtech.videoplayer.ad;type=video;scheme=http;end"
},
"recordedDownloader": {
"ios": "vlc-x-callback://x-callback-url/download?url=http://ADDRESS&filename=FILENAME",
"android": "intent://ADDRESS#Intent;package=com.dv.adm;type=video;scheme=http;end"
},
"recordedStreaming": {
"webm": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 %VB% %VBUFFER% %AB% %ABUFFER% -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1",
"vb": "3000k",
"ab": "192k"
},
{
"name": "360p",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 2 -c:a libvorbis -ar 48000 -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:360 %VB% %VBUFFER% %AB% %ABUFFER% -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1",
"vb": "1500k",
"ab": "128k"
"stream": {
"live": {
"ts": {
"m2ts": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -y -f mpegts pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -y -f mpegts pipe:1"
},
{
"name": "無変換"
}
],
"m2tsll": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -f mpegts -analyzeduration 500000 -i pipe:0 -map 0 -c:s copy -c:d copy -ignore_unknown -fflags nobuffer -flags low_delay -max_delay 250000 -max_interleave_delta 1 -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -flags +cgop -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -y -f mpegts pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -f mpegts -analyzeduration 500000 -i pipe:0 -map 0 -c:s copy -c:d copy -ignore_unknown -fflags nobuffer -flags low_delay -max_delay 250000 -max_interleave_delta 1 -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -flags +cgop -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -y -f mpegts pipe:1"
}
],
"webm": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 2 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
}
],
"mp4": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
}
],
"hls": [
{
"name": "720p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
},
{
"name": "480p",
"cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
}
]
}
],
"mp4": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -ac 2 -c:v libx264 -vf yadif,scale=-2:720 %VB% %VBUFFER% %AB% %ABUFFER% -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1",
"vb": "3000k",
"ab": "192k"
},
{
"name": "360p",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -ac 2 -c:v libx264 -vf yadif,scale=-2:360 %VB% %VBUFFER% %AB% %ABUFFER% -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1",
"vb": "1500k",
"ab": "128k"
}
],
"mpegTs": [
{
"name": "720p (H.264)",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -ac 2 -c:v libx264 -vf yadif,scale=-2:720 %VB% %VBUFFER% %AB% %ABUFFER% -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -y -f mpegts pipe:1",
"vb": "3000k",
"ab": "192k"
},
{
"name": "360p (H.264)",
"cmd": "%FFMPEG% -dual_mono_mode main %RE% -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -ac 2 -c:v libx264 -vf yadif,scale=-2:360 %VB% %VBUFFER% %AB% %ABUFFER% -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -y -f mpegts pipe:1",
"vb": "1500k",
"ab": "128k"
}
]
},
"recordedHLS": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -i %INPUT% -sn -threads 0 -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -i %INPUT% -sn -threads 0 -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
},
{
"name": "480p(h265)",
"cmd": "%FFMPEG% -dual_mono_mode main -i %INPUT% -sn -map 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_type fmp4 -hls_fmp4_init_filename stream%streamNum%-init.mp4 -hls_segment_filename stream%streamNum%-%09d.m4s -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx265 -vf yadif,scale=-2:480 -b:v 350k -preset veryfast -tag:v hvc1 %OUTPUT%"
"recorded": {
"ts": {
"webm": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
}
],
"mp4": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
}
],
"hls": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
}
]
},
"encoded": {
"webm": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
}
],
"mp4": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
}
],
"hls": [
{
"name": "720p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
},
{
"name": "480p",
"cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf scale=-2:480 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
}
]
}
}
],
"recordedViewer": {
"ios": "infuse://x-callback-url/play?url=http://ADDRESS",
"android": "intent://ADDRESS#Intent;package=com.mxtech.videoplayer.ad;type=video;scheme=http;end"
}
}
@@ -153,7 +153,7 @@ in {
package = mkOption {
type = types.package;
description = "Which package to use for the Nextcloud instance.";
relatedPackages = [ "nextcloud21" "nextcloud22" "nextcloud23" ];
relatedPackages = [ "nextcloud22" "nextcloud23" ];
};
phpPackage = mkOption {
type = types.package;

@@ -571,15 +571,6 @@ in {
nextcloud defined in an overlay, please set `services.nextcloud.package` to
`pkgs.nextcloud`.
''
# 21.03 will not be an official release - it was instead 21.05.
# This versionOlder statement remains set to 21.03 for backwards compatibility.
# See https://github.com/NixOS/nixpkgs/pull/108899 and
# https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
# FIXME(@Ma27) remove this else-if as soon as 21.05 is EOL! This is only here
# to ensure that users who are on Nextcloud 19 with a stateVersion <21.05 with
# no explicit services.nextcloud.package don't upgrade to v21 by accident (
# nextcloud20 throws an eval-error because it's dropped).
else if versionOlder stateVersion "21.03" then nextcloud20
else if versionOlder stateVersion "21.11" then nextcloud21
else if versionOlder stateVersion "22.05" then nextcloud22
else nextcloud23
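Since the stateVersion fallback chain above can only select older releases, hosts that want the current release can pin it explicitly, e.g.:

  services.nextcloud.package = pkgs.nextcloud23;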
@@ -20,6 +20,21 @@ in
description = "PlantUML server package to use";
};

packages = {
jdk = mkOption {
type = types.package;
default = pkgs.jdk;
defaultText = literalExpression "pkgs.jdk";
description = "JDK package to use for the server";
};
jetty = mkOption {
type = types.package;
default = pkgs.jetty;
defaultText = literalExpression "pkgs.jetty";
description = "Jetty package to use for the server";
};
};

user = mkOption {
type = types.str;
default = "plantuml";

@@ -105,10 +120,10 @@ in
ALLOW_PLANTUML_INCLUDE = if cfg.allowPlantumlInclude then "true" else "false";
};
script = ''
${pkgs.jre}/bin/java \
-jar ${pkgs.jetty}/start.jar \
${cfg.packages.jdk}/bin/java \
-jar ${cfg.packages.jetty}/start.jar \
--module=deploy,http,jsp \
jetty.home=${pkgs.jetty} \
jetty.home=${cfg.packages.jetty} \
jetty.base=${cfg.package} \
jetty.http.host=${cfg.listenHost} \
jetty.http.port=${builtins.toString cfg.listenPort}
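With the new packages submodule, the JDK and Jetty used by the service can be swapped out; a sketch, assuming the module's usual services.plantuml-server namespace (pkgs.jdk17 is an assumed example):

  services.plantuml-server = {
    enable = true;
    packages.jdk = pkgs.jdk17;
  };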
@@ -69,11 +69,16 @@ in
CERTIFICATE_KEY_FILE = "key.pem";
};
startLimitIntervalSec = 60;
script = ''
if [[ -v CREDENTIALS_DIRECTORY ]]; then
cd "$CREDENTIALS_DIRECTORY"
fi
exec "${pkgs.pomerium}/bin/pomerium" -config "${cfgFile}"
'';

serviceConfig = {
DynamicUser = true;
StateDirectory = [ "pomerium" ];
ExecStart = "${pkgs.pomerium}/bin/pomerium -config ${cfgFile}";

PrivateUsers = false; # breaks CAP_NET_BIND_SERVICE
MemoryDenyWriteExecute = false; # breaks LuaJIT

@@ -99,7 +104,6 @@ in
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];

WorkingDirectory = mkIf (cfg.useACMEHost != null) "$CREDENTIALS_DIRECTORY";
LoadCredential = optionals (cfg.useACMEHost != null) [
"fullchain.pem:/var/lib/acme/${cfg.useACMEHost}/fullchain.pem"
"key.pem:/var/lib/acme/${cfg.useACMEHost}/key.pem"

@@ -124,7 +128,7 @@ in
Type = "oneshot";
TimeoutSec = 60;
ExecCondition = "/run/current-system/systemd/bin/systemctl -q is-active pomerium.service";
ExecStart = "/run/current-system/systemd/bin/systemctl restart pomerium.service";
ExecStart = "/run/current-system/systemd/bin/systemctl --no-block restart pomerium.service";
};
};
});
@@ -23,8 +23,8 @@ in

package = mkOption {
type = types.package;
default = pkgs.tomcat85;
defaultText = literalExpression "pkgs.tomcat85";
default = pkgs.tomcat9;
defaultText = literalExpression "pkgs.tomcat9";
example = lib.literalExpression "pkgs.tomcat9";
description = ''
Which tomcat package to use.

@@ -127,7 +127,7 @@ in
webapps = mkOption {
type = types.listOf types.path;
default = [ tomcat.webapps ];
defaultText = literalExpression "[ pkgs.tomcat85.webapps ]";
defaultText = literalExpression "[ config.services.tomcat.package.webapps ]";
description = "List containing WAR files or directories with WAR files which are web applications to be deployed on Tomcat";
};

@@ -201,6 +201,7 @@ in
{ uid = config.ids.uids.tomcat;
description = "Tomcat user";
home = "/homeless-shelter";
group = "tomcat";
extraGroups = cfg.extraGroups;
};
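Existing configurations that relied on the old default could keep Tomcat 8.5 by pinning the package explicitly (a sketch, assuming pkgs.tomcat85 is still packaged):

  services.tomcat = {
    enable = true;
    package = pkgs.tomcat85;  # new default is pkgs.tomcat9
  };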
@@ -74,11 +74,9 @@ in
# Debugging
environment.sessionVariables.MATE_SESSION_DEBUG = mkIf cfg.debug "1";

environment.systemPackages =
pkgs.mate.basePackages ++
(pkgs.gnome.removePackagesByName
pkgs.mate.extraPackages
config.environment.mate.excludePackages) ++
environment.systemPackages = pkgs.gnome.removePackagesByName
(pkgs.mate.basePackages ++
pkgs.mate.extraPackages ++
[
pkgs.desktop-file-utils
pkgs.glib

@@ -87,7 +85,8 @@ in
pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
pkgs.mate.mate-settings-daemon
pkgs.yelp # for 'Contents' in 'Help' menus
];
])
config.environment.mate.excludePackages;

programs.dconf.enable = true;
# Shell integration for VTE terminals
@@ -227,6 +227,7 @@ in
# Settings from elementary-default-settings
environment.etc."gtk-3.0/settings.ini".source = "${pkgs.pantheon.elementary-default-settings}/etc/gtk-3.0/settings.ini";

xdg.portal.enable = true;
xdg.portal.extraPortals = with pkgs.pantheon; [
elementary-files
elementary-settings-daemon
@@ -219,6 +219,7 @@ in

session = mkOption {
default = [];
type = types.listOf types.attrs;
example = literalExpression
''
[ { manage = "desktop";