diff --git a/doc/contributing/contributing-to-documentation.chapter.md b/doc/contributing/contributing-to-documentation.chapter.md index 889b4114acca..a732eee4b962 100644 --- a/doc/contributing/contributing-to-documentation.chapter.md +++ b/doc/contributing/contributing-to-documentation.chapter.md @@ -27,7 +27,7 @@ If the build succeeds, the manual will be in `./result/share/doc/nixpkgs/manual. As per [RFC 0072](https://github.com/NixOS/rfcs/pull/72), all new documentation content should be written in [CommonMark](https://commonmark.org/) Markdown dialect. -Additional syntax extensions are available, though not all extensions can be used in NixOS option documentation. The following extensions are currently used: +Additional syntax extensions are available, all of which can be used in NixOS option documentation. The following extensions are currently used: - []{#ssec-contributing-markup-anchors} Explicitly defined **anchors** on headings, to allow linking to sections. These should be always used, to ensure the anchors can be linked even when the heading text changes, and to prevent conflicts between [automatically assigned identifiers](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/auto_identifiers.md). @@ -38,6 +38,10 @@ Additional syntax extensions are available, though not all extensions can be use ## Syntax {#sec-contributing-markup} ``` + ::: {.note} + NixOS option documentation does not support headings in general. + ::: + - []{#ssec-contributing-markup-anchors-inline} **Inline anchors**, which allow linking arbitrary place in the text (e.g. individual list items, sentences…). @@ -67,10 +71,6 @@ Additional syntax extensions are available, though not all extensions can be use This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point). 
Though, the feature originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage) with slightly different syntax. - ::: {.note} - Inline roles are available for option documentation. - ::: - - []{#ssec-contributing-markup-admonitions} **Admonitions**, set off from the text to bring attention to something. @@ -96,10 +96,6 @@ Additional syntax extensions are available, though not all extensions can be use - [`tip`](https://tdg.docbook.org/tdg/5.0/tip.html) - [`warning`](https://tdg.docbook.org/tdg/5.0/warning.html) - ::: {.note} - Admonitions are available for option documentation. - ::: - - []{#ssec-contributing-markup-definition-lists} [**Definition lists**](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/definition_lists.md), for defining a group of terms: diff --git a/nixos/doc/manual/default.nix b/nixos/doc/manual/default.nix index 0ddb3fa7fbe9..913058746b35 100644 --- a/nixos/doc/manual/default.nix +++ b/nixos/doc/manual/default.nix @@ -68,12 +68,15 @@ let sources = lib.sourceFilesBySuffices ./. [".xml"]; - modulesDoc = builtins.toFile "modules.xml" '' -
- ${(lib.concatMapStrings (path: '' - - '') (lib.catAttrs "value" config.meta.doc))} -
+ modulesDoc = runCommand "modules.xml" { + nativeBuildInputs = [ pkgs.nixos-render-docs ]; + } '' + nixos-render-docs manual docbook \ + --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \ + "$out" \ + --section \ + --section-id modules \ + --chapters ${lib.concatMapStrings (p: "${p.value} ") config.meta.doc} ''; generatedSources = runCommand "generated-docbook" {} '' diff --git a/nixos/doc/manual/development/meta-attributes.section.md b/nixos/doc/manual/development/meta-attributes.section.md index 7129cf8723e6..33b41fe74d29 100644 --- a/nixos/doc/manual/development/meta-attributes.section.md +++ b/nixos/doc/manual/development/meta-attributes.section.md @@ -23,7 +23,7 @@ file. meta = { maintainers = with lib.maintainers; [ ericsagnes ]; - doc = ./default.xml; + doc = ./default.md; buildDocsInSandbox = true; }; } @@ -31,7 +31,9 @@ file. - `maintainers` contains a list of the module maintainers. -- `doc` points to a valid DocBook file containing the module +- `doc` points to a valid [Nixpkgs-flavored CommonMark]( + https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup + ) file containing the module documentation. Its contents is automatically added to [](#ch-configuration). Changes to a module documentation have to be checked to not break building the NixOS manual: @@ -40,26 +42,6 @@ file. $ nix-build nixos/release.nix -A manual.x86_64-linux ``` - This file should *not* usually be written by hand. Instead it is preferred - to write documentation using CommonMark and converting it to CommonMark - using pandoc. 
The simplest documentation can be converted using just - - ```ShellSession - $ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart > doc.xml - ``` - - More elaborate documentation may wish to add one or more of the pandoc - filters used to build the remainder of the manual, for example the GNOME - desktop uses - - ```ShellSession - $ pandoc gnome.md -t docbook --top-level-division=chapter \ - --extract-media=media -f markdown+smart \ - --lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \ - --lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \ - > gnome.xml - ``` - - `buildDocsInSandbox` indicates whether the option documentation for the module can be built in a derivation sandbox. This option is currently only honored for modules shipped by nixpkgs. User modules and modules taken from diff --git a/nixos/doc/manual/from_md/development/meta-attributes.section.xml b/nixos/doc/manual/from_md/development/meta-attributes.section.xml index 450a5f670f3a..64234f1cc0d7 100644 --- a/nixos/doc/manual/from_md/development/meta-attributes.section.xml +++ b/nixos/doc/manual/from_md/development/meta-attributes.section.xml @@ -28,7 +28,7 @@ meta = { maintainers = with lib.maintainers; [ ericsagnes ]; - doc = ./default.xml; + doc = ./default.md; buildDocsInSandbox = true; }; } @@ -42,35 +42,16 @@ - doc points to a valid DocBook file containing - the module documentation. Its contents is automatically added to + doc points to a valid + Nixpkgs-flavored + CommonMark file containing the module documentation. Its + contents is automatically added to . Changes to a module documentation have to be checked to not break building the NixOS manual: $ nix-build nixos/release.nix -A manual.x86_64-linux - - - This file should not usually be written by - hand. Instead it is preferred to write documentation using - CommonMark and converting it to CommonMark using pandoc. 
The - simplest documentation can be converted using just - - -$ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart > doc.xml - - - More elaborate documentation may wish to add one or more of the - pandoc filters used to build the remainder of the manual, for - example the GNOME desktop uses - - -$ pandoc gnome.md -t docbook --top-level-division=chapter \ - --extract-media=media -f markdown+smart \ - --lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \ - --lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \ - > gnome.xml diff --git a/nixos/doc/manual/md-to-db.sh b/nixos/doc/manual/md-to-db.sh index a7421bed532e..4698e94f508b 100755 --- a/nixos/doc/manual/md-to-db.sh +++ b/nixos/doc/manual/md-to-db.sh @@ -50,21 +50,3 @@ for mf in ${MD_FILES[*]}; do done popd - -# now handle module chapters. we'll need extra checks to ensure that we don't process -# markdown files we're not interested in, so we'll require an x.nix file for ever x.md -# that we'll convert to xml. -pushd "$DIR/../../modules" - -mapfile -t MD_FILES < <(find . -type f -regex '.*\.md$') - -for mf in ${MD_FILES[*]}; do - [ -f "${mf%.md}.nix" ] || continue - - pandoc --top-level-division=chapter "$mf" "${pandoc_flags[@]}" -o "${mf%.md}.xml" - sed -i -e '1 i ' \ - "${mf%.md}.xml" -done - -popd diff --git a/nixos/lib/make-options-doc/default.nix b/nixos/lib/make-options-doc/default.nix index 01db7bcbee9b..271af9ba1801 100644 --- a/nixos/lib/make-options-doc/default.nix +++ b/nixos/lib/make-options-doc/default.nix @@ -148,42 +148,19 @@ in rec { ''; optionsDocBook = pkgs.runCommand "options-docbook.xml" { - MANPAGE_URLS = pkgs.path + "/doc/manpage-urls.json"; - OTD_DOCUMENT_TYPE = documentType; - OTD_VARIABLE_LIST_ID = variablelistId; - OTD_OPTION_ID_PREFIX = optionIdPrefix; - OTD_REVISION = revision; - nativeBuildInputs = [ - (let - # python3Minimal can't be overridden with packages on Darwin, due to a missing framework. 
- # Instead of modifying stdenv, we take the easy way out, since most people on Darwin will - # just be hacking on the Nixpkgs manual (which also uses make-options-doc). - python = if pkgs.stdenv.isDarwin then pkgs.python3 else pkgs.python3Minimal; - self = (python.override { - inherit self; - includeSiteCustomize = true; - }); - in self.withPackages (p: - let - # TODO add our own small test suite when rendering is split out into a new tool - markdown-it-py = p.markdown-it-py.override { - disableTests = true; - }; - mdit-py-plugins = p.mdit-py-plugins.override { - inherit markdown-it-py; - disableTests = true; - }; - in [ - markdown-it-py - mdit-py-plugins - ])) + pkgs.nixos-render-docs ]; } '' - python ${./optionsToDocbook.py} \ + nixos-render-docs options docbook \ + --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \ + --revision ${lib.escapeShellArg revision} \ + --document-type ${lib.escapeShellArg documentType} \ + --varlist-id ${lib.escapeShellArg variablelistId} \ + --id-prefix ${lib.escapeShellArg optionIdPrefix} \ ${lib.optionalString markdownByDefault "--markdown-by-default"} \ ${optionsJSON}/share/doc/nixos/options.json \ - > options.xml + options.xml if grep /nixpkgs/nixos/modules options.xml; then echo "The manual appears to depend on the location of Nixpkgs, which is bad" diff --git a/nixos/lib/make-options-doc/optionsToDocbook.py b/nixos/lib/make-options-doc/optionsToDocbook.py deleted file mode 100644 index 021623d10a7a..000000000000 --- a/nixos/lib/make-options-doc/optionsToDocbook.py +++ /dev/null @@ -1,343 +0,0 @@ -import collections -import json -import os -import sys -from typing import Any, Dict, List -from collections.abc import MutableMapping, Sequence -import inspect - -# for MD conversion -import markdown_it -import markdown_it.renderer -from markdown_it.token import Token -from markdown_it.utils import OptionsDict -from mdit_py_plugins.container import container_plugin -from mdit_py_plugins.deflist import deflist_plugin -from 
mdit_py_plugins.myst_role import myst_role_plugin -from xml.sax.saxutils import escape, quoteattr - -manpage_urls = json.load(open(os.getenv('MANPAGE_URLS'))) - -class Renderer(markdown_it.renderer.RendererProtocol): - __output__ = "docbook" - def __init__(self, parser=None): - self.rules = { - k: v - for k, v in inspect.getmembers(self, predicate=inspect.ismethod) - if not (k.startswith("render") or k.startswith("_")) - } | { - "container_{.note}_open": self._note_open, - "container_{.note}_close": self._note_close, - "container_{.important}_open": self._important_open, - "container_{.important}_close": self._important_close, - "container_{.warning}_open": self._warning_open, - "container_{.warning}_close": self._warning_close, - } - def render(self, tokens: Sequence[Token], options: OptionsDict, env: MutableMapping) -> str: - assert '-link-tag-stack' not in env - env['-link-tag-stack'] = [] - assert '-deflist-stack' not in env - env['-deflist-stack'] = [] - def do_one(i, token): - if token.type == "inline": - assert token.children is not None - return self.renderInline(token.children, options, env) - elif token.type in self.rules: - return self.rules[token.type](tokens[i], tokens, i, options, env) - else: - raise NotImplementedError("md token not supported yet", token) - return "".join(map(lambda arg: do_one(*arg), enumerate(tokens))) - def renderInline(self, tokens: Sequence[Token], options: OptionsDict, env: MutableMapping) -> str: - # HACK to support docbook links and xrefs. link handling is only necessary because the docbook - # manpage stylesheet converts - in urls to a mathematical minus, which may be somewhat incorrect. 
- for i, token in enumerate(tokens): - if token.type != 'link_open': - continue - token.tag = 'link' - # turn [](#foo) into xrefs - if token.attrs['href'][0:1] == '#' and tokens[i + 1].type == 'link_close': - token.tag = "xref" - # turn into links without contents - if tokens[i + 1].type == 'text' and tokens[i + 1].content == token.attrs['href']: - tokens[i + 1].content = '' - - def do_one(i, token): - if token.type in self.rules: - return self.rules[token.type](tokens[i], tokens, i, options, env) - else: - raise NotImplementedError("md node not supported yet", token) - return "".join(map(lambda arg: do_one(*arg), enumerate(tokens))) - - def text(self, token, tokens, i, options, env): - return escape(token.content) - def paragraph_open(self, token, tokens, i, options, env): - return "" - def paragraph_close(self, token, tokens, i, options, env): - return "" - def hardbreak(self, token, tokens, i, options, env): - return "\n" - def softbreak(self, token, tokens, i, options, env): - # should check options.breaks() and emit hard break if so - return "\n" - def code_inline(self, token, tokens, i, options, env): - return f"{escape(token.content)}" - def code_block(self, token, tokens, i, options, env): - return f"{escape(token.content)}" - def link_open(self, token, tokens, i, options, env): - env['-link-tag-stack'].append(token.tag) - (attr, start) = ('linkend', 1) if token.attrs['href'][0] == '#' else ('xlink:href', 0) - return f"<{token.tag} {attr}={quoteattr(token.attrs['href'][start:])}>" - def link_close(self, token, tokens, i, options, env): - return f"" - def list_item_open(self, token, tokens, i, options, env): - return "" - def list_item_close(self, token, tokens, i, options, env): - return "\n" - # HACK open and close para for docbook change size. remove soon. 
- def bullet_list_open(self, token, tokens, i, options, env): - return "\n" - def bullet_list_close(self, token, tokens, i, options, env): - return "\n" - def em_open(self, token, tokens, i, options, env): - return "" - def em_close(self, token, tokens, i, options, env): - return "" - def strong_open(self, token, tokens, i, options, env): - return "" - def strong_close(self, token, tokens, i, options, env): - return "" - def fence(self, token, tokens, i, options, env): - info = f" language={quoteattr(token.info)}" if token.info != "" else "" - return f"{escape(token.content)}" - def blockquote_open(self, token, tokens, i, options, env): - return "
" - def blockquote_close(self, token, tokens, i, options, env): - return "
" - def _note_open(self, token, tokens, i, options, env): - return "" - def _note_close(self, token, tokens, i, options, env): - return "" - def _important_open(self, token, tokens, i, options, env): - return "" - def _important_close(self, token, tokens, i, options, env): - return "" - def _warning_open(self, token, tokens, i, options, env): - return "" - def _warning_close(self, token, tokens, i, options, env): - return "" - # markdown-it emits tokens based on the html syntax tree, but docbook is - # slightly different. html has
{
{
}}
, - # docbook has {} - # we have to reject multiple definitions for the same term for time being. - def dl_open(self, token, tokens, i, options, env): - env['-deflist-stack'].append({}) - return "" - def dl_close(self, token, tokens, i, options, env): - env['-deflist-stack'].pop() - return "" - def dt_open(self, token, tokens, i, options, env): - env['-deflist-stack'][-1]['has-dd'] = False - return "" - def dt_close(self, token, tokens, i, options, env): - return "" - def dd_open(self, token, tokens, i, options, env): - if env['-deflist-stack'][-1]['has-dd']: - raise Exception("multiple definitions per term not supported") - env['-deflist-stack'][-1]['has-dd'] = True - return "" - def dd_close(self, token, tokens, i, options, env): - return "" - def myst_role(self, token, tokens, i, options, env): - if token.meta['name'] == 'command': - return f"{escape(token.content)}" - if token.meta['name'] == 'file': - return f"{escape(token.content)}" - if token.meta['name'] == 'var': - return f"{escape(token.content)}" - if token.meta['name'] == 'env': - return f"{escape(token.content)}" - if token.meta['name'] == 'option': - return f"" - if token.meta['name'] == 'manpage': - [page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ] - section = section[:-1] - man = f"{page}({section})" - title = f"{escape(page)}" - vol = f"{escape(section)}" - ref = f"{title}{vol}" - if man in manpage_urls: - return f"{ref}" - else: - return ref - raise NotImplementedError("md node not supported yet", token) - -md = ( - markdown_it.MarkdownIt(renderer_cls=Renderer) - # TODO maybe fork the plugin and have only a single rule for all? - .use(container_plugin, name="{.note}") - .use(container_plugin, name="{.important}") - .use(container_plugin, name="{.warning}") - .use(deflist_plugin) - .use(myst_role_plugin) -) - -# converts in-place! 
-def convertMD(options: Dict[str, Any]) -> str: - def optionIs(option: Dict[str, Any], key: str, typ: str) -> bool: - if key not in option: return False - if type(option[key]) != dict: return False - if '_type' not in option[key]: return False - return option[key]['_type'] == typ - - def convertCode(name: str, option: Dict[str, Any], key: str): - if optionIs(option, key, 'literalMD'): - option[key] = md.render(f"*{key.capitalize()}:*\n{option[key]['text']}") - elif optionIs(option, key, 'literalExpression'): - code = option[key]['text'] - # for multi-line code blocks we only have to count ` runs at the beginning - # of a line, but this is much easier. - multiline = '\n' in code - longest, current = (0, 0) - for c in code: - current = current + 1 if c == '`' else 0 - longest = max(current, longest) - # inline literals need a space to separate ticks from content, code blocks - # need newlines. inline literals need one extra tick, code blocks need three. - ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ') - code = f"{ticks}{sep}{code}{sep}{ticks}" - option[key] = md.render(f"*{key.capitalize()}:*\n{code}") - elif optionIs(option, key, 'literalDocBook'): - option[key] = f"{key.capitalize()}: {option[key]['text']}" - elif key in option: - raise Exception(f"{name} {key} has unrecognized type", option[key]) - - for (name, option) in options.items(): - try: - if optionIs(option, 'description', 'mdDoc'): - option['description'] = md.render(option['description']['text']) - elif markdownByDefault: - option['description'] = md.render(option['description']) - else: - option['description'] = ("" + - option['description'] + - "") - - convertCode(name, option, 'example') - convertCode(name, option, 'default') - - if 'relatedPackages' in option: - option['relatedPackages'] = md.render(option['relatedPackages']) - except Exception as e: - raise Exception(f"Failed to render option {name}") from e - - return options - -id_translate_table = { - 
ord('*'): ord('_'), - ord('<'): ord('_'), - ord(' '): ord('_'), - ord('>'): ord('_'), - ord('['): ord('_'), - ord(']'): ord('_'), - ord(':'): ord('_'), - ord('"'): ord('_'), -} - -def need_env(n): - if n not in os.environ: - raise RuntimeError("required environment variable not set", n) - return os.environ[n] - -OTD_REVISION = need_env('OTD_REVISION') -OTD_DOCUMENT_TYPE = need_env('OTD_DOCUMENT_TYPE') -OTD_VARIABLE_LIST_ID = need_env('OTD_VARIABLE_LIST_ID') -OTD_OPTION_ID_PREFIX = need_env('OTD_OPTION_ID_PREFIX') - -def print_decl_def(header, locs): - print(f"""{header}:""") - print(f"""""") - for loc in locs: - # locations can be either plain strings (specific to nixpkgs), or attrsets - # { name = "foo/bar.nix"; url = "https://github.com/....."; } - if isinstance(loc, str): - # Hyperlink the filename either to the NixOS github - # repository (if it’s a module and we have a revision number), - # or to the local filesystem. - if not loc.startswith('/'): - if OTD_REVISION == 'local': - href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}" - else: - href = f"https://github.com/NixOS/nixpkgs/blob/{OTD_REVISION}/{loc}" - else: - href = f"file://{loc}" - # Print the filename and make it user-friendly by replacing the - # /nix/store/ prefix by the default location of nixos - # sources. 
- if not loc.startswith('/'): - name = f"" - elif loc.contains('nixops') and loc.contains('/nix/'): - name = f"" - else: - name = loc - print(f"""""") - print(escape(name)) - print(f"""""") - else: - href = f" xlink:href={quoteattr(loc['url'])}" if 'url' in loc else "" - print(f"""{escape(loc['name'])}""") - print(f"""""") - -markdownByDefault = False -optOffset = 0 -for arg in sys.argv[1:]: - if arg == "--markdown-by-default": - optOffset += 1 - markdownByDefault = True - -options = convertMD(json.load(open(sys.argv[1 + optOffset], 'r'))) - -keys = list(options.keys()) -keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") else 2, p) - for p in options[opt]['loc'] ]) - -print(f"""""") -if OTD_DOCUMENT_TYPE == 'appendix': - print("""""") - print(""" Configuration Options""") -print(f"""""") - -for name in keys: - opt = options[name] - id = OTD_OPTION_ID_PREFIX + name.translate(id_translate_table) - print(f"""""") - # NOTE adding extra spaces here introduces spaces into xref link expansions - print(f"""""", end='') - print(f"""""", end='') - print(f"""""") - print(f"""""") - print(opt['description']) - if typ := opt.get('type'): - ro = " (read only)" if opt.get('readOnly', False) else "" - print(f"""Type: {escape(typ)}{ro}""") - if default := opt.get('default'): - print(default) - if example := opt.get('example'): - print(example) - if related := opt.get('relatedPackages'): - print(f"""""") - print(f""" Related packages:""") - print(f"""""") - print(related) - if decl := opt.get('declarations'): - print_decl_def("Declared by", decl) - if defs := opt.get('definitions'): - print_decl_def("Defined by", defs) - print(f"""""") - print(f"""""") - -print("""""") -if OTD_DOCUMENT_TYPE == 'appendix': - print("""""") diff --git a/nixos/modules/i18n/input-method/default.nix b/nixos/modules/i18n/input-method/default.nix index 07fb86bcc25e..5f803b4f2ee7 100644 --- a/nixos/modules/i18n/input-method/default.nix +++ 
b/nixos/modules/i18n/input-method/default.nix @@ -66,7 +66,7 @@ in meta = { maintainers = with lib.maintainers; [ ericsagnes ]; - doc = ./default.xml; + doc = ./default.md; }; } diff --git a/nixos/modules/i18n/input-method/default.xml b/nixos/modules/i18n/input-method/default.xml deleted file mode 100644 index 7b7907cd32a6..000000000000 --- a/nixos/modules/i18n/input-method/default.xml +++ /dev/null @@ -1,275 +0,0 @@ - - - Input Methods - - Input methods are an operating system component that allows any - data, such as keyboard strokes or mouse movements, to be received as - input. In this way users can enter characters and symbols not found - on their input devices. Using an input method is obligatory for any - language that has more graphemes than there are keys on the - keyboard. - - - The following input methods are available in NixOS: - - - - - IBus: The intelligent input bus. - - - - - Fcitx: A customizable lightweight input method. - - - - - Nabi: A Korean input method based on XIM. - - - - - Uim: The universal input method, is a library with a XIM bridge. - - - - - Hime: An extremely easy-to-use input method framework. - - - - - Kime: Korean IME - - - -
- IBus - - IBus is an Intelligent Input Bus. It provides full featured and - user friendly input method user interface. - - - The following snippet can be used to configure IBus: - - -i18n.inputMethod = { - enabled = "ibus"; - ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ]; -}; - - - i18n.inputMethod.ibus.engines is optional and - can be used to add extra IBus engines. - - - Available extra IBus engines are: - - - - - Anthy (ibus-engines.anthy): Anthy is a - system for Japanese input method. It converts Hiragana text to - Kana Kanji mixed text. - - - - - Hangul (ibus-engines.hangul): Korean input - method. - - - - - m17n (ibus-engines.m17n): m17n is an input - method that uses input methods and corresponding icons in the - m17n database. - - - - - mozc (ibus-engines.mozc): A Japanese input - method from Google. - - - - - Table (ibus-engines.table): An input method - that load tables of input methods. - - - - - table-others (ibus-engines.table-others): - Various table-based input methods. To use this, and any other - table-based input methods, it must appear in the list of - engines along with table. For example: - - -ibus.engines = with pkgs.ibus-engines; [ table table-others ]; - - - - - To use any input method, the package must be added in the - configuration, as shown above, and also (after running - nixos-rebuild) the input method must be added - from IBus’ preference dialog. - -
- Troubleshooting - - If IBus works in some applications but not others, a likely - cause of this is that IBus is depending on a different version - of glib to what the applications are - depending on. This can be checked by running - nix-store -q --requisites <path> | grep glib, - where <path> is the path of either IBus - or an application in the Nix store. The glib - packages must match exactly. If they do not, uninstalling and - reinstalling the application is a likely fix. - -
-
-
- Fcitx - - Fcitx is an input method framework with extension support. It has - three built-in Input Method Engine, Pinyin, QuWei and Table-based - input methods. - - - The following snippet can be used to configure Fcitx: - - -i18n.inputMethod = { - enabled = "fcitx"; - fcitx.engines = with pkgs.fcitx-engines; [ mozc hangul m17n ]; -}; - - - i18n.inputMethod.fcitx.engines is optional and - can be used to add extra Fcitx engines. - - - Available extra Fcitx engines are: - - - - - Anthy (fcitx-engines.anthy): Anthy is a - system for Japanese input method. It converts Hiragana text to - Kana Kanji mixed text. - - - - - Chewing (fcitx-engines.chewing): Chewing is - an intelligent Zhuyin input method. It is one of the most - popular input methods among Traditional Chinese Unix users. - - - - - Hangul (fcitx-engines.hangul): Korean input - method. - - - - - Unikey (fcitx-engines.unikey): Vietnamese - input method. - - - - - m17n (fcitx-engines.m17n): m17n is an input - method that uses input methods and corresponding icons in the - m17n database. - - - - - mozc (fcitx-engines.mozc): A Japanese input - method from Google. - - - - - table-others (fcitx-engines.table-others): - Various table-based input methods. - - - -
-
- Nabi - - Nabi is an easy to use Korean X input method. It allows you to - enter phonetic Korean characters (hangul) and pictographic Korean - characters (hanja). - - - The following snippet can be used to configure Nabi: - - -i18n.inputMethod = { - enabled = "nabi"; -}; - -
-
- Uim - - Uim (short for universal input method) is a - multilingual input method framework. Applications can use it - through so-called bridges. - - - The following snippet can be used to configure uim: - - -i18n.inputMethod = { - enabled = "uim"; -}; - - - Note: The - option can be used to choose uim toolbar. - -
-
- Hime - - Hime is an extremely easy-to-use input method framework. It is - lightweight, stable, powerful and supports many commonly used - input methods, including Cangjie, Zhuyin, Dayi, Rank, Shrimp, - Greek, Korean Pinyin, Latin Alphabet, etc… - - - The following snippet can be used to configure Hime: - - -i18n.inputMethod = { - enabled = "hime"; -}; - -
-
- Kime - - Kime is Korean IME. it’s built with Rust language and let you get - simple, safe, fast Korean typing - - - The following snippet can be used to configure Kime: - - -i18n.inputMethod = { - enabled = "kime"; -}; - -
-
diff --git a/nixos/modules/misc/meta.nix b/nixos/modules/misc/meta.nix index e1d16f802cee..95f2765aff1e 100644 --- a/nixos/modules/misc/meta.nix +++ b/nixos/modules/misc/meta.nix @@ -47,7 +47,7 @@ in doc = mkOption { type = docFile; internal = true; - example = "./meta.chapter.xml"; + example = "./meta.chapter.md"; description = lib.mdDoc '' Documentation prologue for the set of options of each module. This option should be defined at most once per module. diff --git a/nixos/modules/programs/digitalbitbox/default.nix b/nixos/modules/programs/digitalbitbox/default.nix index 054110fe5df2..5ee6cdafe63a 100644 --- a/nixos/modules/programs/digitalbitbox/default.nix +++ b/nixos/modules/programs/digitalbitbox/default.nix @@ -33,7 +33,7 @@ in }; meta = { - doc = ./default.xml; + doc = ./default.md; maintainers = with lib.maintainers; [ vidbina ]; }; } diff --git a/nixos/modules/programs/digitalbitbox/default.xml b/nixos/modules/programs/digitalbitbox/default.xml deleted file mode 100644 index ee892523223c..000000000000 --- a/nixos/modules/programs/digitalbitbox/default.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - Digital Bitbox - - Digital Bitbox is a hardware wallet and second-factor authenticator. - - - The digitalbitbox programs module may be - installed by setting programs.digitalbitbox to - true in a manner similar to - - -programs.digitalbitbox.enable = true; - - - and bundles the digitalbitbox package (see - ), which contains the - dbb-app and dbb-cli binaries, - along with the hardware module (see - ) which sets up - the necessary udev rules to access the device. - - - Enabling the digitalbitbox module is pretty much the easiest way to - get a Digital Bitbox device working on your system. - - - For more information, see - https://digitalbitbox.com/start_linux. - -
- Package - - The binaries, dbb-app (a GUI tool) and - dbb-cli (a CLI tool), are available through the - digitalbitbox package which could be installed - as follows: - - -environment.systemPackages = [ - pkgs.digitalbitbox -]; - -
-
- Hardware - - The digitalbitbox hardware package enables the udev rules for - Digital Bitbox devices and may be installed as follows: - - -hardware.digitalbitbox.enable = true; - - - In order to alter the udev rules, one may provide different values - for the udevRule51 and - udevRule52 attributes by means of overriding as - follows: - - -programs.digitalbitbox = { - enable = true; - package = pkgs.digitalbitbox.override { - udevRule51 = "something else"; - }; -}; - -
-
diff --git a/nixos/modules/programs/plotinus.nix b/nixos/modules/programs/plotinus.nix index a011bb862aea..c2b6884d6490 100644 --- a/nixos/modules/programs/plotinus.nix +++ b/nixos/modules/programs/plotinus.nix @@ -8,7 +8,7 @@ in { meta = { maintainers = pkgs.plotinus.meta.maintainers; - doc = ./plotinus.xml; + doc = ./plotinus.md; }; ###### interface diff --git a/nixos/modules/programs/plotinus.xml b/nixos/modules/programs/plotinus.xml deleted file mode 100644 index 2d4db0285148..000000000000 --- a/nixos/modules/programs/plotinus.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - Plotinus - - Source: - modules/programs/plotinus.nix - - - Upstream documentation: - https://github.com/p-e-w/plotinus - - - Plotinus is a searchable command palette in every modern GTK - application. - - - When in a GTK 3 application and Plotinus is enabled, you can press - Ctrl+Shift+P to open the command palette. The - command palette provides a searchable list of of all menu items in - the application. - - - To enable Plotinus, add the following to your - configuration.nix: - - -programs.plotinus.enable = true; - - diff --git a/nixos/modules/programs/zsh/oh-my-zsh.nix b/nixos/modules/programs/zsh/oh-my-zsh.nix index 41ea31b0f122..83eee1c88b3c 100644 --- a/nixos/modules/programs/zsh/oh-my-zsh.nix +++ b/nixos/modules/programs/zsh/oh-my-zsh.nix @@ -142,5 +142,5 @@ in }; - meta.doc = ./oh-my-zsh.xml; + meta.doc = ./oh-my-zsh.md; } diff --git a/nixos/modules/programs/zsh/oh-my-zsh.xml b/nixos/modules/programs/zsh/oh-my-zsh.xml deleted file mode 100644 index 2a2bba96b859..000000000000 --- a/nixos/modules/programs/zsh/oh-my-zsh.xml +++ /dev/null @@ -1,154 +0,0 @@ - - - Oh my ZSH - - oh-my-zsh - is a framework to manage your - ZSH configuration - including completion scripts for several CLI tools or custom prompt - themes. - -
- Basic usage - - The module uses the oh-my-zsh package with all - available features. The initial setup using Nix expressions is - fairly similar to the configuration format of - oh-my-zsh. - - -{ - programs.zsh.ohMyZsh = { - enable = true; - plugins = [ "git" "python" "man" ]; - theme = "agnoster"; - }; -} - - - For a detailed explanation of these arguments please refer to the - oh-my-zsh - docs. - - - The expression generates the needed configuration and writes it - into your /etc/zshrc. - -
-
- Custom additions - - Sometimes third-party or custom scripts such as a modified theme - may be needed. oh-my-zsh provides the - ZSH_CUSTOM - environment variable for this which points to a directory with - additional scripts. - - - The module can do this as well: - - -{ - programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts"; -} - -
-
- Custom environments - - There are several extensions for oh-my-zsh - packaged in nixpkgs. One of them is - nix-zsh-completions - which bundles completion scripts and a plugin for - oh-my-zsh. - - - Rather than using a single mutable path for - ZSH_CUSTOM, it’s also possible to generate this - path from a list of Nix packages: - - -{ pkgs, ... }: -{ - programs.zsh.ohMyZsh.customPkgs = [ - pkgs.nix-zsh-completions - # and even more... - ]; -} - - - Internally a single store path will be created using - buildEnv. Please refer to the docs of - buildEnv - for further reference. - - - Please keep in mind that this is not compatible with - programs.zsh.ohMyZsh.custom as it requires an - immutable store path while custom shall remain - mutable! An evaluation failure will be thrown if both - custom and customPkgs are - set. - -
-
- Package your own customizations - - If third-party customizations (e.g. new themes) are supposed to be - added to oh-my-zsh there are several pitfalls - to keep in mind: - - - - - To comply with the default structure of ZSH - the entire output needs to be written to - $out/share/zsh. - - - - - Completion scripts are supposed to be stored at - $out/share/zsh/site-functions. This - directory is part of the - fpath - and the package should be compatible with pure - ZSH setups. The module will automatically - link the contents of site-functions to - completions directory in the proper store path. - - - - - The plugins directory needs the structure - pluginname/pluginname.plugin.zsh as - structured in the - upstream - repo. - - - - - A derivation for oh-my-zsh may look like this: - - -{ stdenv, fetchFromGitHub }: - -stdenv.mkDerivation rec { - name = "exemplary-zsh-customization-${version}"; - version = "1.0.0"; - src = fetchFromGitHub { - # path to the upstream repository - }; - - dontBuild = true; - installPhase = '' - mkdir -p $out/share/zsh/site-functions - cp {themes,plugins} $out/share/zsh - cp completions $out/share/zsh/site-functions - ''; -} - -
-
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix index eb4f11f7dcde..ef0636258994 100644 --- a/nixos/modules/security/acme/default.nix +++ b/nixos/modules/security/acme/default.nix @@ -916,6 +916,6 @@ in { meta = { maintainers = lib.teams.acme.members; - doc = ./default.xml; + doc = ./default.md; }; } diff --git a/nixos/modules/security/acme/default.xml b/nixos/modules/security/acme/default.xml deleted file mode 100644 index e80ce3b6a494..000000000000 --- a/nixos/modules/security/acme/default.xml +++ /dev/null @@ -1,395 +0,0 @@ - - - SSL/TLS Certificates with ACME - - NixOS supports automatic domain validation & certificate - retrieval and renewal using the ACME protocol. Any provider can be - used, but by default NixOS uses Let’s Encrypt. The alternative ACME - client - lego is - used under the hood. - - - Automatic cert validation and configuration for Apache and Nginx - virtual hosts is included in NixOS, however if you would like to - generate a wildcard cert or you are not using a web server you will - have to configure DNS based validation. - -
- Prerequisites - - To use the ACME module, you must accept the provider’s terms of - service by setting - to - true. The Let’s Encrypt ToS can be found - here. - - - You must also set an email address to be used when creating - accounts with Let’s Encrypt. You can set this for all certs with - and/or on a - per-cert basis with - . This - address is only used for registration and renewal reminders, and - cannot be used to administer the certificates in any way. - - - Alternatively, you can use a different ACME server by changing the - option to a - provider of your choosing, or just change the server for one cert - with . - - - You will need an HTTP server or DNS server for verification. For - HTTP, the server must have a webroot defined that can serve - .well-known/acme-challenge. This directory - must be writeable by the user that will run the ACME client. For - DNS, you must set up credentials with your provider/server for use - with lego. - -
-
- Using ACME certificates in Nginx - - NixOS supports fetching ACME certificates for you by setting - enableACME = true; in a virtualHost config. We - first create self-signed placeholder certificates in place of the - real ACME certs. The placeholder certs are overwritten when the - ACME certs arrive. For foo.example.com the - config would look like this: - - -security.acme.acceptTerms = true; -security.acme.defaults.email = "admin+acme@example.com"; -services.nginx = { - enable = true; - virtualHosts = { - "foo.example.com" = { - forceSSL = true; - enableACME = true; - # All serverAliases will be added as extra domain names on the certificate. - serverAliases = [ "bar.example.com" ]; - locations."/" = { - root = "/var/www"; - }; - }; - - # We can also add a different vhost and reuse the same certificate - # but we have to append extraDomainNames manually beforehand: - # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ]; - "baz.example.com" = { - forceSSL = true; - useACMEHost = "foo.example.com"; - locations."/" = { - root = "/var/www"; - }; - }; - }; -} - -
-
- Using ACME certificates in Apache/httpd - - Using ACME certificates with Apache virtual hosts is identical to - using them with Nginx. The attribute names are all the same, just - replace nginx with httpd where - appropriate. - -
-
- Manual configuration of HTTP-01 validation - - First off you will need to set up a virtual host to serve the - challenges. This example uses a vhost called - certs.example.com, with the intent that you - will generate certs for all your vhosts and redirect everyone to - HTTPS. - - -security.acme.acceptTerms = true; -security.acme.defaults.email = "admin+acme@example.com"; - -# /var/lib/acme/.challenges must be writable by the ACME user -# and readable by the Nginx user. The easiest way to achieve -# this is to add the Nginx user to the ACME group. -users.users.nginx.extraGroups = [ "acme" ]; - -services.nginx = { - enable = true; - virtualHosts = { - "acmechallenge.example.com" = { - # Catchall vhost, will redirect users to HTTPS for all vhosts - serverAliases = [ "*.example.com" ]; - locations."/.well-known/acme-challenge" = { - root = "/var/lib/acme/.challenges"; - }; - locations."/" = { - return = "301 https://$host$request_uri"; - }; - }; - }; -} -# Alternative config for Apache -users.users.wwwrun.extraGroups = [ "acme" ]; -services.httpd = { - enable = true; - virtualHosts = { - "acmechallenge.example.com" = { - # Catchall vhost, will redirect users to HTTPS for all vhosts - serverAliases = [ "*.example.com" ]; - # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user. - # By default, this is the case. - documentRoot = "/var/lib/acme/.challenges"; - extraConfig = '' - RewriteEngine On - RewriteCond %{HTTPS} off - RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC] - RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301] - ''; - }; - }; -} - - - Now you need to configure ACME to generate a certificate. - - -security.acme.certs."foo.example.com" = { - webroot = "/var/lib/acme/.challenges"; - email = "foo@example.com"; - # Ensure that the web server you use can read the generated certs - # Take a look at the group option for the web server you choose. 
- group = "nginx"; - # Since we have a wildcard vhost to handle port 80, - # we can generate certs for anything! - # Just make sure your DNS resolves them. - extraDomainNames = [ "mail.example.com" ]; -}; - - - The private key key.pem and certificate - fullchain.pem will be put into - /var/lib/acme/foo.example.com. - - - Refer to for all available - configuration options for the - security.acme - module. - -
-
- Configuring ACME for DNS validation - - This is useful if you want to generate a wildcard certificate, - since ACME servers will only hand out wildcard certs over DNS - validation. There are a number of supported DNS providers and - servers you can utilise, see the - lego - docs for provider/server specific configuration values. For - the sake of these docs, we will provide a fully self-hosted - example using bind. - - -services.bind = { - enable = true; - extraConfig = '' - include "/var/lib/secrets/dnskeys.conf"; - ''; - zones = [ - rec { - name = "example.com"; - file = "/var/db/bind/${name}"; - master = true; - extraConfig = "allow-update { key rfc2136key.example.com.; };"; - } - ]; -} - -# Now we can configure ACME -security.acme.acceptTerms = true; -security.acme.defaults.email = "admin+acme@example.com"; -security.acme.certs."example.com" = { - domain = "*.example.com"; - dnsProvider = "rfc2136"; - credentialsFile = "/var/lib/secrets/certs.secret"; - # We don't need to wait for propagation since this is a local DNS server - dnsPropagationCheck = false; -}; - - - The dnskeys.conf and - certs.secret must be kept secure and thus you - should not keep their contents in your Nix config. 
Instead, - generate them one time with a systemd service: - - -systemd.services.dns-rfc2136-conf = { - requiredBy = ["acme-example.com.service" "bind.service"]; - before = ["acme-example.com.service" "bind.service"]; - unitConfig = { - ConditionPathExists = "!/var/lib/secrets/dnskeys.conf"; - }; - serviceConfig = { - Type = "oneshot"; - UMask = 0077; - }; - path = [ pkgs.bind ]; - script = '' - mkdir -p /var/lib/secrets - chmod 755 /var/lib/secrets - tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf - chown named:root /var/lib/secrets/dnskeys.conf - chmod 400 /var/lib/secrets/dnskeys.conf - - # extract secret value from the dnskeys.conf - while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf - - cat > /var/lib/secrets/certs.secret << EOF - RFC2136_NAMESERVER='127.0.0.1:53' - RFC2136_TSIG_ALGORITHM='hmac-sha256.' - RFC2136_TSIG_KEY='rfc2136key.example.com' - RFC2136_TSIG_SECRET='$secret' - EOF - chmod 400 /var/lib/secrets/certs.secret - ''; -}; - - - Now you’re all set to generate certs! You should monitor the first - invocation by running - systemctl start acme-example.com.service & journalctl -fu acme-example.com.service - and watching its log output. - -
-
- Using DNS validation with web server virtual hosts - - It is possible to use DNS-01 validation with all certificates, - including those automatically configured via the Nginx/Apache - enableACME - option. This configuration pattern is fully supported and part of - the module’s test suite for Nginx + Apache. - - - You must follow the guide above on configuring DNS-01 validation - first, however instead of setting the options for one certificate - (e.g. - ) you - will set them as defaults (e.g. - ). - - -# Configure ACME appropriately -security.acme.acceptTerms = true; -security.acme.defaults.email = "admin+acme@example.com"; -security.acme.defaults = { - dnsProvider = "rfc2136"; - credentialsFile = "/var/lib/secrets/certs.secret"; - # We don't need to wait for propagation since this is a local DNS server - dnsPropagationCheck = false; -}; - -# For each virtual host you would like to use DNS-01 validation with, -# set acmeRoot = null -services.nginx = { - enable = true; - virtualHosts = { - "foo.example.com" = { - enableACME = true; - acmeRoot = null; - }; - }; -} - - - And that’s it! Next time your configuration is rebuilt, or when - you add a new virtualHost, it will be DNS-01 validated. - -
-
- Using ACME with services demanding root owned - certificates - - Some services refuse to start if the configured certificate files - are not owned by root. PostgreSQL and OpenSMTPD are examples of - these. There is no way to change the user the ACME module uses (it - will always be acme), however you can use - systemd’s LoadCredential feature to resolve - this elegantly. Below is an example configuration for OpenSMTPD, - but this pattern can be applied to any service. - - -# Configure ACME however you like (DNS or HTTP validation), adding -# the following configuration for the relevant certificate. -# Note: You cannot use `systemctl reload` here as that would mean -# the LoadCredential configuration below would be skipped and -# the service would continue to use old certificates. -security.acme.certs."mail.example.com".postRun = '' - systemctl restart opensmtpd -''; - -# Now you must augment OpenSMTPD's systemd service to load -# the certificate files. -systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"]; -systemd.services.opensmtpd.serviceConfig.LoadCredential = let - certDir = config.security.acme.certs."mail.example.com".directory; -in [ - "cert.pem:${certDir}/cert.pem" - "key.pem:${certDir}/key.pem" -]; - -# Finally, configure OpenSMTPD to use these certs. -services.opensmtpd = let - credsDir = "/run/credentials/opensmtpd.service"; -in { - enable = true; - setSendmail = false; - serverConfiguration = '' - pki mail.example.com cert "${credsDir}/cert.pem" - pki mail.example.com key "${credsDir}/key.pem" - listen on localhost tls pki mail.example.com - action act1 relay host smtp://127.0.0.1:10027 - match for local action act1 - ''; -}; - -
-
- Regenerating certificates - - Should you need to regenerate a particular certificate in a hurry, - such as when a vulnerability is found in Let’s Encrypt, there is - now a convenient mechanism for doing so. Running - systemctl clean --what=state acme-example.com.service - will remove all certificate files and the account data for the - given domain, allowing you to then - systemctl start acme-example.com.service to - generate fresh ones. - -
-
- Fixing JWS Verification error - - It is possible that your account credentials file may become - corrupt and need to be regenerated. In this scenario lego will - produce the error JWS verification error. The - solution is to simply delete the associated accounts file and - re-run the affected service(s). - - -# Find the accounts folder for the certificate -systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*' -export accountdir="$(!!)" -# Move this folder to some place else -mv /var/lib/acme/.lego/$accountdir{,.bak} -# Recreate the folder using systemd-tmpfiles -systemd-tmpfiles --create -# Get a new account and reissue certificates -# Note: Do this for all certs that share the same account email address -systemctl start acme-example.com.service - -
-
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix index c5fc09dcea02..bc2d79ac10ac 100644 --- a/nixos/modules/services/backup/borgbackup.nix +++ b/nixos/modules/services/backup/borgbackup.nix @@ -226,7 +226,7 @@ let in { meta.maintainers = with maintainers; [ dotlambda ]; - meta.doc = ./borgbackup.xml; + meta.doc = ./borgbackup.md; ###### interface diff --git a/nixos/modules/services/backup/borgbackup.xml b/nixos/modules/services/backup/borgbackup.xml deleted file mode 100644 index 2b9e0baa6d09..000000000000 --- a/nixos/modules/services/backup/borgbackup.xml +++ /dev/null @@ -1,215 +0,0 @@ - - - BorgBackup - - Source: - modules/services/backup/borgbackup.nix - - - Upstream documentation: - https://borgbackup.readthedocs.io/ - - - BorgBackup - (short: Borg) is a deduplicating backup program. Optionally, it - supports compression and authenticated encryption. - - - The main goal of Borg is to provide an efficient and secure way to - backup data. The data deduplication technique used makes Borg - suitable for daily backups since only changes are stored. The - authenticated encryption technique makes it suitable for backups to - not fully trusted targets. - -
- Configuring - - A complete list of options for the Borgbase module may be found - here. - -
-
- Basic usage for a local backup - - A very basic configuration for backing up to a locally accessible - directory is: - - -{ - opt.services.borgbackup.jobs = { - { rootBackup = { - paths = "/"; - exclude = [ "/nix" "/path/to/local/repo" ]; - repo = "/path/to/local/repo"; - doInit = true; - encryption = { - mode = "repokey"; - passphrase = "secret"; - }; - compression = "auto,lzma"; - startAt = "weekly"; - }; - } - }; -} - - - - If you do not want the passphrase to be stored in the - world-readable Nix store, use passCommand. You find an example - below. - - -
-
- Create a borg backup server - - You should use a different SSH key for each repository you write - to, because the specified keys are restricted to running borg - serve and can only access this single repository. You need the - output of the generate pub file. - - -# sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo -# cat /run/keys/id_ed25519_my_borg_repo -ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos - - - Add the following snippet to your NixOS configuration: - - -{ - services.borgbackup.repos = { - my_borg_repo = { - authorizedKeys = [ - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos" - ] ; - path = "/var/lib/my_borg_repo" ; - }; - }; -} - -
-
- Backup to the borg repository server - - The following NixOS snippet creates an hourly backup to the - service (on the host nixos) as created in the section above. We - assume that you have stored a secret passphrasse in the file - /run/keys/borgbackup_passphrase, which should - be only accessible by root - - -{ - services.borgbackup.jobs = { - backupToLocalServer = { - paths = [ "/etc/nixos" ]; - doInit = true; - repo = "borg@nixos:." ; - encryption = { - mode = "repokey-blake2"; - passCommand = "cat /run/keys/borgbackup_passphrase"; - }; - environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_my_borg_repo"; }; - compression = "auto,lzma"; - startAt = "hourly"; - }; - }; -}; - - - The following few commands (run as root) let you test your backup. - - -> nixos-rebuild switch -...restarting the following units: polkit.service -> systemctl restart borgbackup-job-backupToLocalServer -> sleep 10 -> systemctl restart borgbackup-job-backupToLocalServer -> export BORG_PASSPHRASE=topSecrect -> borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:. -nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac] -nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68] - -
-
- Backup to a hosting service - - Several companies offer - (paid) - hosting services for Borg repositories. - - - To backup your home directory to borgbase you have to: - - - - - Generate a SSH key without a password, to access the remote - server. E.g. - - -sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase - - - - - Create the repository on the server by following the - instructions for your hosting server. - - - - - Initialize the repository on the server. Eg. - - -sudo borg init --encryption=repokey-blake2 \ - -rsh "ssh -i /run/keys/id_ed25519_borgbase" \ - zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo - - - - - Add it to your NixOS configuration, e.g. - - -{ - services.borgbackup.jobs = { - my_Remote_Backup = { - paths = [ "/" ]; - exclude = [ "/nix" "'**/.cache'" ]; - repo = "zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo"; - encryption = { - mode = "repokey-blake2"; - passCommand = "cat /run/keys/borgbackup_passphrase"; - }; - environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; }; - compression = "auto,lzma"; - startAt = "daily"; - }; - }; -}} - - - -
-
- Vorta backup client for the desktop - - Vorta is a backup client for macOS and Linux desktops. It - integrates the mighty BorgBackup with your desktop environment to - protect your data from disk failure, ransomware and theft. - - - It can be installed in NixOS e.g. by adding - pkgs.vorta to - . - - - Details about using Vorta can be found under - https://vorta.borgbase.com - . - -
-
diff --git a/nixos/modules/services/databases/foundationdb.nix b/nixos/modules/services/databases/foundationdb.nix index 16d539b661eb..48e9898a68c2 100644 --- a/nixos/modules/services/databases/foundationdb.nix +++ b/nixos/modules/services/databases/foundationdb.nix @@ -424,6 +424,6 @@ in }; }; - meta.doc = ./foundationdb.xml; + meta.doc = ./foundationdb.md; meta.maintainers = with lib.maintainers; [ thoughtpolice ]; } diff --git a/nixos/modules/services/databases/foundationdb.xml b/nixos/modules/services/databases/foundationdb.xml deleted file mode 100644 index 611535a9eb8a..000000000000 --- a/nixos/modules/services/databases/foundationdb.xml +++ /dev/null @@ -1,425 +0,0 @@ - - - FoundationDB - - Source: - modules/services/databases/foundationdb.nix - - - Upstream documentation: - https://apple.github.io/foundationdb/ - - - Maintainer: Austin Seipp - - - Available version(s): 5.1.x, 5.2.x, 6.0.x - - - FoundationDB (or FDB) is an open source, distributed, - transactional key-value store. - -
- Configuring and basic setup - - To enable FoundationDB, add the following to your - configuration.nix: - - -services.foundationdb.enable = true; -services.foundationdb.package = pkgs.foundationdb52; # FoundationDB 5.2.x - - - The option is - required, and must always be specified. Due to the fact - FoundationDB network protocols and on-disk storage formats may - change between (major) versions, and upgrades must be explicitly - handled by the user, you must always manually specify this - yourself so that the NixOS module will use the proper version. - Note that minor, bugfix releases are always compatible. - - - After running nixos-rebuild, you can verify - whether FoundationDB is running by executing - fdbcli (which is added to - ): - - -$ sudo -u foundationdb fdbcli -Using cluster file `/etc/foundationdb/fdb.cluster'. - -The database is available. - -Welcome to the fdbcli. For help, type `help'. -fdb> status - -Using cluster file `/etc/foundationdb/fdb.cluster'. - -Configuration: - Redundancy mode - single - Storage engine - memory - Coordinators - 1 - -Cluster: - FoundationDB processes - 1 - Machines - 1 - Memory availability - 5.4 GB per process on machine with least available - Fault Tolerance - 0 machines - Server time - 04/20/18 15:21:14 - -... - -fdb> - - - You can also write programs using the available client libraries. - For example, the following Python program can be run in order to - grab the cluster status, as a quick example. (This example uses - nix-shell shebang support to automatically - supply the necessary Python modules). - - -a@link> cat fdb-status.py -#! /usr/bin/env nix-shell -#! 
nix-shell -i python -p python pythonPackages.foundationdb52 - -import fdb -import json - -def main(): - fdb.api_version(520) - db = fdb.open() - - @fdb.transactional - def get_status(tr): - return str(tr['\xff\xff/status/json']) - - obj = json.loads(get_status(db)) - print('FoundationDB available: %s' % obj['client']['database_status']['available']) - -if __name__ == "__main__": - main() -a@link> chmod +x fdb-status.py -a@link> ./fdb-status.py -FoundationDB available: True -a@link> - - - FoundationDB is run under the foundationdb user - and group by default, but this may be changed in the NixOS - configuration. The systemd unit - foundationdb.service controls the - fdbmonitor process. - - - By default, the NixOS module for FoundationDB creates a single - SSD-storage based database for development and basic usage. This - storage engine is designed for SSDs and will perform poorly on - HDDs; however it can handle far more data than the alternative - memory engine and is a better default choice for - most deployments. (Note that you can change the storage backend - on-the-fly for a given FoundationDB cluster using - fdbcli.) - - - Furthermore, only 1 server process and 1 backup agent are started - in the default configuration. See below for more on scaling to - increase this. - - - FoundationDB stores all data for all server processes under - /var/lib/foundationdb. You can override this - using , e.g. - - -services.foundationdb.dataDir = "/data/fdb"; - - - Similarly, logs are stored under - /var/log/foundationdb by default, and there - is a corresponding - as well. - -
-
- Scaling processes and backup agents - - Scaling the number of server processes is quite easy; simply - specify to - be the number of FoundationDB worker processes that should be - started on the machine. - - - FoundationDB worker processes typically require 4GB of RAM - per-process at minimum for good performance, so this option is set - to 1 by default since the maximum amount of RAM is unknown. You’re - advised to abide by this restriction, so pick a number of - processes so that each has 4GB or more. - - - A similar option exists in order to scale backup agent processes, - . Backup - agents are not as performance/RAM sensitive, so feel free to - experiment with the number of available backup processes. - -
-
- Clustering - - FoundationDB on NixOS works similarly to other Linux systems, so - this section will be brief. Please refer to the full FoundationDB - documentation for more on clustering. - - - FoundationDB organizes clusters using a set of - coordinators, which are just - specially-designated worker processes. By default, every - installation of FoundationDB on NixOS will start as its own - individual cluster, with a single coordinator: the first worker - process on localhost. - - - Coordinators are specified globally using the - /etc/foundationdb/fdb.cluster file, which all - servers and client applications will use to find and join - coordinators. Note that this file can not be - managed by NixOS so easily: FoundationDB is designed so that it - will rewrite the file at runtime for all clients and nodes when - cluster coordinators change, with clients transparently handling - this without intervention. It is fundamentally a mutable file, and - you should not try to manage it in any way in NixOS. - - - When dealing with a cluster, there are two main things you want to - do: - - - - - Add a node to the cluster for storage/compute. - - - - - Promote an ordinary worker to a coordinator. - - - - - A node must already be a member of the cluster in order to - properly be promoted to a coordinator, so you must always add it - first if you wish to promote it. - - - To add a machine to a FoundationDB cluster: - - - - - Choose one of the servers to start as the initial coordinator. - - - - - Copy the /etc/foundationdb/fdb.cluster file - from this server to all the other servers. Restart - FoundationDB on all of these other servers, so they join the - cluster. - - - - - All of these servers are now connected and working together in - the cluster, under the chosen coordinator. - - - - - At this point, you can add as many nodes as you want by just - repeating the above steps. 
By default there will still be a single - coordinator: you can use fdbcli to change this - and add new coordinators. - - - As a convenience, FoundationDB can automatically assign - coordinators based on the redundancy mode you wish to achieve for - the cluster. Once all the nodes have been joined, simply set the - replication policy, and then issue the - coordinators auto command - - - For example, assuming we have 3 nodes available, we can enable - double redundancy mode, then auto-select coordinators. For double - redundancy, 3 coordinators is ideal: therefore FoundationDB will - make every node a coordinator automatically: - - -fdbcli> configure double ssd -fdbcli> coordinators auto - - - This will transparently update all the servers within seconds, and - appropriately rewrite the fdb.cluster file, as - well as informing all client processes to do the same. - -
-
- Client connectivity - - By default, all clients must use the current - fdb.cluster file to access a given FoundationDB - cluster. This file is located by default in - /etc/foundationdb/fdb.cluster on all machines - with the FoundationDB service enabled, so you may copy the active - one from your cluster to a new node in order to connect, if it is - not part of the cluster. - -
-
- Client authorization and TLS - - By default, any user who can connect to a FoundationDB process - with the correct cluster configuration can access anything. - FoundationDB uses a pluggable design to transport security, and - out of the box it supports a LibreSSL-based plugin for TLS - support. This plugin not only does in-flight encryption, but also - performs client authorization based on the given endpoint’s - certificate chain. For example, a FoundationDB server may be - configured to only accept client connections over TLS, where the - client TLS certificate is from organization Acme - Co in the Research and Development - unit. - - - Configuring TLS with FoundationDB is done using the - options in order to - control the peer verification string, as well as the certificate - and its private key. - - - Note that the certificate and its private key must be accessible - to the FoundationDB user account that the server runs under. These - files are also NOT managed by NixOS, as putting them into the - store may reveal private information. - - - After you have a key and certificate file in place, it is not - enough to simply set the NixOS module options – you must also - configure the fdb.cluster file to specify that - a given set of coordinators use TLS. This is as simple as adding - the suffix :tls to your cluster coordinator - configuration, after the port number. For example, assuming you - have a coordinator on localhost with the default configuration, - simply specifying: - - -XXXXXX:XXXXXX@127.0.0.1:4500:tls - - - will configure all clients and server processes to use TLS from - now on. - -
-
- Backups and Disaster Recovery - - The usual rules for doing FoundationDB backups apply on NixOS as - written in the FoundationDB manual. However, one important - difference is the security profile for NixOS: by default, the - foundationdb systemd unit uses Linux - namespaces to restrict write access to the system, - except for the log directory, data directory, and the - /etc/foundationdb/ directory. This is enforced - by default and cannot be disabled. - - - However, a side effect of this is that the - fdbbackup command doesn’t work properly for - local filesystem backups: FoundationDB uses a server process - alongside the database processes to perform backups and copy the - backups to the filesystem. As a result, this process is put under - the restricted namespaces above: the backup process can only write - to a limited number of paths. - - - In order to allow flexible backup locations on local disks, the - FoundationDB NixOS module supports a - option. - This option takes a list of paths, and adds them to the systemd - unit, allowing the processes inside the service to write (and - read) the specified directories. - - - For example, to create backups in - /opt/fdb-backups, first set up the paths in the - module options: - - -services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ]; - - - Restart the FoundationDB service, and it will now be able to write - to this directory (even if it does not yet exist.) Note: this path - must exist before restarting the unit. - Otherwise, systemd will not include it in the private FoundationDB - namespace (and it will not add it dynamically at runtime). - - - You can now perform a backup: - - -$ sudo -u foundationdb fdbbackup start -t default -d file:///opt/fdb-backups -$ sudo -u foundationdb fdbbackup status -t default - -
-
- Known limitations - - The FoundationDB setup for NixOS should currently be considered - beta. FoundationDB is not new software, but the NixOS compilation - and integration has only undergone fairly basic testing of all the - available functionality. - - - - - There is no way to specify individual parameters for - individual fdbserver processes. Currently, - all server processes inherit all the global - fdbmonitor settings. - - - - - Ruby bindings are not currently installed. - - - - - Go bindings are not currently installed. - - - -
-
- Options - - NixOS’s FoundationDB module allows you to configure all of the - most relevant configuration options for - fdbmonitor, matching it quite closely. A - complete list of options for the FoundationDB module may be found - here. You - should also read the FoundationDB documentation as well. - -
-
- Full documentation - - FoundationDB is a complex piece of software, and requires careful - administration to properly use. Full documentation for - administration can be found here: - https://apple.github.io/foundationdb/. - -
-
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix index 6665e7a088fc..7bbe1ad22595 100644 --- a/nixos/modules/services/databases/postgresql.nix +++ b/nixos/modules/services/databases/postgresql.nix @@ -585,6 +585,6 @@ in }; - meta.doc = ./postgresql.xml; + meta.doc = ./postgresql.md; meta.maintainers = with lib.maintainers; [ thoughtpolice danbst ]; } diff --git a/nixos/modules/services/databases/postgresql.xml b/nixos/modules/services/databases/postgresql.xml deleted file mode 100644 index 2f62d5d80b19..000000000000 --- a/nixos/modules/services/databases/postgresql.xml +++ /dev/null @@ -1,250 +0,0 @@ - - - PostgreSQL - - Source: - modules/services/databases/postgresql.nix - - - Upstream documentation: - http://www.postgresql.org/docs/ - - - PostgreSQL is an advanced, free relational database. - -
- Configuring - - To enable PostgreSQL, add the following to your - configuration.nix: - - -services.postgresql.enable = true; -services.postgresql.package = pkgs.postgresql_11; - - - Note that you are required to specify the desired version of - PostgreSQL (e.g. pkgs.postgresql_11). Since - upgrading your PostgreSQL version requires a database dump and - reload (see below), NixOS cannot provide a default value for - such as the - most recent release of PostgreSQL. - - - By default, PostgreSQL stores its databases in - /var/lib/postgresql/$psqlSchema. You can - override this using - , e.g. - - -services.postgresql.dataDir = "/data/postgresql"; - -
-
- Upgrading - - - The steps below demonstrate how to upgrade from an older version - to pkgs.postgresql_13. These instructions are - also applicable to other versions. - - - - Major PostgreSQL upgrades require a downtime and a few imperative - steps to be called. This is the case because each major version - has some internal changes in the databases’ state during major - releases. Because of that, NixOS places the state into - /var/lib/postgresql/<version> where - each version can be obtained like this: - - -$ nix-instantiate --eval -A postgresql_13.psqlSchema -"13" - - - For an upgrade, a script like this can be used to simplify the - process: - - -{ config, pkgs, ... }: -{ - environment.systemPackages = [ - (let - # XXX specify the postgresql package you'd like to upgrade to. - # Do not forget to list the extensions you need. - newPostgres = pkgs.postgresql_13.withPackages (pp: [ - # pp.plv8 - ]); - in pkgs.writeScriptBin "upgrade-pg-cluster" '' - set -eux - # XXX it's perhaps advisable to stop all services that depend on postgresql - systemctl stop postgresql - - export NEWDATA="/var/lib/postgresql/${newPostgres.psqlSchema}" - - export NEWBIN="${newPostgres}/bin" - - export OLDDATA="${config.services.postgresql.dataDir}" - export OLDBIN="${config.services.postgresql.package}/bin" - - install -d -m 0700 -o postgres -g postgres "$NEWDATA" - cd "$NEWDATA" - sudo -u postgres $NEWBIN/initdb -D "$NEWDATA" - - sudo -u postgres $NEWBIN/pg_upgrade \ - --old-datadir "$OLDDATA" --new-datadir "$NEWDATA" \ - --old-bindir $OLDBIN --new-bindir $NEWBIN \ - "$@" - '') - ]; -} - - - The upgrade process is: - - - - - Rebuild nixos configuration with the configuration above added - to your configuration.nix. Alternatively, - add that into separate file and reference it in - imports list. - - - - - Login as root (sudo su -) - - - - - Run upgrade-pg-cluster. It will stop old - postgresql, initialize a new one and migrate the old one to - the new one. 
You may supply arguments like - --jobs 4 and --link to - speedup migration process. See - https://www.postgresql.org/docs/current/pgupgrade.html - for details. - - - - - Change postgresql package in NixOS configuration to the one - you were upgrading to via - . Rebuild - NixOS. This should start new postgres using upgraded data - directory and all services you stopped during the upgrade. - - - - - After the upgrade it’s advisable to analyze the new cluster. - - - - - For PostgreSQL ≥ 14, use the vacuumdb - command printed by the upgrades script. - - - - - For PostgreSQL < 14, run (as - su -l postgres in the - , in - this example /var/lib/postgresql/13): - - -$ ./analyze_new_cluster.sh - - - - - - The next step removes the old state-directory! - - - -$ ./delete_old_cluster.sh - - - -
-
- Options - - A complete list of options for the PostgreSQL module may be found - here. - -
-
- Plugins - - Plugins collection for each PostgreSQL version can be accessed - with .pkgs. For example, for - pkgs.postgresql_11 package, its plugin - collection is accessed by - pkgs.postgresql_11.pkgs: - - -$ nix repl '<nixpkgs>' - -Loading '<nixpkgs>'... -Added 10574 variables. - -nix-repl> postgresql_11.pkgs.<TAB><TAB> -postgresql_11.pkgs.cstore_fdw postgresql_11.pkgs.pg_repack -postgresql_11.pkgs.pg_auto_failover postgresql_11.pkgs.pg_safeupdate -postgresql_11.pkgs.pg_bigm postgresql_11.pkgs.pg_similarity -postgresql_11.pkgs.pg_cron postgresql_11.pkgs.pg_topn -postgresql_11.pkgs.pg_hll postgresql_11.pkgs.pgjwt -postgresql_11.pkgs.pg_partman postgresql_11.pkgs.pgroonga -... - - - To add plugins via NixOS configuration, set - services.postgresql.extraPlugins: - - -services.postgresql.package = pkgs.postgresql_11; -services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [ - pg_repack - postgis -]; - - - You can build custom PostgreSQL-with-plugins (to be used outside - of NixOS) using function .withPackages. For - example, creating a custom PostgreSQL package in an overlay can - look like: - - -self: super: { - postgresql_custom = self.postgresql_11.withPackages (ps: [ - ps.pg_repack - ps.postgis - ]); -} - - - Here’s a recipe on how to override a particular plugin through an - overlay: - - -self: super: { - postgresql_11 = super.postgresql_11.override { this = self.postgresql_11; } // { - pkgs = super.postgresql_11.pkgs // { - pg_repack = super.postgresql_11.pkgs.pg_repack.overrideAttrs (_: { - name = "pg_repack-v20181024"; - src = self.fetchzip { - url = "https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz"; - sha256 = "17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5"; - }; - }); - }; - }; -} - -
-
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix index 3b14ad75ab30..d99faf381e01 100644 --- a/nixos/modules/services/desktops/flatpak.nix +++ b/nixos/modules/services/desktops/flatpak.nix @@ -7,7 +7,7 @@ let cfg = config.services.flatpak; in { meta = { - doc = ./flatpak.xml; + doc = ./flatpak.md; maintainers = pkgs.flatpak.meta.maintainers; }; diff --git a/nixos/modules/services/desktops/flatpak.xml b/nixos/modules/services/desktops/flatpak.xml deleted file mode 100644 index cdc3278fa996..000000000000 --- a/nixos/modules/services/desktops/flatpak.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - Flatpak - - Source: - modules/services/desktop/flatpak.nix - - - Upstream documentation: - https://github.com/flatpak/flatpak/wiki - - - Flatpak is a system for building, distributing, and running - sandboxed desktop applications on Linux. - - - To enable Flatpak, add the following to your - configuration.nix: - - - services.flatpak.enable = true; - - - For the sandboxed apps to work correctly, desktop integration - portals need to be installed. If you run GNOME, this will be handled - automatically for you; in other cases, you will need to add - something like the following to your - configuration.nix: - - - xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ]; - - - Then, you will need to add a repository, for example, - Flathub, - either using the following commands: - - -$ flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo -$ flatpak update - - - or by opening the - repository - file in GNOME Software. - - - Finally, you can search and install programs: - - -$ flatpak search bustle -$ flatpak install flathub org.freedesktop.Bustle -$ flatpak run org.freedesktop.Bustle - - - Again, GNOME Software offers graphical interface for these tasks. 
- - diff --git a/nixos/modules/services/development/blackfire.nix b/nixos/modules/services/development/blackfire.nix index 054cef9ae80b..3c98d7a281c6 100644 --- a/nixos/modules/services/development/blackfire.nix +++ b/nixos/modules/services/development/blackfire.nix @@ -11,7 +11,7 @@ let in { meta = { maintainers = pkgs.blackfire.meta.maintainers; - doc = ./blackfire.xml; + doc = ./blackfire.md; }; options = { diff --git a/nixos/modules/services/development/blackfire.xml b/nixos/modules/services/development/blackfire.xml deleted file mode 100644 index 842e5bec97d5..000000000000 --- a/nixos/modules/services/development/blackfire.xml +++ /dev/null @@ -1,61 +0,0 @@ - - - Blackfire profiler - - Source: - modules/services/development/blackfire.nix - - - Upstream documentation: - https://blackfire.io/docs/introduction - - - Blackfire is a - proprietary tool for profiling applications. There are several - languages supported by the product but currently only PHP support is - packaged in Nixpkgs. The back-end consists of a module that is - loaded into the language runtime (called probe) - and a service (agent) that the probe connects - to and that sends the profiles to the server. - - - To use it, you will need to enable the agent and the probe on your - server. The exact method will depend on the way you use PHP but here - is an example of NixOS configuration for PHP-FPM: - - -let - php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [ - blackfire - ])); -in { - # Enable the probe extension for PHP-FPM. - services.phpfpm = { - phpPackage = php; - }; - - # Enable and configure the agent. 
- services.blackfire-agent = { - enable = true; - settings = { - # You will need to get credentials at https://blackfire.io/my/settings/credentials - # You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent - server-id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"; - server-token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"; - }; - }; - - # Make the agent run on start-up. - # (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138) - # Alternately, you can start it manually with `systemctl start blackfire-agent`. - systemd.services.blackfire-agent.wantedBy = [ "phpfpm-foo.service" ]; -} - - - On your developer machine, you will also want to install - the - client (see blackfire package) or the - browser extension to actually trigger the profiling. - - diff --git a/nixos/modules/services/editors/emacs.nix b/nixos/modules/services/editors/emacs.nix index 5ae28cd9bbb3..2be46e47d64c 100644 --- a/nixos/modules/services/editors/emacs.nix +++ b/nixos/modules/services/editors/emacs.nix @@ -99,5 +99,5 @@ in environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "${editorScript}/bin/emacseditor"); }; - meta.doc = ./emacs.xml; + meta.doc = ./emacs.md; } diff --git a/nixos/modules/services/editors/emacs.xml b/nixos/modules/services/editors/emacs.xml deleted file mode 100644 index 37d7a93a12b3..000000000000 --- a/nixos/modules/services/editors/emacs.xml +++ /dev/null @@ -1,490 +0,0 @@ - - - Emacs - - Emacs - is an extensible, customizable, self-documenting real-time display - editor — and more. At its core is an interpreter for Emacs Lisp, a - dialect of the Lisp programming language with extensions to support - text editing. - - - Emacs runs within a graphical desktop environment using the X Window - System, but works equally well on a text terminal. Under macOS, a - Mac port edition is available, which uses Apple’s - native GUI frameworks. 
- - - Nixpkgs provides a superior environment for running Emacs. It’s - simple to create custom builds by overriding the default packages. - Chaotic collections of Emacs Lisp code and extensions can be brought - under control using declarative package management. NixOS even - provides a systemd user service for automatically - starting the Emacs daemon. - -
- Installing Emacs - - Emacs can be installed in the normal way for Nix (see - ). In addition, a NixOS - service can be enabled. - -
- The Different Releases of Emacs - - Nixpkgs defines several basic Emacs packages. The following are - attributes belonging to the pkgs set: - - - - - emacs - - - - The latest stable version of Emacs using the - GTK 2 widget - toolkit. - - - - - - emacs-nox - - - - Emacs built without any dependency on X11 libraries. - - - - - - emacsMacport - - - - Emacs with the Mac port patches, providing - a more native look and feel under macOS. - - - - - - If those aren’t suitable, then the following imitation Emacs - editors are also available in Nixpkgs: - Zile, - mg, - Yi, - jmacs. - -
-
- Adding Packages to Emacs - - Emacs includes an entire ecosystem of functionality beyond text - editing, including a project planner, mail and news reader, - debugger interface, calendar, and more. - - - Most extensions are gotten with the Emacs packaging system - (package.el) from - Emacs Lisp Package - Archive (ELPA), - MELPA, - MELPA - Stable, and - Org ELPA. - Nixpkgs is regularly updated to mirror all these archives. - - - Under NixOS, you can continue to use - package-list-packages and - package-install to install packages. You can - also declare the set of Emacs packages you need using the - derivations from Nixpkgs. The rest of this section discusses - declarative installation of Emacs packages through nixpkgs. - - - The first step to declare the list of packages you want in your - Emacs installation is to create a dedicated derivation. This can - be done in a dedicated emacs.nix file such - as: - - - - - -/* -This is a nix expression to build Emacs and some Emacs packages I like -from source on any distribution where Nix is installed. This will install -all the dependencies from the nixpkgs repository and build the binary files -without interfering with the host distribution. - -To build the project, type the following from the current directory: - -$ nix-build emacs.nix - -To run the newly compiled executable: - -$ ./result/bin/emacs -*/ - -# The first non-comment line in this file indicates that -# the whole file represents a function. -{ pkgs ? import <nixpkgs> {} }: - -let - # The let expression below defines a myEmacs binding pointing to the - # current stable version of Emacs. This binding is here to separate - # the choice of the Emacs binary from the specification of the - # required packages. - myEmacs = pkgs.emacs; - # This generates an emacsWithPackages function. It takes a single - # argument: a function from a package set to a list of packages - # (the packages that will be available in Emacs). 
- emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages; -in - # The rest of the file specifies the list of packages to install. In the - # example, two packages (magit and zerodark-theme) are taken from - # MELPA stable. - emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [ - magit # ; Integrate git <C-x g> - zerodark-theme # ; Nicolas' theme - ]) - # Two packages (undo-tree and zoom-frm) are taken from MELPA. - ++ (with epkgs.melpaPackages; [ - undo-tree # ; <C-x u> to show the undo tree - zoom-frm # ; increase/decrease font size for all buffers %lt;C-x C-+> - ]) - # Three packages are taken from GNU ELPA. - ++ (with epkgs.elpaPackages; [ - auctex # ; LaTeX mode - beacon # ; highlight my cursor when scrolling - nameless # ; hide current package name everywhere in elisp code - ]) - # notmuch is taken from a nixpkgs derivation which contains an Emacs mode. - ++ [ - pkgs.notmuch # From main packages set - ]) - - - The result of this configuration will be an - emacs command which launches Emacs with all - of your chosen packages in the load-path. - - - You can check that it works by executing this in a terminal: - - -$ nix-build emacs.nix -$ ./result/bin/emacs -q - - - and then typing M-x package-initialize. Check - that you can use all the packages you want in this Emacs - instance. For example, try switching to the zerodark theme - through - M-x load-theme <RET> zerodark <RET> y. - - - - A few popular extensions worth checking out are: auctex, - company, edit-server, flycheck, helm, iedit, magit, - multiple-cursors, projectile, and yasnippet. 
- - - - The list of available packages in the various ELPA repositories - can be seen with the following commands: - - - -nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.elpaPackages -nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaPackages -nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaStablePackages -nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.orgPackages - - - If you are on NixOS, you can install this particular Emacs for - all users by adding it to the list of system packages (see - ). Simply modify - your file configuration.nix to make it - contain: - - - -{ - environment.systemPackages = [ - # [...] - (import /path/to/emacs.nix { inherit pkgs; }) - ]; -} - - - In this case, the next nixos-rebuild switch - will take care of adding your emacs to the - PATH environment variable (see - ). - - - If you are not on NixOS or want to install this particular Emacs - only for yourself, you can do so by adding it to your - ~/.config/nixpkgs/config.nix (see - Nixpkgs - manual): - - - -{ - packageOverrides = super: let self = super.pkgs; in { - myemacs = import /path/to/emacs.nix { pkgs = self; }; - }; -} - - - In this case, the next - nix-env -f '<nixpkgs>' -iA myemacs will - take care of adding your emacs to the PATH - environment variable. - -
-
- Advanced Emacs Configuration - - If you want, you can tweak the Emacs package itself from your - emacs.nix. For example, if you want to have - a GTK 3-based Emacs instead of the default GTK 2-based binary - and remove the automatically generated - emacs.desktop (useful if you only use - emacsclient), you can change your file - emacs.nix in this way: - - - - - -{ pkgs ? import <nixpkgs> {} }: -let - myEmacs = (pkgs.emacs.override { - # Use gtk3 instead of the default gtk2 - withGTK3 = true; - withGTK2 = false; - }).overrideAttrs (attrs: { - # I don't want emacs.desktop file because I only use - # emacsclient. - postInstall = (attrs.postInstall or "") + '' - rm $out/share/applications/emacs.desktop - ''; - }); -in [...] - - - After building this file as shown in - the example above, you will - get an GTK 3-based Emacs binary pre-loaded with your favorite - packages. - -
-
-
- Running Emacs as a Service - - NixOS provides an optional systemd service - which launches - Emacs - daemon with the user’s login session. - - - Source: - modules/services/editors/emacs.nix - -
- Enabling the Service - - To install and enable the systemd user - service for Emacs daemon, add the following to your - configuration.nix: - - -services.emacs.enable = true; -services.emacs.package = import /home/cassou/.emacs.d { pkgs = pkgs; }; - - - The services.emacs.package option allows a - custom derivation to be used, for example, one created by - emacsWithPackages. - - - Ensure that the Emacs server is enabled for your user’s Emacs - configuration, either by customizing the - server-mode variable, or by adding - (server-start) to - ~/.emacs.d/init.el. - - - To start the daemon, execute the following: - - -$ nixos-rebuild switch # to activate the new configuration.nix -$ systemctl --user daemon-reload # to force systemd reload -$ systemctl --user start emacs.service # to start the Emacs daemon - - - The server should now be ready to serve Emacs clients. - -
-
- Starting the client - - Ensure that the emacs server is enabled, either by customizing - the server-mode variable, or by adding - (server-start) to - ~/.emacs. - - - To connect to the emacs daemon, run one of the following: - - -emacsclient FILENAME -emacsclient --create-frame # opens a new frame (window) -emacsclient --create-frame --tty # opens a new frame on the current terminal - -
-
- Configuring the <varname>EDITOR</varname> variable - - If is - true, the EDITOR variable - will be set to a wrapper script which launches - emacsclient. - - - Any setting of EDITOR in the shell config - files will override - services.emacs.defaultEditor. To make sure - EDITOR refers to the Emacs wrapper script, - remove any existing EDITOR assignment from - .profile, .bashrc, - .zshenv or any other shell config file. - - - If you have formed certain bad habits when editing files, these - can be corrected with a shell alias to the wrapper script: - - -alias vi=$EDITOR - -
-
- Per-User Enabling of the Service - - In general, systemd user services are - globally enabled by symlinks in - /etc/systemd/user. In the case where Emacs - daemon is not wanted for all users, it is possible to install - the service but not globally enable it: - - -services.emacs.enable = false; -services.emacs.install = true; - - - To enable the systemd user service for just - the currently logged in user, run: - - -systemctl --user enable emacs - - - This will add the symlink - ~/.config/systemd/user/emacs.service. - -
-
-
- Configuring Emacs - - The Emacs init file should be changed to load the extension - packages at startup: - - - -(require 'package) - -;; optional. makes unpure packages archives unavailable -(setq package-archives nil) - -(setq package-enable-at-startup nil) -(package-initialize) - - - After the declarative emacs package configuration has been tested, - previously downloaded packages can be cleaned up by removing - ~/.emacs.d/elpa (do make a backup first, in - case you forgot a package). - -
- A Major Mode for Nix Expressions - - Of interest may be melpaPackages.nix-mode, - which provides syntax highlighting for the Nix language. This is - particularly convenient if you regularly edit Nix files. - -
-
- Accessing man pages - - You can use woman to get completion of all - available man pages. For example, type - M-x woman <RET> nixos-rebuild <RET>. - -
-
- Editing DocBook 5 XML Documents - - Emacs includes - nXML, - a major-mode for validating and editing XML documents. When - editing DocBook 5.0 documents, such as - this one, nXML needs to - be configured with the relevant schema, which is not included. - - - To install the DocBook 5.0 schemas, either add - pkgs.docbook5 to - - (NixOS), or - run nix-env -f '<nixpkgs>' -iA docbook5 - (Nix). - - - Then customize the variable - rng-schema-locating-files to include - ~/.emacs.d/schemas.xml and put the - following text into that file: - - - -<?xml version="1.0"?> -<!-- - To let emacs find this file, evaluate: - (add-to-list 'rng-schema-locating-files "~/.emacs.d/schemas.xml") ---> -<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0"> - <!-- - Use this variation if pkgs.docbook5 is added to environment.systemPackages - --> - <namespace ns="http://docbook.org/ns/docbook" - uri="/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc"/> - <!-- - Use this variation if installing schema with "nix-env -iA pkgs.docbook5". - <namespace ns="http://docbook.org/ns/docbook" - uri="../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc"/> - --> -</locatingRules> - -
-
-
diff --git a/nixos/modules/services/hardware/trezord.nix b/nixos/modules/services/hardware/trezord.nix index 70c1fd09860e..b2217fc97124 100644 --- a/nixos/modules/services/hardware/trezord.nix +++ b/nixos/modules/services/hardware/trezord.nix @@ -8,7 +8,7 @@ in { ### docs meta = { - doc = ./trezord.xml; + doc = ./trezord.md; }; ### interface diff --git a/nixos/modules/services/hardware/trezord.xml b/nixos/modules/services/hardware/trezord.xml deleted file mode 100644 index 1ba9dc1f1887..000000000000 --- a/nixos/modules/services/hardware/trezord.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - Trezor - - Trezor is an open-source cryptocurrency hardware wallet and security - token allowing secure storage of private keys. - - - It offers advanced features such U2F two-factor authorization, SSH - login through - Trezor SSH - agent, - GPG and a - password - manager. For more information, guides and documentation, see - https://wiki.trezor.io. - - - To enable Trezor support, add the following to your - configuration.nix: - - -services.trezord.enable = true; - - - This will add all necessary udev rules and start Trezor Bridge. - - diff --git a/nixos/modules/services/mail/mailman.nix b/nixos/modules/services/mail/mailman.nix index 2adc7427abf4..9273f71db7d5 100644 --- a/nixos/modules/services/mail/mailman.nix +++ b/nixos/modules/services/mail/mailman.nix @@ -642,7 +642,7 @@ in { meta = { maintainers = with lib.maintainers; [ lheckemann qyliss ma27 ]; - doc = ./mailman.xml; + doc = ./mailman.md; }; } diff --git a/nixos/modules/services/mail/mailman.xml b/nixos/modules/services/mail/mailman.xml deleted file mode 100644 index 23b0d0b7da4c..000000000000 --- a/nixos/modules/services/mail/mailman.xml +++ /dev/null @@ -1,112 +0,0 @@ - - - Mailman - - Mailman is free - software for managing electronic mail discussion and e-newsletter - lists. Mailman and its web interface can be configured using the - corresponding NixOS module. 
Note that this service is best used with - an existing, securely configured Postfix setup, as it does not - automatically configure this. - -
- Basic usage with Postfix - - For a basic configuration with Postfix as the MTA, the following - settings are suggested: - - -{ config, ... }: { - services.postfix = { - enable = true; - relayDomains = ["hash:/var/lib/mailman/data/postfix_domains"]; - sslCert = config.security.acme.certs."lists.example.org".directory + "/full.pem"; - sslKey = config.security.acme.certs."lists.example.org".directory + "/key.pem"; - config = { - transport_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"]; - local_recipient_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"]; - }; - }; - services.mailman = { - enable = true; - serve.enable = true; - hyperkitty.enable = true; - webHosts = ["lists.example.org"]; - siteOwner = "mailman@example.org"; - }; - services.nginx.virtualHosts."lists.example.org".enableACME = true; - networking.firewall.allowedTCPPorts = [ 25 80 443 ]; -} - - - DNS records will also be required: - - - - - AAAA and A records - pointing to the host in question, in order for browsers to be - able to discover the address of the web server; - - - - - An MX record pointing to a domain name at - which the host is reachable, in order for other mail servers - to be able to deliver emails to the mailing lists it hosts. - - - - - After this has been done and appropriate DNS records have been set - up, the Postorius mailing list manager and the Hyperkitty archive - browser will be available at https://lists.example.org/. Note that - this setup is not sufficient to deliver emails to most email - providers nor to avoid spam – a number of additional measures for - authenticating incoming and outgoing mails, such as SPF, DMARC and - DKIM are necessary, but outside the scope of the Mailman module. - -
-
- Using with other MTAs - - Mailman also supports other MTA, though with a little bit more - configuration. For example, to use Mailman with Exim, you can use - the following settings: - - -{ config, ... }: { - services = { - mailman = { - enable = true; - siteOwner = "mailman@example.org"; - enablePostfix = false; - settings.mta = { - incoming = "mailman.mta.exim4.LMTP"; - outgoing = "mailman.mta.deliver.deliver"; - lmtp_host = "localhost"; - lmtp_port = "8024"; - smtp_host = "localhost"; - smtp_port = "25"; - configuration = "python:mailman.config.exim4"; - }; - }; - exim = { - enable = true; - # You can configure Exim in a separate file to reduce configuration.nix clutter - config = builtins.readFile ./exim.conf; - }; - }; -} - - - The exim config needs some special additions to work with Mailman. - Currently NixOS can’t manage Exim config with such granularity. - Please refer to - Mailman - documentation for more info on configuring Mailman for - working with Exim. - -
-
diff --git a/nixos/modules/services/matrix/mjolnir.nix b/nixos/modules/services/matrix/mjolnir.nix index cbf7b93329d7..b6a3e5e8c730 100644 --- a/nixos/modules/services/matrix/mjolnir.nix +++ b/nixos/modules/services/matrix/mjolnir.nix @@ -236,7 +236,7 @@ in }; meta = { - doc = ./mjolnir.xml; + doc = ./mjolnir.md; maintainers = with maintainers; [ jojosch ]; }; } diff --git a/nixos/modules/services/matrix/mjolnir.xml b/nixos/modules/services/matrix/mjolnir.xml deleted file mode 100644 index 5bd2919e437c..000000000000 --- a/nixos/modules/services/matrix/mjolnir.xml +++ /dev/null @@ -1,148 +0,0 @@ - - - Mjolnir (Matrix Moderation Tool) - - This chapter will show you how to set up your own, self-hosted - Mjolnir - instance. - - - As an all-in-one moderation tool, it can protect your server from - malicious invites, spam messages, and whatever else you don’t want. - In addition to server-level protection, Mjolnir is great for - communities wanting to protect their rooms without having to use - their personal accounts for moderation. - - - The bot by default includes support for bans, redactions, anti-spam, - server ACLs, room directory changes, room alias transfers, account - deactivation, room shutdown, and more. - - - See the - README - page and the - Moderator’s - guide for additional instructions on how to setup and use - Mjolnir. - - - For additional - settings see - the - default configuration. - -
- Mjolnir Setup - - First create a new Room which will be used as a management room - for Mjolnir. In this room, Mjolnir will log possible errors and - debugging information. You’ll need to set this Room-ID in - services.mjolnir.managementRoom. - - - Next, create a new user for Mjolnir on your homeserver, if not - present already. - - - The Mjolnir Matrix user expects to be free of any rate limiting. - See - Synapse - #6286 for an example on how to achieve this. - - - If you want Mjolnir to be able to deactivate users, move room - aliases, shutdown rooms, etc. you’ll need to make the Mjolnir user - a Matrix server admin. - - - Now invite the Mjolnir user to the management room. - - - It is recommended to use - Pantalaimon, - so your management room can be encrypted. This also applies if you - are looking to moderate an encrypted room. - - - To enable the Pantalaimon E2E Proxy for mjolnir, enable - services.mjolnir.pantalaimon. - This will autoconfigure a new Pantalaimon instance, which will - connect to the homeserver set in - services.mjolnir.homeserverUrl - and Mjolnir itself will be configured to connect to the new - Pantalaimon instance. - - -{ - services.mjolnir = { - enable = true; - homeserverUrl = "https://matrix.domain.tld"; - pantalaimon = { - enable = true; - username = "mjolnir"; - passwordFile = "/run/secrets/mjolnir-password"; - }; - protectedRooms = [ - "https://matrix.to/#/!xxx:domain.tld" - ]; - managementRoom = "!yyy:domain.tld"; - }; -} - -
- Element Matrix Services (EMS) - - If you are using a managed - Element Matrix - Services (EMS) server, you will need to consent - to the terms and conditions. Upon startup, an error log entry - with a URL to the consent page will be generated. - -
-
-
- Synapse Antispam Module - - A Synapse module is also available to apply the same rulesets the - bot uses across an entire homeserver. - - - To use the Antispam Module, add - matrix-synapse-plugins.matrix-synapse-mjolnir-antispam - to the Synapse plugin list and enable the - mjolnir.Module module. - - -{ - services.matrix-synapse = { - plugins = with pkgs; [ - matrix-synapse-plugins.matrix-synapse-mjolnir-antispam - ]; - extraConfig = '' - modules: - - module: mjolnir.Module - config: - # Prevent servers/users in the ban lists from inviting users on this - # server to rooms. Default true. - block_invites: true - # Flag messages sent by servers/users in the ban lists as spam. Currently - # this means that spammy messages will appear as empty to users. Default - # false. - block_messages: false - # Remove users from the user directory search by filtering matrix IDs and - # display names by the entries in the user ban list. Default false. - block_usernames: false - # The room IDs of the ban lists to honour. Unlike other parts of Mjolnir, - # this list cannot be room aliases or permalinks. This server is expected - # to already be joined to the room - Mjolnir will not automatically join - # these rooms. - ban_lists: - - "!roomid:example.org" - ''; - }; -} - -
-
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix index 3087d879b9d2..aee275dab1ec 100644 --- a/nixos/modules/services/matrix/synapse.nix +++ b/nixos/modules/services/matrix/synapse.nix @@ -801,7 +801,7 @@ in { meta = { buildDocsInSandbox = false; - doc = ./synapse.xml; + doc = ./synapse.md; maintainers = teams.matrix.members; }; diff --git a/nixos/modules/services/matrix/synapse.xml b/nixos/modules/services/matrix/synapse.xml deleted file mode 100644 index 686aec93ab67..000000000000 --- a/nixos/modules/services/matrix/synapse.xml +++ /dev/null @@ -1,263 +0,0 @@ - - - Matrix - - Matrix is an open - standard for interoperable, decentralised, real-time communication - over IP. It can be used to power Instant Messaging, VoIP/WebRTC - signalling, Internet of Things communication - or anywhere you need - a standard HTTP API for publishing and subscribing to data whilst - tracking the conversation history. - - - This chapter will show you how to set up your own, self-hosted - Matrix homeserver using the Synapse reference homeserver, and how to - serve your own copy of the Element web client. See the - Try - Matrix Now! overview page for links to Element Apps for - Android and iOS, desktop clients, as well as bridges to other - networks and other projects around Matrix. - -
- Synapse Homeserver - - Synapse - is the reference homeserver implementation of Matrix from the core - development team at matrix.org. The following configuration - example will set up a synapse server for the - example.org domain, served from the host - myhostname.example.org. For more information, - please refer to the - installation - instructions of Synapse . - - -{ pkgs, lib, config, ... }: -let - fqdn = "${config.networking.hostName}.${config.networking.domain}"; - clientConfig = { - "m.homeserver".base_url = "https://${fqdn}"; - "m.identity_server" = {}; - }; - serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443"; - mkWellKnown = data: '' - add_header Content-Type application/json; - add_header Access-Control-Allow-Origin *; - return 200 '${builtins.toJSON data}'; - ''; -in { - networking.hostName = "myhostname"; - networking.domain = "example.org"; - networking.firewall.allowedTCPPorts = [ 80 443 ]; - - services.postgresql.enable = true; - services.postgresql.initialScript = pkgs.writeText "synapse-init.sql" '' - CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; - CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" - TEMPLATE template0 - LC_COLLATE = "C" - LC_CTYPE = "C"; - ''; - - services.nginx = { - enable = true; - recommendedTlsSettings = true; - recommendedOptimisation = true; - recommendedGzipSettings = true; - recommendedProxySettings = true; - virtualHosts = { - # If the A and AAAA DNS records on example.org do not point on the same host as the - # records for myhostname.example.org, you can easily move the /.well-known - # virtualHost section of the code to the host that is serving example.org, while - # the rest stays on myhostname.example.org with no other changes required. - # This pattern also allows to seamlessly move the homeserver from - # myhostname.example.org to myotherhost.example.org by only changing the - # /.well-known redirection target. 
- "${config.networking.domain}" = { - enableACME = true; - forceSSL = true; - # This section is not needed if the server_name of matrix-synapse is equal to - # the domain (i.e. example.org from @foo:example.org) and the federation port - # is 8448. - # Further reference can be found in the docs about delegation under - # https://matrix-org.github.io/synapse/latest/delegate.html - locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig; - # This is usually needed for homeserver discovery (from e.g. other Matrix clients). - # Further reference can be found in the upstream docs at - # https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient - locations."= /.well-known/matrix/client".extraConfig = mkWellKnown clientConfig; - }; - "${fqdn}" = { - enableACME = true; - forceSSL = true; - # It's also possible to do a redirect here or something else, this vhost is not - # needed for Matrix. It's recommended though to *not put* element - # here, see also the section about Element. - locations."/".extraConfig = '' - return 404; - ''; - # Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash - # *must not* be used here. - locations."/_matrix".proxyPass = "http://[::1]:8008"; - # Forward requests for e.g. SSO and password-resets. - locations."/_synapse/client".proxyPass = "http://[::1]:8008"; - }; - }; - }; - - services.matrix-synapse = { - enable = true; - settings.server_name = config.networking.domain; - settings.listeners = [ - { port = 8008; - bind_addresses = [ "::1" ]; - type = "http"; - tls = false; - x_forwarded = true; - resources = [ { - names = [ "client" "federation" ]; - compress = true; - } ]; - } - ]; - }; -} - -
-
- Registering Matrix users - - If you want to run a server with public registration by anybody, - you can then enable - services.matrix-synapse.settings.enable_registration = true;. - Otherwise, or you can generate a registration secret with - pwgen -s 64 1 and set it with - . - To create a new user or admin, run the following after you have - set the secret and have rebuilt NixOS: - - -$ nix-shell -p matrix-synapse -$ register_new_matrix_user -k your-registration-shared-secret http://localhost:8008 -New user localpart: your-username -Password: -Confirm password: -Make admin [no]: -Success! - - - In the example, this would create a user with the Matrix - Identifier @your-username:example.org. - - - - When using - , - the secret will end up in the world-readable store. Instead it’s - recommended to deploy the secret in an additional file like - this: - - - - - Create a file with the following contents: - - -registration_shared_secret: your-very-secret-secret - - - - - Deploy the file with a secret-manager such as - - from - nixops1 - or - sops-nix - to e.g. - /run/secrets/matrix-shared-secret and - ensure that it’s readable by - matrix-synapse. - - - - - Include the file like this in your configuration: - - -{ - services.matrix-synapse.extraConfigFiles = [ - "/run/secrets/matrix-shared-secret" - ]; -} - - - - - - - It’s also possible to user alternative authentication mechanism - such as - LDAP - (via matrix-synapse-ldap3) or - OpenID. - - -
-
- Element (formerly known as Riot) Web Client - - Element - Web is the reference web client for Matrix and developed by - the core team at matrix.org. Element was formerly known as - Riot.im, see the - Element - introductory blog post for more information. The following - snippet can be optionally added to the code before to complete the - synapse installation with a web client served at - https://element.myhostname.example.org and - https://element.example.org. Alternatively, you - can use the hosted copy at - https://app.element.io/, - or use other web clients or native client applications. Due to the - /.well-known urls set up done above, many - clients should fill in the required connection details - automatically when you enter your Matrix Identifier. See - Try - Matrix Now! for a list of existing clients and their - supported featureset. - - -{ - services.nginx.virtualHosts."element.${fqdn}" = { - enableACME = true; - forceSSL = true; - serverAliases = [ - "element.${config.networking.domain}" - ]; - - root = pkgs.element-web.override { - conf = { - default_server_config = clientConfig; # see `clientConfig` from the snippet above. - }; - }; - }; -} - - - - The Element developers do not recommend running Element and your - Matrix homeserver on the same fully-qualified domain name for - security reasons. In the example, this means that you should not - reuse the myhostname.example.org virtualHost - to also serve Element, but instead serve it on a different - subdomain, like element.example.org in the - example. See the - Element - Important Security Notes for more information on this - subject. - - -
-
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix index e6689217ad9a..179359c97a3a 100644 --- a/nixos/modules/services/misc/gitlab.nix +++ b/nixos/modules/services/misc/gitlab.nix @@ -1504,6 +1504,6 @@ in { }; - meta.doc = ./gitlab.xml; + meta.doc = ./gitlab.md; } diff --git a/nixos/modules/services/misc/gitlab.xml b/nixos/modules/services/misc/gitlab.xml deleted file mode 100644 index a193657b0b76..000000000000 --- a/nixos/modules/services/misc/gitlab.xml +++ /dev/null @@ -1,143 +0,0 @@ - - - GitLab - - GitLab is a feature-rich git hosting service. - -
- Prerequisites - - The gitlab service exposes only an Unix socket - at /run/gitlab/gitlab-workhorse.socket. You - need to configure a webserver to proxy HTTP requests to the - socket. - - - For instance, the following configuration could be used to use - nginx as frontend proxy: - - -services.nginx = { - enable = true; - recommendedGzipSettings = true; - recommendedOptimisation = true; - recommendedProxySettings = true; - recommendedTlsSettings = true; - virtualHosts."git.example.com" = { - enableACME = true; - forceSSL = true; - locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket"; - }; -}; - -
-
- Configuring - - GitLab depends on both PostgreSQL and Redis and will automatically - enable both services. In the case of PostgreSQL, a database and a - role will be created. - - - The default state dir is /var/gitlab/state. - This is where all data like the repositories and uploads will be - stored. - - - A basic configuration with some custom settings could look like - this: - - -services.gitlab = { - enable = true; - databasePasswordFile = "/var/keys/gitlab/db_password"; - initialRootPasswordFile = "/var/keys/gitlab/root_password"; - https = true; - host = "git.example.com"; - port = 443; - user = "git"; - group = "git"; - smtp = { - enable = true; - address = "localhost"; - port = 25; - }; - secrets = { - dbFile = "/var/keys/gitlab/db"; - secretFile = "/var/keys/gitlab/secret"; - otpFile = "/var/keys/gitlab/otp"; - jwsFile = "/var/keys/gitlab/jws"; - }; - extraConfig = { - gitlab = { - email_from = "gitlab-no-reply@example.com"; - email_display_name = "Example GitLab"; - email_reply_to = "gitlab-no-reply@example.com"; - default_projects_features = { builds = false; }; - }; - }; -}; - - - If you’re setting up a new GitLab instance, generate new secrets. - You for instance use - tr -dc A-Za-z0-9 < /dev/urandom | head -c 128 > /var/keys/gitlab/db - to generate a new db secret. Make sure the files can be read by, - and only by, the user specified by - services.gitlab.user. - GitLab encrypts sensitive data stored in the database. If you’re - restoring an existing GitLab instance, you must specify the - secrets secret from config/secrets.yml located - in your GitLab state folder. - - - When incoming_mail.enabled is set to - true in - extraConfig - an additional service called gitlab-mailroom is - enabled for fetching incoming mail. - - - Refer to for all available - configuration options for the - services.gitlab - module. - -
-
- Maintenance -
- Backups - - Backups can be configured with the options in - services.gitlab.backup. - Use the - services.gitlab.backup.startAt - option to configure regular backups. - - - To run a manual backup, start the - gitlab-backup service: - - -$ systemctl start gitlab-backup.service - -
-
- Rake tasks - - You can run GitLab’s rake tasks with - gitlab-rake which will be available on the - system when GitLab is enabled. You will have to run the command - as the user that you configured to run GitLab with. - - - A list of all available rake tasks can be obtained by running: - - -$ sudo -u git -H gitlab-rake -T - -
-
-
diff --git a/nixos/modules/services/misc/sourcehut/default.nix b/nixos/modules/services/misc/sourcehut/default.nix index b03cf0739e9d..b2cadbf0c436 100644 --- a/nixos/modules/services/misc/sourcehut/default.nix +++ b/nixos/modules/services/misc/sourcehut/default.nix @@ -1390,6 +1390,6 @@ in '') ]; - meta.doc = ./default.xml; + meta.doc = ./default.md; meta.maintainers = with maintainers; [ tomberek ]; } diff --git a/nixos/modules/services/misc/sourcehut/default.xml b/nixos/modules/services/misc/sourcehut/default.xml deleted file mode 100644 index 1d8330931ddf..000000000000 --- a/nixos/modules/services/misc/sourcehut/default.xml +++ /dev/null @@ -1,113 +0,0 @@ - - - Sourcehut - - Sourcehut is an - open-source, self-hostable software development platform. The server - setup can be automated using - services.sourcehut. - -
- Basic usage - - Sourcehut is a Python and Go based set of applications. This NixOS - module also provides basic configuration integrating Sourcehut - into locally running services.nginx, - services.redis.servers.sourcehut, - services.postfix and - services.postgresql services. - - - A very basic configuration may look like this: - - -{ pkgs, ... }: -let - fqdn = - let - join = hostName: domain: hostName + optionalString (domain != null) ".${domain}"; - in join config.networking.hostName config.networking.domain; -in { - - networking = { - hostName = "srht"; - domain = "tld"; - firewall.allowedTCPPorts = [ 22 80 443 ]; - }; - - services.sourcehut = { - enable = true; - git.enable = true; - man.enable = true; - meta.enable = true; - nginx.enable = true; - postfix.enable = true; - postgresql.enable = true; - redis.enable = true; - settings = { - "sr.ht" = { - environment = "production"; - global-domain = fqdn; - origin = "https://${fqdn}"; - # Produce keys with srht-keygen from sourcehut.coresrht. - network-key = "/run/keys/path/to/network-key"; - service-key = "/run/keys/path/to/service-key"; - }; - webhooks.private-key= "/run/keys/path/to/webhook-key"; - }; - }; - - security.acme.certs."${fqdn}".extraDomainNames = [ - "meta.${fqdn}" - "man.${fqdn}" - "git.${fqdn}" - ]; - - services.nginx = { - enable = true; - # only recommendedProxySettings are strictly required, but the rest make sense as well. - recommendedTlsSettings = true; - recommendedOptimisation = true; - recommendedGzipSettings = true; - recommendedProxySettings = true; - - # Settings to setup what certificates are used for which endpoint. - virtualHosts = { - "${fqdn}".enableACME = true; - "meta.${fqdn}".useACMEHost = fqdn: - "man.${fqdn}".useACMEHost = fqdn: - "git.${fqdn}".useACMEHost = fqdn: - }; - }; -} - - - The hostName option is used internally to - configure the nginx reverse-proxy. 
The settings - attribute set is used by the configuration generator and the - result is placed in /etc/sr.ht/config.ini. - -
-
- Configuration - - All configuration parameters are also stored in - /etc/sr.ht/config.ini which is generated by the - module and linked from the store to ensure that all values from - config.ini can be modified by the module. - -
-
- Using an alternative webserver as reverse-proxy (e.g. - <literal>httpd</literal>) - - By default, nginx is used as reverse-proxy for - sourcehut. However, it’s possible to use e.g. - httpd by explicitly disabling - nginx using - and fixing the - settings. - -
-
diff --git a/nixos/modules/services/misc/taskserver/default.nix b/nixos/modules/services/misc/taskserver/default.nix index 7331c323adba..775b3b6d2eae 100644 --- a/nixos/modules/services/misc/taskserver/default.nix +++ b/nixos/modules/services/misc/taskserver/default.nix @@ -566,5 +566,5 @@ in { }) ]; - meta.doc = ./default.xml; + meta.doc = ./default.md; } diff --git a/nixos/modules/services/misc/taskserver/default.xml b/nixos/modules/services/misc/taskserver/default.xml deleted file mode 100644 index bbb38211b7ca..000000000000 --- a/nixos/modules/services/misc/taskserver/default.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - Taskserver - - Taskserver is the server component of - Taskwarrior, a - free and open source todo list application. - - - Upstream documentation: - https://taskwarrior.org/docs/#taskd - -
- Configuration - - Taskserver does all of its authentication via TLS using client - certificates, so you either need to roll your own CA or purchase a - certificate from a known CA, which allows creation of client - certificates. These certificates are usually advertised as - server certificates. - - - So in order to make it easier to handle your own CA, there is a - helper tool called nixos-taskserver which - manages the custom CA along with Taskserver organisations, users - and groups. - - - While the client certificates in Taskserver only authenticate - whether a user is allowed to connect, every user has its own UUID - which identifies it as an entity. - - - With nixos-taskserver the client certificate is - created along with the UUID of the user, so it handles all of the - credentials needed in order to setup the Taskwarrior client to - work with a Taskserver. - -
-
- The nixos-taskserver tool - - Because Taskserver by default only provides scripts to setup users - imperatively, the nixos-taskserver tool is used - for addition and deletion of organisations along with users and - groups defined by - and as - well for imperative set up. - - - The tool is designed to not interfere if the command is used to - manually set up some organisations, users or groups. - - - For example if you add a new organisation using - nixos-taskserver org add foo, the organisation - is not modified and deleted no matter what you define in - , even if you’re - adding the same organisation in that option. - - - The tool is modelled to imitate the official - taskd command, documentation for each - subcommand can be shown by using the - switch. - -
-
- Declarative/automatic CA management - - Everything is done according to what you specify in the module - options, however in order to set up a Taskwarrior client for - synchronisation with a Taskserver instance, you have to transfer - the keys and certificates to the client machine. - - - This is done using - nixos-taskserver user export $orgname $username - which is printing a shell script fragment to stdout which can - either be used verbatim or adjusted to import the user on the - client machine. - - - For example, let’s say you have the following configuration: - - -{ - services.taskserver.enable = true; - services.taskserver.fqdn = "server"; - services.taskserver.listenHost = "::"; - services.taskserver.organisations.my-company.users = [ "alice" ]; -} - - - This creates an organisation called my-company - with the user alice. - - - Now in order to import the alice user to - another machine alicebox, all we need to do is - something like this: - - -$ ssh server nixos-taskserver user export my-company alice | sh - - - Of course, if no SSH daemon is available on the server you can - also copy & paste it directly into a shell. - - - After this step the user should be set up and you can start - synchronising your tasks for the first time with - task sync init on alicebox. - - - Subsequent synchronisation requests merely require the command - task sync after that stage. - -
-
- Manual CA management - - If you set any options within - service.taskserver.pki.manual.*, - nixos-taskserver won’t issue certificates, but - you can still use it for adding or removing user accounts. - -
-
diff --git a/nixos/modules/services/misc/weechat.nix b/nixos/modules/services/misc/weechat.nix index aa5b9b22837e..338493e3cd37 100644 --- a/nixos/modules/services/misc/weechat.nix +++ b/nixos/modules/services/misc/weechat.nix @@ -59,5 +59,5 @@ in }; }; - meta.doc = ./weechat.xml; + meta.doc = ./weechat.md; } diff --git a/nixos/modules/services/misc/weechat.xml b/nixos/modules/services/misc/weechat.xml deleted file mode 100644 index 83ae171217d2..000000000000 --- a/nixos/modules/services/misc/weechat.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - WeeChat - - WeeChat is a fast and - extensible IRC client. - -
- Basic Usage - - By default, the module creates a - systemd - unit which runs the chat client in a detached - screen - session. - - - This can be done by enabling the weechat - service: - - -{ ... }: - -{ - services.weechat.enable = true; -} - - - The service is managed by a dedicated user named - weechat in the state directory - /var/lib/weechat. - -
-
- Re-attaching to WeeChat - - WeeChat runs in a screen session owned by a dedicated user. To - explicitly allow your another user to attach to this session, the - screenrc needs to be tweaked by adding - multiuser - support: - - -{ - programs.screen.screenrc = '' - multiuser on - acladd normal_user - ''; -} - - - Now, the session can be re-attached like this: - - -screen -x weechat/weechat-screen - - - The session name can be changed using - services.weechat.sessionName. - -
-
diff --git a/nixos/modules/services/monitoring/parsedmarc.nix b/nixos/modules/services/monitoring/parsedmarc.nix index 2e7c4fd00b42..fdaefbe17f2d 100644 --- a/nixos/modules/services/monitoring/parsedmarc.nix +++ b/nixos/modules/services/monitoring/parsedmarc.nix @@ -539,6 +539,6 @@ in }; }; - meta.doc = ./parsedmarc.xml; + meta.doc = ./parsedmarc.md; meta.maintainers = [ lib.maintainers.talyz ]; } diff --git a/nixos/modules/services/monitoring/parsedmarc.xml b/nixos/modules/services/monitoring/parsedmarc.xml deleted file mode 100644 index 4d9b12c9a429..000000000000 --- a/nixos/modules/services/monitoring/parsedmarc.xml +++ /dev/null @@ -1,126 +0,0 @@ - - - parsedmarc - - parsedmarc - is a service which parses incoming - DMARC reports and - stores or sends them to a downstream service for further analysis. - In combination with Elasticsearch, Grafana and the included Grafana - dashboard, it provides a handy overview of DMARC reports over time. - -
- Basic usage - - A very minimal setup which reads incoming reports from an external - email address and saves them to a local Elasticsearch instance - looks like this: - - -services.parsedmarc = { - enable = true; - settings.imap = { - host = "imap.example.com"; - user = "alice@example.com"; - password = "/path/to/imap_password_file"; - }; - provision.geoIp = false; # Not recommended! -}; - - - Note that GeoIP provisioning is disabled in the example for - simplicity, but should be turned on for fully functional reports. - -
-
- Local mail - - Instead of watching an external inbox, a local inbox can be - automatically provisioned. The recipient’s name is by default set - to dmarc, but can be configured in - services.parsedmarc.provision.localMail.recipientName. - You need to add an MX record pointing to the host. More - concretely: for the example to work, an MX record needs to be set - up for monitoring.example.com and the complete - email address that should be configured in the domain’s dmarc - policy is dmarc@monitoring.example.com. - - -services.parsedmarc = { - enable = true; - provision = { - localMail = { - enable = true; - hostname = monitoring.example.com; - }; - geoIp = false; # Not recommended! - }; -}; - -
-
- Grafana and GeoIP - - The reports can be visualized and summarized with parsedmarc’s - official Grafana dashboard. For all views to work, and for the - data to be complete, GeoIP databases are also required. The - following example shows a basic deployment where the provisioned - Elasticsearch instance is automatically added as a Grafana - datasource, and the dashboard is added to Grafana as well. - - -services.parsedmarc = { - enable = true; - provision = { - localMail = { - enable = true; - hostname = url; - }; - grafana = { - datasource = true; - dashboard = true; - }; - }; -}; - -# Not required, but recommended for full functionality -services.geoipupdate = { - settings = { - AccountID = 000000; - LicenseKey = "/path/to/license_key_file"; - }; -}; - -services.grafana = { - enable = true; - addr = "0.0.0.0"; - domain = url; - rootUrl = "https://" + url; - protocol = "socket"; - security = { - adminUser = "admin"; - adminPasswordFile = "/path/to/admin_password_file"; - secretKeyFile = "/path/to/secret_key_file"; - }; -}; - -services.nginx = { - enable = true; - recommendedTlsSettings = true; - recommendedOptimisation = true; - recommendedGzipSettings = true; - recommendedProxySettings = true; - upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {}; - virtualHosts.${url} = { - root = config.services.grafana.staticRootPath; - enableACME = true; - forceSSL = true; - locations."/".tryFiles = "$uri @grafana"; - locations."@grafana".proxyPass = "http://grafana"; - }; -}; -users.users.nginx.extraGroups = [ "grafana" ]; - -
-
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix index f3fbfb149ad7..23cac6793cc6 100644 --- a/nixos/modules/services/monitoring/prometheus/exporters.nix +++ b/nixos/modules/services/monitoring/prometheus/exporters.nix @@ -323,7 +323,7 @@ in ); meta = { - doc = ./exporters.xml; + doc = ./exporters.md; maintainers = [ maintainers.willibutz ]; }; } diff --git a/nixos/modules/services/monitoring/prometheus/exporters.xml b/nixos/modules/services/monitoring/prometheus/exporters.xml deleted file mode 100644 index 0ea95e513ff3..000000000000 --- a/nixos/modules/services/monitoring/prometheus/exporters.xml +++ /dev/null @@ -1,245 +0,0 @@ - - - Prometheus exporters - - Prometheus exporters provide metrics for the - prometheus monitoring - system. - -
- Configuration - - One of the most common exporters is the - node - exporter, it provides hardware and OS metrics from the host - it’s running on. The exporter could be configured as follows: - - - services.prometheus.exporters.node = { - enable = true; - port = 9100; - enabledCollectors = [ - "logind" - "systemd" - ]; - disabledCollectors = [ - "textfile" - ]; - openFirewall = true; - firewallFilter = "-i br0 -p tcp -m tcp --dport 9100"; - }; - - - It should now serve all metrics from the collectors that are - explicitly enabled and the ones that are - enabled - by default, via http under /metrics. In - this example the firewall should just allow incoming connections - to the exporter’s port on the bridge interface - br0 (this would have to be configured - separately of course). For more information about configuration - see man configuration.nix or search through the - available - options. - - - Prometheus can now be configured to consume the metrics produced - by the exporter: - - - services.prometheus = { - # ... - - scrapeConfigs = [ - { - job_name = "node"; - static_configs = [{ - targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; - }]; - } - ]; - - # ... - } - -
-
- Adding a new exporter - - To add a new exporter, it has to be packaged first (see - nixpkgs/pkgs/servers/monitoring/prometheus/ for - examples), then a module can be added. The postfix exporter is - used in this example: - - - - - Some default options for all exporters are provided by - nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix: - - - - - enable - - - - - port - - - - - listenAddress - - - - - extraFlags - - - - - openFirewall - - - - - firewallFilter - - - - - user - - - - - group - - - - - - - As there is already a package available, the module can now be - added. This is accomplished by adding a new file to the - nixos/modules/services/monitoring/prometheus/exporters/ - directory, which will be called postfix.nix and contains all - exporter specific options and configuration: - - -# nixpgs/nixos/modules/services/prometheus/exporters/postfix.nix -{ config, lib, pkgs, options }: - -with lib; - -let - # for convenience we define cfg here - cfg = config.services.prometheus.exporters.postfix; -in -{ - port = 9154; # The postfix exporter listens on this port by default - - # `extraOpts` is an attribute set which contains additional options - # (and optional overrides for default options). - # Note that this attribute is optional. - extraOpts = { - telemetryPath = mkOption { - type = types.str; - default = "/metrics"; - description = '' - Path under which to expose metrics. - ''; - }; - logfilePath = mkOption { - type = types.path; - default = /var/log/postfix_exporter_input.log; - example = /var/log/mail.log; - description = '' - Path where Postfix writes log entries. - This file will be truncated by this exporter! - ''; - }; - showqPath = mkOption { - type = types.path; - default = /var/spool/postfix/public/showq; - example = /var/lib/postfix/queue/public/showq; - description = '' - Path at which Postfix places its showq socket. 
- ''; - }; - }; - - # `serviceOpts` is an attribute set which contains configuration - # for the exporter's systemd service. One of - # `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart` - # has to be specified here. This will be merged with the default - # service configuration. - # Note that by default 'DynamicUser' is 'true'. - serviceOpts = { - serviceConfig = { - DynamicUser = false; - ExecStart = '' - ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \ - --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ - --web.telemetry-path ${cfg.telemetryPath} \ - ${concatStringsSep " \\\n " cfg.extraFlags} - ''; - }; - }; -} - - - - - This should already be enough for the postfix exporter. - Additionally one could now add assertions and conditional - default values. This can be done in the - meta-module that combines all exporter - definitions and generates the submodules: - nixpkgs/nixos/modules/services/prometheus/exporters.nix - - - -
-
- Updating an exporter module - - Should an exporter option change at some point, it is possible to - add information about the change to the exporter definition - similar to nixpkgs/nixos/modules/rename.nix: - - -{ config, lib, pkgs, options }: - -with lib; - -let - cfg = config.services.prometheus.exporters.nginx; -in -{ - port = 9113; - extraOpts = { - # additional module options - # ... - }; - serviceOpts = { - # service configuration - # ... - }; - imports = [ - # 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath' - (mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ]) - - # removed option 'services.prometheus.exporters.nginx.insecure' - (mkRemovedOptionModule [ "insecure" ] '' - This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true. - '') - ({ options.warnings = options.warnings; }) - ]; -} - -
-
diff --git a/nixos/modules/services/network-filesystems/litestream/default.nix b/nixos/modules/services/network-filesystems/litestream/default.nix index 0d987f12a324..6e2ec1ccaa3c 100644 --- a/nixos/modules/services/network-filesystems/litestream/default.nix +++ b/nixos/modules/services/network-filesystems/litestream/default.nix @@ -95,5 +95,5 @@ in users.groups.litestream = {}; }; - meta.doc = ./default.xml; + meta.doc = ./default.md; } diff --git a/nixos/modules/services/network-filesystems/litestream/default.xml b/nixos/modules/services/network-filesystems/litestream/default.xml deleted file mode 100644 index 756899fdb88d..000000000000 --- a/nixos/modules/services/network-filesystems/litestream/default.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - Litestream - - Litestream is a - standalone streaming replication tool for SQLite. - -
- Configuration - - Litestream service is managed by a dedicated user named - litestream which needs permission to the - database file. Here’s an example config which gives required - permissions to access - grafana - database: - - -{ pkgs, ... }: -{ - users.users.litestream.extraGroups = [ "grafana" ]; - - systemd.services.grafana.serviceConfig.ExecStartPost = "+" + pkgs.writeShellScript "grant-grafana-permissions" '' - timeout=10 - - while [ ! -f /var/lib/grafana/data/grafana.db ]; - do - if [ "$timeout" == 0 ]; then - echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db." - exit 1 - fi - - sleep 1 - - ((timeout--)) - done - - find /var/lib/grafana -type d -exec chmod -v 775 {} \; - find /var/lib/grafana -type f -exec chmod -v 660 {} \; - ''; - - services.litestream = { - enable = true; - - environmentFile = "/run/secrets/litestream"; - - settings = { - dbs = [ - { - path = "/var/lib/grafana/data/grafana.db"; - replicas = [{ - url = "s3://mybkt.litestream.io/grafana"; - }]; - } - ]; - }; - }; -} - -
-
diff --git a/nixos/modules/services/networking/firefox-syncserver.nix b/nixos/modules/services/networking/firefox-syncserver.nix index c26a6ae265ff..a06b9573a850 100644 --- a/nixos/modules/services/networking/firefox-syncserver.nix +++ b/nixos/modules/services/networking/firefox-syncserver.nix @@ -311,6 +311,6 @@ in meta = { maintainers = with lib.maintainers; [ pennae ]; - doc = ./firefox-syncserver.xml; + doc = ./firefox-syncserver.md; }; } diff --git a/nixos/modules/services/networking/firefox-syncserver.xml b/nixos/modules/services/networking/firefox-syncserver.xml deleted file mode 100644 index 440922cbba00..000000000000 --- a/nixos/modules/services/networking/firefox-syncserver.xml +++ /dev/null @@ -1,79 +0,0 @@ - - - Firefox Sync server - - A storage server for Firefox Sync that you can easily host yourself. - -
- Quickstart - - The absolute minimal configuration for the sync server looks like - this: - - -services.mysql.package = pkgs.mariadb; - -services.firefox-syncserver = { - enable = true; - secrets = builtins.toFile "sync-secrets" '' - SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store - ''; - singleNode = { - enable = true; - hostname = "localhost"; - url = "http://localhost:5000"; - }; -}; - - - This will start a sync server that is only accessible locally. - Once the services is running you can navigate to - about:config in your Firefox profile and set - identity.sync.tokenserver.uri to - http://localhost:5000/1.0/sync/1.5. Your - browser will now use your local sync server for data storage. - - - - This configuration should never be used in production. It is not - encrypted and stores its secrets in a world-readable location. - - -
-
- More detailed setup - - The firefox-syncserver service provides a - number of options to make setting up small deployment easier. - These are grouped under the singleNode element - of the option tree and allow simple configuration of the most - important parameters. - - - Single node setup is split into two kinds of options: those that - affect the sync server itself, and those that affect its - surroundings. Options that affect the sync server are - capacity, which configures how many accounts - may be active on this instance, and url, which - holds the URL under which the sync server can be accessed. The - url can be configured automatically when using - nginx. - - - Options that affect the surroundings of the sync server are - enableNginx, enableTLS and - hostnam. If enableNginx is - set the sync server module will automatically add an nginx virtual - host to the system using hostname as the domain - and set url accordingly. If - enableTLS is set the module will also enable - ACME certificates on the new virtual host and force all - connections to be made via TLS. - - - For actual deployment it is also recommended to store the - secrets file in a secure location. - -
-
diff --git a/nixos/modules/services/networking/mosquitto.nix b/nixos/modules/services/networking/mosquitto.nix index 563412025561..a4fd2fd7c89f 100644 --- a/nixos/modules/services/networking/mosquitto.nix +++ b/nixos/modules/services/networking/mosquitto.nix @@ -671,6 +671,6 @@ in meta = { maintainers = with lib.maintainers; [ pennae ]; - doc = ./mosquitto.xml; + doc = ./mosquitto.md; }; } diff --git a/nixos/modules/services/networking/mosquitto.xml b/nixos/modules/services/networking/mosquitto.xml deleted file mode 100644 index 91934617c56d..000000000000 --- a/nixos/modules/services/networking/mosquitto.xml +++ /dev/null @@ -1,149 +0,0 @@ - - - Mosquitto - - Mosquitto is a MQTT broker often used for IoT or home automation - data transport. - -
- Quickstart - - A minimal configuration for Mosquitto is - - -services.mosquitto = { - enable = true; - listeners = [ { - acl = [ "pattern readwrite #" ]; - omitPasswordAuth = true; - settings.allow_anonymous = true; - } ]; -}; - - - This will start a broker on port 1883, listening on all interfaces - of the machine, allowing read/write access to all topics to any - user without password requirements. - - - User authentication can be configured with the - users key of listeners. A config that gives - full read access to a user monitor and - restricted write access to a user service could - look like - - -services.mosquitto = { - enable = true; - listeners = [ { - users = { - monitor = { - acl = [ "read #" ]; - password = "monitor"; - }; - service = { - acl = [ "write service/#" ]; - password = "service"; - }; - }; - } ]; -}; - - - TLS authentication is configured by setting TLS-related options of - the listener: - - -services.mosquitto = { - enable = true; - listeners = [ { - port = 8883; # port change is not required, but helpful to avoid mistakes - # ... - settings = { - cafile = "/path/to/mqtt.ca.pem"; - certfile = "/path/to/mqtt.pem"; - keyfile = "/path/to/mqtt.key"; - }; - } ]; - -
-
- Configuration - - The Mosquitto configuration has four distinct types of settings: - the global settings of the daemon, listeners, plugins, and - bridges. Bridges and listeners are part of the global - configuration, plugins are part of listeners. Users of the broker - are configured as parts of listeners rather than globally, - allowing configurations in which a given user is only allowed to - log in to the broker using specific listeners (eg to configure an - admin user with full access to all topics, but restricted to - localhost). - - - Almost all options of Mosquitto are available for configuration at - their appropriate levels, some as NixOS options written in camel - case, the remainders under settings with their - exact names in the Mosquitto config file. The exceptions are - acl_file (which is always set according to the - acl attributes of a listener and its users) and - per_listener_settings (which is always set to - true). - -
- Password authentication - - Mosquitto can be run in two modes, with a password file or - without. Each listener has its own password file, and different - listeners may use different password files. Password file - generation can be disabled by setting - omitPasswordAuth = true for a listener; in - this case it is necessary to either set - settings.allow_anonymous = true to allow all - logins, or to configure other authentication methods like TLS - client certificates with - settings.use_identity_as_username = true. - - - The default is to generate a password file for each listener - from the users configured to that listener. Users with no - configured password will not be added to the password file and - thus will not be able to use the broker. - -
-
- ACL format - - Every listener has a Mosquitto acl_file - attached to it. This ACL is configured via two attributes of the - config: - - - - - the acl attribute of the listener - configures pattern ACL entries and topic ACL entries for - anonymous users. Each entry must be prefixed with - pattern or topic to - distinguish between these two cases. - - - - - the acl attribute of every user - configures in the listener configured the ACL for that given - user. Only topic ACLs are supported by Mosquitto in this - setting, so no prefix is required or allowed. - - - - - The default ACL for a listener is empty, disallowing all - accesses from all clients. To configure a completely open ACL, - set acl = [ "pattern readwrite #" ] - in the listener. - -
-
-
diff --git a/nixos/modules/services/networking/pleroma.nix b/nixos/modules/services/networking/pleroma.nix index f317510258ba..e9db7f3eab8e 100644 --- a/nixos/modules/services/networking/pleroma.nix +++ b/nixos/modules/services/networking/pleroma.nix @@ -147,5 +147,5 @@ in { }; meta.maintainers = with lib.maintainers; [ ninjatrappeur ]; - meta.doc = ./pleroma.xml; + meta.doc = ./pleroma.md; } diff --git a/nixos/modules/services/networking/pleroma.xml b/nixos/modules/services/networking/pleroma.xml deleted file mode 100644 index 97954f4b9514..000000000000 --- a/nixos/modules/services/networking/pleroma.xml +++ /dev/null @@ -1,244 +0,0 @@ - - - Pleroma - - Pleroma is a - lightweight activity pub server. - -
- Generating the Pleroma config - - The pleroma_ctl CLI utility will prompt you - some questions and it will generate an initial config file. This - is an example of usage - - -$ mkdir tmp-pleroma -$ cd tmp-pleroma -$ nix-shell -p pleroma-otp -$ pleroma_ctl instance gen --output config.exs --output-psql setup.psql - - - The config.exs file can be further customized - following the instructions on the - upstream - documentation. Many refinements can be applied also after - the service is running. - -
-
- Initializing the database - - First, the Postgresql service must be enabled in the NixOS - configuration - - -services.postgresql = { - enable = true; - package = pkgs.postgresql_13; -}; - - - and activated with the usual - - -$ nixos-rebuild switch - - - Then you can create and seed the database, using the - setup.psql file that you generated in the - previous section, by running - - -$ sudo -u postgres psql -f setup.psql - -
-
- Enabling the Pleroma service locally - - In this section we will enable the Pleroma service only locally, - so its configurations can be improved incrementally. - - - This is an example of configuration, where - option contains - the content of the file config.exs, generated - in the - first section, but with the secrets (database password, - endpoint secret key, salts, etc.) removed. Removing secrets is - important, because otherwise they will be stored publicly in the - Nix store. - - -services.pleroma = { - enable = true; - secretConfigFile = "/var/lib/pleroma/secrets.exs"; - configs = [ - '' - import Config - - config :pleroma, Pleroma.Web.Endpoint, - url: [host: "pleroma.example.net", scheme: "https", port: 443], - http: [ip: {127, 0, 0, 1}, port: 4000] - - config :pleroma, :instance, - name: "Test", - email: "admin@example.net", - notify_email: "admin@example.net", - limit: 5000, - registrations_open: true - - config :pleroma, :media_proxy, - enabled: false, - redirect_on_failure: true - - config :pleroma, Pleroma.Repo, - adapter: Ecto.Adapters.Postgres, - username: "pleroma", - database: "pleroma", - hostname: "localhost" - - # Configure web push notifications - config :web_push_encryption, :vapid_details, - subject: "mailto:admin@example.net" - - # ... TO CONTINUE ... - '' - ]; -}; - - - Secrets must be moved into a file pointed by - , in our - case /var/lib/pleroma/secrets.exs. This file - can be created copying the previously generated - config.exs file and then removing all the - settings, except the secrets. 
This is an example - - -# Pleroma instance passwords - -import Config - -config :pleroma, Pleroma.Web.Endpoint, - secret_key_base: "<the secret generated by pleroma_ctl>", - signing_salt: "<the secret generated by pleroma_ctl>" - -config :pleroma, Pleroma.Repo, - password: "<the secret generated by pleroma_ctl>" - -# Configure web push notifications -config :web_push_encryption, :vapid_details, - public_key: "<the secret generated by pleroma_ctl>", - private_key: "<the secret generated by pleroma_ctl>" - -# ... TO CONTINUE ... - - - Note that the lines of the same configuration group are comma - separated (i.e. all the lines end with a comma, except the last - one), so when the lines with passwords are added or removed, - commas must be adjusted accordingly. - - - The service can be enabled with the usual - - -$ nixos-rebuild switch - - - The service is accessible only from the local - 127.0.0.1:4000 port. It can be tested using a - port forwarding like this - - -$ ssh -L 4000:localhost:4000 myuser@example.net - - - and then accessing - http://localhost:4000 - from a web browser. - -
-
- Creating the admin user - - After Pleroma service is running, all - Pleroma - administration utilities can be used. In particular an - admin user can be created with - - -$ pleroma_ctl user new <nickname> <email> --admin --moderator --password <password> - -
-
- Configuring Nginx - - In this configuration, Pleroma is listening only on the local port - 4000. Nginx can be configured as a Reverse Proxy, for forwarding - requests from public ports to the Pleroma service. This is an - example of configuration, using - Let’s Encrypt - for the TLS certificates - - -security.acme = { - email = "root@example.net"; - acceptTerms = true; -}; - -services.nginx = { - enable = true; - addSSL = true; - - recommendedTlsSettings = true; - recommendedOptimisation = true; - recommendedGzipSettings = true; - - recommendedProxySettings = false; - # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma - # specific settings, and they will enter in conflict. - - virtualHosts = { - "pleroma.example.net" = { - http2 = true; - enableACME = true; - forceSSL = true; - - locations."/" = { - proxyPass = "http://127.0.0.1:4000"; - - extraConfig = '' - etag on; - gzip on; - - add_header 'Access-Control-Allow-Origin' '*' always; - add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always; - add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always; - add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always; - if ($request_method = OPTIONS) { - return 204; - } - add_header X-XSS-Protection "1; mode=block"; - add_header X-Permitted-Cross-Domain-Policies none; - add_header X-Frame-Options DENY; - add_header X-Content-Type-Options nosniff; - add_header Referrer-Policy same-origin; - add_header X-Download-Options noopen; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host $host; - - client_max_body_size 16m; - # NOTE: increase if users need to upload very big files - ''; - }; - }; - }; -}; - -
-
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix index 0746bbf184fc..9f68853f9fa8 100644 --- a/nixos/modules/services/networking/prosody.nix +++ b/nixos/modules/services/networking/prosody.nix @@ -905,5 +905,5 @@ in }; - meta.doc = ./prosody.xml; + meta.doc = ./prosody.md; } diff --git a/nixos/modules/services/networking/prosody.xml b/nixos/modules/services/networking/prosody.xml deleted file mode 100644 index 5df046f81459..000000000000 --- a/nixos/modules/services/networking/prosody.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - Prosody - - Prosody is an - open-source, modern XMPP server. - -
- Basic usage - - A common struggle for most XMPP newcomers is to find the right set - of XMPP Extensions (XEPs) to setup. Forget to activate a few of - those and your XMPP experience might turn into a nightmare! - - - The XMPP community tackles this problem by creating a meta-XEP - listing a decent set of XEPs you should implement. This meta-XEP - is issued every year, the 2020 edition being - XEP-0423. - - - The NixOS Prosody module will implement most of these recommendend - XEPs out of the box. That being said, two components still require - some manual configuration: the - Multi - User Chat (MUC) and the - HTTP - File Upload ones. You’ll need to create a DNS subdomain for - each of those. The current convention is to name your MUC endpoint - conference.example.org and your HTTP upload - domain upload.example.org. - - - A good configuration to start with, including a - Multi - User Chat (MUC) endpoint as well as a - HTTP - File Upload endpoint will look like this: - - -services.prosody = { - enable = true; - admins = [ "root@example.org" ]; - ssl.cert = "/var/lib/acme/example.org/fullchain.pem"; - ssl.key = "/var/lib/acme/example.org/key.pem"; - virtualHosts."example.org" = { - enabled = true; - domain = "example.org"; - ssl.cert = "/var/lib/acme/example.org/fullchain.pem"; - ssl.key = "/var/lib/acme/example.org/key.pem"; - }; - muc = [ { - domain = "conference.example.org"; - } ]; - uploadHttp = { - domain = "upload.example.org"; - }; -}; - -
-
- Let’s Encrypt Configuration - - As you can see in the code snippet from the - previous - section, you’ll need a single TLS certificate covering your - main endpoint, the MUC one as well as the HTTP Upload one. We can - generate such a certificate by leveraging the ACME - extraDomainNames - module option. - - - Provided the setup detailed in the previous section, you’ll need - the following acme configuration to generate a TLS certificate for - the three endponits: - - -security.acme = { - email = "root@example.org"; - acceptTerms = true; - certs = { - "example.org" = { - webroot = "/var/www/example.org"; - email = "root@example.org"; - extraDomainNames = [ "conference.example.org" "upload.example.org" ]; - }; - }; -}; - -
-
diff --git a/nixos/modules/services/networking/yggdrasil.nix b/nixos/modules/services/networking/yggdrasil.nix index 3d5cbdd2dc3e..fd7193154c6c 100644 --- a/nixos/modules/services/networking/yggdrasil.nix +++ b/nixos/modules/services/networking/yggdrasil.nix @@ -193,7 +193,7 @@ in { environment.systemPackages = [ cfg.package ]; }); meta = { - doc = ./yggdrasil.xml; + doc = ./yggdrasil.md; maintainers = with lib.maintainers; [ gazally ehmry ]; }; } diff --git a/nixos/modules/services/networking/yggdrasil.xml b/nixos/modules/services/networking/yggdrasil.xml deleted file mode 100644 index 39faacbf30ef..000000000000 --- a/nixos/modules/services/networking/yggdrasil.xml +++ /dev/null @@ -1,157 +0,0 @@ - - - Yggdrasil - - Source: - modules/services/networking/yggdrasil/default.nix - - - Upstream documentation: - https://yggdrasil-network.github.io/ - - - Yggdrasil is an early-stage implementation of a fully end-to-end - encrypted, self-arranging IPv6 network. - -
- Configuration -
- Simple ephemeral node - - An annotated example of a simple configuration: - - -{ - services.yggdrasil = { - enable = true; - persistentKeys = false; - # The NixOS module will generate new keys and a new IPv6 address each time - # it is started if persistentKeys is not enabled. - - settings = { - Peers = [ - # Yggdrasil will automatically connect and "peer" with other nodes it - # discovers via link-local multicast announcements. Unless this is the - # case (it probably isn't) a node needs peers within the existing - # network that it can tunnel to. - "tcp://1.2.3.4:1024" - "tcp://1.2.3.5:1024" - # Public peers can be found at - # https://github.com/yggdrasil-network/public-peers - ]; - }; - }; -} - -
-
- Persistent node with prefix - - A node with a fixed address that announces a prefix: - - -let - address = "210:5217:69c0:9afc:1b95:b9f:8718:c3d2"; - prefix = "310:5217:69c0:9afc"; - # taken from the output of "yggdrasilctl getself". -in { - - services.yggdrasil = { - enable = true; - persistentKeys = true; # Maintain a fixed public key and IPv6 address. - settings = { - Peers = [ "tcp://1.2.3.4:1024" "tcp://1.2.3.5:1024" ]; - NodeInfo = { - # This information is visible to the network. - name = config.networking.hostName; - location = "The North Pole"; - }; - }; - }; - - boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1; - # Forward traffic under the prefix. - - networking.interfaces.${eth0}.ipv6.addresses = [{ - # Set a 300::/8 address on the local physical device. - address = prefix + "::1"; - prefixLength = 64; - }]; - - services.radvd = { - # Announce the 300::/8 prefix to eth0. - enable = true; - config = '' - interface eth0 - { - AdvSendAdvert on; - prefix ${prefix}::/64 { - AdvOnLink on; - AdvAutonomous on; - }; - route 200::/8 {}; - }; - ''; - }; -} - -
-
- Yggdrasil attached Container - - A NixOS container attached to the Yggdrasil network via a node - running on the host: - - -let - yggPrefix64 = "310:5217:69c0:9afc"; - # Again, taken from the output of "yggdrasilctl getself". -in -{ - boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1; - # Enable IPv6 forwarding. - - networking = { - bridges.br0.interfaces = [ ]; - # A bridge only to containers… - - interfaces.br0 = { - # … configured with a prefix address. - ipv6.addresses = [{ - address = "${yggPrefix64}::1"; - prefixLength = 64; - }]; - }; - }; - - containers.foo = { - autoStart = true; - privateNetwork = true; - hostBridge = "br0"; - # Attach the container to the bridge only. - config = { config, pkgs, ... }: { - networking.interfaces.eth0.ipv6 = { - addresses = [{ - # Configure a prefix address. - address = "${yggPrefix64}::2"; - prefixLength = 64; - }]; - routes = [{ - # Configure the prefix route. - address = "200::"; - prefixLength = 7; - via = "${yggPrefix64}::1"; - }]; - }; - - services.httpd.enable = true; - networking.firewall.allowedTCPPorts = [ 80 ]; - }; - }; - -} - -
-
-
diff --git a/nixos/modules/services/search/meilisearch.nix b/nixos/modules/services/search/meilisearch.nix index 9b727b76b1c6..73567ca0bc98 100644 --- a/nixos/modules/services/search/meilisearch.nix +++ b/nixos/modules/services/search/meilisearch.nix @@ -9,7 +9,7 @@ in { meta.maintainers = with maintainers; [ Br1ght0ne happysalada ]; - meta.doc = ./meilisearch.xml; + meta.doc = ./meilisearch.md; ###### interface diff --git a/nixos/modules/services/search/meilisearch.xml b/nixos/modules/services/search/meilisearch.xml deleted file mode 100644 index 8bfd64920b03..000000000000 --- a/nixos/modules/services/search/meilisearch.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - Meilisearch - - Meilisearch is a lightweight, fast and powerful search engine. Think - elastic search with a much smaller footprint. - -
- Quickstart - - the minimum to start meilisearch is - - -services.meilisearch.enable = true; - - - this will start the http server included with meilisearch on port - 7700. - - - test with - curl -X GET 'http://localhost:7700/health' - -
-
- Usage - - you first need to add documents to an index before you can search - for documents. - -
- Add a documents to the <literal>movies</literal> - index - - curl -X POST 'http://127.0.0.1:7700/indexes/movies/documents' --data '[{"id": "123", "title": "Superman"}, {"id": 234, "title": "Batman"}]' - -
-
- Search documents in the <literal>movies</literal> - index - - curl 'http://127.0.0.1:7700/indexes/movies/search' --data '{ "q": "botman" }' - (note the typo is intentional and there to demonstrate the typo - tolerant capabilities) - -
-
-
- Defaults - - - - The default nixos package doesn’t come with the - dashboard, - since the dashboard features makes some assets downloads at - compile time. - - - - - Anonimized Analytics sent to meilisearch are disabled by - default. - - - - - Default deployment is development mode. It doesn’t require a - secret master key. All routes are not protected and - accessible. - - - -
-
- Missing - - - - the snapshot feature is not yet configurable from the module, - it’s just a matter of adding the relevant environment - variables. - - - -
-
diff --git a/nixos/modules/services/web-apps/akkoma.nix b/nixos/modules/services/web-apps/akkoma.nix index 47ba53e42221..fc482ff32deb 100644 --- a/nixos/modules/services/web-apps/akkoma.nix +++ b/nixos/modules/services/web-apps/akkoma.nix @@ -1082,5 +1082,5 @@ in { }; meta.maintainers = with maintainers; [ mvs ]; - meta.doc = ./akkoma.xml; + meta.doc = ./akkoma.md; } diff --git a/nixos/modules/services/web-apps/akkoma.xml b/nixos/modules/services/web-apps/akkoma.xml deleted file mode 100644 index 49cbcc911e1d..000000000000 --- a/nixos/modules/services/web-apps/akkoma.xml +++ /dev/null @@ -1,398 +0,0 @@ - - - Akkoma - - Akkoma is a - lightweight ActivityPub microblogging server forked from Pleroma. - -
- Service configuration - - The Elixir configuration file required by Akkoma is generated - automatically from - . - Secrets must be included from external files outside of the Nix - store by setting the configuration option to an attribute set - containing the attribute – a string - pointing to the file containing the actual value of the option. - - - For the mandatory configuration settings these secrets will be - generated automatically if the referenced file does not exist - during startup, unless disabled through - . - - - The following configuration binds Akkoma to the Unix socket - /run/akkoma/socket, expecting to be run behind - a HTTP proxy on fediverse.example.com. - - -services.akkoma.enable = true; -services.akkoma.config = { - ":pleroma" = { - ":instance" = { - name = "My Akkoma instance"; - description = "More detailed description"; - email = "admin@example.com"; - registration_open = false; - }; - - "Pleroma.Web.Endpoint" = { - url.host = "fediverse.example.com"; - }; - }; -}; - - - Please refer to the - configuration - cheat sheet for additional configuration options. - -
-
- User management - - After the Akkoma service is running, the administration utility - can be used to - manage - users. In particular an administrative user can be created - with - - -$ pleroma_ctl user new <nickname> <email> --admin --moderator --password <password> - -
-
- Proxy configuration - - Although it is possible to expose Akkoma directly, it is common - practice to operate it behind an HTTP reverse proxy such as nginx. - - -services.akkoma.nginx = { - enableACME = true; - forceSSL = true; -}; - -services.nginx = { - enable = true; - - clientMaxBodySize = "16m"; - recommendedTlsSettings = true; - recommendedOptimisation = true; - recommendedGzipSettings = true; -}; - - - Please refer to for - details on how to provision an SSL/TLS certificate. - -
- Media proxy - - Without the media proxy function, Akkoma does not store any - remote media like pictures or video locally, and clients have to - fetch them directly from the source server. - - -# Enable nginx slice module distributed with Tengine -services.nginx.package = pkgs.tengine; - -# Enable media proxy -services.akkoma.config.":pleroma".":media_proxy" = { - enabled = true; - proxy_opts.redirect_on_failure = true; -}; - -# Adjust the persistent cache size as needed: -# Assuming an average object size of 128 KiB, around 1 MiB -# of memory is required for the key zone per GiB of cache. -# Ensure that the cache directory exists and is writable by nginx. -services.nginx.commonHttpConfig = '' - proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache - levels= keys_zone=akkoma_media_cache:16m max_size=16g - inactive=1y use_temp_path=off; -''; - -services.akkoma.nginx = { - locations."/proxy" = { - proxyPass = "http://unix:/run/akkoma/socket"; - - extraConfig = '' - proxy_cache akkoma_media_cache; - - # Cache objects in slices of 1 MiB - slice 1m; - proxy_cache_key $host$uri$is_args$args$slice_range; - proxy_set_header Range $slice_range; - - # Decouple proxy and upstream responses - proxy_buffering on; - proxy_cache_lock on; - proxy_ignore_client_abort on; - - # Default cache times for various responses - proxy_cache_valid 200 1y; - proxy_cache_valid 206 301 304 1h; - - # Allow serving of stale items - proxy_cache_use_stale error timeout invalid_header updating; - ''; - }; -}; - -
- Prefetch remote media - - The following example enables the - MediaProxyWarmingPolicy MRF policy which - automatically fetches all media associated with a post through - the media proxy, as soon as the post is received by the - instance. - - -services.akkoma.config.":pleroma".":mrf".policies = - map (pkgs.formats.elixirConf { }).lib.mkRaw [ - "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy" -]; - -
-
- Media previews - - Akkoma can generate previews for media. - - -services.akkoma.config.":pleroma".":media_preview_proxy" = { - enabled = true; - thumbnail_max_width = 1920; - thumbnail_max_height = 1080; -}; - -
-
-
-
- Frontend management - - Akkoma will be deployed with the pleroma-fe and - admin-fe frontends by default. These can be - modified by setting - . - - - The following example overrides the primary frontend’s default - configuration using a custom derivation. - - -services.akkoma.frontends.primary.package = pkgs.runCommand "pleroma-fe" { - config = builtins.toJSON { - expertLevel = 1; - collapseMessageWithSubject = false; - stopGifs = false; - replyVisibility = "following"; - webPushHideIfCW = true; - hideScopeNotice = true; - renderMisskeyMarkdown = false; - hideSiteFavicon = true; - postContentType = "text/markdown"; - showNavShortcuts = false; - }; - nativeBuildInputs = with pkgs; [ jq xorg.lndir ]; - passAsFile = [ "config" ]; -} '' - mkdir $out - lndir ${pkgs.akkoma-frontends.pleroma-fe} $out - - rm $out/static/config.json - jq -s add ${pkgs.akkoma-frontends.pleroma-fe}/static/config.json ${config} \ - >$out/static/config.json -''; - -
-
- Federation policies - - Akkoma comes with a number of modules to police federation with - other ActivityPub instances. The most valuable for typical users - is the - :mrf_simple - module which allows limiting federation based on instance - hostnames. - - - This configuration snippet provides an example on how these can be - used. Choosing an adequate federation policy is not trivial and - entails finding a balance between connectivity to the rest of the - fediverse and providing a pleasant experience to the users of an - instance. - - -services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; { - ":mrf".policies = map mkRaw [ - "Pleroma.Web.ActivityPub.MRF.SimplePolicy" - ]; - - ":mrf_simple" = { - # Tag all media as sensitive - media_nsfw = mkMap { - "nsfw.weird.kinky" = "Untagged NSFW content"; - }; - - # Reject all activities except deletes - reject = mkMap { - "kiwifarms.cc" = "Persistent harassment of users, no moderation"; - }; - - # Force posts to be visible by followers only - followers_only = mkMap { - "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts"; - }; - }; -}; - -
-
- Upload filters - - This example strips GPS and location metadata from uploads, - deduplicates them and anonymises the the file name. - - -services.akkoma.config.":pleroma"."Pleroma.Upload".filters = - map (pkgs.formats.elixirConf { }).lib.mkRaw [ - "Pleroma.Upload.Filter.Exiftool" - "Pleroma.Upload.Filter.Dedupe" - "Pleroma.Upload.Filter.AnonymizeFilename" - ]; - -
-
- Migration from Pleroma - - Pleroma instances can be migrated to Akkoma either by copying the - database and upload data or by pointing Akkoma to the existing - data. The necessary database migrations are run automatically - during startup of the service. - - - The configuration has to be copy‐edited manually. - - - Depending on the size of the database, the initial migration may - take a long time and exceed the startup timeout of the system - manager. To work around this issue one may adjust the startup - timeout - - or simply run the migrations manually: - - -pleroma_ctl migrate - -
- Copying data - - Copying the Pleroma data instead of re‐using it in place may - permit easier reversion to Pleroma, but allows the two data sets - to diverge. - - - First disable Pleroma and then copy its database and upload - data: - - -# Create a copy of the database -nix-shell -p postgresql --run 'createdb -T pleroma akkoma' - -# Copy upload data -mkdir /var/lib/akkoma -cp -R --reflink=auto /var/lib/pleroma/uploads /var/lib/akkoma/ - - - After the data has been copied, enable the Akkoma service and - verify that the migration has been successful. If no longer - required, the original data may then be deleted: - - -# Delete original database -nix-shell -p postgresql --run 'dropdb pleroma' - -# Delete original Pleroma state -rm -r /var/lib/pleroma - -
-
- Re‐using data - - To re‐use the Pleroma data in place, disable Pleroma and enable - Akkoma, pointing it to the Pleroma database and upload - directory. - - -# Adjust these settings according to the database name and upload directory path used by Pleroma -services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma"; -services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads"; - - - Please keep in mind that after the Akkoma service has been - started, any migrations applied by Akkoma have to be rolled back - before the database can be used again with Pleroma. This can be - achieved through pleroma_ctl ecto.rollback. - Refer to the - Ecto - SQL documentation for details. - -
-
-
- Advanced deployment options -
- Confinement
-
- The Akkoma systemd service may be confined to a chroot with
-
-
-systemd.services.akkoma.confinement.enable = true;
-
-
- Confinement of services is not generally supported in NixOS and
- therefore disabled by default. Depending on the Akkoma
- configuration, the default confinement settings may be
- insufficient and lead to subtle errors at run time, requiring
- adjustment:
-
-
- Use
-
- to make packages available in the chroot.
-
-
-
- and
-
- permit access to outside paths through bind mounts. Refer to
- systemd.exec5
- for details.
-
-
- Distributed deployment - - Being an Elixir application, Akkoma can be deployed in a - distributed fashion. - - - This requires setting - - and - . - The specifics depend strongly on the deployment environment. For - more information please check the relevant - Erlang - documentation. - -
-
-
diff --git a/nixos/modules/services/web-apps/discourse.nix b/nixos/modules/services/web-apps/discourse.nix index b8104ade4676..5565a4f45d1e 100644 --- a/nixos/modules/services/web-apps/discourse.nix +++ b/nixos/modules/services/web-apps/discourse.nix @@ -1080,6 +1080,6 @@ in ]; }; - meta.doc = ./discourse.xml; + meta.doc = ./discourse.md; meta.maintainers = [ lib.maintainers.talyz ]; } diff --git a/nixos/modules/services/web-apps/discourse.xml b/nixos/modules/services/web-apps/discourse.xml deleted file mode 100644 index a5e8b3656b7d..000000000000 --- a/nixos/modules/services/web-apps/discourse.xml +++ /dev/null @@ -1,331 +0,0 @@ - - - Discourse - - Discourse is a - modern and open source discussion platform. - -
- Basic usage - - A minimal configuration using Let’s Encrypt for TLS certificates - looks like this: - - -services.discourse = { - enable = true; - hostname = "discourse.example.com"; - admin = { - email = "admin@example.com"; - username = "admin"; - fullName = "Administrator"; - passwordFile = "/path/to/password_file"; - }; - secretKeyBaseFile = "/path/to/secret_key_base_file"; -}; -security.acme.email = "me@example.com"; -security.acme.acceptTerms = true; - - - Provided a proper DNS setup, you’ll be able to connect to the - instance at discourse.example.com and log in - using the credentials provided in - services.discourse.admin. - -
-
- Using a regular TLS certificate - - To set up TLS using a regular certificate and key on file, use the - and - - options: - - -services.discourse = { - enable = true; - hostname = "discourse.example.com"; - sslCertificate = "/path/to/ssl_certificate"; - sslCertificateKey = "/path/to/ssl_certificate_key"; - admin = { - email = "admin@example.com"; - username = "admin"; - fullName = "Administrator"; - passwordFile = "/path/to/password_file"; - }; - secretKeyBaseFile = "/path/to/secret_key_base_file"; -}; - -
-
- Database access - - Discourse uses PostgreSQL to store most of its data. A database - will automatically be enabled and a database and role created - unless is - changed from its default of null or - - is set to false. - - - External database access can also be configured by setting - , - and - as - appropriate. Note that you need to manually create a database - called discourse (or the name you chose in - ) and allow - the configured database user full access to it. - -
-
- Email - - In addition to the basic setup, you’ll want to configure an SMTP - server Discourse can use to send user registration and password - reset emails, among others. You can also optionally let Discourse - receive email, which enables people to reply to threads and - conversations via email. - - - A basic setup which assumes you want to use your configured - hostname as - email domain can be done like this: - - -services.discourse = { - enable = true; - hostname = "discourse.example.com"; - sslCertificate = "/path/to/ssl_certificate"; - sslCertificateKey = "/path/to/ssl_certificate_key"; - admin = { - email = "admin@example.com"; - username = "admin"; - fullName = "Administrator"; - passwordFile = "/path/to/password_file"; - }; - mail.outgoing = { - serverAddress = "smtp.emailprovider.com"; - port = 587; - username = "user@emailprovider.com"; - passwordFile = "/path/to/smtp_password_file"; - }; - mail.incoming.enable = true; - secretKeyBaseFile = "/path/to/secret_key_base_file"; -}; - - - This assumes you have set up an MX record for the address you’ve - set in - hostname - and requires proper SPF, DKIM and DMARC configuration to be done - for the domain you’re sending from, in order for email to be - reliably delivered. - - - If you want to use a different domain for your outgoing email (for - example example.com instead of - discourse.example.com) you should set - - and - - manually. - - - - Setup of TLS for incoming email is currently only configured - automatically when a regular TLS certificate is used, i.e. when - and - are - set. - - -
-
- Additional settings - - Additional site settings and backend settings, for which no - explicit NixOS options are provided, can be set in - and - - respectively. - -
- Site settings - - Site settings are the settings that can be - changed through the Discourse UI. Their - default values can be set using - . - - - Settings are expressed as a Nix attribute set which matches the - structure of the configuration in - config/site_settings.yml. - To find a setting’s path, you only need to care about the first - two levels; i.e. its category (e.g. login) - and name (e.g. invite_only). - - - Settings containing secret data should be set to an attribute - set containing the attribute _secret - a - string pointing to a file containing the value the option should - be set to. See the example. - -
-
- Backend settings - - Settings are expressed as a Nix attribute set which matches the - structure of the configuration in - config/discourse.conf. - Empty parameters can be defined by setting them to - null. - -
-
- Example - - The following example sets the title and description of the - Discourse instance and enables GitHub login in the site - settings, and changes a few request limits in the backend - settings: - - -services.discourse = { - enable = true; - hostname = "discourse.example.com"; - sslCertificate = "/path/to/ssl_certificate"; - sslCertificateKey = "/path/to/ssl_certificate_key"; - admin = { - email = "admin@example.com"; - username = "admin"; - fullName = "Administrator"; - passwordFile = "/path/to/password_file"; - }; - mail.outgoing = { - serverAddress = "smtp.emailprovider.com"; - port = 587; - username = "user@emailprovider.com"; - passwordFile = "/path/to/smtp_password_file"; - }; - mail.incoming.enable = true; - siteSettings = { - required = { - title = "My Cats"; - site_description = "Discuss My Cats (and be nice plz)"; - }; - login = { - enable_github_logins = true; - github_client_id = "a2f6dfe838cb3206ce20"; - github_client_secret._secret = /run/keys/discourse_github_client_secret; - }; - }; - backendSettings = { - max_reqs_per_ip_per_minute = 300; - max_reqs_per_ip_per_10_seconds = 60; - max_asset_reqs_per_ip_per_10_seconds = 250; - max_reqs_per_ip_mode = "warn+block"; - }; - secretKeyBaseFile = "/path/to/secret_key_base_file"; -}; - - - In the resulting site settings file, the - login.github_client_secret key will be set to - the contents of the - /run/keys/discourse_github_client_secret - file. - -
-
-
- Plugins - - You can install Discourse plugins using the - option. - Pre-packaged plugins are provided in - <your_discourse_package_here>.plugins. If - you want the full suite of plugins provided through - nixpkgs, you can also set the - option to - pkgs.discourseAllPlugins. - - - Plugins can be built with the - <your_discourse_package_here>.mkDiscoursePlugin - function. Normally, it should suffice to provide a - name and src attribute. If - the plugin has Ruby dependencies, however, they need to be - packaged in accordance with the - Developing - with Ruby section of the Nixpkgs manual and the appropriate - gem options set in bundlerEnvArgs (normally - gemdir is sufficient). A plugin’s Ruby - dependencies are listed in its plugin.rb file - as function calls to gem. To construct the - corresponding Gemfile manually, run - bundle init, then add the - gem lines to it verbatim. - - - Much of the packaging can be done automatically by the - nixpkgs/pkgs/servers/web-apps/discourse/update.py - script - just add the plugin to the plugins - list in the update_plugins function and run the - script: - - -./update.py update-plugins - - - Some plugins provide - site - settings. Their defaults can be configured using - , just like - regular site settings. To find the names of these settings, look - in the config/settings.yml file of the plugin - repo. 
- - - For example, to add the - discourse-spoiler-alert - and - discourse-solved - plugins, and disable discourse-spoiler-alert by - default: - - -services.discourse = { - enable = true; - hostname = "discourse.example.com"; - sslCertificate = "/path/to/ssl_certificate"; - sslCertificateKey = "/path/to/ssl_certificate_key"; - admin = { - email = "admin@example.com"; - username = "admin"; - fullName = "Administrator"; - passwordFile = "/path/to/password_file"; - }; - mail.outgoing = { - serverAddress = "smtp.emailprovider.com"; - port = 587; - username = "user@emailprovider.com"; - passwordFile = "/path/to/smtp_password_file"; - }; - mail.incoming.enable = true; - plugins = with config.services.discourse.package.plugins; [ - discourse-spoiler-alert - discourse-solved - ]; - siteSettings = { - plugins = { - spoiler_enabled = false; - }; - }; - secretKeyBaseFile = "/path/to/secret_key_base_file"; -}; - -
-
diff --git a/nixos/modules/services/web-apps/grocy.nix b/nixos/modules/services/web-apps/grocy.nix index 6efc2ccfd302..3bcda3caedac 100644 --- a/nixos/modules/services/web-apps/grocy.nix +++ b/nixos/modules/services/web-apps/grocy.nix @@ -167,6 +167,6 @@ in { meta = { maintainers = with maintainers; [ ma27 ]; - doc = ./grocy.xml; + doc = ./grocy.md; }; } diff --git a/nixos/modules/services/web-apps/grocy.xml b/nixos/modules/services/web-apps/grocy.xml deleted file mode 100644 index 08de25b4ce2b..000000000000 --- a/nixos/modules/services/web-apps/grocy.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - Grocy - - Grocy is a web-based - self-hosted groceries & household management solution for your - home. - -
- Basic usage - - A very basic configuration may look like this: - - -{ pkgs, ... }: -{ - services.grocy = { - enable = true; - hostName = "grocy.tld"; - }; -} - - - This configures a simple vhost using - nginx which - listens to grocy.tld with fully configured - ACME/LE (this can be disabled by setting - services.grocy.nginx.enableSSL - to false). After the initial setup the - credentials admin:admin can be used to login. - - - The application’s state is persisted at - /var/lib/grocy/grocy.db in a - sqlite3 database. The migration is applied when - requesting the /-route of the application. - -
-
- Settings - - The configuration for grocy is located at - /etc/grocy/config.php. By default, the - following settings can be defined in the NixOS-configuration: - - -{ pkgs, ... }: -{ - services.grocy.settings = { - # The default currency in the system for invoices etc. - # Please note that exchange rates aren't taken into account, this - # is just the setting for what's shown in the frontend. - currency = "EUR"; - - # The display language (and locale configuration) for grocy. - culture = "de"; - - calendar = { - # Whether or not to show the week-numbers - # in the calendar. - showWeekNumber = true; - - # Index of the first day to be shown in the calendar (0=Sunday, 1=Monday, - # 2=Tuesday and so on). - firstDayOfWeek = 2; - }; - }; -} - - - If you want to alter the configuration file on your own, you can - do this manually with an expression like this: - - -{ lib, ... }: -{ - environment.etc."grocy/config.php".text = lib.mkAfter '' - // Arbitrary PHP code in grocy's configuration file - ''; -} - -
-
diff --git a/nixos/modules/services/web-apps/jitsi-meet.nix b/nixos/modules/services/web-apps/jitsi-meet.nix index 5b0934b2fb76..28be3a3702eb 100644 --- a/nixos/modules/services/web-apps/jitsi-meet.nix +++ b/nixos/modules/services/web-apps/jitsi-meet.nix @@ -451,6 +451,6 @@ in }; }; - meta.doc = ./jitsi-meet.xml; + meta.doc = ./jitsi-meet.md; meta.maintainers = lib.teams.jitsi.members; } diff --git a/nixos/modules/services/web-apps/jitsi-meet.xml b/nixos/modules/services/web-apps/jitsi-meet.xml deleted file mode 100644 index 4d2d8aa55e19..000000000000 --- a/nixos/modules/services/web-apps/jitsi-meet.xml +++ /dev/null @@ -1,55 +0,0 @@ - - - Jitsi Meet - - With Jitsi Meet on NixOS you can quickly configure a complete, - private, self-hosted video conferencing solution. - -
- Basic usage - - A minimal configuration using Let’s Encrypt for TLS certificates - looks like this: - - -{ - services.jitsi-meet = { - enable = true; - hostName = "jitsi.example.com"; - }; - services.jitsi-videobridge.openFirewall = true; - networking.firewall.allowedTCPPorts = [ 80 443 ]; - security.acme.email = "me@example.com"; - security.acme.acceptTerms = true; -} - -
-
- Configuration - - Here is the minimal configuration with additional configurations: - - -{ - services.jitsi-meet = { - enable = true; - hostName = "jitsi.example.com"; - config = { - enableWelcomePage = false; - prejoinPageEnabled = true; - defaultLang = "fi"; - }; - interfaceConfig = { - SHOW_JITSI_WATERMARK = false; - SHOW_WATERMARK_FOR_GUESTS = false; - }; - }; - services.jitsi-videobridge.openFirewall = true; - networking.firewall.allowedTCPPorts = [ 80 443 ]; - security.acme.email = "me@example.com"; - security.acme.acceptTerms = true; -} - -
-
diff --git a/nixos/modules/services/web-apps/keycloak.nix b/nixos/modules/services/web-apps/keycloak.nix index d52190a28648..a7e4fab8ea28 100644 --- a/nixos/modules/services/web-apps/keycloak.nix +++ b/nixos/modules/services/web-apps/keycloak.nix @@ -674,6 +674,6 @@ in mkIf createLocalMySQL (mkDefault dbPkg); }; - meta.doc = ./keycloak.xml; + meta.doc = ./keycloak.md; meta.maintainers = [ maintainers.talyz ]; } diff --git a/nixos/modules/services/web-apps/keycloak.xml b/nixos/modules/services/web-apps/keycloak.xml deleted file mode 100644 index 148782d30f39..000000000000 --- a/nixos/modules/services/web-apps/keycloak.xml +++ /dev/null @@ -1,177 +0,0 @@ - - - Keycloak - - Keycloak is an - open source identity and access management server with support for - OpenID - Connect, OAUTH - 2.0 and - SAML - 2.0. - -
- Administration - - An administrative user with the username admin - is automatically created in the master realm. - Its initial password can be configured by setting - and - defaults to changeme. The password is not - stored safely and should be changed immediately in the admin - panel. - - - Refer to the - Keycloak - Server Administration Guide for information on how to - administer your Keycloak instance. - -
-
- Database access - - Keycloak can be used with either PostgreSQL, MariaDB or MySQL. - Which one is used can be configured in - . The - selected database will automatically be enabled and a database and - role created unless - is changed - from its default of localhost or - is - set to false. - - - External database access can also be configured by setting - , - , - , - and - as - appropriate. Note that you need to manually create the database - and allow the configured database user full access to it. - - - - must be set to the path to a file containing the password used to - log in to the database. If - and - - are kept at their defaults, the database role - keycloak with that password is provisioned on - the local database instance. - - - - The path should be provided as a string, not a Nix path, since - Nix paths are copied into the world readable Nix store. - - -
-
- Hostname - - The hostname is used to build the public URL used as base for all - frontend requests and must be configured through - . - - - - If you’re migrating an old Wildfly based Keycloak instance and - want to keep compatibility with your current clients, you’ll - likely want to set - - to /auth. See the option description for more - details. - - - - - determines whether Keycloak should force all requests to go - through the frontend URL. By default, Keycloak allows backend - requests to instead use its local hostname or IP address and may - also advertise it to clients through its OpenID Connect Discovery - endpoint. - - - For more information on hostname configuration, see the - Hostname - section of the Keycloak Server Installation and Configuration - Guide. - -
-
- Setting up TLS/SSL - - By default, Keycloak won’t accept unsecured HTTP connections - originating from outside its local network. - - - HTTPS support requires a TLS/SSL certificate and a private key, - both - PEM - formatted. Their paths should be set through - and - . - - - - The paths should be provided as a strings, not a Nix paths, - since Nix paths are copied into the world readable Nix store. - - -
-
- Themes - - You can package custom themes and make them visible to Keycloak - through . See the - Themes - section of the Keycloak Server Development Guide and the - description of the aforementioned NixOS option for more - information. - -
-
- Configuration file settings - - Keycloak server configuration parameters can be set in - . These - correspond directly to options in - conf/keycloak.conf. Some of the most - important parameters are documented as suboptions, the rest can be - found in the - All - configuration section of the Keycloak Server Installation and - Configuration Guide. - - - Options containing secret data should be set to an attribute set - containing the attribute _secret - a string - pointing to a file containing the value the option should be set - to. See the description of - for an example. - -
-
- Example configuration - - A basic configuration with some custom settings could look like - this: - - -services.keycloak = { - enable = true; - settings = { - hostname = "keycloak.example.com"; - hostname-strict-backchannel = true; - }; - initialAdminPassword = "e6Wcm0RrtegMEHl"; # change on first login - sslCertificate = "/run/keys/ssl_cert"; - sslCertificateKey = "/run/keys/ssl_key"; - database.passwordFile = "/run/keys/db_password"; -}; - -
-
diff --git a/nixos/modules/services/web-apps/lemmy.nix b/nixos/modules/services/web-apps/lemmy.nix index f2eb6e726b90..af0fb38121a3 100644 --- a/nixos/modules/services/web-apps/lemmy.nix +++ b/nixos/modules/services/web-apps/lemmy.nix @@ -6,7 +6,7 @@ let in { meta.maintainers = with maintainers; [ happysalada ]; - meta.doc = ./lemmy.xml; + meta.doc = ./lemmy.md; imports = [ (mkRemovedOptionModule [ "services" "lemmy" "jwtSecretPath" ] "As of v0.13.0, Lemmy auto-generates the JWT secret.") diff --git a/nixos/modules/services/web-apps/lemmy.xml b/nixos/modules/services/web-apps/lemmy.xml deleted file mode 100644 index 114e11f3488a..000000000000 --- a/nixos/modules/services/web-apps/lemmy.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - Lemmy - - Lemmy is a federated alternative to reddit in rust. - -
- Quickstart - - the minimum to start lemmy is - - -services.lemmy = { - enable = true; - settings = { - hostname = "lemmy.union.rocks"; - database.createLocally = true; - }; - caddy.enable = true; -} - - - this will start the backend on port 8536 and the frontend on port - 1234. It will expose your instance with a caddy reverse proxy to - the hostname you’ve provided. Postgres will be initialized on that - same instance automatically. - -
-
- Usage - - On first connection you will be asked to define an admin user. - -
-
- Missing - - - - Exposing with nginx is not implemented yet. - - - - - This has been tested using a local database with a unix socket - connection. Using different database settings will likely - require modifications - - - -
-
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index 984510659952..eadf8b62b977 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -325,7 +325,7 @@ in {
   };

   meta = {
-    doc = ./matomo.xml;
+    doc = ./matomo.md;
     maintainers = with lib.maintainers; [ florianjacob ];
   };
 }
diff --git a/nixos/modules/services/web-apps/matomo.xml b/nixos/modules/services/web-apps/matomo.xml
deleted file mode 100644
index 30994cc9f1da..000000000000
--- a/nixos/modules/services/web-apps/matomo.xml
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
- Matomo
-
- Matomo is a real-time web analytics application. This module
- configures php-fpm as backend for Matomo, optionally configuring an
- nginx vhost as well.
-
-
- An automatic setup is not supported by Matomo, so you need to
- configure Matomo itself in the browser-based Matomo setup.
-
- Database Setup - - You also need to configure a MariaDB or MySQL database and -user - for Matomo yourself, and enter those credentials in your browser. - You can use passwordless database authentication via the - UNIX_SOCKET authentication plugin with the following SQL commands: - - -# For MariaDB -INSTALL PLUGIN unix_socket SONAME 'auth_socket'; -CREATE DATABASE matomo; -CREATE USER 'matomo'@'localhost' IDENTIFIED WITH unix_socket; -GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost'; - -# For MySQL -INSTALL PLUGIN auth_socket SONAME 'auth_socket.so'; -CREATE DATABASE matomo; -CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket; -GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost'; - - - Then fill in matomo as database user and - database name, and leave the password field blank. This - authentication works by allowing only the - matomo unix user to authenticate as the - matomo database user (without needing a - password), but no other users. For more information on - passwordless login, see - https://mariadb.com/kb/en/mariadb/unix_socket-authentication-plugin/. - - - Of course, you can use password based authentication as well, e.g. - when the database is not on the same host. - -
-
- Archive Processing - - This module comes with the systemd service - matomo-archive-processing.service and a timer - that automatically triggers archive processing every hour. This - means that you can safely - disable - browser triggers for Matomo archiving at - Administration > System > General Settings. - - - With automatic archive processing, you can now also enable to - delete - old visitor logs at - Administration > System > Privacy, but - make sure that you run - systemctl start matomo-archive-processing.service - at least once without errors if you have already collected data - before, so that the reports get archived before the source data - gets deleted. - -
-
- Backup - - You only need to take backups of your MySQL database and the - /var/lib/matomo/config/config.ini.php file. - Use a user in the matomo group or root to - access the file. For more information, see - https://matomo.org/faq/how-to-install/faq_138/. - -
-
- Issues - - - - Matomo will warn you that the JavaScript tracker is not - writable. This is because it’s located in the read-only nix - store. You can safely ignore this, unless you need a plugin - that needs JavaScript tracker access. - - - -
-
- Using other Web Servers than nginx - - You can use other web servers by forwarding calls for - index.php and piwik.php - to the - services.phpfpm.pools.<name>.socket - fastcgi unix socket. You can use the nginx configuration in the - module code as a reference to what else should be configured. - -
-
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix index 90801e996817..50c2d68c77e4 100644 --- a/nixos/modules/services/web-apps/nextcloud.nix +++ b/nixos/modules/services/web-apps/nextcloud.nix @@ -1146,5 +1146,5 @@ in { } ]); - meta.doc = ./nextcloud.xml; + meta.doc = ./nextcloud.md; } diff --git a/nixos/modules/services/web-apps/nextcloud.xml b/nixos/modules/services/web-apps/nextcloud.xml deleted file mode 100644 index a5ac05723ef4..000000000000 --- a/nixos/modules/services/web-apps/nextcloud.xml +++ /dev/null @@ -1,333 +0,0 @@ - - - Nextcloud - - Nextcloud is an - open-source, self-hostable cloud platform. The server setup can be - automated using - services.nextcloud. - A desktop client is packaged at - pkgs.nextcloud-client. - - - The current default by NixOS is nextcloud25 which - is also the latest major version available. - -
- Basic usage - - Nextcloud is a PHP-based application which requires an HTTP server - (services.nextcloud - optionally supports - services.nginx) - and a database (it’s recommended to use - services.postgresql). - - - A very basic configuration may look like this: - - -{ pkgs, ... }: -{ - services.nextcloud = { - enable = true; - hostName = "nextcloud.tld"; - config = { - dbtype = "pgsql"; - dbuser = "nextcloud"; - dbhost = "/run/postgresql"; # nextcloud will add /.s.PGSQL.5432 by itself - dbname = "nextcloud"; - adminpassFile = "/path/to/admin-pass-file"; - adminuser = "root"; - }; - }; - - services.postgresql = { - enable = true; - ensureDatabases = [ "nextcloud" ]; - ensureUsers = [ - { name = "nextcloud"; - ensurePermissions."DATABASE nextcloud" = "ALL PRIVILEGES"; - } - ]; - }; - - # ensure that postgres is running *before* running the setup - systemd.services."nextcloud-setup" = { - requires = ["postgresql.service"]; - after = ["postgresql.service"]; - }; - - networking.firewall.allowedTCPPorts = [ 80 443 ]; -} - - - The hostName option is used internally to - configure an HTTP server using - PHP-FPM - and nginx. The config - attribute set is used by the imperative installer and all values - are written to an additional file to ensure that changes can be - applied by changing the module’s options. - - - In case the application serves multiple domains (those are checked - with - $_SERVER['HTTP_HOST']) - it’s needed to add them to - services.nextcloud.config.extraTrustedDomains. - - - Auto updates for Nextcloud apps can be enabled using - services.nextcloud.autoUpdateApps. - -
-
- Common problems - - - - General notes. - Unfortunately Nextcloud appears to be very stateful when it - comes to managing its own configuration. The config file lives - in the home directory of the nextcloud user - (by default - /var/lib/nextcloud/config/config.php) and - is also used to track several states of the application (e.g., - whether installed or not). - - - All configuration parameters are also stored in - /var/lib/nextcloud/config/override.config.php - which is generated by the module and linked from the store to - ensure that all values from config.php - can be modified by the module. However - config.php manages the application’s - state and shouldn’t be touched manually because of that. - - - - Don’t delete config.php! This file - tracks the application’s state and a deletion can cause - unwanted side-effects! - - - - - Don’t rerun - nextcloud-occ maintenance:install! This - command tries to install the application and can cause - unwanted side-effects! - - - - - - Multiple version upgrades. - Nextcloud doesn’t allow to move more than one major-version - forward. E.g., if you’re on v16, you cannot - upgrade to v18, you need to upgrade to - v17 first. This is ensured automatically as - long as the - stateVersion is - declared properly. In that case the oldest version available - (one major behind the one from the previous NixOS release) - will be selected by default and the module will generate a - warning that reminds the user to upgrade to latest Nextcloud - after that deploy. - - - - - Error: Command "upgrade" is not defined. - This error usually occurs if the initial installation - (nextcloud-occ maintenance:install) has - failed. After that, the application is not installed, but the - upgrade is attempted to be executed. Further context can be - found in - NixOS/nixpkgs#111175. 
- - - First of all, it makes sense to find out what went wrong by - looking at the logs of the installation via - journalctl -u nextcloud-setup and try to - fix the underlying issue. - - - - - If this occurs on an existing setup, - this is most likely because the maintenance mode is - active. It can be deactivated by running - nextcloud-occ maintenance:mode --off. - It’s advisable though to check the logs first on why the - maintenance mode was activated. - - - - - - Only perform the following measures on freshly - installed instances! - - - - A re-run of the installer can be forced by - deleting - /var/lib/nextcloud/config/config.php. - This is the only time advisable because the fresh install - doesn’t have any state that can be lost. In case that - doesn’t help, an entire re-creation can be forced via - rm -rf ~nextcloud/. - - - - - - - Server-side encryption. - Nextcloud supports - server-side - encryption (SSE). This is not an end-to-end encryption, - but can be used to encrypt files that will be persisted to - external storage such as S3. Please note that this won’t work - anymore when using OpenSSL 3 for PHP’s openssl extension - because this is implemented using the legacy cipher RC4. If - is - above 22.05, this is - disabled by default. To turn it on again and for further - information please refer to - . - - - -
-
- Using an alternative webserver as reverse-proxy (e.g. - <literal>httpd</literal>) - - By default, nginx is used as reverse-proxy for - nextcloud. However, it’s possible to use e.g. - httpd by explicitly disabling - nginx using - and fixing the - settings listen.owner & - listen.group in the - corresponding - phpfpm pool. - - - An exemplary configuration may look like this: - - -{ config, lib, pkgs, ... }: { - services.nginx.enable = false; - services.nextcloud = { - enable = true; - hostName = "localhost"; - - /* further, required options */ - }; - services.phpfpm.pools.nextcloud.settings = { - "listen.owner" = config.services.httpd.user; - "listen.group" = config.services.httpd.group; - }; - services.httpd = { - enable = true; - adminAddr = "webmaster@localhost"; - extraModules = [ "proxy_fcgi" ]; - virtualHosts."localhost" = { - documentRoot = config.services.nextcloud.package; - extraConfig = '' - <Directory "${config.services.nextcloud.package}"> - <FilesMatch "\.php$"> - <If "-f %{REQUEST_FILENAME}"> - SetHandler "proxy:unix:${config.services.phpfpm.pools.nextcloud.socket}|fcgi://localhost/" - </If> - </FilesMatch> - <IfModule mod_rewrite.c> - RewriteEngine On - RewriteBase / - RewriteRule ^index\.php$ - [L] - RewriteCond %{REQUEST_FILENAME} !-f - RewriteCond %{REQUEST_FILENAME} !-d - RewriteRule . /index.php [L] - </IfModule> - DirectoryIndex index.php - Require all granted - Options +FollowSymLinks - </Directory> - ''; - }; - }; -} - -
-
- Installing Apps and PHP extensions - - Nextcloud apps are installed statefully through the web interface. - Some apps may require extra PHP extensions to be installed. This - can be configured with the - - setting. - - - Alternatively, extra apps can also be declared with the - setting. When - using this setting, apps can no longer be managed statefully - because this can lead to Nextcloud updating apps that are managed - by Nix. If you want automatic updates it is recommended that you - use web interface to install apps. - -
-
- Maintainer information - - As stated in the previous paragraph, we must provide a clean - upgrade-path for Nextcloud since it cannot move more than one - major version forward on a single upgrade. This chapter adds some - notes how Nextcloud updates should be rolled out in the future. - - - While minor and patch-level updates are no problem and can be done - directly in the package-expression (and should be backported to - supported stable branches after that), major-releases should be - added in a new attribute (e.g. Nextcloud - v19.0.0 should be available in - nixpkgs as - pkgs.nextcloud19). To provide simple upgrade - paths it’s generally useful to backport those as well to stable - branches. As long as the package-default isn’t altered, this won’t - break existing setups. After that, the versioning-warning in the - nextcloud-module should be updated to make sure - that the - package-option - selects the latest version on fresh setups. - - - If major-releases will be abandoned by upstream, we should check - first if those are needed in NixOS for a safe upgrade-path before - removing those. In that case we should keep those packages, but - mark them as insecure in an expression like this (in - <nixpkgs/pkgs/servers/nextcloud/default.nix>): - - -/* ... */ -{ - nextcloud17 = generic { - version = "17.0.x"; - sha256 = "0000000000000000000000000000000000000000000000000000"; - eol = true; - }; -} - - - Ideally we should make sure that it’s possible to jump two NixOS - versions forward: i.e. the warnings and the logic in the module - should guard a user to upgrade from a Nextcloud on e.g. 19.09 to a - Nextcloud on 20.09. - -
-
diff --git a/nixos/modules/services/web-apps/pict-rs.nix b/nixos/modules/services/web-apps/pict-rs.nix index ad07507ca37d..0f13b2ae6db1 100644 --- a/nixos/modules/services/web-apps/pict-rs.nix +++ b/nixos/modules/services/web-apps/pict-rs.nix @@ -5,7 +5,7 @@ let in { meta.maintainers = with maintainers; [ happysalada ]; - meta.doc = ./pict-rs.xml; + meta.doc = ./pict-rs.md; options.services.pict-rs = { enable = mkEnableOption (lib.mdDoc "pict-rs server"); diff --git a/nixos/modules/services/web-apps/pict-rs.xml b/nixos/modules/services/web-apps/pict-rs.xml deleted file mode 100644 index 3f5900c55f15..000000000000 --- a/nixos/modules/services/web-apps/pict-rs.xml +++ /dev/null @@ -1,185 +0,0 @@ - - - Pict-rs - - pict-rs is a a simple image hosting service. - -
- Quickstart - - the minimum to start pict-rs is - - -services.pict-rs.enable = true; - - - this will start the http server on port 8080 by default. - -
-
- Usage - - pict-rs offers the following endpoints: - - - - - POST /image for uploading an image. - Uploaded content must be valid multipart/form-data with an - image array located within the images[] key - - - This endpoint returns the following JSON structure on success - with a 201 Created status - - -{ - "files": [ - { - "delete_token": "JFvFhqJA98", - "file": "lkWZDRvugm.jpg" - }, - { - "delete_token": "kAYy9nk2WK", - "file": "8qFS0QooAn.jpg" - }, - { - "delete_token": "OxRpM3sf0Y", - "file": "1hJaYfGE01.jpg" - } - ], - "msg": "ok" -} - - - - - GET /image/download?url=... Download an - image from a remote server, returning the same JSON payload as - the POST endpoint - - - - - GET /image/original/{file} for getting a - full-resolution image. file here is the - file key from the /image - endpoint’s JSON - - - - - GET /image/details/original/{file} for - getting the details of a full-resolution image. The returned - JSON is structured like so: - - -{ - "width": 800, - "height": 537, - "content_type": "image/webp", - "created_at": [ - 2020, - 345, - 67376, - 394363487 - ] -} - - - - - GET /image/process.{ext}?src={file}&... - get a file with transformations applied. existing - transformations include - - - - - identity=true: apply no changes - - - - - blur={float}: apply a gaussian blur to - the file - - - - - thumbnail={int}: produce a thumbnail of - the image fitting inside an {int} by - {int} square using raw pixel sampling - - - - - resize={int}: produce a thumbnail of - the image fitting inside an {int} by - {int} square using a Lanczos2 filter. - This is slower than sampling but looks a bit better in - some cases - - - - - crop={int-w}x{int-h}: produce a cropped - version of the image with an {int-w} by - {int-h} aspect ratio. The resulting - crop will be centered on the image. Either the width or - height of the image will remain full-size, depending on - the image’s aspect ratio and the requested aspect ratio. 
- For example, a 1600x900 image cropped with a 1x1 aspect - ratio will become 900x900. A 1600x1100 image cropped with - a 16x9 aspect ratio will become 1600x900. - - - - - Supported ext file extensions include - png, jpg, and - webp - - - An example of usage could be - - -GET /image/process.jpg?src=asdf.png&thumbnail=256&blur=3.0 - - - which would create a 256x256px JPEG thumbnail and blur it - - - - - GET /image/details/process.{ext}?src={file}&... - for getting the details of a processed image. The returned - JSON is the same format as listed for the full-resolution - details endpoint. - - - - - DELETE /image/delete/{delete_token}/{file} - or GET /image/delete/{delete_token}/{file} - to delete a file, where delete_token and - file are from the /image - endpoint’s JSON - - - -
-
- Missing - - - - Configuring the secure-api-key is not included yet. The - envisioned basic use case is consumption on localhost by other - services without exposing the service to the internet. - - - -
-
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix index e5dc1b103601..f64254d62524 100644 --- a/nixos/modules/services/web-apps/plausible.nix +++ b/nixos/modules/services/web-apps/plausible.nix @@ -292,5 +292,5 @@ in { }; meta.maintainers = with maintainers; [ ma27 ]; - meta.doc = ./plausible.xml; + meta.doc = ./plausible.md; } diff --git a/nixos/modules/services/web-apps/plausible.xml b/nixos/modules/services/web-apps/plausible.xml deleted file mode 100644 index 39ff004ffd95..000000000000 --- a/nixos/modules/services/web-apps/plausible.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - Plausible - - Plausible is a - privacy-friendly alternative to Google analytics. - -
- Basic Usage - - At first, a secret key is needed to be generated. This can be done - with e.g. - - -$ openssl rand -base64 64 - - - After that, plausible can be deployed like - this: - - -{ - services.plausible = { - enable = true; - adminUser = { - # activate is used to skip the email verification of the admin-user that's - # automatically created by plausible. This is only supported if - # postgresql is configured by the module. This is done by default, but - # can be turned off with services.plausible.database.postgres.setup. - activate = true; - email = "admin@localhost"; - passwordFile = "/run/secrets/plausible-admin-pwd"; - }; - server = { - baseUrl = "http://analytics.example.org"; - # secretKeybaseFile is a path to the file which contains the secret generated - # with openssl as described above. - secretKeybaseFile = "/run/secrets/plausible-secret-key-base"; - }; - }; -} - -
-
diff --git a/nixos/modules/services/web-servers/garage.nix b/nixos/modules/services/web-servers/garage.nix index 1c25d865f980..2491c788d6c5 100644 --- a/nixos/modules/services/web-servers/garage.nix +++ b/nixos/modules/services/web-servers/garage.nix @@ -9,7 +9,7 @@ let in { meta = { - doc = ./garage.xml; + doc = ./garage.md; maintainers = with pkgs.lib.maintainers; [ raitobezarius ]; }; diff --git a/nixos/modules/services/web-servers/garage.xml b/nixos/modules/services/web-servers/garage.xml deleted file mode 100644 index 6a16b1693daf..000000000000 --- a/nixos/modules/services/web-servers/garage.xml +++ /dev/null @@ -1,206 +0,0 @@ - - - Garage - - Garage is - an open-source, self-hostable S3 store, simpler than MinIO, for - geodistributed stores. The server setup can be automated using - services.garage. A - client configured to your local Garage instance is available in the - global environment as garage-manage. - - - The current default by NixOS is garage_0_8 which - is also the latest major version available. - -
- General considerations on upgrades - - Garage provides a cookbook documentation on how to upgrade: - https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/ - - - - Garage has two types of upgrades: patch-level upgrades and - minor/major version upgrades. - - - In all cases, you should read the changelog and ideally test the - upgrade on a staging cluster. - - - Checking the health of your cluster can be achieved using - garage-manage repair. - - - - - Until 1.0 is released, patch-level upgrades are considered as - minor version upgrades. Minor version upgrades are considered as - major version upgrades. i.e. 0.6 to 0.7 is a major version - upgrade. - - - - - - Straightforward upgrades (patch-level - upgrades). Upgrades must be performed one by one, - i.e. for each node, stop it, upgrade it : change - stateVersion or - services.garage.package, - restart it if it was not already by switching. - - - - - Multiple version upgrades. - Garage do not provide any guarantee on moving more than one - major-version forward. E.g., if you’re on - 0.7, you cannot upgrade to - 0.9. You need to upgrade to - 0.8 first. As long as - stateVersion is - declared properly, this is enforced automatically. The module - will issue a warning to remind the user to upgrade to latest - Garage after that deploy. - - - -
-
- Advanced upgrades (minor/major version upgrades) - - Here are some baseline instructions to handle advanced upgrades in - Garage, when in doubt, please refer to upstream instructions. - - - - - Disable API and web access to Garage. - - - - - Perform - garage-manage repair --all-nodes --yes tables - and - garage-manage repair --all-nodes --yes blocks. - - - - - Verify the resulting logs and check that data is synced - properly between all nodes. If you have time, do additional - checks (scrub, - block_refs, etc.). - - - - - Check if queues are empty by - garage-manage stats or through monitoring - tools. - - - - - Run systemctl stop garage to stop the - actual Garage version. - - - - - Backup the metadata folder of ALL your nodes, e.g. for a - metadata directory (the default one) in - /var/lib/garage/meta, you can run - pushd /var/lib/garage; tar -acf meta-v0.7.tar.zst meta/; popd. - - - - - Run the offline migration: - nix-shell -p garage_0_8 --run "garage offline-repair --yes", - this can take some time depending on how many objects are - stored in your cluster. - - - - - Bump Garage version in your NixOS configuration, either by - changing - stateVersion or - bumping - services.garage.package, - this should restart Garage automatically. - - - - - Perform - garage-manage repair --all-nodes --yes tables - and - garage-manage repair --all-nodes --yes blocks. - - - - - Wait for a full table sync to run. - - - - - Your upgraded cluster should be in a working state, re-enable API - and web access. - -
-
- Maintainer information - - As stated in the previous paragraph, we must provide a clean - upgrade-path for Garage since it cannot move more than one major - version forward on a single upgrade. This chapter adds some notes - how Garage updates should be rolled out in the future. This is - inspired from how Nextcloud does it. - - - While patch-level updates are no problem and can be done directly - in the package-expression (and should be backported to supported - stable branches after that), major-releases should be added in a - new attribute (e.g. Garage v0.8.0 should be - available in nixpkgs as - pkgs.garage_0_8_0). To provide simple upgrade - paths it’s generally useful to backport those as well to stable - branches. As long as the package-default isn’t altered, this won’t - break existing setups. After that, the versioning-warning in the - garage-module should be updated to make sure - that the - package-option - selects the latest version on fresh setups. - - - If major-releases will be abandoned by upstream, we should check - first if those are needed in NixOS for a safe upgrade-path before - removing those. In that case we shold keep those packages, but - mark them as insecure in an expression like this (in - <nixpkgs/pkgs/tools/filesystem/garage/default.nix>): - - -/* ... */ -{ - garage_0_7_3 = generic { - version = "0.7.3"; - sha256 = "0000000000000000000000000000000000000000000000000000"; - eol = true; - }; -} - - - Ideally we should make sure that it’s possible to jump two NixOS - versions forward: i.e. the warnings and the logic in the module - should guard a user to upgrade from a Garage on e.g. 22.11 to a - Garage on 23.11. - -
-
diff --git a/nixos/modules/services/x11/desktop-managers/gnome.nix b/nixos/modules/services/x11/desktop-managers/gnome.nix index dadfb421d3a8..79b2e7c6ead7 100644 --- a/nixos/modules/services/x11/desktop-managers/gnome.nix +++ b/nixos/modules/services/x11/desktop-managers/gnome.nix @@ -66,7 +66,7 @@ in { meta = { - doc = ./gnome.xml; + doc = ./gnome.md; maintainers = teams.gnome.members; }; diff --git a/nixos/modules/services/x11/desktop-managers/gnome.xml b/nixos/modules/services/x11/desktop-managers/gnome.xml deleted file mode 100644 index 6613f49eec7a..000000000000 --- a/nixos/modules/services/x11/desktop-managers/gnome.xml +++ /dev/null @@ -1,261 +0,0 @@ - - - GNOME Desktop - - GNOME provides a simple, yet full-featured desktop environment with - a focus on productivity. Its Mutter compositor supports both Wayland - and X server, and the GNOME Shell user interface is fully - customizable by extensions. - -
- Enabling GNOME - - All of the core apps, optional apps, games, and core developer - tools from GNOME are available. - - - To enable the GNOME desktop use: - - -services.xserver.desktopManager.gnome.enable = true; -services.xserver.displayManager.gdm.enable = true; - - - - While it is not strictly necessary to use GDM as the display - manager with GNOME, it is recommended, as some features such as - screen lock - might - not work without it. - - - - The default applications used in NixOS are very minimal, inspired - by the defaults used in - gnome-build-meta. - -
- GNOME without the apps - - If you’d like to only use the GNOME desktop and not the apps, - you can disable them with: - - -services.gnome.core-utilities.enable = false; - - - and none of them will be installed. - - - If you’d only like to omit a subset of the core utilities, you - can use - . Note - that this mechanism can only exclude core utilities, games and - core developer tools. - -
-
- Disabling GNOME services - - It is also possible to disable many of the - core - services. For example, if you do not need indexing files, - you can disable Tracker with: - - -services.gnome.tracker-miners.enable = false; -services.gnome.tracker.enable = false; - - - Note, however, that doing so is not supported and might break - some applications. Notably, GNOME Music cannot work without - Tracker. - -
-
- GNOME games - - You can install all of the GNOME games with: - - -services.gnome.games.enable = true; - -
-
- GNOME core developer tools - - You can install GNOME core developer tools with: - - -services.gnome.core-developer-tools.enable = true; - -
-
-
- Enabling GNOME Flashback - - GNOME Flashback provides a desktop environment based on the - classic GNOME 2 architecture. You can enable the default GNOME - Flashback session, which uses the Metacity window manager, with: - - -services.xserver.desktopManager.gnome.flashback.enableMetacity = true; - - - It is also possible to create custom sessions that replace - Metacity with a different window manager using - . - - - The following example uses xmonad window - manager: - - -services.xserver.desktopManager.gnome.flashback.customSessions = [ - { - wmName = "xmonad"; - wmLabel = "XMonad"; - wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad"; - enableGnomePanel = false; - } -]; - -
-
- Icons and GTK Themes - - Icon themes and GTK themes don’t require any special option to - install in NixOS. - - - You can add them to - and switch to - them with GNOME Tweaks. If you’d like to do this manually in - dconf, change the values of the following keys: - - -/org/gnome/desktop/interface/gtk-theme -/org/gnome/desktop/interface/icon-theme - - - in dconf-editor - -
-
- Shell Extensions - - Most Shell extensions are packaged under the - gnomeExtensions attribute. Some packages that - include Shell extensions, like gnome.gpaste, - don’t have their extension decoupled under this attribute. - - - You can install them like any other package: - - -environment.systemPackages = [ - gnomeExtensions.dash-to-dock - gnomeExtensions.gsconnect - gnomeExtensions.mpris-indicator-button -]; - - - Unfortunately, we lack a way for these to be managed in a - completely declarative way. So you have to enable them manually - with an Extensions application. It is possible to use a - GSettings - override for this on - org.gnome.shell.enabled-extensions, but that - will only influence the default value. - -
-
- GSettings Overrides - - Majority of software building on the GNOME platform use GLib’s - GSettings - system to manage runtime configuration. For our purposes, the - system consists of XML schemas describing the individual - configuration options, stored in the package, and a settings - backend, where the values of the settings are stored. On NixOS, - like on most Linux distributions, dconf database is used as the - backend. - - - GSettings - vendor overrides can be used to adjust the default values - for settings of the GNOME desktop and apps by replacing the - default values specified in the XML schemas. Using overrides will - allow you to pre-seed user settings before you even start the - session. - - - - Overrides really only change the default values for GSettings - keys so if you or an application changes the setting value, the - value set by the override will be ignored. Until - NixOS’s - dconf module implements changing values, you will either - need to keep that in mind and clear the setting from the backend - using dconf reset command when that happens, - or use the - module - from home-manager. - - - - You can override the default GSettings values using the - - option. - - - Take note that whatever packages you want to override GSettings - for, you need to add them to - . - - - You can use dconf-editor tool to explore which - GSettings you can set. - -
- Example - -services.xserver.desktopManager.gnome = { - extraGSettingsOverrides = '' - # Change default background - [org.gnome.desktop.background] - picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}' - - # Favorite apps in gnome-shell - [org.gnome.shell] - favorite-apps=['org.gnome.Photos.desktop', 'org.gnome.Nautilus.desktop'] - ''; - - extraGSettingsOverridePackages = [ - pkgs.gsettings-desktop-schemas # for org.gnome.desktop - pkgs.gnome.gnome-shell # for org.gnome.shell - ]; -}; - -
-
-
- Frequently Asked Questions -
- Can I use LightDM with GNOME? - - Yes you can, and any other display-manager in NixOS. - - - However, it doesn’t work correctly for the Wayland session of - GNOME Shell yet, and won’t be able to lock your screen. - - - See - this - issue. - -
-
-
diff --git a/nixos/modules/services/x11/desktop-managers/pantheon.nix b/nixos/modules/services/x11/desktop-managers/pantheon.nix index f5cc2d8187da..7791a98965d1 100644 --- a/nixos/modules/services/x11/desktop-managers/pantheon.nix +++ b/nixos/modules/services/x11/desktop-managers/pantheon.nix @@ -17,7 +17,7 @@ in { meta = { - doc = ./pantheon.xml; + doc = ./pantheon.md; maintainers = teams.pantheon.members; }; diff --git a/nixos/modules/services/x11/desktop-managers/pantheon.xml b/nixos/modules/services/x11/desktop-managers/pantheon.xml deleted file mode 100644 index 0e98b08fb658..000000000000 --- a/nixos/modules/services/x11/desktop-managers/pantheon.xml +++ /dev/null @@ -1,171 +0,0 @@ - - - Pantheon Desktop - - Pantheon is the desktop environment created for the elementary OS - distribution. It is written from scratch in Vala, utilizing GNOME - technologies with GTK and Granite. - -
- Enabling Pantheon - - All of Pantheon is working in NixOS and the applications should be - available, aside from a few - exceptions. - To enable Pantheon, set - - -services.xserver.desktopManager.pantheon.enable = true; - - - This automatically enables LightDM and Pantheon’s LightDM greeter. - If you’d like to disable this, set - - -services.xserver.displayManager.lightdm.greeters.pantheon.enable = false; -services.xserver.displayManager.lightdm.enable = false; - - - but please be aware using Pantheon without LightDM as a display - manager will break screenlocking from the UI. The NixOS module for - Pantheon installs all of Pantheon’s default applications. If you’d - like to not install Pantheon’s apps, set - - -services.pantheon.apps.enable = false; - - - You can also use - to - remove any other app (like elementary-mail). - -
-
- Wingpanel and Switchboard plugins - - Wingpanel and Switchboard work differently than they do in other - distributions, as far as using plugins. You cannot install a - plugin globally (like with - ) to start using it. - You should instead be using the following options: - - - - - - - - - - - - - - - to configure the programs with plugs or indicators. - - - The difference in NixOS is both these programs are patched to load - plugins from a directory that is the value of an environment - variable. All of which is controlled in Nix. If you need to - configure the particular packages manually you can override the - packages like: - - -wingpanel-with-indicators.override { - indicators = [ - pkgs.some-special-indicator - ]; -}; - -switchboard-with-plugs.override { - plugs = [ - pkgs.some-special-plug - ]; -}; - - - please note that, like how the NixOS options describe these as - extra plugins, this would only add to the default plugins included - with the programs. If for some reason you’d like to configure - which plugins to use exactly, both packages have an argument for - this: - - -wingpanel-with-indicators.override { - useDefaultIndicators = false; - indicators = specialListOfIndicators; -}; - -switchboard-with-plugs.override { - useDefaultPlugs = false; - plugs = specialListOfPlugs; -}; - - - this could be most useful for testing a particular plug-in in - isolation. - -
-
- FAQ - - - - I have - switched from a different desktop and Pantheon’s theming looks - messed up. - - - - Open Switchboard and go to: Administration → About → Restore - Default Settings → Restore Settings. This will reset any - dconf settings to their Pantheon defaults. Note this could - reset certain GNOME specific preferences if that desktop was - used prior. - - - - - - I - cannot enable both GNOME and Pantheon. - - - - This is a known - issue - and there is no known workaround. - - - - - - Does AppCenter - work, or is it available? - - - - AppCenter has been available since 20.03. Starting from - 21.11, the Flatpak backend should work so you can install - some Flatpak applications using it. However, due to missing - appstream metadata, the Packagekit backend does not function - currently. See this - issue. - - - If you are using Pantheon, AppCenter should be installed by - default if you have - Flatpak - support enabled. If you also wish to add the - appcenter Flatpak remote: - - -$ flatpak remote-add --if-not-exists appcenter https://flatpak.elementary.io/repo.flatpakrepo - - - - -
-
diff --git a/nixos/modules/system/boot/loader/external/external.nix b/nixos/modules/system/boot/loader/external/external.nix index 7c5455bb47aa..926cbd2b4b3f 100644 --- a/nixos/modules/system/boot/loader/external/external.nix +++ b/nixos/modules/system/boot/loader/external/external.nix @@ -8,7 +8,7 @@ in { meta = { maintainers = with maintainers; [ cole-h grahamc raitobezarius ]; - doc = ./external.xml; + doc = ./external.md; }; options.boot.loader.external = { diff --git a/nixos/modules/system/boot/loader/external/external.xml b/nixos/modules/system/boot/loader/external/external.xml deleted file mode 100644 index 9a392c27441d..000000000000 --- a/nixos/modules/system/boot/loader/external/external.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - External Bootloader Backends - - NixOS has support for several bootloader backends by default: - systemd-boot, grub, uboot, etc. The built-in bootloader backend - support is generic and supports most use cases. Some users may - prefer to create advanced workflows around managing the bootloader - and bootable entries. - - - You can replace the built-in bootloader support with your own - tooling using the external bootloader option. - - - Imagine you have created a new package called FooBoot. FooBoot - provides a program at - ${pkgs.fooboot}/bin/fooboot-install which takes - the system closure’s path as its only argument and configures the - system’s bootloader. - - - You can enable FooBoot like this: - - -{ pkgs, ... }: { - boot.loader.external = { - enable = true; - installHook = "${pkgs.fooboot}/bin/fooboot-install"; - }; -} - -
- Developing Custom Bootloader Backends - - Bootloaders should use - RFC-0125’s - Bootspec format and synthesis tools to identify the key properties - for bootable system generations. - -
-
diff --git a/pkgs/tools/nix/nixos-render-docs/default.nix b/pkgs/tools/nix/nixos-render-docs/default.nix new file mode 100644 index 000000000000..55bbbd9ffa56 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/default.nix @@ -0,0 +1,63 @@ +{ lib +, stdenv +, python3 +, python3Minimal +}: + +let + # python3Minimal can't be overridden with packages on Darwin, due to a missing framework. + # Instead of modifying stdenv, we take the easy way out, since most people on Darwin will + # just be hacking on the Nixpkgs manual (which also uses make-options-doc). + python = ((if stdenv.isDarwin then python3 else python3Minimal).override { + self = python; + includeSiteCustomize = true; + }); + + # TODO add our own small test suite, maybe add tests for these deps to channels? + markdown-it-py-no-tests = python.pkgs.markdown-it-py.override { + disableTests = true; + }; + mdit-py-plugins-no-tests = python.pkgs.mdit-py-plugins.override { + markdown-it-py = markdown-it-py-no-tests; + disableTests = true; + }; +in + +python.pkgs.buildPythonApplication { + pname = "nixos-render-docs"; + version = "0.0"; + format = "pyproject"; + + src = lib.cleanSourceWith { + filter = name: type: + lib.cleanSourceFilter name type + && ! 
(type == "directory" + && builtins.elem + (baseNameOf name) + [ + ".pytest_cache" + ".mypy_cache" + "__pycache__" + ]); + src = ./src; + }; + + nativeBuildInputs = [ + python.pkgs.setuptools + python.pkgs.pytestCheckHook + ]; + + propagatedBuildInputs = [ + markdown-it-py-no-tests + mdit-py-plugins-no-tests + python.pkgs.frozendict + ]; + + pytestFlagsArray = [ "-vvrP" "tests/" ]; + + meta = with lib; { + description = "Renderer for NixOS manual and option docs"; + license = licenses.mit; + maintainers = [ ]; + }; +} diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/__init__.py b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/__init__.py new file mode 100644 index 000000000000..5af13267b14d --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/__init__.py @@ -0,0 +1,24 @@ +import argparse +import os +import sys +from typing import Any, Dict + +from .md import Converter +from . import manual +from . import options + +def main() -> None: + parser = argparse.ArgumentParser(description='render nixos manual bits') + + commands = parser.add_subparsers(dest='command', required=True) + + options.build_cli(commands.add_parser('options')) + manual.build_cli(commands.add_parser('manual')) + + args = parser.parse_args() + if args.command == 'options': + options.run_cli(args) + elif args.command == 'manual': + manual.run_cli(args) + else: + raise RuntimeError('command not hooked up', args) diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/docbook.py b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/docbook.py new file mode 100644 index 000000000000..bad36e57a2f3 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/docbook.py @@ -0,0 +1,254 @@ +from collections.abc import Mapping, MutableMapping, Sequence +from frozendict import frozendict # type: ignore[attr-defined] +from typing import Any, cast, Optional, NamedTuple + +import markdown_it +from markdown_it.token import Token +from 
markdown_it.utils import OptionsDict +from xml.sax.saxutils import escape, quoteattr + +from .md import Renderer + +_xml_id_translate_table = { + ord('*'): ord('_'), + ord('<'): ord('_'), + ord(' '): ord('_'), + ord('>'): ord('_'), + ord('['): ord('_'), + ord(']'): ord('_'), + ord(':'): ord('_'), + ord('"'): ord('_'), +} +def make_xml_id(s: str) -> str: + return s.translate(_xml_id_translate_table) + +class Deflist: + has_dd = False + +class Heading(NamedTuple): + container_tag: str + level: int + +class DocBookRenderer(Renderer): + __output__ = "docbook" + _link_tags: list[str] + _deflists: list[Deflist] + _headings: list[Heading] + + def __init__(self, manpage_urls: Mapping[str, str], parser: Optional[markdown_it.MarkdownIt] = None): + super().__init__(manpage_urls, parser) + self._link_tags = [] + self._deflists = [] + self._headings = [] + + def render(self, tokens: Sequence[Token], options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + result = super().render(tokens, options, env) + result += self._close_headings(None, env) + return result + def renderInline(self, tokens: Sequence[Token], options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + # HACK to support docbook links and xrefs. link handling is only necessary because the docbook + # manpage stylesheet converts - in urls to a mathematical minus, which may be somewhat incorrect. 
+ for i, token in enumerate(tokens): + if token.type != 'link_open': + continue + token.tag = 'link' + # turn [](#foo) into xrefs + if token.attrs['href'][0:1] == '#' and tokens[i + 1].type == 'link_close': # type: ignore[index] + token.tag = "xref" + # turn into links without contents + if tokens[i + 1].type == 'text' and tokens[i + 1].content == token.attrs['href']: + tokens[i + 1].content = '' + + return super().renderInline(tokens, options, env) + + def text(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return escape(token.content) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "\n" + def softbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + # should check options.breaks() and emit hard break if so + return "\n" + def code_inline(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f"{escape(token.content)}" + def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f"{escape(token.content)}" + def link_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + self._link_tags.append(token.tag) + href = cast(str, token.attrs['href']) + (attr, start) = ('linkend', 1) if href[0] == '#' else ('xlink:href', 0) + return f"<{token.tag} {attr}={quoteattr(href[start:])}>" + def link_close(self, token: Token, tokens: 
Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f"" + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "\n" + # HACK open and close para for docbook change size. remove soon. + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + spacing = ' spacing="compact"' if token.attrs.get('compact', False) else '' + return f"\n" + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "\n" + def em_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def em_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def strong_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def strong_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def fence(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + info = f" language={quoteattr(token.info)}" if token.info != "" else "" + return f"{escape(token.content)}" + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "
" + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "
" + def note_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def note_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def caution_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def caution_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def important_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def important_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def tip_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def tip_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def warning_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def warning_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + # markdown-it emits tokens based on the html syntax tree, but docbook is + # slightly different. html has
{
{
}}
, + # docbook has {} + # we have to reject multiple definitions for the same term for time being. + def dl_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + self._deflists.append(Deflist()) + return "" + def dl_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + self._deflists.pop() + return "" + def dt_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + self._deflists[-1].has_dd = False + return "" + def dt_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def dd_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + if self._deflists[-1].has_dd: + raise Exception("multiple definitions per term not supported") + self._deflists[-1].has_dd = True + return "" + def dd_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "" + def myst_role(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + if token.meta['name'] == 'command': + return f"{escape(token.content)}" + if token.meta['name'] == 'file': + return f"{escape(token.content)}" + if token.meta['name'] == 'var': + return f"{escape(token.content)}" + if token.meta['name'] == 'env': + return f"{escape(token.content)}" + if token.meta['name'] == 'option': + return f"" + if token.meta['name'] == 'manpage': + [page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ] + section = section[:-1] + man = f"{page}({section})" + title = f"{escape(page)}" + vol = f"{escape(section)}" + ref = f"{title}{vol}" + if man in self._manpage_urls: + return f"{ref}" + else: + return ref + raise NotImplementedError("md node 
not supported yet", token) + def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f'' + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + start = f' startingnumber="{token.attrs["start"]}"' if 'start' in token.attrs else "" + spacing = ' spacing="compact"' if token.attrs.get('compact', False) else '' + return f"" + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f"" + def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + hlevel = int(token.tag[1:]) + result = self._close_headings(hlevel, env) + (tag, attrs) = self._heading_tag(token, tokens, i, options, env) + self._headings.append(Heading(tag, hlevel)) + attrs_str = "".join([ f" {k}={quoteattr(v)}" for k, v in attrs.items() ]) + return result + f'<{tag}{attrs_str}>\n' + def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return '' + + def _close_headings(self, level: Optional[int], env: MutableMapping[str, Any]) -> str: + # we rely on markdown-it producing h{1..6} tags in token.tag for this to work + result = [] + while len(self._headings): + if level is None or self._headings[-1].level >= level: + result.append(f"") + self._headings.pop() + else: + break + return "\n".join(result) + + def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> tuple[str, dict[str, str]]: + attrs = {} + if id := token.attrs.get('id'): + attrs['xml:id'] = cast(str, id) + return ("section", attrs) diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manual.py 
b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manual.py new file mode 100644 index 000000000000..361bc0877893 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manual.py @@ -0,0 +1,143 @@ +import argparse +import json + +from abc import abstractmethod +from collections.abc import MutableMapping, Sequence +from typing import Any, cast, NamedTuple, Optional, Union +from xml.sax.saxutils import escape, quoteattr +from markdown_it.token import Token +from markdown_it.utils import OptionsDict + +from .docbook import DocBookRenderer +from .md import Converter + +class RenderedSection: + id: Optional[str] + chapters: list[str] + + def __init__(self, id: Optional[str]) -> None: + self.id = id + self.chapters = [] + +class BaseConverter(Converter): + _sections: list[RenderedSection] + + def __init__(self, manpage_urls: dict[str, str]): + super().__init__(manpage_urls) + self._sections = [] + + def add_section(self, id: Optional[str], chapters: list[str]) -> None: + self._sections.append(RenderedSection(id)) + for content in chapters: + self._md.renderer._title_seen = False # type: ignore[attr-defined] + self._sections[-1].chapters.append(self._render(content)) + + @abstractmethod + def finalize(self) -> str: raise NotImplementedError() + +class ManualDocBookRenderer(DocBookRenderer): + # needed to check correctness of chapters. + # we may want to use front matter instead of this kind of heuristic. 
+ _title_seen = False + + def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> tuple[str, dict[str, str]]: + (tag, attrs) = super()._heading_tag(token, tokens, i, options, env) + if self._title_seen: + if token.tag == 'h1': + raise RuntimeError("only one title heading allowed", token) + return (tag, attrs) + self._title_seen = True + return ("chapter", attrs | { + 'xmlns': "http://docbook.org/ns/docbook", + 'xmlns:xlink': "http://www.w3.org/1999/xlink", + }) + + # TODO minimize docbook diffs with existing conversions. remove soon. + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return super().paragraph_open(token, tokens, i, options, env) + "\n " + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return "\n" + super().paragraph_close(token, tokens, i, options, env) + def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return f"\n{escape(token.content)}" + def fence(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + info = f" language={quoteattr(token.info)}" if token.info != "" else "" + return f"\n{escape(token.content)}" + +class DocBookConverter(BaseConverter): + __renderer__ = ManualDocBookRenderer + + def finalize(self) -> str: + result = [] + + for section in self._sections: + id = "id=" + quoteattr(section.id) if section.id is not None else "" + result.append(f'
') + result += section.chapters + result.append(f'
') + + return "\n".join(result) + + + +class Section: + id: Optional[str] = None + chapters: list[str] + + def __init__(self) -> None: + self.chapters = [] + +class SectionAction(argparse.Action): + def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace, + values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None: + sections = getattr(ns, self.dest) + if sections is None: sections = [] + sections.append(Section()) + setattr(ns, self.dest, sections) + +class SectionIDAction(argparse.Action): + def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace, + values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None: + sections = getattr(ns, self.dest) + if sections is None: raise argparse.ArgumentError(self, "no active section") + sections[-1].id = cast(str, values) + +class ChaptersAction(argparse.Action): + def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace, + values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None: + sections = getattr(ns, self.dest) + if sections is None: raise argparse.ArgumentError(self, "no active section") + sections[-1].chapters.extend(cast(Sequence[str], values)) + +def _build_cli_db(p: argparse.ArgumentParser) -> None: + p.add_argument('--manpage-urls', required=True) + p.add_argument("outfile") + p.add_argument("--section", dest="contents", action=SectionAction, nargs=0) + p.add_argument("--section-id", dest="contents", action=SectionIDAction) + p.add_argument("--chapters", dest="contents", action=ChaptersAction, nargs='+') + +def _run_cli_db(args: argparse.Namespace) -> None: + with open(args.manpage_urls, 'r') as manpage_urls: + md = DocBookConverter(json.load(manpage_urls)) + for section in args.contents: + chapters = [] + for p in section.chapters: + with open(p, 'r') as f: + chapters.append(f.read()) + md.add_section(section.id, chapters) + with open(args.outfile, 'w') as f: + f.write(md.finalize()) + +def 
build_cli(p: argparse.ArgumentParser) -> None: + formats = p.add_subparsers(dest='format', required=True) + _build_cli_db(formats.add_parser('docbook')) + +def run_cli(args: argparse.Namespace) -> None: + if args.format == 'docbook': + _run_cli_db(args) + else: + raise RuntimeError('format not hooked up', args) diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/md.py b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/md.py new file mode 100644 index 000000000000..c08675870f6a --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/md.py @@ -0,0 +1,385 @@ +from abc import ABC +from collections.abc import Mapping, MutableMapping, Sequence +from frozendict import frozendict # type: ignore[attr-defined] +from typing import Any, Callable, Optional + +import re + +from .types import RenderFn + +import markdown_it +from markdown_it.token import Token +from markdown_it.utils import OptionsDict +from mdit_py_plugins.container import container_plugin # type: ignore[attr-defined] +from mdit_py_plugins.deflist import deflist_plugin # type: ignore[attr-defined] +from mdit_py_plugins.myst_role import myst_role_plugin # type: ignore[attr-defined] + +_md_escape_table = { + ord('*'): '\\*', + ord('<'): '\\<', + ord('['): '\\[', + ord('`'): '\\`', + ord('.'): '\\.', + ord('#'): '\\#', + ord('&'): '\\&', + ord('\\'): '\\\\', +} +def md_escape(s: str) -> str: + return s.translate(_md_escape_table) + +class Renderer(markdown_it.renderer.RendererProtocol): + _admonitions: dict[str, tuple[RenderFn, RenderFn]] + _admonition_stack: list[str] + + def __init__(self, manpage_urls: Mapping[str, str], parser: Optional[markdown_it.MarkdownIt] = None): + self._manpage_urls = manpage_urls + self.rules = { + 'text': self.text, + 'paragraph_open': self.paragraph_open, + 'paragraph_close': self.paragraph_close, + 'hardbreak': self.hardbreak, + 'softbreak': self.softbreak, + 'code_inline': self.code_inline, + 'code_block': self.code_block, + 'link_open': 
self.link_open, + 'link_close': self.link_close, + 'list_item_open': self.list_item_open, + 'list_item_close': self.list_item_close, + 'bullet_list_open': self.bullet_list_open, + 'bullet_list_close': self.bullet_list_close, + 'em_open': self.em_open, + 'em_close': self.em_close, + 'strong_open': self.strong_open, + 'strong_close': self.strong_close, + 'fence': self.fence, + 'blockquote_open': self.blockquote_open, + 'blockquote_close': self.blockquote_close, + 'dl_open': self.dl_open, + 'dl_close': self.dl_close, + 'dt_open': self.dt_open, + 'dt_close': self.dt_close, + 'dd_open': self.dd_open, + 'dd_close': self.dd_close, + 'myst_role': self.myst_role, + "container_admonition_open": self.admonition_open, + "container_admonition_close": self.admonition_close, + "inline_anchor": self.inline_anchor, + "heading_open": self.heading_open, + "heading_close": self.heading_close, + "ordered_list_open": self.ordered_list_open, + "ordered_list_close": self.ordered_list_close, + } + + self._admonitions = { + "{.note}": (self.note_open, self.note_close), + "{.caution}": (self.caution_open,self.caution_close), + "{.tip}": (self.tip_open, self.tip_close), + "{.important}": (self.important_open, self.important_close), + "{.warning}": (self.warning_open, self.warning_close), + } + self._admonition_stack = [] + + def admonition_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + tag = token.info.strip() + self._admonition_stack.append(tag) + return self._admonitions[tag][0](token, tokens, i, options, env) + def admonition_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + return self._admonitions[self._admonition_stack.pop()][1](token, tokens, i, options, env) + + def render(self, tokens: Sequence[Token], options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + def do_one(i: int, token: Token) -> str: + if token.type == "inline": + 
assert token.children is not None + return self.renderInline(token.children, options, env) + elif token.type in self.rules: + return self.rules[token.type](tokens[i], tokens, i, options, env) + else: + raise NotImplementedError("md token not supported yet", token) + return "".join(map(lambda arg: do_one(*arg), enumerate(tokens))) + def renderInline(self, tokens: Sequence[Token], options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + def do_one(i: int, token: Token) -> str: + if token.type in self.rules: + return self.rules[token.type](tokens[i], tokens, i, options, env) + else: + raise NotImplementedError("md token not supported yet", token) + return "".join(map(lambda arg: do_one(*arg), enumerate(tokens))) + + def text(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def softbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def code_inline(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise 
RuntimeError("md token not supported", token) + def link_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def link_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def em_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def em_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def strong_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def strong_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def fence(self, token: Token, tokens: Sequence[Token], i: int, 
options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def note_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def note_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def caution_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def caution_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def important_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def important_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def tip_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def tip_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not 
supported", token) + def warning_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def warning_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dl_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dl_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dt_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dt_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dd_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def dd_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def myst_role(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: 
MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported", token) + +def _is_escaped(src: str, pos: int) -> bool: + found = 0 + while pos >= 0 and src[pos] == '\\': + found += 1 + pos -= 1 + return found % 2 == 1 + +_INLINE_ANCHOR_PATTERN = re.compile(r"\{\s*#([\w-]+)\s*\}") + +def _inline_anchor_plugin(md: markdown_it.MarkdownIt) -> None: + def inline_anchor(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool: + if state.src[state.pos] != '[': + return False + if _is_escaped(state.src, state.pos - 1): + return False + + # treat the inline span like a link label for simplicity. 
+ label_begin = state.pos + 1 + label_end = markdown_it.helpers.parseLinkLabel(state, state.pos) + input_end = state.posMax + if label_end < 0: + return False + + # match id + match = _INLINE_ANCHOR_PATTERN.match(state.src[label_end + 1 : ]) + if not match: + return False + + if not silent: + token = state.push("inline_anchor", "", 0) # type: ignore[no-untyped-call] + token.attrs['id'] = match[1] + + state.pos = label_begin + state.posMax = label_end + state.md.inline.tokenize(state) + + state.pos = label_end + match.end() + 1 + state.posMax = input_end + return True + + md.inline.ruler.before("link", "inline_anchor", inline_anchor) + +def _inline_comment_plugin(md: markdown_it.MarkdownIt) -> None: + def inline_comment(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool: + if state.src[state.pos : state.pos + 4] != '': # --> + state.pos = i + 3 + return True + + return False + + md.inline.ruler.after("autolink", "inline_comment", inline_comment) + +def _block_comment_plugin(md: markdown_it.MarkdownIt) -> None: + def block_comment(state: markdown_it.rules_block.StateBlock, startLine: int, endLine: int, + silent: bool) -> bool: + pos = state.bMarks[startLine] + state.tShift[startLine] + posMax = state.eMarks[startLine] + + if state.src[pos : pos + 4] != '': + state.line = nextLine + 1 + return True + + nextLine += 1 + + return False + + md.block.ruler.after("code", "block_comment", block_comment) + +_HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w-]+)\s*\}\s*$") + +class Converter(ABC): + __renderer__: Callable[[Mapping[str, str], markdown_it.MarkdownIt], Renderer] + + def __init__(self, manpage_urls: Mapping[str, str]): + self._manpage_urls = frozendict(manpage_urls) + + self._md = markdown_it.MarkdownIt( + "commonmark", + { + 'maxNesting': 100, # default is 20 + 'html': False, # not useful since we target many formats + 'typographer': True, # required for smartquotes + }, + renderer_cls=lambda parser: self.__renderer__(self._manpage_urls, parser) + ) + 
self._md.use( + container_plugin, + name="admonition", + validate=lambda name, *args: ( + name.strip() in self._md.renderer._admonitions # type: ignore[attr-defined] + ) + ) + self._md.use(deflist_plugin) + self._md.use(myst_role_plugin) + self._md.use(_inline_anchor_plugin) + self._md.use(_inline_comment_plugin) + self._md.use(_block_comment_plugin) + self._md.enable(["smartquotes", "replacements"]) + + def _post_parse(self, tokens: list[Token]) -> list[Token]: + for i in range(0, len(tokens)): + # parse header IDs. this is purposely simple and doesn't support + # classes or other inds of attributes. + if tokens[i].type == 'heading_open': + children = tokens[i + 1].children + assert children is not None + if len(children) == 0 or children[-1].type != 'text': + continue + if m := _HEADER_ID_RE.search(children[-1].content): + tokens[i].attrs['id'] = m[1] + children[-1].content = children[-1].content[:-len(m[0])].rstrip() + + # markdown-it signifies wide lists by setting the wrapper paragraphs + # of each item to hidden. this is not useful for our stylesheets, which + # signify this with a special css class on list elements instead. 
+ wide_stack = [] + for i in range(0, len(tokens)): + if tokens[i].type in [ 'bullet_list_open', 'ordered_list_open' ]: + wide_stack.append([i, True]) + elif tokens[i].type in [ 'bullet_list_close', 'ordered_list_close' ]: + (idx, compact) = wide_stack.pop() + tokens[idx].attrs['compact'] = compact + elif len(wide_stack) > 0 and tokens[i].type == 'paragraph_open' and not tokens[i].hidden: + wide_stack[-1][1] = False + + return tokens + + def _parse(self, src: str, env: Optional[MutableMapping[str, Any]] = None) -> list[Token]: + tokens = self._md.parse(src, env if env is not None else {}) + return self._post_parse(tokens) + + def _render(self, src: str) -> str: + env: dict[str, Any] = {} + tokens = self._parse(src, env) + return self._md.renderer.render(tokens, self._md.options, env) # type: ignore[no-any-return] diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/options.py b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/options.py new file mode 100644 index 000000000000..3667c7bbcdeb --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/options.py @@ -0,0 +1,284 @@ +import argparse +import json + +from abc import abstractmethod +from collections.abc import MutableMapping, Sequence +from markdown_it.utils import OptionsDict +from markdown_it.token import Token +from typing import Any, Optional +from xml.sax.saxutils import escape, quoteattr + +from .docbook import DocBookRenderer, make_xml_id +from .md import Converter, md_escape +from .types import OptionLoc, Option, RenderedOption + +def option_is(option: Option, key: str, typ: str) -> Optional[dict[str, str]]: + if key not in option: + return None + if type(option[key]) != dict: + return None + if option[key].get('_type') != typ: # type: ignore[union-attr] + return None + return option[key] # type: ignore[return-value] + +class BaseConverter(Converter): + _options: dict[str, RenderedOption] + + def __init__(self, manpage_urls: dict[str, str], + revision: str, + 
document_type: str, + varlist_id: str, + id_prefix: str, + markdown_by_default: bool): + super().__init__(manpage_urls) + self._options = {} + self._revision = revision + self._document_type = document_type + self._varlist_id = varlist_id + self._id_prefix = id_prefix + self._markdown_by_default = markdown_by_default + + def _format_decl_def_loc(self, loc: OptionLoc) -> tuple[Optional[str], str]: + # locations can be either plain strings (specific to nixpkgs), or attrsets + # { name = "foo/bar.nix"; url = "https://github.com/....."; } + if isinstance(loc, str): + # Hyperlink the filename either to the NixOS github + # repository (if it’s a module and we have a revision number), + # or to the local filesystem. + if not loc.startswith('/'): + if self._revision == 'local': + href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}" + else: + href = f"https://github.com/NixOS/nixpkgs/blob/{self._revision}/{loc}" + else: + href = f"file://{loc}" + # Print the filename and make it user-friendly by replacing the + # /nix/store/ prefix by the default location of nixos + # sources. 
+ if not loc.startswith('/'): + name = f"" + elif 'nixops' in loc and '/nix/' in loc: + name = f"" + else: + name = loc + return (href, name) + else: + return (loc['url'] if 'url' in loc else None, loc['name']) + + @abstractmethod + def _decl_def_header(self, header: str) -> list[str]: raise NotImplementedError() + + @abstractmethod + def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: raise NotImplementedError() + + @abstractmethod + def _decl_def_footer(self) -> list[str]: raise NotImplementedError() + + def _render_decl_def(self, header: str, locs: list[OptionLoc]) -> list[str]: + result = [] + result += self._decl_def_header(header) + for loc in locs: + href, name = self._format_decl_def_loc(loc) + result += self._decl_def_entry(href, name) + result += self._decl_def_footer() + return result + + def _render_code(self, option: Option, key: str) -> list[str]: + if lit := option_is(option, key, 'literalMD'): + return [ self._render(f"*{key.capitalize()}:*\n{lit['text']}") ] + elif lit := option_is(option, key, 'literalExpression'): + code = lit['text'] + # for multi-line code blocks we only have to count ` runs at the beginning + # of a line, but this is much easier. + multiline = '\n' in code + longest, current = (0, 0) + for c in code: + current = current + 1 if c == '`' else 0 + longest = max(current, longest) + # inline literals need a space to separate ticks from content, code blocks + # need newlines. inline literals need one extra tick, code blocks need three. 
+ ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ') + code = f"{ticks}{sep}{code}{sep}{ticks}" + return [ self._render(f"*{key.capitalize()}:*\n{code}") ] + elif key in option: + raise Exception(f"{key} has unrecognized type", option[key]) + else: + return [] + + def _render_description(self, desc: str | dict[str, str]) -> list[str]: + if isinstance(desc, str) and self._markdown_by_default: + return [ self._render(desc) ] + elif isinstance(desc, dict) and desc.get('_type') == 'mdDoc': + return [ self._render(desc['text']) ] + else: + raise Exception("description has unrecognized type", desc) + + @abstractmethod + def _related_packages_header(self) -> list[str]: raise NotImplementedError() + + def _convert_one(self, option: dict[str, Any]) -> list[str]: + result = [] + + if desc := option.get('description'): + result += self._render_description(desc) + if typ := option.get('type'): + ro = " *(read only)*" if option.get('readOnly', False) else "" + result.append(self._render(f"*Type:* {md_escape(typ)}{ro}")) + + result += self._render_code(option, 'default') + result += self._render_code(option, 'example') + + if related := option.get('relatedPackages'): + result += self._related_packages_header() + result.append(self._render(related)) + if decl := option.get('declarations'): + result += self._render_decl_def("Declared by", decl) + if defs := option.get('definitions'): + result += self._render_decl_def("Defined by", defs) + + return result + + def add_options(self, options: dict[str, Any]) -> None: + for (name, option) in options.items(): + try: + self._options[name] = RenderedOption(option['loc'], self._convert_one(option)) + except Exception as e: + raise Exception(f"Failed to render option {name}") from e + + @abstractmethod + def finalize(self) -> str: raise NotImplementedError() + +class OptionsDocBookRenderer(DocBookRenderer): + def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: 
MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported in options doc", token) + def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + raise RuntimeError("md token not supported in options doc", token) + + # TODO keep optionsDocBook diff small. remove soon if rendering is still good. + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + token.attrs['compact'] = False + return super().ordered_list_open(token, tokens, i, options, env) + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict, + env: MutableMapping[str, Any]) -> str: + token.attrs['compact'] = False + return super().bullet_list_open(token, tokens, i, options, env) + +class DocBookConverter(BaseConverter): + __renderer__ = OptionsDocBookRenderer + + def _render_code(self, option: dict[str, Any], key: str) -> list[str]: + if lit := option_is(option, key, 'literalDocBook'): + return [ f"{key.capitalize()}: {lit['text']}" ] + else: + return super()._render_code(option, key) + + def _render_description(self, desc: str | dict[str, Any]) -> list[str]: + if isinstance(desc, str) and not self._markdown_by_default: + return [ f"{desc}" ] + else: + return super()._render_description(desc) + + def _related_packages_header(self) -> list[str]: + return [ + "", + " Related packages:", + "", + ] + + def _decl_def_header(self, header: str) -> list[str]: + return [ + f"{header}:", + "" + ] + + def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: + if href is not None: + href = " xlink:href=" + quoteattr(href) + return [ + f"", + escape(name), + "" + ] + + def _decl_def_footer(self) -> list[str]: + return [ "" ] + + def finalize(self) -> str: + keys = list(self._options.keys()) + keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") 
else 2, p) + for p in self._options[opt].loc ]) + + result = [] + + result.append('') + if self._document_type == 'appendix': + result += [ + '', + ' Configuration Options', + ] + result += [ + f'', + ] + + for name in keys: + id = make_xml_id(self._id_prefix + name) + result += [ + "", + # NOTE adding extra spaces here introduces spaces into xref link expansions + (f"" + + f""), + "" + ] + result += self._options[name].lines + result += [ + "", + "" + ] + + result.append("") + if self._document_type == 'appendix': + result.append("") + + return "\n".join(result) + +def _build_cli_db(p: argparse.ArgumentParser) -> None: + p.add_argument('--manpage-urls', required=True) + p.add_argument('--revision', required=True) + p.add_argument('--document-type', required=True) + p.add_argument('--varlist-id', required=True) + p.add_argument('--id-prefix', required=True) + p.add_argument('--markdown-by-default', default=False, action='store_true') + p.add_argument("infile") + p.add_argument("outfile") + +def _run_cli_db(args: argparse.Namespace) -> None: + with open(args.manpage_urls, 'r') as manpage_urls: + md = DocBookConverter( + json.load(manpage_urls), + revision = args.revision, + document_type = args.document_type, + varlist_id = args.varlist_id, + id_prefix = args.id_prefix, + markdown_by_default = args.markdown_by_default) + + with open(args.infile, 'r') as f: + md.add_options(json.load(f)) + with open(args.outfile, 'w') as f: + f.write(md.finalize()) + +def build_cli(p: argparse.ArgumentParser) -> None: + formats = p.add_subparsers(dest='format', required=True) + _build_cli_db(formats.add_parser('docbook')) + +def run_cli(args: argparse.Namespace) -> None: + if args.format == 'docbook': + _run_cli_db(args) + else: + raise RuntimeError('format not hooked up', args) diff --git a/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/types.py b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/types.py new file mode 100644 index 000000000000..7814b3a4854b --- 
/dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/types.py @@ -0,0 +1,13 @@ +from collections.abc import Sequence, MutableMapping +from typing import Any, Callable, Optional, Tuple, NamedTuple + +from markdown_it.token import Token +from markdown_it.utils import OptionsDict + +OptionLoc = str | dict[str, str] +Option = dict[str, str | dict[str, str] | list[OptionLoc]] + +RenderedOption = NamedTuple('RenderedOption', [('loc', list[str]), + ('lines', list[str])]) + +RenderFn = Callable[[Token, Sequence[Token], int, OptionsDict, MutableMapping[str, Any]], str] diff --git a/pkgs/tools/nix/nixos-render-docs/src/pyproject.toml b/pkgs/tools/nix/nixos-render-docs/src/pyproject.toml new file mode 100644 index 000000000000..d66643ef8421 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "nixos-render-docs" +version = "0.0" +description = "Renderer for NixOS manual and option docs" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] + +[project.scripts] +nixos-render-docs = "nixos_render_docs:main" + +[build-system] +requires = ["setuptools"] diff --git a/pkgs/tools/nix/nixos-render-docs/src/tests/test_headings.py b/pkgs/tools/nix/nixos-render-docs/src/tests/test_headings.py new file mode 100644 index 000000000000..0b73cdc8e7c7 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/tests/test_headings.py @@ -0,0 +1,102 @@ +import nixos_render_docs + +from markdown_it.token import Token + +class Converter(nixos_render_docs.md.Converter): + # actual renderer doesn't matter, we're just parsing. 
+ __renderer__ = nixos_render_docs.docbook.DocBookRenderer + +def test_heading_id_absent() -> None: + c = Converter({}) + assert c._parse("# foo") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo', markup='', info='', meta={}, block=False, hidden=False) + ], + content='foo', markup='', info='', meta={}, block=True, hidden=False), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False) + ] + +def test_heading_id_present() -> None: + c = Converter({}) + assert c._parse("# foo {#foo}\n## bar { #bar}\n### bal { #bal} ") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'foo'}, map=[0, 1], level=0, + children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='foo {#foo}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='heading_open', tag='h2', nesting=1, attrs={'id': 'bar'}, map=[1, 2], level=0, + children=None, content='', markup='##', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=1, + content='bar { #bar}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', 
nesting=0, attrs={}, map=None, level=0, children=None, + content='bar', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h2', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='##', info='', meta={}, block=True, hidden=False), + Token(type='heading_open', tag='h3', nesting=1, attrs={'id': 'bal'}, map=[2, 3], level=0, + children=None, content='', markup='###', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=1, + content='bal { #bal}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='bal', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h3', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='###', info='', meta={}, block=True, hidden=False) + ] + +def test_heading_id_incomplete() -> None: + c = Converter({}) + assert c._parse("# foo {#}") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='foo {#}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo {#}', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False) + ] + +def test_heading_id_double() -> None: + c = Converter({}) + assert c._parse("# foo {#a} {#b}") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'b'}, map=[0, 1], level=0, + children=None, content='', markup='#', info='', 
meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='foo {#a} {#b}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo {#a}', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False) + ] + +def test_heading_id_suffixed() -> None: + c = Converter({}) + assert c._parse("# foo {#a} s") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, + children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='foo {#a} s', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo {#a} s', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False) + ] diff --git a/pkgs/tools/nix/nixos-render-docs/src/tests/test_lists.py b/pkgs/tools/nix/nixos-render-docs/src/tests/test_lists.py new file mode 100644 index 000000000000..0f5d28407367 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/tests/test_lists.py @@ -0,0 +1,182 @@ +import nixos_render_docs +import pytest + +from markdown_it.token import Token + +class Converter(nixos_render_docs.md.Converter): + # actual renderer doesn't matter, we're just parsing. 
+ __renderer__ = nixos_render_docs.docbook.DocBookRenderer + +@pytest.mark.parametrize("ordered", [True, False]) +def test_list_wide(ordered: bool) -> None: + t, tag, m, e1, e2, i1, i2 = ( + ("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "") + ) + c = Converter({}) + assert c._parse(f"{e1} a\n\n{e2} b") == [ + Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={'compact': False}, map=[0, 3], level=0, + children=None, content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 2], level=1, children=None, + content='', markup=m, info=i1, meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3, + content='a', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[2, 3], level=1, children=None, + content='', markup=m, info=i2, meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[2, 3], level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=3, + content='b', markup='', info='', meta={}, block=True, 
hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='b', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False) + ] + +@pytest.mark.parametrize("ordered", [True, False]) +def test_list_narrow(ordered: bool) -> None: + t, tag, m, e1, e2, i1, i2 = ( + ("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "") + ) + c = Converter({}) + assert c._parse(f"{e1} a\n{e2} b") == [ + Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={'compact': True}, map=[0, 2], level=0, + children=None, content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, + content='', markup=m, info=i1, meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3, + content='a', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + 
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, + content='', markup=m, info=i2, meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3, + content='b', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='b', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False) + ] + assert c._parse(f"{e1} - a\n{e2} b") == [ + Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={'compact': True}, map=[0, 2], level=0, + children=None, content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, + content='', markup=m, info=i1, meta={}, block=True, hidden=False), + Token(type='bullet_list_open', tag='ul', nesting=1, attrs={'compact': True}, map=[0, 1], level=2, + children=None, content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None, + 
content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5, + content='a', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, + content='', markup=m, info=i2, meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3, + content='b', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='b', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='', 
info='', meta={}, block=True, hidden=True), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False) + ] + assert c._parse(f"{e1} - a\n{e2} - b") == [ + Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={'compact': True}, map=[0, 2], level=0, + children=None, content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, + content='', markup=m, info=i1, meta={}, block=True, hidden=False), + Token(type='bullet_list_open', tag='ul', nesting=1, attrs={'compact': True}, map=[0, 1], level=2, + children=None, content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5, + content='a', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='bullet_list_close', tag='ul', 
nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, + content='', markup=m, info=i2, meta={}, block=True, hidden=False), + Token(type='bullet_list_open', tag='ul', nesting=1, attrs={'compact': True}, map=[1, 2], level=2, + children=None, content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=3, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=5, + content='b', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='b', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, + content='', markup='', info='', meta={}, block=True, hidden=True), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None, + content='', markup='-', info='', meta={}, block=True, hidden=False), + Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False), + 
Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup=m, info='', meta={}, block=True, hidden=False) + ] diff --git a/pkgs/tools/nix/nixos-render-docs/src/tests/test_options.py b/pkgs/tools/nix/nixos-render-docs/src/tests/test_options.py new file mode 100644 index 000000000000..5a02fabde0fb --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/tests/test_options.py @@ -0,0 +1,14 @@ +import nixos_render_docs + +from markdown_it.token import Token +import pytest + +def test_option_headings() -> None: + c = nixos_render_docs.options.DocBookConverter({}, 'local', 'none', 'vars', 'opt-', False) + with pytest.raises(RuntimeError) as exc: + c._render("# foo") + assert exc.value.args[0] == 'md token not supported in options doc' + assert exc.value.args[1] == Token( + type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False + ) diff --git a/pkgs/tools/nix/nixos-render-docs/src/tests/test_plugins.py b/pkgs/tools/nix/nixos-render-docs/src/tests/test_plugins.py new file mode 100644 index 000000000000..4efcb9bdfc73 --- /dev/null +++ b/pkgs/tools/nix/nixos-render-docs/src/tests/test_plugins.py @@ -0,0 +1,279 @@ +import nixos_render_docs + +from markdown_it.token import Token + +class Converter(nixos_render_docs.md.Converter): + # actual renderer doesn't matter, we're just parsing. 
+ __renderer__ = nixos_render_docs.docbook.DocBookRenderer + +def test_inline_anchor_simple() -> None: + c = Converter({}) + assert c._parse("[]{#test}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[]{#test}', + markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'test'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_anchor_formatted() -> None: + c = Converter({}) + assert c._parse("a[b c `d` ***e***]{#test}f") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, + children=None, content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='a[b c `d` ***e***]{#test}f', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, + children=None, content='a', markup='', info='', meta={}, block=False, hidden=False), + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'test'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='b c ', markup='', info='', meta={}, block=False, hidden=False), + Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=0, + children=None, content='d', markup='`', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', 
nesting=0, attrs={}, map=None, level=0, children=None, + content=' ', markup='', info='', meta={}, block=False, hidden=False), + Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None, + content='', markup='*', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, + content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='strong_open', tag='strong', nesting=1, attrs={}, map=None, level=1, + children=None, content='', markup='**', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None, + content='e', markup='', info='', meta={}, block=False, hidden=False), + Token(type='strong_close', tag='strong', nesting=-1, attrs={}, map=None, level=1, + children=None, content='', markup='**', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, + content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='*', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='f', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_anchor_in_heading() -> None: + c = Converter({}) + # inline anchors in headers are allowed, but header attributes should be preferred + assert c._parse("# foo []{#bar} baz") == [ + Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, 
attrs={}, map=[0, 1], level=1, + content='foo []{#bar} baz', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='foo ', markup='', info='', meta={}, block=False, hidden=False), + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'bar'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content=' baz', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='#', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_anchor_on_links() -> None: + c = Converter({}) + assert c._parse("[ [a](#bar) ]{#foo}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[ [a](#bar) ]{#foo}', + markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'foo'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content=' ', markup='', info='', meta={}, block=False, hidden=False), + Token(type='link_open', tag='a', nesting=1, attrs={'href': '#bar'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False), + Token(type='link_close', tag='a', nesting=-1, attrs={}, map=None, level=0, 
children=None, + content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content=' ', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_anchor_nested() -> None: + # inline anchors may contain more anchors (even though this is a bit pointless) + c = Converter({}) + assert c._parse("[ [a]{#bar} ]{#foo}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='[ [a]{#bar} ]{#foo}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'foo'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content=' ', markup='', info='', meta={}, block=False, hidden=False), + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'bar'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a ', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_anchor_escaping() -> None: + c = Converter({}) + assert c._parse("\\[a]{#bar}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + 
content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='[a]{#bar}', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + assert c._parse("\\\\[a]{#bar}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='\\', markup='', info='', meta={}, block=False, hidden=False), + Token(type='inline_anchor', tag='', nesting=0, attrs={'id': 'bar'}, map=None, level=0, + children=None, content='', markup='', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + assert c._parse("\\\\\\[a]{#bar}") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + 
content='\\[a]{#bar}', markup='', info='', meta={}, block=False, hidden=False) + ], + content='\\\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_comment_basic() -> None: + c = Converter({}) + assert c._parse("a b") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='a b', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a b', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + assert c._parse("a") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='a', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_comment_does_not_nest_in_code() -> None: + c = Converter({}) + assert c._parse("`ac`") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', 
markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='`ac`', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=0, children=None, + content='ac', markup='`', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_comment_does_not_nest_elsewhere() -> None: + c = Converter({}) + assert c._parse("*ac*") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + content='*ac*', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None, + content='', markup='*', info='', meta={}, block=False, hidden=False), + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, + content='ac', markup='', info='', meta={}, block=False, hidden=False), + Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='*', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_inline_comment_can_be_escaped() -> None: + c = Converter({}) + assert c._parse("a\\c") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + 
content='a\\c', markup='', info='', meta={}, block=True, hidden=False, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='ac', markup='', info='', meta={}, block=False, hidden=False) + ]), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + assert c._parse("a\\\\c") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a\\c', markup='', info='', meta={}, block=False, hidden=False) + ], + content='a\\\\c', markup='', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + assert c._parse("a\\\\\\c") == [ + Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False), + Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, + children=[ + Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, + content='a\\c', markup='', info='', meta={}, block=False, hidden=False) + ], + content='a\\\\\\c', markup='', info='', meta={}, block=True, hidden=False), + Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, + content='', markup='', info='', meta={}, block=True, hidden=False) + ] + +def test_block_comment() -> None: + c = Converter({}) + assert c._parse("") == [] + assert c._parse("") == [] + assert c._parse("") == [] + assert c._parse("") == [] + assert 
c._parse("") == [] diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix index b30ee9058e03..286678355b4d 100644 --- a/pkgs/top-level/all-packages.nix +++ b/pkgs/top-level/all-packages.nix @@ -37661,6 +37661,8 @@ with pkgs; nixos-install-tools = callPackage ../tools/nix/nixos-install-tools { }; + nixos-render-docs = callPackage ../tools/nix/nixos-render-docs { }; + nixdoc = callPackage ../tools/nix/nixdoc {}; dnadd = callPackage ../tools/nix/dnadd { };