nixos/modules: fix systemd start rate-limits

These have been broken since 2016 (f0367da7d1), when StartLimitIntervalSec got
moved from [Service] into [Unit]. StartLimitBurst has also been moved
accordingly, so let's fix that one too. NixOS systems have been producing logs
such as:

    /nix/store/wf98r55aszi1bkmln1lvdbp7znsfr70i-unit-caddy.service/caddy.service:31: Unknown key name 'StartLimitIntervalSec' in section 'Service', ignoring.

I have also removed some unnecessary duplication in units that disable rate
limiting, since setting either the interval or the burst to zero disables it
(ad16158c10/src/basic/ratelimit.c (L16)).
parent 2df221ec8a
commit b37bbca521

26 changed files with 57 additions and 51 deletions
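For orientation, a minimal sketch of the pattern applied across the modules below. The my-daemon service and its ExecStart are placeholders, not part of this commit; the point is that the rate-limit settings move out of serviceConfig (rendered into the [Service] section) and onto the unit-level startLimitIntervalSec / startLimitBurst options (rendered into [Unit]).

{ config, lib, pkgs, ... }:

{
  # Hypothetical service, for illustration only.
  systemd.services.my-daemon = {
    wantedBy = [ "multi-user.target" ];

    # New style: unit-level options, rendered into the [Unit] section.
    startLimitIntervalSec = 60;
    startLimitBurst = 5;
    # Rate limiting can be disabled with startLimitIntervalSec = 0 alone;
    # also setting the burst to zero is redundant.

    serviceConfig = {
      ExecStart = "${pkgs.hello}/bin/hello";
      Restart = "on-failure";
      RestartSec = 5;
      # Old style (broken since the 2016 systemd change above): putting
      # StartLimitIntervalSec / StartLimitBurst here lands them in [Service],
      # where systemd warns "Unknown key name ... in section 'Service'".
    };
  };
}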
@@ -25,19 +25,18 @@ in
     ];
 
   config = mkIf cfg.enable {
 
     systemd.services.hercules-ci-agent = {
       wantedBy = [ "multi-user.target" ];
       after = [ "network-online.target" ];
       wants = [ "network-online.target" ];
       path = [ config.nix.package ];
+      startLimitBurst = 30 * 1000000; # practically infinite
       serviceConfig = {
         User = "hercules-ci-agent";
         ExecStart = command;
         ExecStartPre = testCommand;
         Restart = "on-failure";
         RestartSec = 120;
-        StartLimitBurst = 30 * 1000000; # practically infinite
       };
     };
@@ -40,10 +40,10 @@ let cfg = config.services.victoriametrics; in
     systemd.services.victoriametrics = {
       description = "VictoriaMetrics time series database";
       after = [ "network.target" ];
+      startLimitBurst = 5;
       serviceConfig = {
         Restart = "on-failure";
         RestartSec = 1;
-        StartLimitBurst = 5;
         StateDirectory = "victoriametrics";
         DynamicUser = true;
         ExecStart = ''
@@ -151,14 +151,13 @@ in with lib; {
       description = "LCDproc - client";
       after = [ "lcdd.service" ];
       wantedBy = [ "lcd.target" ];
+      # Allow restarting for eternity
+      startLimitIntervalSec = lib.mkIf cfg.client.restartForever 0;
       serviceConfig = serviceCfg // {
         ExecStart = "${pkg}/bin/lcdproc -f -c ${clientCfg}";
         # If the server is being restarted at the same time, the client will
         # fail as it cannot connect, so space it out a bit.
         RestartSec = "5";
-        # Allow restarting for eternity
-        StartLimitIntervalSec = lib.mkIf cfg.client.restartForever "0";
-        StartLimitBurst = lib.mkIf cfg.client.restartForever "0";
       };
     };
   };
@@ -427,12 +427,12 @@ in
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ cfg.configFile modulesDir ];
 
+      startLimitIntervalSec = 60; # 1 min
       serviceConfig = {
         ExecStart = "${dovecotPkg}/sbin/dovecot -F";
         ExecReload = "${dovecotPkg}/sbin/doveadm reload";
         Restart = "on-failure";
         RestartSec = "1s";
-        StartLimitInterval = "1min";
         RuntimeDirectory = [ "dovecot2" ];
       };
 
@@ -37,9 +37,9 @@ in {
       description = "Autorandr execution hook";
       after = [ "sleep.target" ];
 
+      startLimitIntervalSec = 5;
+      startLimitBurst = 1;
       serviceConfig = {
-        StartLimitInterval = 5;
-        StartLimitBurst = 1;
         ExecStart = "${pkgs.autorandr}/bin/autorandr --batch --change --default ${cfg.defaultTarget}";
         Type = "oneshot";
         RemainAfterExit = false;
@@ -126,12 +126,12 @@ in
         GPU_USE_SYNC_OBJECTS = "1";
       };
 
+      startLimitIntervalSec = 60; # 1 min
       serviceConfig = {
         ExecStart = "${pkgs.cgminer}/bin/cgminer --syslog --text-only --config ${cgminerConfig}";
         User = cfg.user;
         RestartSec = "30s";
         Restart = "always";
-        StartLimitInterval = "1m";
       };
     };
 
@@ -32,14 +32,14 @@ in
       wantedBy = [ "graphical-session.target" ];
       partOf = [ "graphical-session.target" ];
 
+      startLimitIntervalSec = 350;
+      startLimitBurst = 10;
       serviceConfig = {
         ExecStart = ''
           ${pkgs.safeeyes}/bin/safeeyes
         '';
         Restart = "on-failure";
         RestartSec = 3;
-        StartLimitInterval = 350;
-        StartLimitBurst = 10;
       };
     };
 
@@ -31,14 +31,14 @@ in
       after = [ "NetworkManager-wait-online.service" "network.target" ];
       preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer";
 
+      startLimitIntervalSec = 60;
+      startLimitBurst = 10;
       serviceConfig = {
         Type = "forking";
         ExecStart = "${pkgs.teamviewer}/bin/teamviewerd -d";
         PIDFile = "/run/teamviewerd.pid";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         Restart = "on-abort";
-        StartLimitInterval = "60";
-        StartLimitBurst = "10";
       };
     };
   };
@@ -28,6 +28,9 @@ let
 
       # Don't start services that are not yet initialized
       unitConfig.ConditionPathExists = "/var/lib/${stateDirectory}/keyring";
+      startLimitBurst =
+        if daemonType == "osd" then 30 else if lib.elem daemonType ["mgr" "mds"] then 3 else 5;
+      startLimitIntervalSec = 60 * 30; # 30 mins
 
      serviceConfig = {
        LimitNOFILE = 1048576;
@@ -39,8 +42,6 @@ let
        ProtectHome = "true";
        ProtectSystem = "full";
        Restart = "on-failure";
-       StartLimitBurst = "5";
-       StartLimitInterval = "30min";
        StateDirectory = stateDirectory;
        User = "ceph";
        Group = if daemonType == "osd" then "disk" else "ceph";
@@ -48,13 +49,10 @@ let
          -f --cluster ${clusterName} --id ${daemonId}'';
      } // optionalAttrs (daemonType == "osd") {
        ExecStartPre = ''${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}'';
-       StartLimitBurst = "30";
        RestartSec = "20s";
        PrivateDevices = "no"; # osd needs disk access
      } // optionalAttrs ( daemonType == "mon") {
        RestartSec = "10";
-     } // optionalAttrs (lib.elem daemonType ["mgr" "mds"]) {
-       StartLimitBurst = "3";
      };
    });
@@ -264,10 +264,10 @@ in
        ''
      );
 
+     startLimitIntervalSec = 0;
      serviceConfig = {
        Type = "forking";
        Restart = "always";
-       StartLimitInterval = 0;
        RestartSec = 1;
        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW CAP_SETUID";
        ProtectSystem = true;
@@ -41,6 +41,7 @@ in {
    systemd.services.dnsdist = {
      wantedBy = [ "multi-user.target" ];
 
+     startLimitIntervalSec = 0;
      serviceConfig = {
        DynamicUser = true;
 
@@ -29,9 +29,9 @@ with lib;
        # Needed for ping
        "/run/wrappers"
      ];
+     startLimitBurst = 5;
+     startLimitIntervalSec = 20;
      serviceConfig = {
-       StartLimitBurst = 5;
-       StartLimitIntervalSec = 20;
        ExecStart = "${pkgs.mullvad-vpn}/bin/mullvad-daemon -v --disable-stdout-timestamps";
        Restart = "always";
        RestartSec = 1;
@@ -165,6 +165,8 @@ in
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
 
+     startLimitIntervalSec = 120;
+     startLimitBurst = 5;
      serviceConfig = {
        User = "namecoin";
        Group = "namecoin";
@@ -176,8 +178,6 @@ in
        TimeoutStopSec = "60s";
        TimeoutStartSec = "2s";
        Restart = "always";
-       StartLimitInterval = "120s";
-       StartLimitBurst = "5";
      };
 
      preStart = optionalString (cfg.wallet != "${dataDir}/wallet.dat") ''
@@ -28,9 +28,9 @@ in {
      environment = {
        SERVICE_RUN_MODE = "1";
      };
+     startLimitIntervalSec = 5;
+     startLimitBurst = 10;
      serviceConfig = {
-       StartLimitInterval = 5;
-       StartLimitBurst = 10;
        ExecStart = "${pkgs.nextdns}/bin/nextdns run ${escapeShellArgs config.services.nextdns.arguments}";
        RestartSec = 120;
        LimitMEMLOCK = "infinity";
@@ -42,9 +42,9 @@ in
      description = "A HTTP nix store that proxies requests to Google Storage";
      wantedBy = ["multi-user.target"];
 
+     startLimitIntervalSec = 10;
      serviceConfig = {
        RestartSec = 5;
-       StartLimitInterval = 10;
        ExecStart = ''
          ${pkgs.nix-store-gcs-proxy}/bin/nix-store-gcs-proxy \
            --bucket-name ${cfg.bucketName} \
@@ -916,14 +916,14 @@ in
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
 
+     startLimitBurst = 4;
+     startLimitIntervalSec = 5 * 60; # 5 mins
      serviceConfig = {
        ExecStart = "${nsdPkg}/sbin/nsd -d -c ${nsdEnv}/nsd.conf";
        StandardError = "null";
        PIDFile = pidFile;
        Restart = "always";
        RestartSec = "4s";
-       StartLimitBurst = 4;
-       StartLimitInterval = "5min";
      };
 
      preStart = ''
@@ -103,6 +103,8 @@ in
        rm -f '${cfg.stateDir}/supybot.cfg.bak'
      '';
 
+     startLimitIntervalSec = 5 * 60; # 5 min
+     startLimitBurst = 1;
      serviceConfig = {
        ExecStart = "${pyEnv}/bin/supybot ${cfg.stateDir}/supybot.cfg";
        PIDFile = "/run/supybot.pid";
@@ -110,8 +112,6 @@ in
        Group = "supybot";
        UMask = "0007";
        Restart = "on-abort";
-       StartLimitInterval = "5m";
-       StartLimitBurst = "1";
 
        NoNewPrivileges = true;
        PrivateDevices = true;
@@ -25,10 +25,7 @@ in {
      wants = [ "network-pre.target" ];
      wantedBy = [ "multi-user.target" ];
 
-     unitConfig = {
-       StartLimitIntervalSec = 0;
-       StartLimitBurst = 0;
-     };
+     startLimitIntervalSec = 0;
 
      serviceConfig = {
        ExecStart =
@@ -131,6 +131,8 @@ in
 
      restartIfChanged = false; # do not restart on "nixos-rebuild switch". It would seal the storage and disrupt the clients.
 
+     startLimitIntervalSec = 60;
+     startLimitBurst = 3;
      serviceConfig = {
        User = "vault";
        Group = "vault";
@@ -145,8 +147,6 @@ in
        KillSignal = "SIGINT";
        TimeoutStopSec = "30s";
        Restart = "on-failure";
-       StartLimitInterval = "60s";
-       StartLimitBurst = 3;
      };
 
      unitConfig.RequiresMountsFor = optional (cfg.storagePath != null) cfg.storagePath;
@@ -224,6 +224,8 @@ in
        chmod -R u+w ${dataDir}/${wikiIdent}/underlay
      '';
 
+     startLimitIntervalSec = 30;
+
      serviceConfig = {
        User = user;
        Group = group;
@@ -237,7 +239,6 @@ in
 
        Restart = "on-failure";
        RestartSec = "2s";
-       StartLimitIntervalSec = "30s";
 
        StateDirectory = "moin/${wikiIdent}";
        StateDirectoryMode = "0750";
@@ -101,6 +101,8 @@ in {
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ]; # systemd-networkd-wait-online.service
      wantedBy = [ "multi-user.target" ];
+     startLimitIntervalSec = 14400;
+     startLimitBurst = 10;
      serviceConfig = {
        ExecStart = "${cfg.package}/bin/caddy run --config ${configJSON}";
        ExecReload = "${cfg.package}/bin/caddy reload --config ${configJSON}";
@@ -108,8 +110,6 @@ in {
        User = "caddy";
        Group = "caddy";
        Restart = "on-abnormal";
-       StartLimitIntervalSec = 14400;
-       StartLimitBurst = 10;
        AmbientCapabilities = "cap_net_bind_service";
        CapabilityBoundingSet = "cap_net_bind_service";
        NoNewPrivileges = true;
@@ -693,6 +693,8 @@ in
        ${cfg.preStart}
        ${execCommand} -t
      '';
+
+     startLimitIntervalSec = 60;
      serviceConfig = {
        ExecStart = execCommand;
        ExecReload = [
@@ -701,7 +703,6 @@ in
        ];
        Restart = "always";
        RestartSec = "10s";
-       StartLimitInterval = "1min";
        # User and group
        User = cfg.user;
        Group = cfg.group;
@@ -136,6 +136,8 @@ in {
      description = "Traefik web server";
      after = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];
+     startLimitIntervalSec = 86400;
+     startLimitBurst = 5;
      serviceConfig = {
        ExecStart =
          "${cfg.package}/bin/traefik --configfile=${staticConfigFile}";
@@ -143,8 +145,6 @@ in {
        User = "traefik";
        Group = cfg.group;
        Restart = "on-failure";
-       StartLimitInterval = 86400;
-       StartLimitBurst = 5;
        AmbientCapabilities = "cap_net_bind_service";
        CapabilityBoundingSet = "cap_net_bind_service";
        NoNewPrivileges = true;
@@ -678,14 +678,14 @@ in
 
      script = "${cfg.displayManager.job.execCmd}";
 
+     # Stop restarting if the display manager stops (crashes) 2 times
+     # in one minute. Starting X typically takes 3-4s.
+     startLimitIntervalSec = 30;
+     startLimitBurst = 3;
      serviceConfig = {
        Restart = "always";
        RestartSec = "200ms";
        SyslogIdentifier = "display-manager";
-       # Stop restarting if the display manager stops (crashes) 2 times
-       # in one minute. Starting X typically takes 3-4s.
-       StartLimitInterval = "30s";
-       StartLimitBurst = "3";
      };
    };
 
@@ -210,12 +210,21 @@ in rec {
        '';
      };
 
+     startLimitBurst = mkOption {
+       type = types.int;
+       description = ''
+         Configure unit start rate limiting. Units which are started
+         more than startLimitBurst times within an interval time
+         interval are not permitted to start any more.
+       '';
+     };
+
      startLimitIntervalSec = mkOption {
        type = types.int;
        description = ''
          Configure unit start rate limiting. Units which are started
-         more than burst times within an interval time interval are
-         not permitted to start any more.
+         more than startLimitBurst times within an interval time
+         interval are not permitted to start any more.
        '';
      };
 
@@ -245,8 +254,7 @@ in rec {
      serviceConfig = mkOption {
        default = {};
        example =
-         { StartLimitInterval = 10;
-           RestartSec = 5;
+         { RestartSec = 5;
          };
        type = types.addCheck (types.attrsOf unitOption) checkService;
        description = ''
@@ -243,6 +243,8 @@ let
            OnFailure = toString config.onFailure; }
          // optionalAttrs (options.startLimitIntervalSec.isDefined) {
            StartLimitIntervalSec = toString config.startLimitIntervalSec;
+         } // optionalAttrs (options.startLimitBurst.isDefined) {
+           StartLimitBurst = toString config.startLimitBurst;
          };
      };
    };
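Taken together, the intended effect is that the generated unit files now carry these keys in the [Unit] section instead of [Service]. A rough, abridged sketch of what the caddy unit from the hunk above should now look like (paths and unrelated keys omitted; this text is illustrative, not taken from the commit):

[Unit]
StartLimitIntervalSec=14400
StartLimitBurst=10

[Service]
Restart=on-abnormal
# Other [Service] keys unchanged; there are no StartLimit* keys here any more,
# so the "Unknown key name 'StartLimitIntervalSec' in section 'Service'"
# warning quoted in the commit message goes away.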