diff --git a/nixos/modules/profiles/qemu-guest.nix b/nixos/modules/profiles/qemu-guest.nix index d4335edfcf2d..8b3df97ae0db 100644 --- a/nixos/modules/profiles/qemu-guest.nix +++ b/nixos/modules/profiles/qemu-guest.nix @@ -1,13 +1,13 @@ # Common configuration for virtual machines running under QEMU (using # virtio). -{ ... }: +{ config, lib, ... }: { boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ]; boot.initrd.kernelModules = [ "virtio_balloon" "virtio_console" "virtio_rng" ]; - boot.initrd.postDeviceCommands = + boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) '' # Set the system time from the hardware clock to work around a # bug in qemu-kvm > 1.5.2 (where the VM clock is initialised diff --git a/nixos/modules/testing/test-instrumentation.nix b/nixos/modules/testing/test-instrumentation.nix index 01447e6ada87..81541477b9e0 100644 --- a/nixos/modules/testing/test-instrumentation.nix +++ b/nixos/modules/testing/test-instrumentation.nix @@ -65,33 +65,26 @@ in }; }; - boot.initrd.preDeviceCommands = - '' - echo 600 > /proc/sys/kernel/hung_task_timeout_secs - ''; + boot.kernel.sysctl = { + "kernel.hung_task_timeout_secs" = 600; + # Panic on out-of-memory conditions rather than letting the + # OOM killer randomly get rid of processes, since this leads + # to failures that are hard to diagnose. + "vm.panic_on_oom" = lib.mkDefault 2; + }; - boot.initrd.postDeviceCommands = - '' - # Using acpi_pm as a clock source causes the guest clock to - # slow down under high host load. This is usually a bad - # thing, but for VM tests it should provide a bit more - # determinism (e.g. if the VM runs at lower speed, then - # timeouts in the VM should also be delayed). 
- echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource - ''; - - boot.postBootCommands = - '' - # Panic on out-of-memory conditions rather than letting the - # OOM killer randomly get rid of processes, since this leads - # to failures that are hard to diagnose. - echo 2 > /proc/sys/vm/panic_on_oom - ''; - - # Panic if an error occurs in stage 1 (rather than waiting for - # user intervention). - boot.kernelParams = - [ "console=${qemu-common.qemuSerialDevice}" "panic=1" "boot.panic_on_fail" ]; + boot.kernelParams = [ + "console=${qemu-common.qemuSerialDevice}" + # Panic if an error occurs in stage 1 (rather than waiting for + # user intervention). + "panic=1" "boot.panic_on_fail" + # Using acpi_pm as a clock source causes the guest clock to + # slow down under high host load. This is usually a bad + # thing, but for VM tests it should provide a bit more + # determinism (e.g. if the VM runs at lower speed, then + # timeouts in the VM should also be delayed). + "clocksource=acpi_pm" + ]; # `xwininfo' is used by the test driver to query open windows. environment.systemPackages = [ pkgs.xorg.xwininfo ]; diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix index b1c5a7a6c95f..f622897aa620 100644 --- a/nixos/modules/virtualisation/qemu-vm.nix +++ b/nixos/modules/virtualisation/qemu-vm.nix @@ -754,13 +754,13 @@ in ); boot.loader.grub.gfxmodeBios = with cfg.resolution; "${toString x}x${toString y}"; - boot.initrd.extraUtilsCommands = + boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) '' # We need mke2fs in the initrd. copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs ''; - boot.initrd.postDeviceCommands = + boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) '' # If the disk image appears to be empty, run mke2fs to # initialise. 
@@ -770,7 +770,7 @@ in fi ''; - boot.initrd.postMountCommands = + boot.initrd.postMountCommands = lib.mkIf (!config.boot.initrd.systemd.enable) '' # Mark this as a NixOS machine. mkdir -p $targetRoot/etc @@ -789,6 +789,11 @@ in ''} ''; + systemd.tmpfiles.rules = lib.mkIf config.boot.initrd.systemd.enable [ + "f /etc/NIXOS 0644 root root -" + "d /boot 0755 root root -" + ]; + # After booting, register the closure of the paths in # `virtualisation.additionalPaths' in the Nix database in the VM. This # allows Nix operations to work in the VM. The path to the