diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
index 0d3574a2bac0..7784f3170cef 100644
--- a/nixos/modules/services/misc/ollama.nix
+++ b/nixos/modules/services/misc/ollama.nix
@@ -1,11 +1,13 @@
 { config, lib, pkgs, ... }:
 let
-  inherit (lib.types) nullOr enum;
+  inherit (lib) types;
 
   cfg = config.services.ollama;
   ollamaPackage = cfg.package.override {
     inherit (cfg) acceleration;
-    linuxPackages.nvidia_x11 = config.hardware.nvidia.package;
+    linuxPackages = config.boot.kernelPackages.overrideAttrs {
+      nvidia_x11 = config.hardware.nvidia.package;
+    };
   };
 in
 {
@@ -15,14 +17,14 @@ in
       lib.mdDoc "Server for local large language models"
     );
     listenAddress = lib.mkOption {
-      type = lib.types.str;
+      type = types.str;
       default = "127.0.0.1:11434";
       description = lib.mdDoc ''
         Specifies the bind address on which the ollama server HTTP interface listens.
       '';
     };
     acceleration = lib.mkOption {
-      type = nullOr (enum [ "rocm" "cuda" ]);
+      type = types.nullOr (types.enum [ "rocm" "cuda" ]);
       default = null;
       example = "rocm";
       description = lib.mdDoc ''
diff --git a/pkgs/tools/misc/ollama/default.nix b/pkgs/tools/misc/ollama/default.nix
index 32b7f5fcbfc3..7a0fea0c116b 100644
--- a/pkgs/tools/misc/ollama/default.nix
+++ b/pkgs/tools/misc/ollama/default.nix
@@ -24,7 +24,14 @@
 
 let
   pname = "ollama";
-  version = "0.1.26";
+  version = "0.1.27";
+  src = fetchFromGitHub {
+    owner = "jmorganca";
+    repo = "ollama";
+    rev = "v${version}";
+    hash = "sha256-+ayby+yVknFHLTyLjMAPMnOTMSzTKqzi9caN/TppcEg=";
+    fetchSubmodules = true;
+  };
 
   validAccel = lib.assertOneOf "ollama.acceleration" acceleration [ null "rocm" "cuda" ];
 
@@ -74,14 +81,6 @@ let
       buildGoModule.override { stdenv = overrideCC stdenv gcc12; }
     else
       buildGoModule;
-
-  src = fetchFromGitHub {
-    owner = "jmorganca";
-    repo = "ollama";
-    rev = "v${version}";
-    hash = "sha256-Kw3tt9ayEMgI2V6OeaOkWfNwqfCL7MDD/nN5iXk5LnY=";
-    fetchSubmodules = true;
-  };
   preparePatch = patch: hash: fetchpatch {
     url = "file://${src}/llm/patches/${patch}";
     inherit hash;