Merge pull request #178397 from aidalgol/tensorrt

commit 12a7360426
6 changed files with 211 additions and 3 deletions

pkgs/development/libraries/science/math/cudnn/extension.nix

@@ -85,7 +85,7 @@ final: prev: let
   rec {
     fileVersion = "10.2";
     fullVersion = "8.3.2.44";
-    hash = "sha256-mKh4TpKGLyABjSDCgbMNSgzZUfk2lPZDPM9K6cUCumo=";
+    hash = "sha256-1vVu+cqM+PketzIQumw9ykm6REbBZhv6/lXB7EC2aaw=";
     url = "${urlPrefix}/v${majorMinorPatch fullVersion}/local_installers/${fileVersion}/cudnn-linux-x86_64-${fullVersion}_cuda${fileVersion}-archive.tar.xz";
     supportedCudaVersions = [ "10.2" ];
   }

pkgs/development/libraries/science/math/tensorrt/extension.nix (new file, 63 lines)

@@ -0,0 +1,63 @@
final: prev: let

  inherit (final) callPackage;
  inherit (prev) cudatoolkit cudaVersion lib pkgs;

  ### TensorRT

  buildTensorRTPackage = args:
    callPackage ./generic.nix { } args;

  toUnderscore = str: lib.replaceStrings ["."] ["_"] str;

  majorMinorPatch = str: lib.concatStringsSep "." (lib.take 3 (lib.splitVersion str));

  tensorRTPackages = with lib; let
    # Check whether a file is supported for our cuda version
    isSupported = fileData: elem cudaVersion fileData.supportedCudaVersions;
    # Return the first file that is supported. In practice there should only ever be one anyway.
    supportedFile = files: findFirst isSupported null files;
    # Supported versions, with versions as keys and the matching file as value
    supportedVersions = filterAttrs (version: file: file != null) (mapAttrs (version: files: supportedFile files) tensorRTVersions);
    # Compute versioned attribute name to be used in this package set
    computeName = version: "tensorrt_${toUnderscore version}";
    # Add all supported builds as attributes
    allBuilds = mapAttrs' (version: file: nameValuePair (computeName version) (buildTensorRTPackage (removeAttrs file ["fileVersionCuda"]))) supportedVersions;
    # Set the default attributes, e.g. tensorrt = tensorrt_8_4;
    defaultBuild = { "tensorrt" = allBuilds.${computeName tensorRTDefaultVersion}; };
  in allBuilds // defaultBuild;

  tensorRTVersions = {
    "8.4.0" = [
      rec {
        fileVersionCuda = "11.6";
        fileVersionCudnn = "8.3";
        fullVersion = "8.4.0.6";
        sha256 = "sha256-DNgHHXF/G4cK2nnOWImrPXAkOcNW6Wy+8j0LRpAH/LQ=";
        tarball = "TensorRT-${fullVersion}.Linux.x86_64-gnu.cuda-${fileVersionCuda}.cudnn${fileVersionCudnn}.tar.gz";
        supportedCudaVersions = [ "11.0" "11.1" "11.2" "11.3" "11.4" "11.5" "11.6" ];
      }
      rec {
        fileVersionCuda = "10.2";
        fileVersionCudnn = "8.3";
        fullVersion = "8.4.0.6";
        sha256 = "sha256-aCzH0ZI6BrJ0v+e5Bnm7b8mNltA7NNuIa8qRKzAQv+I=";
        tarball = "TensorRT-${fullVersion}.Linux.x86_64-gnu.cuda-${fileVersionCuda}.cudnn${fileVersionCudnn}.tar.gz";
        supportedCudaVersions = [ "10.2" ];
      }
    ];
  };

  # Default version per CUDA version
  tensorRTDefaultVersion = {
    "10.2" = "8.4.0";
    "11.0" = "8.4.0";
    "11.1" = "8.4.0";
    "11.2" = "8.4.0";
    "11.3" = "8.4.0";
    "11.4" = "8.4.0";
    "11.5" = "8.4.0";
    "11.6" = "8.4.0";
  }.${cudaVersion};

in tensorRTPackages
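The overlay's naming helpers are the only mildly clever part here. A minimal, runnable sketch of what they produce (evaluate with nix-instantiate --eval --strict; the three definitions are copied verbatim from the overlay above, everything else is illustrative):

# Sketch only: the attribute name and short-version strings derived
# for the 8.4.0 entry used by this overlay.
let
  lib = import <nixpkgs/lib>;
  toUnderscore = str: lib.replaceStrings ["."] ["_"] str;
  majorMinorPatch = str: lib.concatStringsSep "." (lib.take 3 (lib.splitVersion str));
  computeName = version: "tensorrt_${toUnderscore version}";
in {
  attrName = computeName "8.4.0";            # "tensorrt_8_4_0"
  shortVersion = majorMinorPatch "8.4.0.6";  # "8.4.0"
}

So for cudaVersion = "11.6" the package set gains tensorrt_8_4_0 plus the tensorrt alias that defaultBuild points at the default version.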

pkgs/development/libraries/science/math/tensorrt/generic.nix (new file, 90 lines)
@@ -0,0 +1,90 @@
{ lib
, stdenv
, requireFile
, autoPatchelfHook
, autoAddOpenGLRunpathHook
, cudaVersion
, cudatoolkit
, cudnn
}:

{ fullVersion
, fileVersionCudnn
, tarball
, sha256
, supportedCudaVersions ? [ ]
}:

assert lib.assertMsg (lib.strings.versionAtLeast cudnn.version fileVersionCudnn)
  "This version of TensorRT requires at least cuDNN ${fileVersionCudnn} (current version is ${cudnn.version})";

stdenv.mkDerivation rec {
  pname = "cudatoolkit-${cudatoolkit.majorVersion}-tensorrt";
  version = fullVersion;
  src = requireFile rec {
    name = tarball;
    inherit sha256;
    message = ''
      To use the TensorRT derivation, you must join the NVIDIA Developer Program and
      download the ${version} Linux x86_64 TAR package for CUDA ${cudaVersion} from
      ${meta.homepage}.

      Once you have downloaded the file, add it to the store with the following
      command, and try building this derivation again.

      $ nix-store --add-fixed sha256 ${name}
    '';
  };

  outputs = [ "out" "dev" ];

  nativeBuildInputs = [
    autoPatchelfHook
    autoAddOpenGLRunpathHook
  ];

  # Used by autoPatchelfHook
  buildInputs = [
    cudatoolkit.cc.cc.lib # libstdc++
    cudatoolkit
    cudnn
  ];

  sourceRoot = "TensorRT-${version}";

  installPhase = ''
    install --directory "$dev" "$out"
    mv include "$dev"
    mv targets/x86_64-linux-gnu/lib "$out"
    install -D --target-directory="$out/bin" targets/x86_64-linux-gnu/bin/trtexec
  '';

  # Tell autoPatchelf about runtime dependencies.
  # (postFixup phase is run before autoPatchelfHook.)
  postFixup =
    let
      mostOfVersion = builtins.concatStringsSep "."
        (lib.take 3 (lib.versions.splitVersion version));
    in
    ''
      echo 'Patching RPATH of libnvinfer libs'
      patchelf --debug --add-needed libnvinfer.so \
        "$out/lib/libnvinfer.so.${mostOfVersion}" \
        "$out/lib/libnvinfer_plugin.so.${mostOfVersion}" \
        "$out/lib/libnvinfer_builder_resource.so.${mostOfVersion}"
    '';

  meta = with lib; {
    # Check that the cudatoolkit version satisfies our min/max constraints (both
    # inclusive). We mark the package as broken if it fails to satisfy the
    # official version constraints (as recorded in default.nix). In some cases
    # you _may_ be able to smudge version constraints, just know that you're
    # embarking into unknown and unsupported territory when doing so.
    broken = !(elem cudaVersion supportedCudaVersions);
    description = "TensorRT: a high-performance deep learning inference library";
    homepage = "https://developer.nvidia.com/tensorrt";
    license = licenses.unfree;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ aidalgol ];
  };
}
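Two small computations in this file carry most of the logic: the assert enforces a cuDNN version floor, and mostOfVersion turns the full TensorRT version into the soname suffix that the postFixup patchelf call targets. A minimal, runnable sketch with values taken from the 8.4.0.6 and cuDNN 8.3.2.44 entries above (everything else is illustrative):

# Sketch only: mirrors the assert and the mostOfVersion let-binding above.
let
  lib = import <nixpkgs/lib>;
  version = "8.4.0.6";
  mostOfVersion = builtins.concatStringsSep "."
    (lib.take 3 (lib.versions.splitVersion version));
in {
  cudnnFloorSatisfied = lib.strings.versionAtLeast "8.3.2.44" "8.3";  # true
  sonameSuffix = mostOfVersion;  # "8.4.0", as in libnvinfer.so.8.4.0
}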

pkgs/development/python-modules/tensorrt/default.nix (new file, 52 lines)
@@ -0,0 +1,52 @@
{ lib
, python
, buildPythonPackage
, autoPatchelfHook
, unzip
, cudaPackages
}:

let
  pyVersion = "${lib.versions.major python.version}${lib.versions.minor python.version}";
in
buildPythonPackage rec {
  pname = "tensorrt";
  version = cudaPackages.tensorrt.version;

  src = cudaPackages.tensorrt.src;

  format = "wheel";
  # We unpack the wheel ourselves because of the odd packaging.
  dontUseWheelUnpack = true;

  nativeBuildInputs = [
    unzip
    autoPatchelfHook
    cudaPackages.autoAddOpenGLRunpathHook
  ];

  preUnpack = ''
    mkdir -p dist
    tar --strip-components=2 -xf "$src" --directory=dist \
      "TensorRT-${version}/python/tensorrt-${version}-cp${pyVersion}-none-linux_x86_64.whl"
  '';

  sourceRoot = ".";

  buildInputs = [
    cudaPackages.cudnn
    cudaPackages.tensorrt
  ];

  pythonImportsCheck = [
    "tensorrt"
  ];

  meta = with lib; {
    description = "Python bindings for TensorRT, a high-performance deep learning inference library";
    homepage = "https://developer.nvidia.com/tensorrt";
    license = licenses.unfree;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ aidalgol ];
  };
}
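The preUnpack step above extracts a single wheel from the TensorRT tarball, so the wheel filename is reconstructed from the interpreter version and the TensorRT version. A runnable sketch of that computation (the Python 3.10.4 value is a hypothetical example, not something fixed by this commit):

# Sketch only: the cpXY tag and wheel name that preUnpack would look for.
let
  lib = import <nixpkgs/lib>;
  pythonVersion = "3.10.4";  # hypothetical interpreter version
  version = "8.4.0.6";       # TensorRT version from the overlay above
  pyVersion = "${lib.versions.major pythonVersion}${lib.versions.minor pythonVersion}";
in
  # evaluates to "tensorrt-8.4.0.6-cp310-none-linux_x86_64.whl"
  "tensorrt-${version}-cp${pyVersion}-none-linux_x86_64.whl"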

pkgs/top-level/cuda-packages.nix

@@ -58,15 +58,16 @@ let

   };

-  composedExtension = composeManyExtensions [
+  composedExtension = composeManyExtensions ([
     extraPackagesExtension
     (import ../development/compilers/cudatoolkit/extension.nix)
     (import ../development/compilers/cudatoolkit/redist/extension.nix)
     (import ../development/compilers/cudatoolkit/redist/overrides.nix)
     (import ../development/libraries/science/math/cudnn/extension.nix)
+    (import ../development/libraries/science/math/tensorrt/extension.nix)
     (import ../test/cuda/cuda-samples/extension.nix)
     (import ../test/cuda/cuda-library-samples/extension.nix)
     cutensorExtension
-  ];
+  ]);

in (scope.overrideScope' composedExtension)
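With the extension folded into composedExtension, the TensorRT attributes land on the same cudaPackages scope as cuDNN and cuTENSOR. A hedged usage sketch, assuming a nixpkgs that contains this change, allowUnfree (TensorRT is unfree), and that the TAR package has already been added to the store as requireFile demands:

# shell.nix sketch, illustrative only
with import <nixpkgs> { config.allowUnfree = true; };
mkShell {
  packages = [
    cudaPackages.tensorrt        # default version for the scope's cudaVersion
    cudaPackages.tensorrt_8_4_0  # explicit versioned attribute from the overlay
  ];
}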

pkgs/top-level/python-packages.nix

@@ -10520,6 +10520,8 @@ in {

   tensorly = callPackage ../development/python-modules/tensorly { };

+  tensorrt = callPackage ../development/python-modules/tensorrt { };
+
   tellduslive = callPackage ../development/python-modules/tellduslive { };

   termcolor = callPackage ../development/python-modules/termcolor { };
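On the Python side, the new entry exposes the binding through the usual python3Packages plumbing. A hedged sketch of pulling it into an environment, under the same assumptions as above (allowUnfree and a pre-added TensorRT tarball):

# Python environment sketch, illustrative only
with import <nixpkgs> { config.allowUnfree = true; };
(python3.withPackages (ps: [ ps.tensorrt ])).env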