{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, pythonAtLeast, isPy38
, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, scipy
, wheel
, opt-einsum
, backports_weakref
, tensorflow-estimator_2
, tensorflow-tensorboard_2
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, zlib
, python
, symlinkJoin
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
, astunparse
, flatbuffers
, h5py
, typing-extensions
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain

assert cudaSupport -> cudatoolkit != null
                   && cudnn != null;

# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);

let
  packages = import ./binary-hashes.nix;
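  # binary-hashes.nix is expected to define `version` plus one fetchurl argument
  # set (url and hash) per "<platform>_py_<pyver>_<cpu|gpu>" key; see the `src`
  # lookup below.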

  variant = if cudaSupport then "-gpu" else "";
  pname = "tensorflow${variant}";
  metadataPatch = ./relax-dependencies-metadata.patch;
  patch = ./relax-dependencies.patch;
in buildPythonPackage {
  inherit pname;
  inherit (packages) version;
  format = "wheel";

  disabled = pythonAtLeast "3.9";
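  # Presumably there is no upstream wheel (and hence no binary-hashes.nix entry)
  # for Python 3.9 in this release; this is an assumption, not verified here.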

  src = let
    pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) python.pythonVersion;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyVerNoDot}_${unit}";
  in fetchurl packages.${key};
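  # For example, Python 3.8 on Linux with cudaSupport = true resolves to
  # packages.linux_py_38_gpu.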

  propagatedBuildInputs = [
    astunparse
    flatbuffers
    typing-extensions
    protobuf
    numpy
    scipy
    termcolor
    grpcio
    six
    astor
    absl-py
    gast
    opt-einsum
    google-pasta
    wrapt
    tensorflow-estimator_2
    tensorflow-tensorboard_2
    keras-applications
    keras-preprocessing
    h5py
  ] ++ lib.optional (!isPy3k) mock
    ++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];

  nativeBuildInputs = [ wheel ] ++ lib.optional cudaSupport addOpenGLRunpath;
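  # addOpenGLRunpath is a setup hook; it provides the addOpenGLRunpath command
  # used in postFixup below, so the patched libraries also search
  # /run/opengl-driver/lib, where the NVIDIA driver (libcuda.so) is found at
  # runtime.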

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist
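
    # Unpack the wheel, relax the overly strict dependency pins in setup.py and
    # *.dist-info/METADATA, then repack it so the patched wheel gets installed.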
    wheel unpack --dest unpacked ./*.whl
    (
      cd unpacked/tensorflow*
      # relax too strict versions in setup.py
      patch -p 1 < ${patch}
      cd *.dist-info
      # relax too strict versions in *.dist-info/METADATA
      patch -p 3 < ${metadataPatch}
    )
    wheel pack ./unpacked/tensorflow*

    popd
  '';

  # Note that we need to run *after* the fixup phase because the
  # libraries are loaded at runtime. If we run in preFixup then
  # patchelf --shrink-rpath will remove the cuda libraries.
  postFixup =
    let
      # rpaths we only need to add if CUDA is enabled.
      cudapaths = lib.optionals cudaSupport [
        cudatoolkit.out
        cudatoolkit.lib
        cudnn
      ];

      libpaths = [
        stdenv.cc.cc.lib
        zlib
      ];

      rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
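      # makeLibraryPath yields a colon-separated list of the packages' lib
      # directories, e.g. "<stdenv.cc.cc.lib>/lib:<zlib>/lib", plus the CUDA
      # toolkit and cuDNN lib dirs when cudaSupport is enabled.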
    in
    lib.optionalString stdenv.isLinux ''
      # This is an array containing all the directories in the tensorflow2
      # package that contain .so files.
      #
      # TODO: Create this list programmatically, and remove paths that aren't
      # actually needed.
      rrPathArr=(
        "$out/${python.sitePackages}/tensorflow/"
        "$out/${python.sitePackages}/tensorflow/core/kernels"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
        "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
        "$out/${python.sitePackages}/tensorflow/python/"
        "$out/${python.sitePackages}/tensorflow/python/framework/"
        "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
        "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
        "$out/${python.sitePackages}/tensorflow/python/framework"
        "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
        "${rpath}"
      )

      # Turn the bash array into a colon-separated list of RPATHs.
      rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
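      # (Setting IFS inside the command substitution keeps the change local to
      # that subshell.)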
      echo "about to run patchelf with the following rpath: $rrPath"

      find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "about to patchelf $lib..."
        chmod a+rx "$lib"
        patchelf --set-rpath "$rrPath" "$lib"
        ${lib.optionalString cudaSupport ''
          addOpenGLRunpath "$lib"
        ''}
      done
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorflow-tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs.
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.keras"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = with lib; {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "http://tensorflow.org";
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar cdepillabout ];
    platforms = [ "x86_64-linux" "x86_64-darwin" ];
  };
}