Commit to a fork of pytorch/builder, showing 4 changed files with 304 additions and 0 deletions.
The first new file is a Windows batch script that builds the PyTorch conda package on Windows:

@@ -0,0 +1,92 @@
@echo off

set TH_BINARY_BUILD=1
set PYTORCH_BUILD_VERSION=%PKG_VERSION%
set PYTORCH_BUILD_NUMBER=%PKG_BUILDNUM%

if "%NO_CUDA%" == "" (
    set build_with_cuda=1
    REM Insert a dot before the last digit, e.g. CUDA_VERSION=92 -> 9.2, CUDA_VERSION=100 -> 10.0
    set desired_cuda=%CUDA_VERSION:~0,-1%.%CUDA_VERSION:~-1,1%
) else (
    set build_with_cuda=
    set USE_CUDA=0
)

if "%build_with_cuda%" == "" goto cuda_flags_end

set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%desired_cuda%
set CUDA_BIN_PATH=%CUDA_PATH%\bin
set TORCH_CUDA_ARCH_LIST=3.5;5.0+PTX
if "%desired_cuda%" == "8.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1
if "%desired_cuda%" == "9.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;7.0
if "%desired_cuda%" == "9.2" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1;7.0
if "%desired_cuda%" == "10.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1;7.0;7.5
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all

:cuda_flags_end

set DISTUTILS_USE_SDK=1

REM Fetch MKL headers and import libraries and point the build at them.
curl https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z -k -O
7z x -aoa mkl_2018.2.185.7z -omkl
set CMAKE_INCLUDE_PATH=%SRC_DIR%\mkl\include
set LIB=%SRC_DIR%\mkl\lib;%LIB%

IF "%USE_SCCACHE%" == "1" (
    mkdir %SRC_DIR%\tmp_bin
    curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %SRC_DIR%\tmp_bin\sccache.exe
    REM nvcc.exe here is a renamed copy of sccache.exe, placed first on PATH so that
    REM nvcc invocations can be routed through the compiler cache.
    copy %SRC_DIR%\tmp_bin\sccache.exe %SRC_DIR%\tmp_bin\nvcc.exe
    set "PATH=%SRC_DIR%\tmp_bin;%PATH%"
)

IF "%build_with_cuda%" == "" goto cuda_end

IF "%desired_cuda%" == "8.0" (
    set MAGMA_VERSION=2.4.0
) ELSE (
    set MAGMA_VERSION=2.5.0
)

curl https://s3.amazonaws.com/ossci-windows/magma_%MAGMA_VERSION%_cuda%CUDA_VERSION%_release.7z -k -O
7z x -aoa magma_%MAGMA_VERSION%_cuda%CUDA_VERSION%_release.7z -omagma_cuda%CUDA_VERSION%_release
set MAGMA_HOME=%cd%\magma_cuda%CUDA_VERSION%_release

IF "%USE_SCCACHE%" == "1" (
    set CUDA_NVCC_EXECUTABLE=%SRC_DIR%\tmp_bin\nvcc
)

set "PATH=%CUDA_BIN_PATH%;%PATH%"

if "%CUDA_VERSION%" == "80" (
    REM Only needed when using Ninja with CUDA 8
    set "CUDAHOSTCXX=%VS140COMNTOOLS%\..\..\VC\bin\amd64\cl.exe"
)

:cuda_end

set CMAKE_GENERATOR=Ninja

IF NOT "%USE_SCCACHE%" == "1" goto sccache_end

sccache --stop-server
sccache --start-server
sccache --zero-stats

set CC=sccache cl
set CXX=sccache cl

:sccache_end

python setup.py install
if errorlevel 1 exit /b 1

IF "%USE_SCCACHE%" == "1" (
    taskkill /im sccache.exe /f /t || ver > nul
    taskkill /im nvcc.exe /f /t || ver > nul
)

if NOT "%build_with_cuda%" == "" (
    REM Bundle the cuDNN DLL next to the installed torch libraries.
    copy "%CUDA_BIN_PATH%\cudnn64_%CUDNN_VERSION%.dll*" %SP_DIR%\torch\lib
)

exit /b 0
The second new file is a bash build script used for the Linux and macOS conda packages:

@@ -0,0 +1,124 @@
#!/usr/bin/env bash
set -ex

export CMAKE_LIBRARY_PATH=$PREFIX/lib:$PREFIX/include:$CMAKE_LIBRARY_PATH
export CMAKE_PREFIX_PATH=$PREFIX
export TH_BINARY_BUILD=1 # links CPU BLAS libraries thrice in a row (was needed for some MKL static linkage)
export PYTORCH_BUILD_VERSION=$PKG_VERSION
export PYTORCH_BUILD_NUMBER=$PKG_BUILDNUM

# Why do we disable Ninja when ninja is included in the meta.yaml? Using ninja
# in the conda builds leads to a system python2.7 library being called, which
# causes ascii decode errors when building third_party/onnx. Is the ninja in
# this conda env being picked up? We still need ninja in the meta.yaml for
# cpp_tests, I believe. TODO: figure out what is going on here and fix it; it
# would be nice to use ninja for the conda binaries as well.
export USE_NINJA=OFF

# The macOS build is simple and never uses CUDA.
if [[ "$OSTYPE" == "darwin"* ]]; then
    MACOSX_DEPLOYMENT_TARGET=10.9 \
    CXX=clang++ \
    CC=clang \
    python setup.py install
    exit 0
fi

if [[ -z "$NO_CUDA" || "$NO_CUDA" == 0 ]]; then
    build_with_cuda=1
fi
if [[ -n "$build_with_cuda" ]]; then
    export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX"
    if [[ $CUDA_VERSION == 8.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1"
    elif [[ $CUDA_VERSION == 9.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;7.0"
    elif [[ $CUDA_VERSION == 9.2* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0"
    elif [[ $CUDA_VERSION == 10.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0;7.5"
    fi
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
    export NCCL_ROOT_DIR=/usr/local/cuda
    export USE_STATIC_CUDNN=1 # links cudnn statically (driven by tools/setup_helpers/cudnn.py)
    export USE_STATIC_NCCL=1  # links nccl statically (driven by tools/setup_helpers/nccl.py and NCCL cmake files such as FindNCCL.cmake and gloo/FindNCCL.cmake)

    # Not needed if using conda's cudatoolkit package. Uncomment to statically
    # link a new CUDA version that is not available in conda yet.
    # export ATEN_STATIC_CUDA=1      # links ATen / libcaffe2_gpu.so with static CUDA libs, also sets up special cufft linkage
    # export USE_CUDA_STATIC_LINK=1  # links libcaffe2_gpu.so with static CUDA libs; these two flags can likely be de-duplicated
fi

# Print the destination name for a library tagged with the first 8 hex characters
# of its sha256 (libnvrtc-builtins.so is special-cased and returned unchanged).
fname_with_sha256() {
    HASH=$(sha256sum $1 | cut -c1-8)
    DIRNAME=$(dirname $1)
    BASENAME=$(basename $1)
    if [[ $BASENAME == "libnvrtc-builtins.so" ]]; then
        echo $1
    else
        INITNAME=$(echo $BASENAME | cut -f1 -d".")
        ENDNAME=$(echo $BASENAME | cut -f 2- -d".")
        echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME"
    fi
}
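
# For illustration only (the path and hash below are hypothetical, not produced by
# this script): given /usr/local/cuda/lib64/libnvrtc.so.9.0, fname_with_sha256 would
# print something like /usr/local/cuda/lib64/libnvrtc-1a2b3c4d.so.9.0.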

DEPS_LIST=()
# Not needed if using conda's cudatoolkit package. Uncomment to statically link
# a new CUDA version that is not available in conda yet.
# if [[ -n "$build_with_cuda" ]]; then
#     cuda_majmin="$(echo $CUDA_VERSION | cut -f1,2 -d'.')"
#     DEPS_LIST+=("/usr/local/cuda/lib64/libcudart.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc-builtins.so")
# fi

# install
python setup.py install

# copy the needed dependent .so files over and tag them with their hash
patched=()
for filepath in "${DEPS_LIST[@]}"; do
    filename=$(basename $filepath)
    destpath=$SP_DIR/torch/lib/$filename
    cp $filepath $destpath

    patchedpath=$(fname_with_sha256 $destpath)
    patchedname=$(basename $patchedpath)
    if [[ "$destpath" != "$patchedpath" ]]; then
        mv $destpath $patchedpath
    fi

    patched+=("$patchedname")
    echo "Copied $filepath to $patchedpath"
done

# run patchelf to fix the .so names to the hashed names
for ((i=0;i<${#DEPS_LIST[@]};++i)); do
    find $SP_DIR/torch -name '*.so*' | while read sofile; do
        origname="$(basename ${DEPS_LIST[i]})"
        patchedname=${patched[i]}
        set +e
        patchelf --print-needed $sofile | grep $origname > /dev/null 2>&1
        ERRCODE=$?
        set -e
        if [ "$ERRCODE" -eq "0" ]; then
            echo "patching $sofile entry $origname to $patchedname"
            patchelf --replace-needed $origname $patchedname $sofile
        fi
    done
done

# set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib and conda/lib
find $SP_DIR/torch -maxdepth 1 -name "*.so*" -type f | while read sofile; do
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..' $sofile
    patchelf --print-rpath $sofile
done

# set RPATH of lib/ files to $ORIGIN and conda/lib
find $SP_DIR/torch/lib -maxdepth 1 -name "*.so*" -type f | while read sofile; do
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/../../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/../../../..' $sofile
    patchelf --print-rpath $sofile
done
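
The hash-tagging and patchelf steps above are easier to see on a single concrete library. The following is a minimal illustrative sketch, not part of the committed script; the directory, the dependency path, and the hash shown in the comments are hypothetical.

#!/usr/bin/env bash
# Illustrative sketch only: bundle one hypothetical dependency the same way the script above does.
set -ex

TORCH_LIB_DIR=/tmp/demo/torch/lib              # stand-in for $SP_DIR/torch/lib
DEP=/usr/local/cuda/lib64/libnvrtc.so.9.0      # hypothetical dependency to bundle

mkdir -p "$TORCH_LIB_DIR"
cp "$DEP" "$TORCH_LIB_DIR/"

# Tag the copy with the first 8 hex chars of its sha256, keeping the extension chain,
# e.g. libnvrtc.so.9.0 -> libnvrtc-1a2b3c4d.so.9.0 (hash made up here).
base=$(basename "$DEP")
hash=$(sha256sum "$TORCH_LIB_DIR/$base" | cut -c1-8)
hashed="${base%%.*}-$hash.${base#*.}"
mv "$TORCH_LIB_DIR/$base" "$TORCH_LIB_DIR/$hashed"

# Point every bundled ELF that referenced the original name at the hashed copy,
# then make each library look next to itself first at load time.
for sofile in "$TORCH_LIB_DIR"/*.so*; do
    if patchelf --print-needed "$sofile" | grep -q "$base"; then
        patchelf --replace-needed "$base" "$hashed" "$sofile"
    fi
    patchelf --set-rpath '$ORIGIN' "$sofile"
done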

The third new file is a conda-build variant configuration (a conda_build_config.yaml):

@@ -0,0 +1,24 @@
blas_impl:
  - mkl        # [x86_64]
c_compiler:
  - vs2017     # [win]
cxx_compiler:
  - vs2017     # [win]
python:
  - 3.5
  - 3.6
# This differs from target_platform in that it determines what subdir the compiler
# will target, not what subdir the compiler package itself is built for.
# For example, we need a win-64 vs2008_win-32 package, so that we can compile
# win-32 code on a win-64 miniconda.
cross_compiler_target_platform:
  - win-64     # [win]
target_platform:
  - win-64     # [win]
vc:
  - 14
zip_keys:
  -                  # [win]
    - vc             # [win]
    - c_compiler     # [win]
    - cxx_compiler   # [win]
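
Because this variant configuration lists two Python versions, conda-build renders and builds one package per version by default. A hedged sketch of selecting a single variant from the command line; the recipe path is hypothetical:

# Build only the Python 3.6 variant of a recipe living in ./pytorch-recipe (hypothetical path).
conda build --python 3.6 ./pytorch-recipe

# Or inspect the rendered recipe for each variant without building anything.
conda render ./pytorch-recipe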

The fourth new file is the conda recipe itself (a meta.yaml):

@@ -0,0 +1,64 @@
package:
  name: pytorch{{ environ.get('PYTORCH_PACKAGE_SUFFIX') }}
  version: "{{ environ.get('PYTORCH_BUILD_VERSION') }}"

source:
  path: "{{ environ.get('PYTORCH_GITHUB_ROOT_DIR') }}"

requirements:
  build:
    - cmake
    - {{ compiler('c') }}  # [win]

  host:
    - python
    - numpy 1.11.*
    - setuptools
    - pyyaml
    - cffi
    - mkl >=2018
    - mkl-include
    - typing
    - ninja
    {{ environ.get('MAGMA_PACKAGE') }}

  run:
    - python
    - numpy >=1.11
    - mkl >=2018
    - cffi
    - ninja
    {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}

build:
  number: {{ environ.get('PYTORCH_BUILD_NUMBER') }}
  detect_binary_files_with_prefix: False
  string: "{{ environ.get('PYTORCH_BUILD_STRING') }}"
  script_env:
    - CUDA_VERSION
    - CUDNN_VERSION
    - CONDA_CUDATOOLKIT_CONSTRAINT
    - NO_CUDA
    - CMAKE_ARGS
    - EXTRA_CAFFE2_CMAKE_FLAGS
    - DEVELOPER_DIR
    - DEBUG
    - NO_FBGEMM
    - USE_SCCACHE  # [win]

test:
  imports:
    - torch
  source_files:
    - test
  commands:
    - OMP_NUM_THREADS=4 python ./test/run_test.py || true  # [not win]
    - python ./test/run_test.py  # [win]

about:
  home: http://pytorch.org/
  license: BSD 3-Clause
  license_family: BSD
  license_file: LICENSE
  summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
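
Most of the recipe's configuration arrives through environment variables, either via the Jinja environ.get(...) calls or via the script_env list that forwards variables into the build scripts. A minimal sketch of driving a build this way; every value and the recipe path below is illustrative only, not taken from this commit:

# Hypothetical values, for illustration only.
export PYTORCH_PACKAGE_SUFFIX=""
export PYTORCH_BUILD_VERSION=1.0.0
export PYTORCH_BUILD_NUMBER=1
export PYTORCH_BUILD_STRING=py3.6_cuda10.0_cudnn7_1
export PYTORCH_GITHUB_ROOT_DIR=/path/to/pytorch              # used as the source: path
export MAGMA_PACKAGE="    - magma-cuda100"                   # spliced into the host requirements
export CONDA_CUDATOOLKIT_CONSTRAINT="    - cudatoolkit >=10.0,<10.1"  # spliced into the run requirements
export CUDA_VERSION=100
export CUDNN_VERSION=7
export NO_CUDA=

conda build --python 3.6 ./pytorch-recipe                    # hypothetical recipe directory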