Commit 37a8606

Author: Raimondas Galvelis

Enable CPU-only builds (#48)
* Add ENABLE_CUDA option (a CPU-only configure sketch follows below)
* Conditionally build SymmetryFunctions
* Fix the conditional build
* Fix the conditional build
* Enable CI with ENABLE_CUDA=OFF
* Fix CI
* Unify the file names
* Fix CI
* Fix the conditional build
* By default enable the CUDA build
1 parent 0be9e61 commit 37a8606
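
As a quick orientation: the diff adds a CMake option ENABLE_CUDA (ON by default) and makes the CUDA sources, CUDA tests, and CUDA code paths conditional on it. A minimal sketch of configuring a CPU-only build, mirroring the CI step below (the Torch_DIR value assumes a conda environment with Python 3.9 and PyTorch installed; adjust for your setup):

# Configure and install a CPU-only build (ENABLE_CUDA defaults to ON).
mkdir build && cd build
cmake .. \
    -DENABLE_CUDA=OFF \
    -DTorch_DIR=$CONDA_PREFIX/lib/python3.9/site-packages/torch/share/cmake/Torch \
    -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
make install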

File tree

4 files changed: +73 -17 lines

.github/workflows/ci.yml (+28 -4)

@@ -13,17 +13,24 @@ jobs:
       matrix:
         include:
           # Oldest supported versions
-          - cuda: 10.2.89
+          - enable_cuda: true
+            cuda: 10.2.89
             gcc: 8.5.0
             nvcc: 10.2
             python: 3.8
             pytorch: 1.8.0
           # Latest supported versions
-          - cuda: 11.2.2
+          - enable_cuda: true
+            cuda: 11.2.2
             gcc: 10.3.0
             nvcc: 11.2
             python: 3.9
             pytorch: 1.10.0
+          # Without CUDA
+          - enable_cuda: false
+            gcc: 10.3.0
+            python: 3.9
+            pytorch: 1.10.0

     steps:
       - name: Check out
@@ -33,6 +40,7 @@ jobs:
         uses: Jimver/cuda-toolkit@v0.2.4
         with:
           cuda: ${{ matrix.cuda }}
+        if: ${{ matrix.enable_cuda }}

       - name: Install Miniconda
         uses: conda-incubator/setup-miniconda@v2
@@ -41,23 +49,39 @@ jobs:
           auto-activate-base: true
           miniforge-variant: Miniforge3

-      - name: Install dependencies
+      - name: Prepare dependencies (with CUDA)
         shell: bash -l {0}
+        if: ${{ matrix.enable_cuda }}
         run: |
           sed -i -e "/cudatoolkit/c\ - cudatoolkit ${{ matrix.cuda }}" \
                  -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
                  -e "/nvcc_linux-64/c\ - nvcc_linux-64 ${{ matrix.nvcc }}" \
                  -e "/python/c\ - python ${{ matrix.python }}" \
                  -e "/pytorch-gpu/c\ - pytorch-gpu ${{ matrix.pytorch }}" \
                  environment.yml
-          conda env create -n nnpops -f environment.yml
+
+      - name: Prepare dependencies (without CUDA)
+        shell: bash -l {0}
+        if: ${{ !matrix.enable_cuda }}
+        run: |
+          sed -i -e "/cudatoolkit/c\ # - cudatoolkit" \
+                 -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
+                 -e "/nvcc_linux-64/c\ # - nvcc_linux-64" \
+                 -e "/python/c\ - python ${{ matrix.python }}" \
+                 -e "/pytorch-gpu/c\ - pytorch-cpu ${{ matrix.pytorch }}" \
+                 environment.yml
+
+      - name: Install dependencies
+        shell: bash -l {0}
+        run: conda env create -n nnpops -f environment.yml

       - name: Configure, compile, and install
         shell: bash -l {0}
         run: |
           conda activate nnpops
           mkdir build && cd build
           cmake .. \
+            -DENABLE_CUDA=${{ matrix.enable_cuda }} \
             -DTorch_DIR=$CONDA_PREFIX/lib/python${{ matrix.python }}/site-packages/torch/share/cmake/Torch \
             -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
           make install
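
The two "Prepare dependencies" steps rewrite environment.yml in place with sed's c\ (change line) command. A minimal sketch of the CPU-only rewrite, using an illustrative stand-in for environment.yml (the real file's contents are not shown in this diff):

# Illustrative environment.yml stand-in (real contents may differ).
cat > environment.yml <<'EOF'
dependencies:
  - cudatoolkit
  - gxx_linux-64
  - nvcc_linux-64
  - python
  - pytorch-gpu
EOF

# 'c\' replaces each matching line wholesale: the CUDA entries are
# commented out and pytorch-gpu is swapped for pytorch-cpu.
sed -i -e "/cudatoolkit/c\ # - cudatoolkit" \
       -e "/gxx_linux-64/c\ - gxx_linux-64 10.3.0" \
       -e "/nvcc_linux-64/c\ # - nvcc_linux-64" \
       -e "/python/c\ - python 3.9" \
       -e "/pytorch-gpu/c\ - pytorch-cpu 1.10.0" \
       environment.yml

cat environment.yml   # inspect the rewritten file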

CMakeLists.txt (+29 -11)

@@ -1,40 +1,58 @@
 cmake_minimum_required(VERSION 3.20 FATAL_ERROR)

+# Configure
 set(NAME NNPOps)
-set(LIBRARY ${NAME}PyTorch)
-project(${NAME} LANGUAGES CXX CUDA)
+project(${NAME} LANGUAGES CXX)
+option(ENABLE_CUDA "Enable building CUDA components" ON)
+if(ENABLE_CUDA)
+    enable_language(CUDA)
+endif(ENABLE_CUDA)

+# Find dependencies
 find_package(Python REQUIRED)
 find_package(PythonLibs REQUIRED)
 find_package(Torch REQUIRED)
 enable_testing()

-add_library(${LIBRARY} SHARED src/ani/CpuANISymmetryFunctions.cpp
-                              src/ani/CudaANISymmetryFunctions.cu
-                              src/pytorch/BatchedNN.cpp
-                              src/pytorch/SymmetryFunctions.cpp
-                              src/schnet/CpuCFConv.cpp
-                              src/schnet/CudaCFConv.cu)
+# Source files of the library
+set(SRC_FILES src/ani/CpuANISymmetryFunctions.cpp
+              src/ani/CudaANISymmetryFunctions.cu
+              src/pytorch/BatchedNN.cpp
+              src/pytorch/SymmetryFunctions.cpp
+              src/schnet/CpuCFConv.cpp
+              src/schnet/CudaCFConv.cu)
+
+# Build the library
+set(LIBRARY ${NAME}PyTorch)
+add_library(${LIBRARY} SHARED ${SRC_FILES})
 target_include_directories(${LIBRARY} PRIVATE src/ani src/schnet)
 target_link_libraries(${LIBRARY} ${TORCH_LIBRARIES} ${PYTHON_LIBRARIES})
+if(ENABLE_CUDA)
+    target_compile_definitions(${LIBRARY} PRIVATE ENABLE_CUDA)
+endif(ENABLE_CUDA)

+# Test of the library
 set(TEST_PATHS src/ani/TestCpuANISymmetryFunctions.cpp
-               src/ani/TestCudaANISymmetryFunctions.cpp
-               src/schnet/TestCpuCFConv.cpp
-               src/schnet/TestCudaCFConv.cu)
+               src/schnet/TestCpuCFConv.cpp)
+if(ENABLE_CUDA)
+    list(APPEND TEST_PATHS src/ani/TestCudaANISymmetryFunctions.cu
+                           src/schnet/TestCudaCFConv.cu)
+endif(ENABLE_CUDA)
 foreach(TEST_PATH ${TEST_PATHS})
     cmake_path(GET TEST_PATH STEM TEST_NAME)
     add_executable(${TEST_NAME} ${TEST_PATH})
     target_link_libraries(${TEST_NAME} ${LIBRARY})
     add_test(${TEST_NAME} ${TEST_NAME})
 endforeach()

+# Tests of PyTorch wrappers
 add_test(TestBatchedNN pytest -v ${CMAKE_SOURCE_DIR}/src/pytorch/TestBatchedNN.py)
 add_test(TestEnergyShifter pytest -v ${CMAKE_SOURCE_DIR}/src/pytorch/TestEnergyShifter.py)
 add_test(TestOptimizedTorchANI pytest -v ${CMAKE_SOURCE_DIR}/src/pytorch/TestOptimizedTorchANI.py)
 add_test(TestSpeciesConverter pytest -v ${CMAKE_SOURCE_DIR}/src/pytorch/TestSpeciesConverter.py)
 add_test(TestSymmetryFunctions pytest -v ${CMAKE_SOURCE_DIR}/src/pytorch/TestSymmetryFunctions.py)

+# Installation
 install(TARGETS ${LIBRARY} DESTINATION ${Python_SITEARCH}/${NAME})
 install(FILES src/pytorch/__init__.py
               src/pytorch/BatchedNN.py
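
Because the tests are registered through enable_testing()/add_test(), a CPU-only build can be validated with CTest; on an ENABLE_CUDA=OFF configure only the CPU test binaries and the pytest wrappers are registered. A minimal sketch, run from the build directory:

# Run all registered tests; the CUDA tests are absent when ENABLE_CUDA=OFF.
cd build
ctest --output-on-failure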

src/pytorch/SymmetryFunctions.cpp (+16 -2)

@@ -23,14 +23,16 @@

 #include <stdexcept>
 #include <torch/script.h>
-#include <c10/cuda/CUDAStream.h>
 #include "CpuANISymmetryFunctions.h"
+#ifdef ENABLE_CUDA
+#include <c10/cuda/CUDAStream.h>
 #include "CudaANISymmetryFunctions.h"

 #define CHECK_CUDA_RESULT(result) \
     if (result != cudaSuccess) { \
         throw std::runtime_error(std::string("Encountered error ")+cudaGetErrorName(result)+" at "+__FILE__+":"+std::to_string(__LINE__));\
     }
+#endif

 namespace NNPOps {
 namespace ANISymmetryFunctions {
@@ -87,17 +89,23 @@ class Holder : public torch::CustomClassHolder {
         const torch::Device& device = tensorOptions.device();
         if (device.is_cpu())
             symFunc = std::make_shared<CpuANISymmetryFunctions>(numAtoms, numSpecies, Rcr, Rca, false, atomSpecies, radialFunctions, angularFunctions, true);
-        if (device.is_cuda()) {
+#ifdef ENABLE_CUDA
+        else if (device.is_cuda()) {
             // PyTorch allows choosing the GPU with "torch.device", but it doesn't set it as the default one.
             CHECK_CUDA_RESULT(cudaSetDevice(device.index()));
             symFunc = std::make_shared<CudaANISymmetryFunctions>(numAtoms, numSpecies, Rcr, Rca, false, atomSpecies, radialFunctions, angularFunctions, true);
         }
+#endif
+        else
+            throw std::runtime_error("Unsupported device: " + device.str());

         radial = torch::empty({numAtoms, numSpecies * (int)radialFunctions.size()}, tensorOptions);
         angular = torch::empty({numAtoms, numSpecies * (numSpecies + 1) / 2 * (int)angularFunctions.size()}, tensorOptions);
         positionsGrad = torch::empty({numAtoms, 3}, tensorOptions);

+#ifdef ENABLE_CUDA
         cudaSymFunc = dynamic_cast<CudaANISymmetryFunctions*>(symFunc.get());
+#endif
     };

     tensor_list forward(const Tensor& positions_, const optional<Tensor>& periodicBoxVectors_) {
@@ -111,10 +119,12 @@ class Holder : public torch::CustomClassHolder {
             float* periodicBoxVectorsPtr = periodicBoxVectors.data_ptr<float>();
         }

+#ifdef ENABLE_CUDA
         if (cudaSymFunc) {
             const torch::cuda::CUDAStream stream = torch::cuda::getCurrentCUDAStream(tensorOptions.device().index());
             cudaSymFunc->setStream(stream.stream());
         }
+#endif

         symFunc->computeSymmetryFunctions(positions.data_ptr<float>(), periodicBoxVectorsPtr, radial.data_ptr<float>(), angular.data_ptr<float>());

@@ -126,10 +136,12 @@ class Holder : public torch::CustomClassHolder {
         const Tensor radialGrad = grads[0].clone();
         const Tensor angularGrad = grads[1].clone();

+#ifdef ENABLE_CUDA
         if (cudaSymFunc) {
             const torch::cuda::CUDAStream stream = torch::cuda::getCurrentCUDAStream(tensorOptions.device().index());
             cudaSymFunc->setStream(stream.stream());
         }
+#endif

         symFunc->backprop(radialGrad.data_ptr<float>(), angularGrad.data_ptr<float>(), positionsGrad.data_ptr<float>());

@@ -146,7 +158,9 @@ class Holder : public torch::CustomClassHolder {
         Tensor radial;
         Tensor angular;
         Tensor positionsGrad;
+#ifdef ENABLE_CUDA
         CudaANISymmetryFunctions* cudaSymFunc;
+#endif
     };

 class AutogradFunctions : public torch::autograd::Function<AutogradFunctions> {
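
With ENABLE_CUDA undefined, the CUDA branch is compiled out, so constructing the holder on anything but a CPU tensor now throws "Unsupported device: ..." instead of silently leaving symFunc unset. A quick way to exercise the CPU path is the registered wrapper test, the same command CTest invokes per the CMakeLists above:

# Exercise the CPU code path of the PyTorch wrapper.
pytest -v src/pytorch/TestSymmetryFunctions.py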
