Skip to content

Commit

Permalink
Look for libcudart in default CUDA installation paths (pytorch#195)
Browse files Browse the repository at this point in the history
  • Loading branch information
apaszke authored and soumith committed Nov 2, 2016
1 parent 3e5c121 commit ebc70f7
Show file tree
Hide file tree
Showing 4 changed files with 56 additions and 19 deletions.
19 changes: 9 additions & 10 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@
import sys
import os

# TODO: make this more robust
# CUDA is considered available when a toolkit installation exists at
# CUDA_HOME. The location can be overridden through the CUDA_HOME
# environment variable; otherwise the standard Linux install prefix is
# assumed. (The scraped diff had fused the old hard-coded
# '/Developer/NVIDIA/CUDA-7.5' / '/usr/local/cuda' probe with these
# lines; the CUDA_HOME-based form is the post-commit version, and the
# dead duplicate WITH_CUDA assignment is dropped here.)
CUDA_HOME = os.getenv('CUDA_HOME', '/usr/local/cuda')
WITH_CUDA = os.path.exists(CUDA_HOME)
# cuDNN support is only built when CUDA itself is available.
WITH_CUDNN = WITH_CUDA
DEBUG = False

Expand Down Expand Up @@ -176,18 +176,17 @@ def run(self):
pass

if WITH_CUDA:
if platform.system() == 'Darwin':
cuda_path = '/Developer/NVIDIA/CUDA-7.5'
cuda_include_path = cuda_path + '/include'
cuda_lib_path = cuda_path + '/lib'
else:
cuda_path = '/usr/local/cuda'
cuda_include_path = cuda_path + '/include'
cuda_lib_path = cuda_path + '/lib64'
cuda_lib_dirs = ['lib64', 'lib']
cuda_include_path = os.path.join(CUDA_HOME, 'include')
for lib_dir in cuda_lib_dirs:
cuda_lib_path = os.path.join(CUDA_HOME, lib_dir)
if os.path.exists(cuda_lib_path):
break
include_dirs.append(cuda_include_path)
extra_link_args.append('-L' + cuda_lib_path)
extra_link_args.append('-Wl,-rpath,' + cuda_lib_path)
extra_compile_args += ['-DWITH_CUDA']
extra_compile_args += ['-DCUDA_LIB_PATH=' + cuda_lib_path]
main_libraries += ['THC']
main_sources += [
"torch/csrc/cuda/Module.cpp",
Expand Down
2 changes: 2 additions & 0 deletions torch/csrc/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -579,6 +579,7 @@ extern PyObject * THCPModule_seedAll(PyObject *_unused);
extern PyObject * THCPModule_initialSeed(PyObject *_unused);
extern PyObject * THCPModule_cudaHostAllocator(PyObject *_unused);
extern PyObject * THCPModule_cudaSynchronize(PyObject *_unused);
extern PyObject * THCPModule_getLibPath(PyObject *_unused);
#endif

static PyMethodDef TorchMethods[] = {
Expand All @@ -602,6 +603,7 @@ static PyMethodDef TorchMethods[] = {
{"_cuda_initialSeed", (PyCFunction)THCPModule_initialSeed, METH_NOARGS, NULL},
{"_cuda_cudaHostAllocator", (PyCFunction)THCPModule_cudaHostAllocator, METH_NOARGS, NULL},
{"_cuda_synchronize", (PyCFunction)THCPModule_cudaSynchronize, METH_NOARGS, NULL},
{"_cuda_getLibPath", (PyCFunction)THCPModule_getLibPath, METH_NOARGS, NULL},
#endif
{"_safe_call", (PyCFunction)THPModule_safeCall, METH_VARARGS | METH_KEYWORDS, NULL},
{"_sendfd", (PyCFunction)THPModule_sendfd, METH_VARARGS, NULL},
Expand Down
13 changes: 13 additions & 0 deletions torch/csrc/cuda/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,19 @@ PyObject * THCPModule_cudaSynchronize(PyObject *_unused)
END_HANDLE_TH_ERRORS
}

// Return the CUDA library directory this extension was built against, as a
// Python string. CUDA_LIB_PATH is injected at compile time via
// -DCUDA_LIB_PATH=<path> (see setup.py); because it arrives as a bare token,
// the two-level _STR/STR macro pair is needed to stringize the *expanded*
// macro value into a C string literal.
PyObject * THCPModule_getLibPath(PyObject *_unused)
{
#define _STR(x) #x
#define STR(x) _STR(x)
#if PY_MAJOR_VERSION == 2
// Python 2: plain (byte) string object.
return PyString_FromString(STR(CUDA_LIB_PATH));
#else
// Python 3: unicode string object.
return PyUnicode_FromString(STR(CUDA_LIB_PATH));
#endif
// Keep the helper macros local to this function.
#undef STR
#undef _STR
}

////////////////////////////////////////////////////////////////////////////////
// Cuda module initialization
////////////////////////////////////////////////////////////////////////////////
Expand Down
41 changes: 32 additions & 9 deletions torch/cuda/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import contextlib
import platform
import ctypes
import os
import torch

_initialized = False
Expand All @@ -11,10 +12,28 @@ def is_available():
return (hasattr(torch._C, '_cuda_isDriverSufficient') and
torch._C._cuda_isDriverSufficient())

def _lazy_init():
global _initialized, _cudart
if _initialized:
return

def _load_cudart():
    """Locate and load the CUDA runtime library (libcudart) via ctypes.

    Candidate locations are tried in order: the dynamic linker's own
    search path, the library directory recorded when torch was compiled,
    and the default CUDA installation prefixes. The first candidate that
    loads is returned.

    Raises:
        RuntimeError: if libcudart cannot be found in any location.
    """
    os_name = platform.system()
    suffix = 'dylib' if os_name == 'Darwin' else 'so'
    lib_name = 'libcudart.' + suffix
    candidates = [
        lib_name,  # bare name: defer to the dynamic linker's search path
        os.path.join(torch._C._cuda_getLibPath(), lib_name),
        os.path.join('/usr/local/cuda/lib64', lib_name),
        os.path.join('/usr/local/cuda/lib', lib_name),
    ]
    for candidate in candidates:
        try:
            return ctypes.cdll.LoadLibrary(candidate)
        except OSError:
            continue
    env_var = "DYLD_LIBRARY_PATH" if os_name == 'Darwin' else "LD_LIBRARY_PATH"
    raise RuntimeError("couldn't find libcudart. Make sure CUDA libraries "
                       "are installed in a default location, or that they're in " +
                       env_var + ".")


def _check_driver():
if not hasattr(torch._C, '_cuda_isDriverSufficient'):
raise AssertionError("Torch not compiled with CUDA enabled")
if not torch._C._cuda_isDriverSufficient():
Expand All @@ -33,14 +52,18 @@ def _lazy_init():
Alternatively, go to: https://pytorch.org/binaries to install
a PyTorch version that has been compiled with your version
of the CUDA driver.""".format(str(torch._C._cuda_getDriverVersion())))


def _lazy_init():
    """Initialize torch's CUDA state on first use.

    Safe to call repeatedly: only the first call does any work. Checks
    that a sufficient CUDA driver is present, initializes the CUDA
    backend, and loads libcudart so that error-name lookups are
    available through cudart().

    Note: the scraped diff had fused removed lines (the old
    platform-specific LoadLibrary branch and an early
    ``_initialized = True``) with the added lines; this is the
    reconstructed post-commit version, with the duplicate assignment
    and dead branch removed.
    """
    global _initialized, _cudart
    if _initialized:
        return
    _check_driver()
    assert torch._C._cuda_init()
    _cudart = _load_cudart()
    # These functions return const char*; without setting restype,
    # ctypes would interpret the returned pointers as plain ints.
    _cudart.cudaGetErrorName.restype = ctypes.c_char_p
    _cudart.cudaGetErrorString.restype = ctypes.c_char_p
    # Flag success only after every step above has completed.
    _initialized = True


def cudart():
Expand Down

0 comments on commit ebc70f7

Please sign in to comment.