Refactor autograd package to separate Python dependencies. (pytorch#662)
The core autograd Variable, Function, and Engine no longer depend on the Python API. This lets us implement functions in C++. In the future, we can also multithread the engine and release the GIL for most of the non-Python backward passes.
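A rough sketch of the GIL point, not code from this commit: once a backward function is pure C++, the engine can release the GIL around it with the standard CPython macros. `Task` and its fields here are hypothetical stand-ins for one node of the backward graph.

#include <Python.h>
#include <functional>

// Hypothetical stand-in for one node of the backward graph.
struct Task {
  bool needs_python;                  // implemented as a Python Function?
  std::function<void()> backward_fn;  // the actual backward computation
};

void run_backward_task(Task& task) {
  if (task.needs_python) {
    task.backward_fn();          // Python callback: keep the GIL held
  } else {
    Py_BEGIN_ALLOW_THREADS       // drop the GIL around pure-C++ work
    task.backward_fn();
    Py_END_ALLOW_THREADS         // reacquire before returning to Python
  }
}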
Showing 44 changed files with 2,970 additions and 1,767 deletions.
First new file — evidently DynamicTypes.cpp, since it includes "DynamicTypes.h" and implements the declarations in the header shown below:

@@ -0,0 +1,161 @@
#include "DynamicTypes.h" | ||
|
||
#include "THP.h" | ||
#include <vector> | ||
#include <unordered_map> | ||
#include <THPP/tensors/THTensor.hpp> | ||
#include <THPP/tensors/THSTensor.hpp> | ||
|
||
#ifdef WITH_CUDA | ||
#include <THC/THC.h> | ||
#include <THPP/tensors/THCTensor.hpp> | ||
extern THCState* state; | ||
#endif | ||
|
||
|
||
using namespace thpp; | ||
|
||
namespace torch { | ||
|
||
struct TensorType { | ||
Type data_type; | ||
bool is_cuda; | ||
bool is_sparse; | ||
|
||
friend bool operator==(const TensorType &t1, const TensorType &t2) | ||
{ | ||
return (t1.data_type == t2.data_type && | ||
t1.is_cuda == t2.is_cuda && | ||
t1.is_sparse == t2.is_sparse); | ||
} | ||
|
||
friend bool operator!=(const TensorType &t1, const TensorType &t2) | ||
{ | ||
return !(t1 == t2); | ||
} | ||
}; | ||
|
||
struct TensorTypeHasher | ||
{ | ||
std::size_t operator()(const TensorType& k) const | ||
{ | ||
size_t hash = static_cast<size_t>(k.data_type); | ||
hash = (hash << 8) + k.is_cuda; | ||
hash = (hash << 1) + k.is_sparse; | ||
return hash; | ||
} | ||
}; | ||
|
||
static std::unordered_map<std::string, Type> type_names = { | ||
{"Float", Type::FLOAT}, | ||
{"Double", Type::DOUBLE}, | ||
{"Half", Type::HALF}, | ||
{"Byte", Type::UCHAR}, | ||
{"Char", Type::CHAR}, | ||
{"Short", Type::SHORT}, | ||
{"Int", Type::INT}, | ||
{"Long", Type::LONG}, | ||
}; | ||
static std::unordered_map<PyTypeObject*, TensorType> pytype_to_tensortype; | ||
static std::unordered_map<TensorType, PyTypeObject*, TensorTypeHasher> tensortype_to_pytype; | ||
|
||
void registerPyTypeObject(PyTypeObject *pytype, const std::string& name, bool is_cuda, bool is_sparse) | ||
{ | ||
TensorType type; | ||
type.data_type = type_names.at(name); | ||
type.is_cuda = is_cuda; | ||
type.is_sparse = is_sparse; | ||
|
||
pytype_to_tensortype[pytype] = type; | ||
tensortype_to_pytype[type] = pytype; | ||
} | ||
|
||
PyTypeObject* getPyTypeObject(const thpp::Tensor& tensor) | ||
{ | ||
TensorType type; | ||
type.data_type = tensor.type(); | ||
type.is_cuda = tensor.isCuda(); | ||
type.is_sparse = tensor.isSparse(); | ||
|
||
return tensortype_to_pytype.at(type); | ||
} | ||
|
||
static std::unique_ptr<Tensor> createTensor(void *tensor, Type type, bool is_cuda, bool is_sparse) | ||
{ | ||
if (is_cuda) { | ||
#ifdef WITH_CUDA | ||
if (type == Type::UCHAR) { | ||
return std::unique_ptr<Tensor>(new THCTensor<unsigned char>(state, (THCudaByteTensor*)tensor)); | ||
} else if (type == Type::CHAR) { | ||
return std::unique_ptr<Tensor>(new THCTensor<char>(state, (THCudaCharTensor*)tensor)); | ||
} else if (type == Type::SHORT) { | ||
return std::unique_ptr<Tensor>(new THCTensor<short>(state, (THCudaShortTensor*)tensor)); | ||
} else if (type == Type::INT) { | ||
return std::unique_ptr<Tensor>(new THCTensor<int>(state, (THCudaIntTensor*)tensor)); | ||
} else if (type == Type::LONG) { | ||
return std::unique_ptr<Tensor>(new THCTensor<long>(state, (THCudaLongTensor*)tensor)); | ||
} else if (type == Type::FLOAT) { | ||
return std::unique_ptr<Tensor>(new THCTensor<float>(state, (THCudaTensor*)tensor)); | ||
} else if (type == Type::DOUBLE) { | ||
return std::unique_ptr<Tensor>(new THCTensor<double>(state, (THCudaDoubleTensor*)tensor)); | ||
} else if (type == Type::HALF) { | ||
return std::unique_ptr<Tensor>(new THCTensor<half>(state, (THCudaHalfTensor*)tensor)); | ||
} | ||
#else | ||
throw std::runtime_error("Compiled without CUDA support"); | ||
#endif | ||
} else if (is_sparse) { | ||
if (type == Type::UCHAR) { | ||
return std::unique_ptr<Tensor>(new THSTensor<unsigned char>((THSByteTensor*)tensor)); | ||
} else if (type == Type::CHAR) { | ||
return std::unique_ptr<Tensor>(new THSTensor<char>((THSCharTensor*)tensor)); | ||
} else if (type == Type::SHORT) { | ||
return std::unique_ptr<Tensor>(new THSTensor<short>((THSShortTensor*)tensor)); | ||
} else if (type == Type::INT) { | ||
return std::unique_ptr<Tensor>(new THSTensor<int>((THSIntTensor*)tensor)); | ||
} else if (type == Type::LONG) { | ||
return std::unique_ptr<Tensor>(new THSTensor<long>((THSLongTensor*)tensor)); | ||
} else if (type == Type::FLOAT) { | ||
return std::unique_ptr<Tensor>(new THSTensor<float>((THSFloatTensor*)tensor)); | ||
} else if (type == Type::DOUBLE) { | ||
return std::unique_ptr<Tensor>(new THSTensor<double>((THSDoubleTensor*)tensor)); | ||
} | ||
} else if (type == Type::UCHAR) { | ||
return std::unique_ptr<Tensor>(new THTensor<unsigned char>((THByteTensor*)tensor)); | ||
} else if (type == Type::CHAR) { | ||
return std::unique_ptr<Tensor>(new THTensor<char>((THCharTensor*)tensor)); | ||
} else if (type == Type::SHORT) { | ||
return std::unique_ptr<Tensor>(new THTensor<short>((THShortTensor*)tensor)); | ||
} else if (type == Type::INT) { | ||
return std::unique_ptr<Tensor>(new THTensor<int>((THIntTensor*)tensor)); | ||
} else if (type == Type::LONG) { | ||
return std::unique_ptr<Tensor>(new THTensor<long>((THLongTensor*)tensor)); | ||
} else if (type == Type::FLOAT) { | ||
return std::unique_ptr<Tensor>(new THTensor<float>((THFloatTensor*)tensor)); | ||
} else if (type == Type::DOUBLE) { | ||
return std::unique_ptr<Tensor>(new THTensor<double>((THDoubleTensor*)tensor)); | ||
} | ||
throw std::invalid_argument("Unsupported tensor type"); | ||
} | ||
|
||
std::unique_ptr<Tensor> createTensor(PyObject *data) | ||
{ | ||
auto tensor_type = pytype_to_tensortype.at(Py_TYPE(data)); | ||
auto type = tensor_type.data_type; | ||
auto tensor = ((THPVoidTensor *)data)->cdata; | ||
auto wrapper = createTensor(tensor, type, tensor_type.is_cuda, tensor_type.is_sparse); | ||
wrapper->retain(); | ||
return wrapper; | ||
} | ||
|
||
PyObject* createPyObject(const thpp::Tensor& tensor) | ||
{ | ||
auto type = getPyTypeObject(tensor); | ||
PyObject *obj = type->tp_alloc(type, 0); | ||
if (obj) { | ||
((THPVoidTensor*)obj)->cdata = (THVoidTensor *)const_cast<thpp::Tensor&>(tensor).retain().cdata(); | ||
} | ||
return obj; | ||
} | ||
|
||
} // namespace |
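To make the ownership rules above concrete, here is a minimal round-trip sketch built on these helpers. It is illustrative only: `wrap_unwrap` is a hypothetical function, and it assumes registerPyTypeObject has already been called for the incoming tensor's Python type.

#include <memory>
#include "DynamicTypes.h"

// Hypothetical round trip: Python tensor -> thpp::Tensor -> Python tensor.
PyObject* wrap_unwrap(PyObject* py_tensor) {
  // createTensor retains the underlying TH tensor, so the wrapper owns
  // one reference and releases it when the unique_ptr is destroyed.
  std::unique_ptr<thpp::Tensor> t = torch::createTensor(py_tensor);
  // ...type-agnostic C++ code can use *t here without the Python API...
  // createPyObject retains again on behalf of the new Python object.
  return torch::createPyObject(*t);
}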
Second new file — evidently the matching header, DynamicTypes.h:

@@ -0,0 +1,25 @@
#pragma once

// Provides conversions between Python tensor objects and thpp::Tensors.

#include <memory>
#include <string>
#include <Python.h>
#include <THPP/THPP.h>

namespace torch {

// Register a PyTypeObject* with the given attributes
void registerPyTypeObject(
    PyTypeObject *pytype, const std::string& name,
    bool is_cuda, bool is_sparse);

// Gets the PyTypeObject* corresponding to the Tensor
PyTypeObject* getPyTypeObject(const thpp::Tensor& tensor);

// Creates a Tensor from a Python tensor object
std::unique_ptr<thpp::Tensor> createTensor(PyObject *data);

// Creates Python tensor object from a Tensor
PyObject* createPyObject(const thpp::Tensor& tensor);

} // namespace torch
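Presumably each concrete tensor binding registers itself through this interface when the extension module initializes. The following is a hedged sketch of such a registration; the `THPFloatTensor_Type` / `THPDoubleTensor_Type` / `THCPFloatTensor_Type` names follow the THP naming convention and are assumptions, not identifiers from this diff.

#include "DynamicTypes.h"

// Hypothetical module-init registration; the *_Type names are assumed.
static void registerTensorTypes()
{
  torch::registerPyTypeObject(&THPFloatTensor_Type, "Float", /*is_cuda=*/false, /*is_sparse=*/false);
  torch::registerPyTypeObject(&THPDoubleTensor_Type, "Double", /*is_cuda=*/false, /*is_sparse=*/false);
#ifdef WITH_CUDA
  torch::registerPyTypeObject(&THCPFloatTensor_Type, "Float", /*is_cuda=*/true, /*is_sparse=*/false);
#endif
  // ...and likewise for the remaining scalar types and the sparse variants.
}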