Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 10 additions & 10 deletions src/Native/LibTorchSharp/THSFFT.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ Tensor THSTensor_ifft2(const Tensor tensor, const int64_t* s, const int64_t* dim
CATCH_TENSOR(torch::fft::ifft2(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand All @@ -45,7 +45,7 @@ Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int s_length,
CATCH_TENSOR(torch::fft::fftn(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand All @@ -69,7 +69,7 @@ Tensor THSTensor_hfft2(const Tensor tensor, const int64_t* s, const int64_t* dim
CATCH_TENSOR(torch::fft::hfft2(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand All @@ -93,7 +93,7 @@ Tensor THSTensor_ihfft2(const Tensor tensor, const int64_t* s, const int64_t* di
CATCH_TENSOR(torch::fft::ihfft2(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand Down Expand Up @@ -134,7 +134,7 @@ Tensor THSTensor_irfft2(const Tensor tensor, const int64_t* s, const int64_t* di
CATCH_TENSOR(torch::fft::irfft2(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand All @@ -143,7 +143,7 @@ Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int s_length
CATCH_TENSOR(torch::fft::rfftn(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm)
Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm)
{
auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho";
auto sArg = (s == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(s, s_length));
Expand All @@ -152,7 +152,7 @@ Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int s_lengt
CATCH_TENSOR(torch::fft::irfftn(*tensor, sArg, dArg, normArg));
}

Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad)
Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad)
{
auto options = at::TensorOptions()
.dtype(at::ScalarType(scalar_type))
Expand All @@ -162,7 +162,7 @@ Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_ty
CATCH_TENSOR(d == 0.0 ? torch::fft::fftfreq(n, options) : torch::fft::fftfreq(n, d, options));
}

Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad)
Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad)
{
auto options = at::TensorOptions()
.dtype(at::ScalarType(scalar_type))
Expand All @@ -172,13 +172,13 @@ Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_t
CATCH_TENSOR(d == 0.0 ? torch::fft::rfftfreq(n, options) : torch::fft::rfftfreq(n, d, options));
}

// Shifts the zero-frequency component to the center of the spectrum.
// 'dim' may be nullptr, in which case torch shifts over all dimensions;
// otherwise 'dim' points at 'dim_length' dimension indices.
Tensor THSTensor_fftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length)
{
    c10::optional<c10::IntArrayRef> dims = c10::nullopt;
    if (dim != nullptr)
        dims = c10::IntArrayRef(dim, dim_length);
    CATCH_TENSOR(torch::fft::fftshift(*tensor, dims));
}

Tensor THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int dim_length)
Tensor THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length)
{
auto dArg = (dim == nullptr) ? c10::nullopt : c10::optional<c10::IntArrayRef>(c10::IntArrayRef(dim, dim_length));
CATCH_TENSOR(torch::fft::ifftshift(*tensor, dArg));
Expand Down
34 changes: 17 additions & 17 deletions src/Native/LibTorchSharp/THSJIT.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -405,12 +405,12 @@ TensorOrScalar* ReturnHelper(c10::IValue result, TensorOrScalar* (*allocator)(in
return nullptr;
}

c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const int length)
c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const int32_t length)
{
auto list = c10::impl::GenericList(c10::ScalarTypeType::get());

if (tensorPtrs != nullptr) {
for (int i = 0; i < length; i++)
for (int32_t i = 0; i < length; i++)
{
switch (tensorPtrs[i].TypeCode) {
case 1:
Expand All @@ -423,12 +423,12 @@ c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const
return list;
}

c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const int length)
c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const int32_t length)
{
auto list = c10::impl::GenericList(c10::TensorType::get());

if (tensorPtrs != nullptr) {
for (int i = 0; i < length; i++)
for (int32_t i = 0; i < length; i++)
{
switch (tensorPtrs[i].TypeCode) {
case 0:
Expand All @@ -441,7 +441,7 @@ c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const
return list;
}

std::vector<c10::IValue> toIValue(const TensorOrScalar* tensorPtrs, const int length)
std::vector<c10::IValue> toIValue(const TensorOrScalar* tensorPtrs, const int32_t length)
{
// TypeCode:
//
Expand All @@ -456,7 +456,7 @@ std::vector<c10::IValue> toIValue(const TensorOrScalar* tensorPtrs, const int le
std::vector<c10::IValue> tensors;

if (tensorPtrs != nullptr) {
for (int i = 0; i < length; i++)
for (int32_t i = 0; i < length; i++)
{
switch (tensorPtrs[i].TypeCode) {
case 0:
Expand Down Expand Up @@ -495,7 +495,7 @@ std::vector<c10::IValue> toIValue(const TensorOrScalar* tensorPtrs, const int le
return tensors;
}

void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
{
*typeCode = 0;

Expand All @@ -505,7 +505,7 @@ void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorP
)
}

void THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
void THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
{
*typeCode = 0;

Expand All @@ -516,7 +516,7 @@ void THSJIT_Module_invoke(const JITModule module, const char* name, const Tensor
)
}

void THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
void THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx)
{
*typeCode = 0;

Expand All @@ -538,19 +538,19 @@ const char* THSJIT_Method_name(const JITMethod method)
return make_sharable_string((*method)->name());
}

int THSJIT_Method_num_inputs(const JITMethod method)
int32_t THSJIT_Method_num_inputs(const JITMethod method)
{
return (int)(*method)->num_inputs();
return (int32_t)(*method)->num_inputs();
}

int THSJIT_Module_num_inputs(const JITModule module)
int32_t THSJIT_Module_num_inputs(const JITModule module)
{
return (int)(*module)->get_method("forward").num_inputs() - 1; // Don't count the 'self' argument.
return (int32_t)(*module)->get_method("forward").num_inputs() - 1; // Don't count the 'self' argument.
}

int THSJIT_Module_num_outputs(const JITModule module)
int32_t THSJIT_Module_num_outputs(const JITModule module)
{
return (int)(*module)->get_method("forward").function().getSchema().returns().size();
return (int32_t)(*module)->get_method("forward").function().getSchema().returns().size();
}

JITFunction THSJIT_Method_function(const JITMethod method)
Expand All @@ -567,9 +567,9 @@ void THSJIT_Method_dispose(const JITMethod method)
//-------------------------------------------------------------------------------------
// JITFunction

int THSJIT_Function_num_inputs(const JITFunction function)
int32_t THSJIT_Function_num_inputs(const JITFunction function)
{
return (int)(*function)->num_inputs();
return (int32_t)(*function)->num_inputs();
}

// TODO other function operations
Expand Down
12 changes: 6 additions & 6 deletions src/Native/LibTorchSharp/THSJIT.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,13 @@ EXPORT_API(JITCompilationUnit) THSJIT_compile(const char* script);
EXPORT_API(void) THSJIT_Module_dispose(const JITModule module);
EXPORT_API(void) THSJIT_CompilationUnit_dispose(const JITCompilationUnit module);

EXPORT_API(int) THSJIT_Module_num_inputs(const JITModule method);
EXPORT_API(int) THSJIT_Module_num_outputs(const JITModule method);
EXPORT_API(int32_t) THSJIT_Module_num_inputs(const JITModule method);
EXPORT_API(int32_t) THSJIT_Module_num_outputs(const JITModule method);

EXPORT_API(void) THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);
EXPORT_API(void) THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);
EXPORT_API(void) THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);
EXPORT_API(void) THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);

EXPORT_API(void) THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);
EXPORT_API(void) THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx);

EXPORT_API(int) THSJIT_Module_is_training(JITModule module);
EXPORT_API(void) THSJIT_Module_zero_grad(const JITModule module, bool set_to_none);
Expand Down Expand Up @@ -89,7 +89,7 @@ EXPORT_API(void) THSJIT_Module_named_attributes(const JITModule module, bool rec

EXPORT_API(void) THSJIT_Module_set_attribute(const JITModule module, const char* name, Tensor tensor);

EXPORT_API(int) THSJIT_Method_num_inputs(const JITMethod method);
EXPORT_API(int32_t) THSJIT_Method_num_inputs(const JITMethod method);

EXPORT_API(void) THSJIT_Method_dispose(const JITMethod method);

Expand Down
26 changes: 13 additions & 13 deletions src/Native/LibTorchSharp/THSLinearAlgebra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ Tensor THSLinalg_cholesky_ex(const Tensor tensor, bool check_errors, Tensor* inf
return ResultTensor(std::get<0>(res));
}

// Condition number of 'tensor' using an integer-valued norm order 'p'
// (e.g. 1, -1, 2, -2 — interpretation is delegated to torch::linalg_cond).
Tensor THSLinalg_cond_int(const Tensor tensor, const int32_t p)
{
    CATCH_TENSOR(torch::linalg_cond(*tensor, p))
}
Expand Down Expand Up @@ -187,19 +187,19 @@ Tensor THSLinalg_ldl_solve(const Tensor LD, const Tensor pivots, const Tensor B,
CATCH_TENSOR(torch::linalg_ldl_solve(*LD, *pivots, *B, hermitian))
}

// Matrix norm of 'tensor' with a scalar norm order 'ord', reduced over
// the 'dim_length' dimensions pointed to by 'dim'. dtype is left unset.
Tensor THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
    const c10::ArrayRef<int64_t> reduce_dims(dim, dim_length);
    CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, *ord, reduce_dims, keepdim, c10::nullopt))
}

// Matrix norm with a named order: 'fronuc' == 0 selects the Frobenius
// norm ("fro"); any other value selects the nuclear norm ("nuc").
Tensor THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
    const char* ord = (fronuc == 0) ? "fro" : "nuc";
    const c10::ArrayRef<int64_t> reduce_dims(dim, dim_length);
    CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, ord, reduce_dims, keepdim, c10::nullopt))
}

Tensor THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim)
Tensor THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
auto dims = c10::ArrayRef<int64_t>(dim, dim_length);
CATCH_TENSOR(torch::linalg_vector_norm(*tensor, *ord, dims, keepdim, c10::nullopt))
Expand All @@ -226,30 +226,30 @@ Tensor THSLinalg_matrix_power(const Tensor tensor, const int64_t n)
CATCH_TENSOR(torch::linalg_matrix_power(*tensor, n))
}

// Chained matrix product of 'length' tensors, letting torch pick the
// cheapest multiplication order.
Tensor THSLinalg_multi_dot(const Tensor* tensors, const int32_t length)
{
    auto operands = toTensors<at::Tensor>((torch::Tensor**)tensors, length);
    CATCH_TENSOR(torch::linalg_multi_dot(operands))
}

// Vector/matrix norm with a string-valued order 'p' (e.g. "fro", "nuc").
// 'dim' may be nullptr to reduce over all dimensions.
Tensor THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
    c10::optional<at::IntArrayRef> dims = c10::nullopt;
    if (dim != nullptr)
        dims = at::ArrayRef<int64_t>(dim, dim_length);
    CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
}

// Vector/matrix norm with a floating-point order 'p'.
// 'dim' may be nullptr to reduce over all dimensions.
Tensor THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
    c10::optional<at::IntArrayRef> dims = c10::nullopt;
    if (dim != nullptr)
        dims = at::ArrayRef<int64_t>(dim, dim_length);
    CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
}

// Vector/matrix norm with an integer order 'p'.
// 'dim' may be nullptr to reduce over all dimensions.
Tensor THSLinalg_norm_int(const Tensor tensor, const int32_t p, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
    c10::optional<at::IntArrayRef> dims = c10::nullopt;
    if (dim != nullptr)
        dims = at::ArrayRef<int64_t>(dim, dim_length);
    CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
}

Tensor THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int dim_length, const bool keepdim)
Tensor THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int32_t dim_length, const bool keepdim)
{
c10::optional<at::IntArrayRef> dims = (dim == nullptr) ? c10::nullopt : c10::optional<at::IntArrayRef>(at::ArrayRef<int64_t>(dim, dim_length));
CATCH_TENSOR(torch::linalg_norm(*tensor, c10::nullopt, dims, keepdim, c10::nullopt))
Expand All @@ -276,7 +276,7 @@ Tensor THSLinalg_pinverse(const Tensor tensor, const double rcond, const bool he
CATCH_TENSOR(torch::linalg_pinv(*tensor, rcond, hermitian))
}

Tensor THSLinalg_qr(const Tensor tensor, const char mode, Tensor* R)
Tensor THSLinalg_qr(const Tensor tensor, const uint8_t mode, Tensor* R)
{
std::tuple<at::Tensor, at::Tensor> res;
if (mode == 0) {
Expand Down Expand Up @@ -335,7 +335,7 @@ Tensor THSLinalg_tensorinv(const Tensor tensor, const int64_t ind)
CATCH_TENSOR(torch::linalg_tensorinv(*tensor, ind))
}

Tensor THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int dim_length)
Tensor THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int32_t dim_length)
{
c10::optional<at::IntArrayRef> dims = (dim == nullptr) ? c10::nullopt : c10::optional<at::IntArrayRef>(at::ArrayRef<int64_t>(dim, dim_length));
CATCH_TENSOR(torch::linalg_tensorsolve(*tensor, *other, dims))
Expand Down Expand Up @@ -467,9 +467,9 @@ Tensor THSLinalg_tensordot(
const Tensor input1,
const Tensor input2,
const int64_t* dims1,
const int dims1_length,
const int32_t dims1_length,
const int64_t* dims2,
const int dims2_length)
const int32_t dims2_length)
{
auto d1 = c10::ArrayRef<int64_t>(dims1, dims1_length);
auto d2 = c10::ArrayRef<int64_t>(dims2, dims2_length);
Expand Down
4 changes: 2 additions & 2 deletions src/Native/LibTorchSharp/THSModule.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,12 +161,12 @@ void THSNN_Module_get_named_modules(const NNModule module, NNModule* (*allocator
}
}

long THSNN_Module_children_size(const NNModule module)
size_t THSNN_Module_children_size(const NNModule module)
{
return (*module)->children().size();
}

// Returns the child module at position 'index' (in children() order).
// The caller owns the returned shared_ptr wrapper and must dispose it.
// NOTE(review): 'index' is not bounds-checked here — presumably the
// managed side validates it against THSNN_Module_children_size.
NNModule THSNN_Module_child(const NNModule module, const int32_t index)
{
    auto child = (*module)->children()[index];
    return new std::shared_ptr<torch::nn::Module>(child);
}
Expand Down
Loading