Deprecate tensor.data<T>(), and codemod tensor.data<T>() to tensor.data_ptr<T>() (pytorch#24886)

Summary:
This PR adds a deprecation warning to `tensor.data<T>()` (pytorch@91d94e7) and changes all call sites of `tensor.data<T>()` to `tensor.data_ptr<T>()` in PyTorch core.
Pull Request resolved: pytorch#24886

Differential Revision: D16924576

Pulled By: yf225

fbshipit-source-id: 0943d6be73245c7c549c78597b74c3b07fa24440
Will Feng authored and facebook-github-bot committed Aug 22, 2019
1 parent aa66146 commit 420b37f
Showing 135 changed files with 989 additions and 987 deletions.
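
For reference, a minimal sketch of what the codemod does at a typical call site. This example is not part of the diff; the tensor, values, and the `example` function are illustrative only.

#include <ATen/ATen.h>

void example() {
  at::Tensor t = at::ones({4}, at::kFloat);

  // Before this change: still compiles, but data<T>() now emits
  // "Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead."
  float* p_old = t.data<float>();

  // After the codemod: same raw pointer, no deprecation warning.
  float* p_new = t.data_ptr<float>();

  p_new[0] = 2.0f;  // raw-pointer access itself is unchanged
  (void)p_old;
}

Both calls return the same underlying pointer; the only behavioral difference introduced by this commit is the TORCH_WARN added inside data<T>().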
4 changes: 2 additions & 2 deletions aten/src/ATen/CPUApplyUtils.h
@@ -154,7 +154,7 @@ struct strided_tensor_iter_fixed {
void operator=(strided_tensor_iter_fixed const& x) = delete;
strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
strided_tensor_iter_fixed(Tensor& tensor, bool sort_strides = false)
: data_(tensor.data<T>()) {
: data_(tensor.data_ptr<T>()) {
std::memset(counter_, 0, sizeof(int64_t) * N);
if (tensor.dim() > 0) {
std::memcpy(
@@ -183,7 +183,7 @@ struct strided_tensor_iter {
void operator=(strided_tensor_iter const& x) = delete;
strided_tensor_iter(strided_tensor_iter&&) = default;
strided_tensor_iter(Tensor& tensor)
: data_(tensor.data<T>()),
: data_(tensor.data_ptr<T>()),
dim_(tensor.ndimension()),
counter_(dim_, 0),
sizes_(tensor.sizes().vec()),
8 changes: 4 additions & 4 deletions aten/src/ATen/core/Formatting.cpp
@@ -43,7 +43,7 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
return std::make_tuple(1., 0);
}
bool intMode = true;
auto self_p = self.data<double>();
auto self_p = self.data_ptr<double>();
for(int64_t i = 0; i < size; i++) {
auto z = self_p[i];
if(std::isfinite(z)) {
@@ -165,7 +165,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
}
for(int64_t l = 0; l < self.size(0); l++) {
Tensor row = self.select(0,l);
double *row_ptr = row.data<double>();
double *row_ptr = row.data_ptr<double>();
for(int64_t c = firstColumn; c < lastColumn+1; c++) {
stream << std::setw(sz) << row_ptr[c]/scale;
if(c == lastColumn) {
@@ -245,7 +245,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
tensor = tensor_.to(kCPU, kDouble).contiguous();
}
if(tensor.ndimension() == 0) {
stream << defaultfloat << tensor.data<double>()[0] << std::endl;
stream << defaultfloat << tensor.data_ptr<double>()[0] << std::endl;
stream << "[ " << tensor_.toString() << "{}";
} else if(tensor.ndimension() == 1) {
if (tensor.numel() > 0) {
@@ -255,7 +255,7 @@
if(scale != 1) {
printScale(stream, scale);
}
double* tensor_p = tensor.data<double>();
double* tensor_p = tensor.data_ptr<double>();
for(int64_t i = 0; i < tensor.size(0); i++) {
stream << std::setw(sz) << tensor_p[i]/scale << std::endl;
}
9 changes: 5 additions & 4 deletions aten/src/ATen/core/Tensor.h
@@ -289,6 +289,7 @@ class CAFFE2_API Tensor {

template<typename T>
T * data() const {
TORCH_WARN("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.");
return data_ptr<T>();
}

@@ -302,9 +303,9 @@
// dimension.
template<typename T, size_t N>
TensorAccessor<T,N> accessor() const& {
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
TORCH_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
return TensorAccessor<T,N>(data<T>(),sizes().data(),strides().data());
return TensorAccessor<T,N>(data_ptr<T>(),sizes().data(),strides().data());
}
template<typename T, size_t N>
TensorAccessor<T,N> accessor() && = delete;
@@ -316,9 +317,9 @@
// as an argument.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
PackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const& {
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
TORCH_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
return PackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(data<T>()),sizes().data(),strides().data());
return PackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(data_ptr<T>()),sizes().data(),strides().data());
}
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
PackedTensorAccessor<T,N> packed_accessor() && = delete;
2 changes: 1 addition & 1 deletion aten/src/ATen/cuda/detail/IndexUtils.cuh
@@ -24,7 +24,7 @@ getTensorInfo(const at::Tensor& t) {
}

return TensorInfo<scalar, IndexType>(
t.data<scalar>(), dims, sz, st);
t.data_ptr<scalar>(), dims, sz, st);
}

} // detail
32 changes: 16 additions & 16 deletions aten/src/ATen/native/Activation.cpp
@@ -93,9 +93,9 @@ void inline prelu_cpu_kernel_share_weights(
const Tensor& weight) {

int64_t input_numel = input.numel();
auto result_data = result.data<scalar_t>();
auto input_data = input.data<scalar_t>();
auto weight_val = weight.data<scalar_t>()[0];
auto result_data = result.data_ptr<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto weight_val = weight.data_ptr<scalar_t>()[0];

at::parallel_for(0, input_numel, 1000, [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
@@ -117,9 +117,9 @@ void inline prelu_cpu_kernel_multi_weights(
int64_t input_stride0,
int64_t input_stride1) {

scalar_t* result_data = result.data<scalar_t>();
scalar_t* input_data = input.data<scalar_t>();
scalar_t* weight_data = weight.data<scalar_t>();
scalar_t* result_data = result.data_ptr<scalar_t>();
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* weight_data = weight.data_ptr<scalar_t>();

auto loop = [&](int64_t start, int64_t end) {
for (auto i = start; i < end; ++i) {
@@ -204,11 +204,11 @@ void inline prelu_cpu_backward_kernel_share_weights(
Tensor& weight_grad) {

int64_t input_numel = input.numel();
auto input_data = input.data<scalar_t>();
auto weight_val = weight.data<scalar_t>()[0];
auto grad_out_data = grad_out.data<scalar_t>();
auto input_grad_data = input_grad.data<scalar_t>();
auto weight_grad_data = weight_grad.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto weight_val = weight.data_ptr<scalar_t>()[0];
auto grad_out_data = grad_out.data_ptr<scalar_t>();
auto input_grad_data = input_grad.data_ptr<scalar_t>();
auto weight_grad_data = weight_grad.data_ptr<scalar_t>();

scalar_t sum = at::parallel_reduce(0, input_numel, 1000, scalar_t(0),
[&](int64_t start, int64_t end, scalar_t ident) -> scalar_t {
@@ -240,11 +240,11 @@ void inline prelu_cpu_backward_kernel_multi_weights(
int64_t input_stride0,
int64_t input_stride1) {

auto input_data = input.data<scalar_t>();
auto weight_data = weight.data<scalar_t>();
auto grad_out_data = grad_out.data<scalar_t>();
auto input_grad_data = input_grad.data<scalar_t>();
auto weight_grad_collector_data = weight_grad_collector.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto weight_data = weight.data_ptr<scalar_t>();
auto grad_out_data = grad_out.data_ptr<scalar_t>();
auto input_grad_data = input_grad.data_ptr<scalar_t>();
auto weight_grad_collector_data = weight_grad_collector.data_ptr<scalar_t>();

auto loop = [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
16 changes: 8 additions & 8 deletions aten/src/ATen/native/AdaptiveAveragePooling.cpp
@@ -137,8 +137,8 @@ namespace {
output.resize_({1, sizeD, osizeH, osizeW});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_avg_pool2d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
adaptive_avg_pool2d_single_out_frame<scalar_t>(
input_data,
output_data,
@@ -157,8 +157,8 @@
int64_t istrideB = input.stride(-4);

AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_avg_pool2d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
adaptive_avg_pool2d_out_frame<scalar_t>(
input_data,
output_data,
@@ -268,8 +268,8 @@ namespace {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool2d_backward_cpu", [&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();

adaptive_avg_pool2d_backward_single_out_frame<scalar_t>(
gradInput_data, gradOutput_data,
@@ -284,8 +284,8 @@
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool2d_backward_cpu", [&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
int64_t sizeB = input.size(-4);

adaptive_avg_pool2d_backward_out_frame<scalar_t>(
16 changes: 8 additions & 8 deletions aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
@@ -116,8 +116,8 @@ void adaptive_avg_pool3d_out_cpu_template(

AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool3d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
adaptive_avg_pool3d_out_frame<scalar_t>(
input_data,
output_data,
@@ -140,8 +140,8 @@
for (b = 0; b < input.size(0); b++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool3d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
adaptive_avg_pool3d_out_frame<scalar_t>(
input_data + b * input.stride(0),
output_data + b * sizeD * osizeT * osizeH * osizeW,
@@ -236,8 +236,8 @@ Tensor& adaptive_avg_pool3d_backward_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool3d_backward_cpu", [&] {
/* get raw pointers */
scalar_t* gradInput_data = gradInput.data<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data<scalar_t>();
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();

adaptive_avg_pool3d_backward_out_frame<scalar_t>(
gradInput_data,
@@ -257,8 +257,8 @@
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool3d_backward_cpu", [&] {
/* get raw pointers */
scalar_t* gradInput_data = gradInput.data<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data<scalar_t>();
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();
adaptive_avg_pool3d_backward_out_frame<scalar_t>(
gradInput_data + b * sizeD * isizeT * isizeH * isizeW,
gradOutput_data + b * sizeD * osizeT * osizeH * osizeW,
24 changes: 12 additions & 12 deletions aten/src/ATen/native/AdaptiveMaxPooling2d.cpp
@@ -174,9 +174,9 @@ void adaptive_max_pool2d_out_cpu_template(
indices.resize_({sizeD, osizeH, osizeW});

AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool2d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto indices_data = indices.data<int64_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
auto indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool2d_single_out_frame<scalar_t>(input_data, output_data,
indices_data,
@@ -195,9 +195,9 @@
indices.resize_({sizeB, sizeD, osizeH, osizeW});

AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool2d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto indices_data = indices.data<int64_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
auto indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool2d_out_frame<scalar_t>(input_data, output_data,
indices_data,
@@ -309,9 +309,9 @@ Tensor& adaptive_max_pool2d_backward_out_cpu_template(
{
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool2d_backward", [&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool2d_backward_single_out_frame<scalar_t>(gradInput_data,
gradOutput_data,
@@ -326,9 +326,9 @@
{
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool2d_backward", [&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool2d_backward_out_frame<scalar_t>(gradInput_data,
gradOutput_data,
24 changes: 12 additions & 12 deletions aten/src/ATen/native/AdaptiveMaxPooling3d.cpp
@@ -201,9 +201,9 @@ void adaptive_max_pool3d_out_cpu_template(
indices.resize_({sizeD, osizeT, osizeH, osizeW});

AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool3d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto indices_data = indices.data<int64_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
auto indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool3d_single_out_frame<scalar_t>(input_data, output_data,
indices_data,
@@ -222,9 +222,9 @@
indices.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});

AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "adaptive_max_pool3d_cpu", [&] {
auto input_data = input.data<scalar_t>();
auto output_data = output.data<scalar_t>();
auto indices_data = indices.data<int64_t>();
auto input_data = input.data_ptr<scalar_t>();
auto output_data = output.data_ptr<scalar_t>();
auto indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool3d_out_frame<scalar_t>(input_data, output_data,
indices_data,
@@ -356,9 +356,9 @@ Tensor& adaptive_max_pool3d_backward_out_cpu_template(
"adaptive_max_pool3d_backward",
[&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool3d_backward_single_out_frame<scalar_t>(gradInput_data, gradOutput_data,
indices_data,
@@ -374,9 +374,9 @@
"adaptive_max_pool3d_backward",
[&] {
/* get raw pointers */
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();

adaptive_max_pool3d_backward_out_frame<scalar_t>(gradInput_data, gradOutput_data,
indices_data,
8 changes: 4 additions & 4 deletions aten/src/ATen/native/AveragePool2d.cpp
@@ -150,8 +150,8 @@ void avg_pool2d_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Long, input.scalar_type(),
"avg_pool2d_out_frame",
[&] {
scalar_t *input_data = input.data<scalar_t>();
scalar_t *output_data = output.data<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
scalar_t *output_data = output.data_ptr<scalar_t>();

avg_pool2d_out_frame(
input_data,
@@ -307,8 +307,8 @@ Tensor& avg_pool2d_backward_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Long, input.scalar_type(),
"avg_pool2d_backward_out_frame",
[&] {
scalar_t *gradInput_data = gradInput.data<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();

avg_pool2d_backward_out_frame(
gradInput_data,
