Skip to content

Commit e12a905

Browse files
authored
Remove the declaration of using Tensor in framework/tensor.h (#46432)
* remove needless using tensor * remove needless using tensor * resolve conflict * replace tensor using * fix format error * revert needless changing * fix rocm and npu compile error * fix cinn compile error * fix format error * fix mkldnn format error * fix mkldnn format error * fix cinn compile error * fix cinn compile error * fix cinn compile error * resolve conflict
1 parent 6a706e6 commit e12a905

File tree

1,025 files changed

+8034
-7749
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

1,025 files changed

+8034
-7749
lines changed

paddle/fluid/distributed/ps/service/brpc_utils.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,7 @@ void DeserializeSelectedRows(
282282
const platform::DeviceContext& ctx) {
283283
const auto place = ctx.GetPlace();
284284
auto* slr = var->GetMutable<phi::SelectedRows>();
285-
framework::Tensor* tensor = slr->mutable_value();
285+
phi::DenseTensor* tensor = slr->mutable_value();
286286
slr->set_height(msg.slr_height());
287287
std::vector<int64_t> tmp_rows(msg.dims()[0]);
288288
memcpy(tmp_rows.data(), msg.data().data(), msg.dims()[0] * sizeof(int64_t));

paddle/fluid/eager/eager_tensor.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,7 @@ class EagerVariable final {
248248
// Construct allocation only once.
249249
if (var_.IsInitialized()) {
250250
if (var_.IsType<paddle::framework::LoDTensor>() ||
251-
var_.IsType<paddle::framework::Tensor>()) {
251+
var_.IsType<phi::DenseTensor>()) {
252252
return SetImplWithLegacyTensor<phi::DenseTensor>();
253253
} else if (var_.IsType<phi::SelectedRows>()) {
254254
return SetImplWithLegacyTensor<phi::SelectedRows>();
@@ -286,7 +286,7 @@ class EagerVariable final {
286286
template <typename VarType>
287287
void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
288288
auto* framework_tensor = var_.GetMutable<VarType>();
289-
// Contruct framework::Tensor from egr::EagerVariable
289+
// Construct phi::DenseTensor from egr::EagerVariable
290290
auto tensor_dense = std::dynamic_pointer_cast<VarType>(tensor.impl());
291291
PADDLE_ENFORCE_EQ(
292292
(tensor_dense.get() && tensor_dense),
@@ -303,7 +303,7 @@ class EagerVariable final {
303303
void ConstructVariableFromCompatTensor(
304304
const paddle::experimental::Tensor& tensor) {
305305
auto* framework_holder = var_.GetMutable<VarType>();
306-
// Contruct framework::Tensor from egr::EagerVariable
306+
// Construct phi::DenseTensor from egr::EagerVariable
307307
auto* compat_tensor =
308308
static_cast<VariableCompatTensor*>(tensor.impl().get());
309309
PADDLE_ENFORCE_NOT_NULL(compat_tensor,

paddle/fluid/framework/attribute_checker.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,7 @@ class TypedAttrChecker {
246246
true,
247247
platform::errors::InvalidArgument(
248248
"Found Attribute('%s') with type(Variable), but it "
249-
"doesn't support Tensor type.",
249+
"doesn't support phi::DenseTensor type.",
250250
attr_name_));
251251

252252
VLOG(1) << "Found Attribute " << attr_name_ << " with type(Variable).";

paddle/fluid/framework/copy_same_tensor_test.cc

+3-3
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ static bool CopySameTensorTestMain(const DDim &dims,
4646
FLAGS_use_system_allocator = true; // force to use system allocator
4747

4848
// Step 1: create a cpu tensor and initialize it with random value;
49-
Tensor src_cpu_tensor;
49+
phi::DenseTensor src_cpu_tensor;
5050
{
5151
src_cpu_tensor.Resize(dims);
5252
auto *src_ptr_cpu = src_cpu_tensor.mutable_data<T>(platform::CPUPlace());
@@ -60,9 +60,9 @@ static bool CopySameTensorTestMain(const DDim &dims,
6060
}
6161

6262
// Step 2: copy the source tensor to dst place
63-
Tensor dst_cpu_tensor;
63+
phi::DenseTensor dst_cpu_tensor;
6464
{
65-
Tensor src_tensor;
65+
phi::DenseTensor src_tensor;
6666
TensorCopySync(src_cpu_tensor, src_place, &src_tensor);
6767

6868
// The source tensor and dst_tensor is the same

paddle/fluid/framework/custom_operator.cc

+8-8
Original file line numberDiff line numberDiff line change
@@ -133,8 +133,8 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
133133
for (auto& in_name : inputs) {
134134
VLOG(3) << "Custom Operator: input name - " << in_name;
135135
if (detail::IsDuplicableVar(in_name)) {
136-
// return const std::vector<const Tensor*>
137-
auto vec_x = ctx.MultiInput<Tensor>(in_name);
136+
// return const std::vector<const phi::DenseTensor*>
137+
auto vec_x = ctx.MultiInput<phi::DenseTensor>(in_name);
138138
PADDLE_ENFORCE_NE(vec_x.empty(),
139139
true,
140140
platform::errors::NotFound(
@@ -161,7 +161,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
161161
}
162162
kernel_ctx.EmplaceBackInputs(std::move(custom_vec_in));
163163
} else {
164-
auto* x = ctx.Input<Tensor>(in_name);
164+
auto* x = ctx.Input<phi::DenseTensor>(in_name);
165165
PADDLE_ENFORCE_NOT_NULL(
166166
x,
167167
platform::errors::NotFound("Input tensor (%s) is nullptr.", in_name));
@@ -222,7 +222,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
222222

223223
VLOG(3) << "Custom Operator: push outputs into CustomOpKernelContext.";
224224
// cache the target tensor pointers
225-
std::vector<Tensor*> true_out_ptrs;
225+
std::vector<phi::DenseTensor*> true_out_ptrs;
226226
for (size_t i = 0; i < outputs.size(); ++i) {
227227
auto out_name = outputs[i];
228228
if (detail::IsDuplicableVar(out_name)) {
@@ -231,7 +231,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
231231
"If custom operator's outputs contains `paddle::Vec("
232232
")` type, "
233233
"it only can hold one output."));
234-
auto vec_out = ctx.MultiOutput<Tensor>(out_name);
234+
auto vec_out = ctx.MultiOutput<phi::DenseTensor>(out_name);
235235
PADDLE_ENFORCE_NE(vec_out.empty(),
236236
true,
237237
platform::errors::NotFound(
@@ -253,7 +253,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
253253
}
254254
kernel_ctx.EmplaceBackOutputs(std::move(custom_vec_out));
255255
} else {
256-
auto* out = ctx.Output<Tensor>(out_name);
256+
auto* out = ctx.Output<phi::DenseTensor>(out_name);
257257
PADDLE_ENFORCE_NOT_NULL(out,
258258
platform::errors::NotFound(
259259
"Output tensor (%s) is nullptr.", out_name));
@@ -431,7 +431,7 @@ class CustomOperator : public OperatorWithKernel {
431431
*/
432432
framework::OpKernelType GetKernelTypeForVar(
433433
const std::string& var_name,
434-
const Tensor& tensor,
434+
const phi::DenseTensor& tensor,
435435
const OpKernelType& expected_kernel_type) const override {
436436
return OpKernelType(expected_kernel_type.data_type_,
437437
expected_kernel_type.place_,
@@ -511,7 +511,7 @@ class CustomOpMaker : public OpProtoAndCheckerMaker {
511511
AddComment(R"DOC(
512512
Custom Operator.
513513
514-
According to the Tensor operation function implemented by the user
514+
According to the phi::DenseTensor operation function implemented by the user
515515
independently of the framework, it is encapsulated into a framework
516516
operator to adapt to various execution scenarios such as dynamic graph,
517517
mode static graph mode, and inference mode.

paddle/fluid/framework/data_device_transform.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@ limitations under the License. */
1616
namespace paddle {
1717
namespace framework {
1818

19-
void TransDataDevice(const Tensor &in,
19+
void TransDataDevice(const phi::DenseTensor &in,
2020
const platform::Place &dst_place,
21-
Tensor *out) {
21+
phi::DenseTensor *out) {
2222
VLOG(3) << "DeviceTransform in, src_place " << in.place()
2323
<< " dst_place: " << dst_place;
2424

paddle/fluid/framework/data_device_transform.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,9 @@ limitations under the License. */
2121
namespace paddle {
2222
namespace framework {
2323

24-
void TransDataDevice(const Tensor& in,
24+
void TransDataDevice(const phi::DenseTensor& in,
2525
const platform::Place& dst_place,
26-
Tensor* out);
26+
phi::DenseTensor* out);
2727

2828
} // namespace framework
2929
} // namespace paddle

paddle/fluid/framework/data_device_transform_test.cu

+3-3
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ class TestOpWithKernel : public OperatorWithKernel {
5555
} else {
5656
VLOG(3) << "use default kernel";
5757
return OpKernelType(proto::VarType::FP32,
58-
ctx.Input<Tensor>("input")->place());
58+
ctx.Input<phi::DenseTensor>("input")->place());
5959
}
6060
}
6161
};
@@ -66,7 +66,7 @@ class TestKernel : public OpKernel<float> {
6666
void Compute(const ExecutionContext& ctx) const {
6767
std::cout << ctx.DebugString() << std::endl;
6868

69-
const Tensor* input = ctx.Input<Tensor>("input");
69+
const phi::DenseTensor* input = ctx.Input<phi::DenseTensor>("input");
7070

7171
std::cout << "input place:" << input->place() << std::endl;
7272
auto* output = ctx.Output<framework::LoDTensor>("output");
@@ -158,7 +158,7 @@ TEST(Operator, CPUtoGPU) {
158158
paddle::platform::DeviceContextPool::Instance();
159159
auto dev_ctx = pool.Get(cuda_place);
160160

161-
paddle::framework::Tensor output_tensor;
161+
phi::DenseTensor output_tensor;
162162
paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
163163
paddle::platform::CPUPlace(),
164164
*dev_ctx,

paddle/fluid/framework/data_feed.proto

+1-1
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ message Slot {
1919
required string type = 2;
2020
optional bool is_dense = 3 [ default = false ];
2121
optional bool is_used = 4 [ default = false ];
22-
repeated int32 shape = 5; // we can define N-D Tensor
22+
repeated int32 shape = 5; // we can define N-D phi::DenseTensor
2323
}
2424

2525
message MultiSlotDesc {

paddle/fluid/framework/data_layout_transform.cc

+8-7
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ void CastDataLayout::apply() {
5555

5656
void TransDataLayout(const OpKernelType& kernel_type_for_var,
5757
const OpKernelType& expected_kernel_type,
58-
const Tensor& in,
59-
Tensor* out) {
58+
const phi::DenseTensor& in,
59+
phi::DenseTensor* out) {
6060
PADDLE_ENFORCE(
6161
platform::places_are_same_class(kernel_type_for_var.place_,
6262
expected_kernel_type.place_),
@@ -97,7 +97,8 @@ using dnnl::memory;
9797
using dnnl::primitive;
9898
using dnnl::reorder;
9999

100-
void* GetDataFromTensor(const Tensor& tensor, dnnl::memory::data_type type) {
100+
void* GetDataFromTensor(const phi::DenseTensor& tensor,
101+
dnnl::memory::data_type type) {
101102
switch (type) {
102103
case dnnl::memory::data_type::f32:
103104
return platform::to_void_cast(tensor.data<float>());
@@ -117,8 +118,8 @@ void* GetDataFromTensor(const Tensor& tensor, dnnl::memory::data_type type) {
117118

118119
void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
119120
const OpKernelType& expected_kernel_type,
120-
const Tensor& in,
121-
Tensor* out) {
121+
const phi::DenseTensor& in,
122+
phi::DenseTensor* out) {
122123
auto in_layout = kernel_type_for_var.data_layout_;
123124
auto out_layout = expected_kernel_type.data_layout_;
124125
auto place = expected_kernel_type.place_;
@@ -139,8 +140,8 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
139140

140141
void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
141142
DataLayout out_layout,
142-
const Tensor& in,
143-
Tensor* out,
143+
const phi::DenseTensor& in,
144+
phi::DenseTensor* out,
144145
platform::Place place,
145146
bool always_copy) {
146147
// Set default as NCHW in case not specified

paddle/fluid/framework/data_layout_transform.h

+11-11
Original file line numberDiff line numberDiff line change
@@ -38,12 +38,12 @@ namespace framework {
3838
struct CastDataLayout {
3939
CastDataLayout(const platform::DeviceContext* ctx,
4040
const std::vector<int>& axis,
41-
const framework::Tensor& in,
42-
framework::Tensor* out)
41+
const phi::DenseTensor& in,
42+
phi::DenseTensor* out)
4343
: in_(in), out_(out), ctx_(ctx), axis_(axis) {}
4444

45-
const framework::Tensor in_;
46-
framework::Tensor* out_;
45+
const phi::DenseTensor in_;
46+
phi::DenseTensor* out_;
4747
const platform::DeviceContext* ctx_;
4848
const std::vector<int> axis_;
4949

@@ -101,26 +101,26 @@ inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
101101

102102
void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
103103
DataLayout out_layout,
104-
const Tensor& in,
105-
Tensor* out,
104+
const phi::DenseTensor& in,
105+
phi::DenseTensor* out,
106106
platform::Place place,
107107
bool always_copy = false);
108108

109109
void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
110110
const OpKernelType& expected_kernel_type,
111-
const Tensor& in,
112-
Tensor* out);
111+
const phi::DenseTensor& in,
112+
phi::DenseTensor* out);
113113

114-
void* GetDataFromTensor(const Tensor& tensor, MKLDNNDataType type);
114+
void* GetDataFromTensor(const phi::DenseTensor& tensor, MKLDNNDataType type);
115115

116116
#endif
117117

118118
std::vector<int> GetAxis(const DataLayout& from, const DataLayout& to);
119119

120120
void TransDataLayout(const OpKernelType& kernel_type_for_var,
121121
const OpKernelType& expected_kernel_type,
122-
const Tensor& in,
123-
Tensor* out);
122+
const phi::DenseTensor& in,
123+
phi::DenseTensor* out);
124124

125125
} // namespace framework
126126
} // namespace paddle

paddle/fluid/framework/data_layout_transform_test.cc

+4-4
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@
1818

1919
TEST(DataTransform, DataLayoutFunction) {
2020
auto place = paddle::platform::CPUPlace();
21-
paddle::framework::Tensor in = paddle::framework::Tensor();
22-
paddle::framework::Tensor out = paddle::framework::Tensor();
21+
phi::DenseTensor in = phi::DenseTensor();
22+
phi::DenseTensor out = phi::DenseTensor();
2323
in.mutable_data<double>(phi::make_ddim({2, 3, 1, 2}), place);
2424
in.set_layout(paddle::framework::DataLayout::kNHWC);
2525

@@ -48,7 +48,7 @@ TEST(DataTransform, DataLayoutFunction) {
4848
#ifdef PADDLE_WITH_MKLDNN
4949
TEST(DataTransformBf16, GetDataFromTensorDNNL) {
5050
auto place = paddle::platform::CPUPlace();
51-
paddle::framework::Tensor in = paddle::framework::Tensor();
51+
phi::DenseTensor in = phi::DenseTensor();
5252
in.mutable_data<paddle::platform::bfloat16>(phi::make_ddim({2, 3, 1, 2}),
5353
place);
5454

@@ -61,7 +61,7 @@ TEST(DataTransformBf16, GetDataFromTensorDNNL) {
6161

6262
TEST(DataTransformInt32, GetDataFromTensorDNNL) {
6363
auto place = paddle::platform::CPUPlace();
64-
paddle::framework::Tensor in = paddle::framework::Tensor();
64+
phi::DenseTensor in = phi::DenseTensor();
6565
in.mutable_data<int32_t>(phi::make_ddim({2, 3, 1, 2}), place);
6666

6767
void* in_data =

paddle/fluid/framework/data_transform.cc

+7-7
Original file line numberDiff line numberDiff line change
@@ -31,19 +31,19 @@ class Variable;
3131
namespace paddle {
3232
namespace framework {
3333

34-
static void PassTensorData(Tensor *from, Tensor *to) {
34+
static void PassTensorData(phi::DenseTensor *from, phi::DenseTensor *to) {
3535
to->ShareDataWith(*from);
36-
*from = Tensor();
36+
*from = phi::DenseTensor();
3737
}
3838

3939
void TransformData(const OpKernelType &expected_kernel_type,
4040
const OpKernelType &kernel_type_for_var,
41-
const Tensor &input_tensor,
42-
Tensor *output_tensor) {
41+
const phi::DenseTensor &input_tensor,
42+
phi::DenseTensor *output_tensor) {
4343
bool transformed = false;
44-
Tensor in;
44+
phi::DenseTensor in;
4545
in.ShareDataWith(input_tensor);
46-
Tensor out;
46+
phi::DenseTensor out;
4747
const DataLayout lin = kernel_type_for_var.data_layout_;
4848
const DataLayout lout = expected_kernel_type.data_layout_;
4949
// do layout transform
@@ -120,7 +120,7 @@ void TransformData(const OpKernelType &expected_kernel_type,
120120
}
121121

122122
void SetTensorToVariable(const Variable &in_var,
123-
const Tensor &tensor,
123+
const phi::DenseTensor &tensor,
124124
Variable *out_var) {
125125
if (in_var.IsType<LoDTensor>()) {
126126
auto &in_lod_tensor = in_var.Get<LoDTensor>();

paddle/fluid/framework/data_transform.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -35,14 +35,14 @@ class Variable;
3535

3636
void TransformData(const OpKernelType &expected_kernel_type,
3737
const OpKernelType &kernel_type_for_var,
38-
const Tensor &input_tensor,
39-
Tensor *out);
38+
const phi::DenseTensor &input_tensor,
39+
phi::DenseTensor *out);
4040

4141
/**
4242
* Set OutVar from InVar, except the tensor is shared with `tensor`
4343
*/
4444
void SetTensorToVariable(const Variable &in_var,
45-
const Tensor &tensor,
45+
const phi::DenseTensor &tensor,
4646
Variable *out_var);
4747
} // namespace framework
4848
} // namespace paddle

paddle/fluid/framework/data_type_test.cc

+2-4
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,12 @@
2020
#include "paddle/fluid/framework/tensor.h"
2121

2222
TEST(DataType, float16) {
23-
using paddle::framework::Tensor;
2423
using paddle::platform::CPUPlace;
2524
using paddle::platform::float16;
2625
namespace f = paddle::framework;
2726
f::proto::VarType::Type dtype = f::proto::VarType::FP16;
2827

29-
Tensor tensor;
28+
phi::DenseTensor tensor;
3029
CPUPlace cpu;
3130
tensor.mutable_data(cpu, f::TransToPhiDataType(dtype));
3231

@@ -43,13 +42,12 @@ TEST(DataType, float16) {
4342
}
4443

4544
TEST(DataType, bfloat16) {
46-
using paddle::framework::Tensor;
4745
using paddle::platform::bfloat16;
4846
using paddle::platform::CPUPlace;
4947
namespace f = paddle::framework;
5048
f::proto::VarType::Type dtype = f::proto::VarType::BF16;
5149

52-
Tensor tensor;
50+
phi::DenseTensor tensor;
5351
CPUPlace cpu;
5452
tensor.mutable_data(cpu, f::TransToPhiDataType(dtype));
5553

0 commit comments

Comments
 (0)