
Commit f84e366

Fixed merge issues
1 parent f5c2e22 commit f84e366

3 files changed: +7 -7 lines changed


paddle/fluid/inference/lite/test_tensor_utils.cc

Lines changed: 2 additions & 2 deletions
@@ -122,7 +122,7 @@ void test_tensor_copy(const platform::DeviceContext& ctx) {
   }
 #endif
   std::vector<float> result;
-  TensorToVector(lod_tensor_n, ctx, &result);
+  paddle::framework::TensorToVector(lod_tensor_n, ctx, &result);
   ASSERT_EQ(result, vector);
   ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
 }
@@ -142,7 +142,7 @@ void test_tensor_share(const platform::DeviceContext& ctx) {
   framework::LoDTensor lod_tensor_n;
   TensorCopyAsync(&lod_tensor_n, lite_api_tensor, ctx);
   std::vector<float> result;
-  TensorToVector(lod_tensor_n, ctx, &result);
+  paddle::framework::TensorToVector(lod_tensor_n, ctx, &result);
   ASSERT_EQ(result, vector);
   ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
 }
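The two hunks above spell TensorToVector with its full paddle::framework:: path. A plausible motivation (an assumption, not stated in the commit message) is that the merge left the unqualified name resolvable to more than one candidate in this test file, which a fully qualified call avoids. A minimal standalone sketch, using made-up namespaces rather than Paddle's real headers, of how an unqualified call can become ambiguous and how qualification resolves it:

#include <vector>

// Hypothetical stand-ins for two libraries that both export ToVector().
namespace framework {
template <typename T>
void ToVector(const std::vector<T>& src, std::vector<T>* dst) { *dst = src; }
}  // namespace framework

namespace lite {
template <typename T>
void ToVector(const std::vector<T>& src, std::vector<T>* dst) { *dst = src; }
}  // namespace lite

using namespace framework;  // both namespaces visible, e.g. after a merge
using namespace lite;

int main() {
  std::vector<float> src{1.f, 2.f}, dst;
  // ToVector(src, &dst);          // would not compile: ambiguous call
  framework::ToVector(src, &dst);  // fully qualified call is unambiguous
  return dst == src ? 0 : 1;
}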

paddle/fluid/platform/device/npu/npu_op_runner.cc

Lines changed: 4 additions & 4 deletions
@@ -231,7 +231,7 @@ NpuOpRunner &NpuOpRunner::AddInput(std::vector<int32_t> &&dims) {
   auto *dev_ctx =
       static_cast<platform::CPUDeviceContext *>(pool.Get(platform::CPUPlace()));
   Tensor host_tensor;
-  TensorFromVector(dims, *dev_ctx, &host_tensor);
+  paddle::framework::TensorFromVector(dims, *dev_ctx, &host_tensor);
   host_tensors_.emplace_back(host_tensor);
 
   // create aclTensorDesc
@@ -247,7 +247,7 @@ NpuOpRunner &NpuOpRunner::AddInput(std::vector<int64_t> &&dims) {
   auto *dev_ctx =
       static_cast<platform::CPUDeviceContext *>(pool.Get(platform::CPUPlace()));
   Tensor host_tensor;
-  TensorFromVector(dims, *dev_ctx, &host_tensor);
+  paddle::framework::TensorFromVector(dims, *dev_ctx, &host_tensor);
   host_tensors_.emplace_back(host_tensor);
 
   // create aclTensorDesc
@@ -263,7 +263,7 @@ NpuOpRunner &NpuOpRunner::AddInput(std::vector<float> &&values) {
   auto *dev_ctx =
       static_cast<platform::CPUDeviceContext *>(pool.Get(platform::CPUPlace()));
   Tensor host_tensor;
-  TensorFromVector(values, *dev_ctx, &host_tensor);
+  paddle::framework::TensorFromVector(values, *dev_ctx, &host_tensor);
   host_tensors_.emplace_back(host_tensor);
 
   // create aclTensorDesc
@@ -279,7 +279,7 @@ NpuOpRunner &NpuOpRunner::AddInput(std::vector<double> &&values) {
   auto *dev_ctx =
       static_cast<platform::CPUDeviceContext *>(pool.Get(platform::CPUPlace()));
   Tensor host_tensor;
-  TensorFromVector(values, *dev_ctx, &host_tensor);
+  paddle::framework::TensorFromVector(values, *dev_ctx, &host_tensor);
   host_tensors_.emplace_back(host_tensor);
 
   // create aclTensorDesc
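The same fix is applied to all four AddInput overloads: each TensorFromVector call is now written with its full paddle::framework:: path. As a hedged aside, an alternative pattern (not what this commit does) is a single using-declaration that pins the name to one namespace for the whole file instead of qualifying every call site; the toy example below uses an invented FromVector helper to illustrate:

#include <vector>

// Invented stand-in for a framework-provided FromVector() helper.
namespace framework {
template <typename T>
void FromVector(const std::vector<T>& src, std::vector<T>* dst) { *dst = src; }
}  // namespace framework

// One using-declaration instead of qualifying every call site.
using framework::FromVector;

int main() {
  std::vector<double> values{0.5, 1.5};
  std::vector<double> host;
  FromVector(values, &host);  // resolves to framework::FromVector
  return host.size() == 2 ? 0 : 1;
}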

paddle/pten/kernels/gpu/elementwise.h

Lines changed: 1 addition & 1 deletion
@@ -854,7 +854,7 @@ void LaunchElementwiseCudaKernel(const KPDevice &ctx,
     dims_size.emplace_back(in->dims().size());
   }
   if (no_broadcast_flag) {
-    ptem::LaunchSameDimsElementwiseCudaKernel<ET, InT, OutT, Functor, NumOuts>(
+    pten::LaunchSameDimsElementwiseCudaKernel<ET, InT, OutT, Functor, NumOuts>(
         ctx, ins, outs, func);
   } else {
     axis = axis == -1
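The elementwise.h hunk corrects a misspelled namespace qualifier, ptem:: to pten::. Assuming no ptem namespace exists elsewhere in the tree, the typo would surface as a compile error rather than a behavior change, so the fix is purely mechanical; a tiny illustration with toy names:

namespace pten {
inline int LaunchKernel(int x) { return 2 * x; }
}  // namespace pten

int main() {
  // return ptem::LaunchKernel(3);  // would not compile: 'ptem' is not declared
  return pten::LaunchKernel(3) == 6 ? 0 : 1;  // corrected qualifier
}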
