
Commit 1f28968

[NPU] Fix vector overflow in slice grad npu op (#34032)
* fix vector overflow
* refine code
* refine ut
1 parent fd85be8 commit 1f28968

File tree

2 files changed: +14 −14 lines changed

paddle/fluid/operators/slice_op_npu.cc
python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py


paddle/fluid/operators/slice_op_npu.cc

Lines changed: 6 additions & 6 deletions
@@ -25,15 +25,16 @@ namespace operators {
 
 using Tensor = framework::Tensor;
 
-void UpdateAttr(const framework::DDim in_dims, const std::vector<int> axes,
+void UpdateAttr(const framework::DDim& in_dims, const std::vector<int> axes,
                 const std::vector<int> starts, const std::vector<int> ends,
                 std::vector<int>* offsets, std::vector<int>* size) {
   int cnt = 0;
   for (int i = 0; i < in_dims.size(); ++i) {
     int start = 0;
     int end = in_dims[i];
-    int axis = axes[cnt];
-
+    // NOTE(zhiqiu): Becareful that cnt may > axes.size() and result in
+    // overflow.
+    int axis = cnt < static_cast<int>(axes.size()) ? axes[cnt] : -1;
     if (axis == i) {
       start = starts[cnt];
       if (start < 0) {
@@ -63,10 +64,10 @@ class SliceNPUKernel : public framework::OpKernel<T> {
     auto axes = ctx.Attr<std::vector<int>>("axes");
     auto starts = ctx.Attr<std::vector<int>>("starts");
     auto ends = ctx.Attr<std::vector<int>>("ends");
+    const auto& in_dims = input->dims();
 
     out->mutable_data<T>(ctx.GetPlace());
 
-    auto in_dims = input->dims();
     std::vector<int> offsets(in_dims.size());
     std::vector<int> size(in_dims.size());
 
@@ -93,8 +94,7 @@ class SliceGradNPUKernel : public framework::OpKernel<T> {
     auto axes = ctx.Attr<std::vector<int>>("axes");
     auto starts = ctx.Attr<std::vector<int>>("starts");
     auto ends = ctx.Attr<std::vector<int>>("ends");
-
-    auto in_dims = input->dims();
+    const auto& in_dims = input->dims();
     int rank = in_dims.size();
 
     std::vector<int> offsets(rank);
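The guard matters because UpdateAttr walks every dimension of the input, while axes, starts, and ends describe only the sliced dimensions, so cnt can reach axes.size() before the loop ends. Below is a minimal standalone sketch of the guarded loop; UpdateAttrSketch, its parameters, and the print are hypothetical stand-ins (not Paddle code), and it assumes cnt advances once per matched axis, as the real kernel's NOTE implies.

#include <iostream>
#include <vector>

// Hypothetical stand-in for UpdateAttr's indexing: in_dims is the full
// input shape, axes lists only the sliced dimensions, starts the offsets.
void UpdateAttrSketch(const std::vector<int>& in_dims,
                      const std::vector<int>& axes,
                      const std::vector<int>& starts) {
  int cnt = 0;
  for (int i = 0; i < static_cast<int>(in_dims.size()); ++i) {
    // Pre-fix code did `int axis = axes[cnt];` unconditionally. Once every
    // sliced axis has matched, cnt == axes.size() and that read is out of
    // bounds (undefined behavior, not necessarily a crash).
    int axis = cnt < static_cast<int>(axes.size()) ? axes[cnt] : -1;
    if (axis == i) {
      std::cout << "dim " << i << ": start " << starts[cnt] << "\n";
      ++cnt;  // assumption: cnt advances per matched axis, as in the kernel
    }
  }
}

int main() {
  // Mirrors the refined unit test: a 3-D input sliced only along axis 1.
  // Unguarded, i == 2 would read axes[1], one element past the end.
  UpdateAttrSketch({10, 5, 6}, {1}, {0});
  return 0;
}

Substituting -1 for the exhausted case is safe because -1 can never equal a valid dimension index i, so the remaining iterations fall through without touching starts or ends.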

python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py

Lines changed: 8 additions & 8 deletions
@@ -71,12 +71,12 @@ def test_check_grad_normal(self):
 
 class TestSliceOp2(TestSliceOp):
     def config(self):
-        self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
-        self.starts = [1, 0, -3]
-        self.ends = [3, 3, -1]
-        self.axes = [0, 1, 2]
-        self.infer_flags = [1, 1, 1]
-        self.out = self.input[1:3, 0:3, -3:-1, :]
+        self.input = np.random.random([10, 5, 6]).astype(self.dtype)
+        self.starts = [0]
+        self.ends = [1]
+        self.axes = [1]
+        self.infer_flags = [1]
+        self.out = self.input[:, 0:1, :]
 
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
@@ -118,8 +118,8 @@ def _test(self, run_npu=True):
 
         prediction = paddle.static.nn.fc(z, size=2, activation='softmax')
 
-        cost = paddle.nn.functional.cross_entropy(
-            input=prediction, label=label)
+        cost = paddle.fluid.layers.softmax_with_cross_entropy(
+            logits=prediction, label=label)
         loss = paddle.mean(cost)
         sgd = paddle.optimizer.SGD(learning_rate=0.01)
         sgd.minimize(loss)
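The reworked TestSliceOp2 config is what exercises the overflow path: with an input shape of [10, 5, 6] and axes = [1], the UpdateAttr loop matches at i = 1 and advances cnt to 1, equal to the length of axes; at i = 2 the unguarded code would read axes[1], one element past the end, while the guarded version substitutes -1 and leaves that dimension unsliced.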
