
Commit 564d995

add unique_consecutive_op

1 parent 79cea06 · commit 564d995

3 files changed · +0 additions, −31 deletions

paddle/fluid/operators/unique_consecutive_op.cu

Lines changed: 0 additions & 14 deletions
@@ -33,10 +33,8 @@ template <typename InT>
 struct BinaryEqual {
   int64_t col;
   const InT* in_trans_data;
-
   BinaryEqual(int64_t _col, const InT* _in_trans_data)
       : col(_col), in_trans_data(_in_trans_data) {}
-
   __device__ bool operator()(int64_t a, int64_t b) const {
     for (int64_t i = 0; i < col; ++i) {
       InT lhs = in_trans_data[i + a * col];
@@ -54,10 +52,8 @@ template <typename InT>
 struct BinaryNotEqual {
   int64_t col;
   const InT* in_trans_data;
-
   BinaryNotEqual(int64_t _col, const InT* _in_trans_data)
       : col(_col), in_trans_data(_in_trans_data) {}
-
   __device__ bool operator()(int64_t a, int64_t b) const {
     for (int64_t i = 0; i < col; ++i) {
       InT lhs = in_trans_data[i + a * col];
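For context: BinaryEqual and BinaryNotEqual treat in_trans_data as a row-major (n, col) buffer and compare two rows element by element. A rough NumPy sketch of the same predicate (the function name is illustrative, not from the diff):

import numpy as np

def rows_equal(in_trans_data, col, a, b):
    # Same access pattern as BinaryEqual: row r occupies
    # in_trans_data[r * col : (r + 1) * col].
    lhs = in_trans_data[a * col:(a + 1) * col]
    rhs = in_trans_data[b * col:(b + 1) * col]
    return bool(np.array_equal(lhs, rhs))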
@@ -83,23 +79,19 @@ void IndexSelect(const framework::ExecutionContext& context,
   for (auto i = dim + 1; i < input_dim_size; i++) {
     slice_size *= input_dim[i];
   }
-
   auto input_width = slice_size * input_dim[dim];
   auto output_width = slice_size * output_dim[dim];

   auto outer_nums = 1;
   for (auto i = 0; i < dim; i++) {
     outer_nums *= input_dim[i];
   }
-
   auto index_size = index.dims()[0];
-
   std::vector<InT> input_vec;
   std::vector<IndexT> index_vec;
   TensorToVector(input, context.device_context(), &input_vec);
   TensorToVector(index, context.device_context(), &index_vec);
   std::vector<InT> out_vec(output->numel());
-
   for (int i = 0; i < index_size; i++) {
     PADDLE_ENFORCE_GE(
         index_vec[i], 0,
@@ -116,7 +108,6 @@ void IndexSelect(const framework::ExecutionContext& context,
         "value.",
         input_dim[dim], index_vec[i]));
   }
-
   for (auto i = 0; i < outer_nums; i++) {
     auto input_start_offset = i * input_width;
     auto output_start_offset = i * output_width;
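For context: the loop above is a host-side gather of slices along dim, addressed in outer_nums blocks of input_width elements, after bounds-checking every index. A rough NumPy equivalent (illustrative only):

import numpy as np

def index_select(x, index, dim):
    # Gather the slices of x along `dim` at the positions in `index`,
    # mirroring the bounds checks and copy loop above.
    index = np.asarray(index)
    assert ((0 <= index) & (index < x.shape[dim])).all()
    return np.take(x, index, axis=dim)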
@@ -144,14 +135,12 @@ static void UniqueConsecutiveFlattendCUDATensor(
   Tensor in_hat;
   framework::TensorCopy(in, context.GetPlace(), &in_hat);
   auto in_data_hat = in_hat.mutable_data<InT>(context.GetPlace());
-
   Tensor sorted_indices;
   sorted_indices.Resize(framework::make_ddim({num_input}));
   auto sorted_indices_data =
       sorted_indices.mutable_data<IndexT>(context.GetPlace());
   thrust::sequence(thrust::device, sorted_indices_data,
                    sorted_indices_data + num_input);
-
   // 1. Calculate op result: 'out'
   Tensor range;
   range.Resize(framework::make_ddim({num_input + 1}));
@@ -166,7 +155,6 @@ static void UniqueConsecutiveFlattendCUDATensor(
               .first -
           out_data;
   out->Resize(framework::make_ddim({num_out}));
-
   // 2. Calculate inverse index: 'inverse'
   if (return_inverse) {
     Tensor* inverse = context.Output<Tensor>("Index");
@@ -186,7 +174,6 @@ static void UniqueConsecutiveFlattendCUDATensor(
                    inv_loc_data_ptr + num_input, sorted_indices_data,
                    inverse_data);
   }
-
   // 3. Calculate 'counts'
   if (return_counts) {
     Tensor* counts = context.Output<Tensor>("Counts");
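For context: the three numbered steps above reduce to run-length logic on the flattened input. A rough NumPy sketch of what out, inverse, and counts contain, assuming a non-empty 1-D array (illustrative, not the PaddlePaddle API):

import numpy as np

def unique_consecutive_flat(x):
    # change[i] marks the start of a new run of equal values.
    change = np.empty(x.size, dtype=bool)
    change[0] = True
    change[1:] = x[1:] != x[:-1]
    out = x[change]                     # step 1: first element of each run
    inverse = np.cumsum(change) - 1     # step 2: run id for every input element
    starts = np.flatnonzero(np.append(change, True))
    counts = np.diff(starts)            # step 3: length of each run
    return out, inverse, counts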
@@ -224,7 +211,6 @@ static void ComputeUniqueConsecutiveDims(
                          inv_loc_data_ptr + row, inv_loc_data_ptr);
   thrust::scatter(thrust::device, inv_loc_data_ptr, inv_loc_data_ptr + row,
                   sorted_indices_data, inverse_data);
-
   // 2. sorted indices
   Tensor range;
   range.Resize(framework::make_ddim({row + 1}));
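For context: the inclusive scan plus scatter above turns per-row "differs from previous row" flags into run ids and writes them back through sorted_indices (which this op fills with thrust::sequence, so order is preserved rather than sorted). A rough NumPy sketch of those two thrust calls, assuming the first flag is already zeroed:

import numpy as np

def inverse_from_flags(not_equal_prev, sorted_indices):
    # not_equal_prev[0] is assumed 0; cumsum plays the inclusive scan.
    inv_loc = np.cumsum(not_equal_prev)
    inverse = np.empty_like(inv_loc)
    inverse[sorted_indices] = inv_loc   # the thrust::scatter step
    return inverse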

paddle/fluid/operators/unique_consecutive_op.h

Lines changed: 0 additions & 6 deletions
@@ -149,7 +149,6 @@ static void UniqueConsecutiveDim(const framework::ExecutionContext& context,
            in_trans_data + static_cast<int64_t>(sorted_indices_vec[i]) * col,
            col * sizeof(InT));
   }
-
   std::vector<framework::Tensor> input_unbind = Unbind(input_sorted);
   std::vector<IndexT> inverse_vec(sorted_indices_vec.size(), 0);
   std::vector<IndexT> counts_vec(sorted_indices_vec.size(), 0);
@@ -172,12 +171,10 @@ static void UniqueConsecutiveDim(const framework::ExecutionContext& context,
   concat_functor(dev_ctx, input_unbind, 0, &out_trans);
   TransCompute<DeviceContext, InT>(out_trans.dims().size(), dev_ctx, out_trans,
                                    out, permute);
-
   if (return_inverse) {
     auto* inverse = context.Output<framework::Tensor>("Index");
     framework::TensorFromVector(inverse_vec, context.device_context(), inverse);
   }
-
   if (return_counts) {
     auto* count = context.Output<framework::Tensor>("Counts");
     framework::TensorFromVector(counts_vec, context.device_context(), count);
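For context: UniqueConsecutiveDim transposes axis to the front, drops consecutive duplicate slices, concatenates what is kept, and transposes back, which is what the concat_functor/TransCompute pair above finishes. A rough NumPy sketch of that control flow (the function name is illustrative):

import numpy as np

def unique_consecutive_dim(x, axis):
    # Move `axis` to the front so each slice along it becomes one row.
    perm = [axis] + [d for d in range(x.ndim) if d != axis]
    front = x.transpose(perm)
    rows = front.reshape(front.shape[0], -1)
    keep = np.ones(rows.shape[0], dtype=bool)
    keep[1:] = np.any(rows[1:] != rows[:-1], axis=1)  # BinaryNotEqual over rows
    # Keep the first slice of each run, then restore the original layout.
    return front[keep].transpose(np.argsort(perm))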
@@ -233,7 +230,6 @@ struct UniqueConsecutiveDimFunctor {
         ctx_, in_, out_, return_inverse_, return_counts_, axis_);
   }
 };
-
 template <typename DeviceContext, typename T>
 class UniqueConsecutiveKernel : public framework::OpKernel<T> {
  public:
@@ -251,7 +247,6 @@ class UniqueConsecutiveKernel : public framework::OpKernel<T> {
               "int64.",
               x->numel()));
     }
-
     std::vector<int> axis_vec = context.Attr<std::vector<int>>("axis");
     bool return_inverse = context.Attr<bool>("return_inverse");
     bool return_counts = context.Attr<bool>("return_counts");
@@ -269,6 +264,5 @@ class UniqueConsecutiveKernel : public framework::OpKernel<T> {
     }
   }
 };
-
 }  // namespace operators
 }  // namespace paddle

python/paddle/tensor/manipulation.py

Lines changed: 0 additions & 11 deletions
@@ -718,24 +718,20 @@ def unique_consecutive(x,
                        name=None):
     r"""
     Eliminates all but the first element from every consecutive group of equivalent elements.
-
     .. note:: This function is different from :func:`paddle.unique` in the sense that this function
         only eliminates consecutive duplicate values. This semantics is similar to `std::unique`
         in C++.
-
     Args:
         x(Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
         return_inverse(bool, optional): If True, also return the indices for where elements in
             the original input ended up in the returned unique consecutive tensor.
         return_counts(bool, optional): If True, also return the counts for each unique consecutive element.
         axis(int, optional): The axis to apply unique consecutive. If None, the input will be flattened.
             Default: None.
-
     Returns:
         tuple: (out, inverse, counts). `out` is the unique consecutive tensor for `x`. `inverse` is \
             provided only if `return_inverse` is True. `counts` is provided only if `return_counts` \
             is True.
-
     Example:
         .. code-block:: python

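For context, a minimal usage sketch consistent with the docstring above (outputs worked out by hand from the documented semantics):

import paddle

x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
out = paddle.unique_consecutive(x)
# out -> [1, 2, 3, 1, 2]: only *consecutive* duplicates collapse,
# unlike paddle.unique, which would also merge the trailing 1s and 2.

out, inverse, counts = paddle.unique_consecutive(
    x, return_inverse=True, return_counts=True)
# inverse -> [0, 0, 1, 1, 2, 3, 3, 4]
# counts  -> [2, 2, 1, 2, 1]

With no optional outputs requested a single Tensor is returned; otherwise a tuple, matching the return logic in the hunks below.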
@@ -774,12 +770,9 @@ def unique_consecutive(x,
             outs.append(inverse)
         if return_counts:
             outs.append(counts)
-
         if len(outs) == 1:
             return outs[0]
-
         return tuple(outs)
-
     check_variable_and_dtype(x, "input",
                              ['float32', 'float64', 'int32', 'int64'],
                              'unique_consecutive')
@@ -788,7 +781,6 @@ def unique_consecutive(x,
     check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive')
     if len(axis) != 0:
         check_type(axis[0], 'axis', int, 'unique_consecutive')
-
     helper = LayerHelper('unique_consecutive', **locals())
     attrs = {
         'dtype': attr_dtype,
@@ -808,16 +800,13 @@ def unique_consecutive(x,
         outs.append(inverse)
     if return_counts:
         outs.append(counts)
-
     helper.append_op(
         type="unique_consecutive",
         inputs={"X": x},
         attrs=attrs,
         outputs=outputs)
-
     if len(outs) == 1:
         return outs[0]
-
     return tuple(outs)
