Skip to content

Commit 521867b

Browse files
Merge pull request #1210 from IntelPython/feature/sum-reduction
Implement `dpctl.tensor.sum` reduction operation
2 parents cf431d7 + fcd0935 commit 521867b

File tree

10 files changed

+1949
-1
lines changed

10 files changed

+1949
-1
lines changed

dpctl/tensor/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ pybind11_add_module(${python_module_name} MODULE
4747
${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_reductions.cpp
4848
${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/device_support_queries.cpp
4949
${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions.cpp
50+
${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/sum_reductions.cpp
5051
)
5152
set(_clang_prefix "")
5253
if (WIN32)

dpctl/tensor/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,7 @@
104104
sqrt,
105105
subtract,
106106
)
107+
from ._reduction import sum
107108

108109
__all__ = [
109110
"Device",
@@ -191,4 +192,5 @@
191192
"multiply",
192193
"subtract",
193194
"equal",
195+
"sum",
194196
]

dpctl/tensor/_reduction.py

Lines changed: 167 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,167 @@
1+
# Data Parallel Control (dpctl)
2+
#
3+
# Copyright 2020-2023 Intel Corporation
4+
#
5+
# Licensed under the Apache License, Version 2.0 (the "License");
6+
# you may not use this file except in compliance with the License.
7+
# You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
17+
from numpy.core.numeric import normalize_axis_tuple
18+
19+
import dpctl
20+
import dpctl.tensor as dpt
21+
import dpctl.tensor._tensor_impl as ti
22+
23+
from ._type_utils import _to_device_supported_dtype
24+
25+
26+
def _default_reduction_dtype(inp_dt, q):
    """Gives default output data type for given input data
    type `inp_dt` when reduction is performed on queue `q`
    """
    kind = inp_dt.kind
    if kind in "bi":
        # boolean/signed-int inputs accumulate in the device default int type,
        # unless the input type is already wider
        out_dt = dpt.dtype(ti.default_device_int_type(q))
        if inp_dt.itemsize > out_dt.itemsize:
            out_dt = inp_dt
    elif kind in "u":
        # unsigned: device default int type, upper-cased to its unsigned variant
        out_dt = dpt.dtype(ti.default_device_int_type(q).upper())
        out_info = dpt.iinfo(out_dt)
        in_info = dpt.iinfo(inp_dt)
        # keep the input type whenever its value range does not fit
        if not (in_info.min >= out_info.min and in_info.max <= out_info.max):
            out_dt = inp_dt
    elif kind in "f":
        # real floating point: device default fp type, never narrower than input
        out_dt = dpt.dtype(ti.default_device_fp_type(q))
        if out_dt.itemsize < inp_dt.itemsize:
            out_dt = inp_dt
    elif kind in "c":
        # complex: device default complex type, never narrower than input
        out_dt = dpt.dtype(ti.default_device_complex_type(q))
        if out_dt.itemsize < inp_dt.itemsize:
            out_dt = inp_dt

    return out_dt
54+
55+
def sum(arr, axis=None, dtype=None, keepdims=False):
    """sum(x, axis=None, dtype=None, keepdims=False)

    Calculates the sum of the input array `x`.

    Args:
        x (usm_ndarray):
            input array.
        axis (Optional[int, Tuple[int,...]]):
            axis or axes along which sums must be computed. If a tuple
            of unique integers, sums are computed over multiple axes.
            If `None`, the sum is computed over the entire array.
            Default: `None`.
        dtype (Optional[dtype]):
            data type of the returned array. If `None`, the default data
            type is inferred from the "kind" of the input array data type.
                * If `x` has a real-valued floating-point data type,
                  the returned array will have the default real-valued
                  floating-point data type for the device where input
                  array `x` is allocated.
                * If `x` has signed integral data type, the returned array
                  will have the default signed integral type for the device
                  where input array `x` is allocated.
                * If `x` has unsigned integral data type, the returned array
                  will have the default unsigned integral type for the device
                  where input array `x` is allocated.
                * If `x` has a complex-valued floating-point data type,
                  the returned array will have the default complex-valued
                  floating-point data type for the device where input
                  array `x` is allocated.
                * If `x` has a boolean data type, the returned array will
                  have the default signed integral type for the device
                  where input array `x` is allocated.
            If the data type (either specified or resolved) differs from the
            data type of `x`, the input array elements are cast to the
            specified data type before computing the sum. Default: `None`.
        keepdims (Optional[bool]):
            if `True`, the reduced axes (dimensions) are included in the result
            as singleton dimensions, so that the returned array remains
            compatible with the input arrays according to Array Broadcasting
            rules. Otherwise, if `False`, the reduced axes are not included in
            the returned array. Default: `False`.
    Returns:
        usm_ndarray:
            an array containing the sums. If the sum was computed over the
            entire array, a zero-dimensional array is returned. The returned
            array has the data type as described in the `dtype` parameter
            description above.
    """
    if not isinstance(arr, dpt.usm_ndarray):
        raise TypeError(f"Expected dpctl.tensor.usm_ndarray, got {type(arr)}")
    nd = arr.ndim
    if axis is None:
        axis = tuple(range(nd))
    if not isinstance(axis, (tuple, list)):
        axis = (axis,)
    axis = normalize_axis_tuple(axis, nd, "axis")
    red_nd = len(axis)
    # Move reduced axes to the end so the kernel can reduce trailing dims.
    perm = [i for i in range(nd) if i not in axis] + list(axis)
    arr2 = dpt.permute_dims(arr, perm)
    res_shape = arr2.shape[: nd - red_nd]
    q = arr.sycl_queue
    inp_dt = arr.dtype
    if dtype is None:
        res_dt = _default_reduction_dtype(inp_dt, q)
    else:
        res_dt = dpt.dtype(dtype)
        res_dt = _to_device_supported_dtype(res_dt, q.sycl_device)

    res_usm_type = arr.usm_type
    if red_nd == 0:
        # NOTE(review): an empty `axis` tuple yields an all-zeros result here,
        # whereas NumPy returns the (cast) input unchanged — confirm intended.
        return dpt.zeros(
            res_shape, dtype=res_dt, usm_type=res_usm_type, sycl_queue=q
        )

    host_tasks_list = []
    if ti._sum_over_axis_dtype_supported(inp_dt, res_dt, res_usm_type, q):
        # Reduce directly into the requested/derived output type.
        res = dpt.empty(
            res_shape, dtype=res_dt, usm_type=res_usm_type, sycl_queue=q
        )
        ht_e, _ = ti._sum_over_axis(
            src=arr2, trailing_dims_to_reduce=red_nd, dst=res, sycl_queue=q
        )
        host_tasks_list.append(ht_e)
    else:
        if dtype is None:
            raise RuntimeError(
                "Automatically determined reduction data type does not "
                "have direct implementation"
            )
        # Requested dtype lacks a direct kernel: accumulate in the default
        # reduction type, then cast-copy into the requested type.
        tmp_dt = _default_reduction_dtype(inp_dt, q)
        tmp = dpt.empty(
            res_shape, dtype=tmp_dt, usm_type=res_usm_type, sycl_queue=q
        )
        ht_e_tmp, r_e = ti._sum_over_axis(
            src=arr2, trailing_dims_to_reduce=red_nd, dst=tmp, sycl_queue=q
        )
        host_tasks_list.append(ht_e_tmp)
        res = dpt.empty(
            res_shape, dtype=res_dt, usm_type=res_usm_type, sycl_queue=q
        )
        ht_e, _ = ti._copy_usm_ndarray_into_usm_ndarray(
            src=tmp, dst=res, sycl_queue=q, depends=[r_e]
        )
        host_tasks_list.append(ht_e)

    if keepdims:
        # Restore reduced axes as singleton dims and undo the permutation.
        res_shape = res_shape + (1,) * red_nd
        inv_perm = sorted(range(nd), key=lambda d: perm[d])
        res = dpt.permute_dims(dpt.reshape(res, res_shape), inv_perm)
    # Block until host-task cleanup events complete before returning.
    dpctl.SyclEvent.wait_for(host_tasks_list)

    return res

dpctl/tensor/libtensor/include/kernels/boolean_reductions.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ struct SequentialBooleanReduction
183183
void operator()(sycl::id<1> id) const
184184
{
185185

186-
auto inp_out_iter_offsets_ = inp_out_iter_indexer_(id[0]);
186+
auto const &inp_out_iter_offsets_ = inp_out_iter_indexer_(id[0]);
187187
const py::ssize_t &inp_iter_offset =
188188
inp_out_iter_offsets_.get_first_offset();
189189
const py::ssize_t &out_iter_offset =

0 commit comments

Comments
 (0)