Implement dpnp.nancumprod() through dpnp.cumprod call (#1812)
* Implement dpnp.cumprod through dpctl.tensor

* Implement dpnp.nancumprod() through existing calls

* Applied review comments
antonwolfy authored May 10, 2024
1 parent a079815 commit 5d94ca8
Showing 9 changed files with 50 additions and 157 deletions.
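In short, dpnp.nancumprod() no longer needs a dedicated backend kernel: NaNs are substituted with ones and the cumulative product is delegated to the dpctl.tensor-backed dpnp.cumprod(). A minimal sketch of the resulting behaviour using only public dpnp calls (the actual change below goes through the internal _replace_nan helper):

import dpnp as np

a = np.array([[1.0, 2.0], [3.0, np.nan]])

# Treat NaNs as one so they do not change the running product ...
cleaned = np.where(np.isnan(a), 1.0, a)

# ... then delegate to the regular cumulative product (flattened when axis is None).
print(np.cumprod(cleaned))  # array([1., 2., 6., 6.]), matching the updated docstring example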
2 changes: 0 additions & 2 deletions dpnp/backend/include/dpnp_iface_fptr.hpp
@@ -105,8 +105,6 @@ enum class DPNPFuncName : size_t
DPNP_FN_COV, /**< Used in numpy.cov() impl */
DPNP_FN_CROSS, /**< Used in numpy.cross() impl */
DPNP_FN_CUMPROD, /**< Used in numpy.cumprod() impl */
DPNP_FN_CUMPROD_EXT, /**< Used in numpy.cumprod() impl, requires extra
parameters */
DPNP_FN_CUMSUM, /**< Used in numpy.cumsum() impl */
DPNP_FN_DEGREES, /**< Used in numpy.degrees() impl */
DPNP_FN_DEGREES_EXT, /**< Used in numpy.degrees() impl, requires extra
17 changes: 0 additions & 17 deletions dpnp/backend/kernels/dpnp_krnl_mathematical.cpp
@@ -364,14 +364,6 @@ template <typename _DataType_input, typename _DataType_output>
void (*dpnp_cumprod_default_c)(void *, void *, size_t) =
dpnp_cumprod_c<_DataType_input, _DataType_output>;

template <typename _DataType_input, typename _DataType_output>
DPCTLSyclEventRef (*dpnp_cumprod_ext_c)(DPCTLSyclQueueRef,
void *,
void *,
size_t,
const DPCTLEventVectorRef) =
dpnp_cumprod_c<_DataType_input, _DataType_output>;

template <typename _KernelNameSpecialization1,
typename _KernelNameSpecialization2>
class dpnp_cumsum_c_kernel;
@@ -1153,15 +1145,6 @@ void func_map_init_mathematical(func_map_t &fmap)
fmap[DPNPFuncName::DPNP_FN_CUMPROD][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_cumprod_default_c<double, double>};

fmap[DPNPFuncName::DPNP_FN_CUMPROD_EXT][eft_INT][eft_INT] = {
eft_LNG, (void *)dpnp_cumprod_ext_c<int32_t, int64_t>};
fmap[DPNPFuncName::DPNP_FN_CUMPROD_EXT][eft_LNG][eft_LNG] = {
eft_LNG, (void *)dpnp_cumprod_ext_c<int64_t, int64_t>};
fmap[DPNPFuncName::DPNP_FN_CUMPROD_EXT][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_cumprod_ext_c<float, float>};
fmap[DPNPFuncName::DPNP_FN_CUMPROD_EXT][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_cumprod_ext_c<double, double>};

fmap[DPNPFuncName::DPNP_FN_CUMSUM][eft_INT][eft_INT] = {
eft_LNG, (void *)dpnp_cumsum_default_c<int32_t, int64_t>};
fmap[DPNPFuncName::DPNP_FN_CUMSUM][eft_LNG][eft_LNG] = {
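The DPNP_FN_CUMPROD_EXT dispatch entries removed above were the hooks for the legacy backend path; once dpnp.cumprod is routed through dpctl.tensor (the first commit-message bullet), nothing calls them. A rough sketch of the accumulation call this now rests on, assuming dpctl.tensor exposes cumulative_prod (an assumption, not something shown in this diff):

import dpctl.tensor as dpt

x = dpt.asarray([1, 2, 3, 4])
# Cumulative product computed by dpctl.tensor's accumulation kernel;
# dpnp.cumprod layers dtype/out handling on top of a call like this.
print(dpt.cumulative_prod(x))  # -> [ 1  2  6 24]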
4 changes: 0 additions & 4 deletions dpnp/dpnp_algo/dpnp_algo.pxd
@@ -37,7 +37,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na
DPNP_FN_CHOOSE_EXT
DPNP_FN_COPY_EXT
DPNP_FN_CORRELATE_EXT
DPNP_FN_CUMPROD_EXT
DPNP_FN_DEGREES_EXT
DPNP_FN_DIAG_INDICES_EXT
DPNP_FN_DIAGONAL_EXT
@@ -127,9 +126,6 @@ cdef extern from "dpnp_iface.hpp":


# C function pointer to the C library template functions
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_1in_1out_t)(c_dpctl.DPCTLSyclQueueRef,
void *, void * , size_t,
const c_dpctl.DPCTLEventVectorRef)
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_1in_1out_strides_t)(c_dpctl.DPCTLSyclQueueRef,
void *, const size_t, const size_t,
const shape_elem_type * , const shape_elem_type * ,
50 changes: 0 additions & 50 deletions dpnp/dpnp_algo/dpnp_algo.pyx
@@ -147,56 +147,6 @@ cdef dpnp_DPNPFuncType_to_dtype(size_t type):
utils.checker_throw_type_error("dpnp_DPNPFuncType_to_dtype", type)


cdef utils.dpnp_descriptor call_fptr_1in_1out(DPNPFuncName fptr_name,
utils.dpnp_descriptor x1,
shape_type_c result_shape,
utils.dpnp_descriptor out=None,
func_name=None):

""" Convert type (x1.dtype) to C enum DPNPFuncType """
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype)

""" get the FPTR data structure """
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(fptr_name, param1_type, param1_type)

result_type = dpnp_DPNPFuncType_to_dtype( < size_t > kernel_data.return_type)

cdef utils.dpnp_descriptor result

if out is None:
""" Create result array with type given by FPTR data """
x1_obj = x1.get_array()
result = utils.create_output_descriptor(result_shape,
kernel_data.return_type,
None,
device=x1_obj.sycl_device,
usm_type=x1_obj.usm_type,
sycl_queue=x1_obj.sycl_queue)
else:
if out.dtype != result_type:
utils.checker_throw_value_error(func_name, 'out.dtype', out.dtype, result_type)
if out.shape != result_shape:
utils.checker_throw_value_error(func_name, 'out.shape', out.shape, result_shape)

result = out

utils.get_common_usm_allocation(x1, result) # check USM allocation is common

result_sycl_queue = result.get_array().sycl_queue

cdef c_dpctl.SyclQueue q = <c_dpctl.SyclQueue> result_sycl_queue
cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref()

cdef fptr_1in_1out_t func = <fptr_1in_1out_t > kernel_data.ptr

cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, x1.get_data(), result.get_data(), x1.size, NULL)

with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref)
c_dpctl.DPCTLEvent_Delete(event_ref)

return result


cdef utils.dpnp_descriptor call_fptr_1in_1out_strides(DPNPFuncName fptr_name,
utils.dpnp_descriptor x1,
object dtype=None,
26 changes: 0 additions & 26 deletions dpnp/dpnp_algo/dpnp_algo_mathematical.pxi
@@ -43,7 +43,6 @@ __all__ += [
"dpnp_fmax",
"dpnp_fmin",
"dpnp_modf",
"dpnp_nancumprod",
"dpnp_trapz",
]

@@ -56,18 +55,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*ftpr_custom_trapz_2in_1out_with_2size_t)(c_d
const c_dpctl.DPCTLEventVectorRef)


cpdef utils.dpnp_descriptor dpnp_cumprod(utils.dpnp_descriptor x1):
# instead of x1.shape, (x1.size, ) is passed to the function
# due to the following:
# >>> import numpy
# >>> a = numpy.array([[1, 2], [2, 3]])
# >>> res = numpy.cumprod(a)
# >>> res.shape
# (4,)

return call_fptr_1in_1out(DPNP_FN_CUMPROD_EXT, x1, (x1.size,))


cpdef utils.dpnp_descriptor dpnp_ediff1d(utils.dpnp_descriptor x1):

if x1.size <= 1:
@@ -226,19 +213,6 @@ cpdef tuple dpnp_modf(utils.dpnp_descriptor x1):
return (result1.get_pyobj(), result2.get_pyobj())


cpdef utils.dpnp_descriptor dpnp_nancumprod(utils.dpnp_descriptor x1):
cur_x1 = x1.get_pyobj().copy()

cur_x1_flatiter = cur_x1.flat

for i in range(cur_x1.size):
if dpnp.isnan(cur_x1_flatiter[i]):
cur_x1_flatiter[i] = 1

x1_desc = dpnp.get_dpnp_descriptor(cur_x1, copy_when_nondefault_queue=False)
return dpnp_cumprod(x1_desc)


cpdef utils.dpnp_descriptor dpnp_trapz(utils.dpnp_descriptor y1, utils.dpnp_descriptor x1, double dx):

cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(y1.dtype)
79 changes: 47 additions & 32 deletions dpnp/dpnp_iface_nanfunctions.py
@@ -39,18 +39,8 @@

import warnings

import numpy

import dpnp

# pylint: disable=no-name-in-module
from .dpnp_algo import (
dpnp_nancumprod,
)
from .dpnp_utils import (
call_origin,
)

__all__ = [
"nanargmax",
"nanargmin",
@@ -249,19 +239,40 @@ def nanargmin(a, axis=None, out=None, *, keepdims=False):
return dpnp.argmin(a, axis=axis, out=out, keepdims=keepdims)


def nancumprod(x1, **kwargs):
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating
Not a Numbers (NaNs) as one.
Not a Numbers (NaNs) as one. The cumulative product does not change when
NaNs are encountered and leading NaNs are replaced by ones.
For full documentation refer to :obj:`numpy.nancumprod`.
Limitations
-----------
Parameter `x` is supported as :class:`dpnp.ndarray`.
Keyword argument `kwargs` is currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
Parameters
----------
a : {dpnp.ndarray, usm_ndarray}
Input array.
axis : {None, int}, optional
Axis along which the cumulative product is computed. The default
(``None``) is to compute the cumulative product over the flattened
array.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the elements
are multiplied. If `dtype` is not specified, it defaults to the dtype of
`a`, unless `a` has an integer dtype with a precision less than that of
the default platform integer. In that case, the default platform
integer is used.
out : {None, dpnp.ndarray, usm_ndarray}, optional
Alternative output array in which to place the result. It must have the
same shape and buffer length as the expected output but the type will
be cast if necessary.
Returns
-------
out : dpnp.ndarray
A new array holding the result is returned unless `out` is specified as
:class:`dpnp.ndarray`, in which case a reference to `out` is returned.
The result has the same size as `a`, and the same shape as `a` if `axis`
is not ``None`` or `a` is a 1-d array.
See Also
--------
@@ -271,22 +282,26 @@ def nancumprod(x1, **kwargs):
Examples
--------
>>> import dpnp as np
>>> a = np.array([1., np.nan])
>>> result = np.nancumprod(a)
>>> [x for x in result]
[1.0, 1.0]
>>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]])
>>> result = np.nancumprod(b)
>>> [x for x in result]
[1.0, 2.0, 2.0, 8.0, 8.0, 48.0]
>>> np.nancumprod(np.array(1))
array(1)
>>> np.nancumprod(np.array([1]))
array([1])
>>> np.nancumprod(np.array([1, np.nan]))
array([1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a)
array([1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0)
array([[1., 2.],
[3., 2.]])
>>> np.nancumprod(a, axis=1)
array([[1., 2.],
[3., 3.]])
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc and not kwargs:
return dpnp_nancumprod(x1_desc).get_pyobj()

return call_origin(numpy.nancumprod, x1, **kwargs)
a, _ = _replace_nan(a, 1)
return dpnp.cumprod(a, axis=axis, dtype=dtype, out=out)


def nancumsum(a, axis=None, dtype=None, out=None):
@@ -332,7 +347,7 @@ def nancumsum(a, axis=None, dtype=None, out=None):
--------
>>> import dpnp as np
>>> np.nancumsum(np.array(1))
array([1])
array(1)
>>> np.nancumsum(np.array([1]))
array([1])
>>> np.nancumsum(np.array([1, np.nan]))
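The _replace_nan helper that the new nancumprod body calls is not part of this diff. Judging from the call a, _ = _replace_nan(a, 1) and the NumPy helper of the same name, its contract is roughly the sketch below (an assumption for orientation only, not the actual dpnp code):

import numpy

import dpnp


def _replace_nan_sketch(a, val):
    # Hypothetical stand-in: return a copy of `a` with NaNs replaced by `val`,
    # plus a boolean mask of the former NaN positions (None when the dtype
    # cannot hold NaN, so nothing needs replacing).
    if numpy.issubdtype(a.dtype, numpy.inexact):
        mask = dpnp.isnan(a)
        a = dpnp.where(mask, val, a)
    else:
        mask = None
    return a, mask

With NaNs mapped to 1, dpnp.cumprod can honour axis, dtype and out directly, which is what lets the legacy kernels and Cython wrappers above be dropped.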
6 changes: 0 additions & 6 deletions tests/test_nanfunctions.py
@@ -25,12 +25,6 @@
)
)
class TestNanCumSumProd:
@pytest.fixture(autouse=True)
def setUp(self):
if self.func == "nancumprod":
pytest.skip("nancumprod() is not implemented yet")
pass

@pytest.mark.parametrize("dtype", get_float_complex_dtypes())
@pytest.mark.parametrize(
"array",
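With the skip fixture gone, the nancumprod half of TestNanCumSumProd now runs against the new implementation. The test bodies sit outside this hunk; a standalone sketch of the dpnp-vs-NumPy comparison style they follow (hypothetical test name and data, not the verbatim suite):

import numpy
import pytest
from numpy.testing import assert_allclose

import dpnp


@pytest.mark.parametrize("func", ["nancumsum", "nancumprod"])
def test_nancum_matches_numpy(func):
    a = numpy.array([[1.0, 2.0, numpy.nan], [4.0, numpy.nan, 6.0]])
    ia = dpnp.array(a)
    expected = getattr(numpy, func)(a)
    result = dpnp.asnumpy(getattr(dpnp, func)(ia))
    assert_allclose(result, expected)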
1 change: 1 addition & 0 deletions tests/test_usm_type.py
@@ -542,6 +542,7 @@ def test_norm(usm_type, ord, axis):
pytest.param("min", [1.0, 2.0, 4.0, 7.0]),
pytest.param("nanargmax", [1.0, 2.0, 4.0, dp.nan]),
pytest.param("nanargmin", [1.0, 2.0, 4.0, dp.nan]),
pytest.param("nancumprod", [3.0, dp.nan]),
pytest.param("nancumsum", [3.0, dp.nan]),
pytest.param("nanmax", [1.0, 2.0, 4.0, dp.nan]),
pytest.param("nanmean", [1.0, 2.0, 4.0, dp.nan]),
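The new "nancumprod" entry feeds the same USM-type propagation check as the other one-in/one-out functions in this list. The surrounding test body is not shown here; the pattern it exercises looks roughly like this hypothetical standalone version:

import pytest

import dpnp as dp


@pytest.mark.parametrize("usm_type", ["device", "shared", "host"])
def test_nancumprod_usm_type(usm_type):
    x = dp.array([3.0, dp.nan], usm_type=usm_type)
    res = dp.nancumprod(x)
    # Compute follows data: the result keeps the allocation type of the input.
    assert x.usm_type == usm_type
    assert res.usm_type == usm_type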
22 changes: 2 additions & 20 deletions tests/third_party/cupy/math_tests/test_sumprod.py
@@ -235,17 +235,6 @@ def _numpy_nanprod_implemented(self):
)

def _test(self, xp, dtype):
if (
self.func == "nanprod"
and self.shape == (20, 30, 40)
and has_support_aspect64()
):
# If input type is float, NumPy returns the same data type but
# dpctl (and dpnp) returns default platform float following array api.
# When input is `float32` and output is a very large number, dpnp returns
# the number because it is `float64` but NumPy returns `inf` since it is `float32`.
pytest.skip("Output is a very large number.")

a = testing.shaped_arange(self.shape, xp, dtype)
if self.transpose_axes:
a = a.transpose(2, 0, 1)
@@ -265,9 +254,7 @@ def test_nansum_all(self, xp, dtype):
return self._test(xp, dtype)

@testing.for_all_dtypes(no_bool=True, no_float16=True)
@testing.numpy_cupy_allclose(
contiguous_check=False, type_check=has_support_aspect64()
)
@testing.numpy_cupy_allclose(type_check=has_support_aspect64())
def test_nansum_axis_transposed(self, xp, dtype):
if (
not self._numpy_nanprod_implemented()
@@ -579,6 +566,7 @@ def test_cumproduct_alias(self, xp):
return xp.cumproduct(a)


@pytest.mark.usefixtures("suppress_invalid_numpy_warnings")
@testing.parameterize(
*testing.product(
{
Expand All @@ -591,12 +579,6 @@ def test_cumproduct_alias(self, xp):
class TestNanCumSumProd:
zero_density = 0.25

@pytest.fixture(autouse=True)
def setUp(self):
if self.func == "nancumprod":
pytest.skip("nancumprod() is not implemented yet")
pass

def _make_array(self, dtype):
dtype = numpy.dtype(dtype)
if dtype.char in "efdFD":
