Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update tests to run on Iris Xe #1498

Merged
merged 5 commits into from
Jul 27, 2023
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions dpnp/backend/kernels/dpnp_krnl_random.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2988,9 +2988,13 @@ void func_map_init_random(func_map_t &fmap)

fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_chisquare_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_chisquare_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_chisquare_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_chisquare_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_EXPONENTIAL][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_exponential_default_c<double>};
Expand Down Expand Up @@ -3136,15 +3140,23 @@ void func_map_init_random(func_map_t &fmap)

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_cauchy_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_cauchy_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_cauchy_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_cauchy_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_exponential_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_exponential_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_DBL][eft_DBL] =
{eft_DBL, (void *)dpnp_rng_standard_exponential_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_FLT][eft_FLT] =
{eft_FLT, (void *)dpnp_rng_standard_exponential_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_GAMMA][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_gamma_default_c<double>};
Expand Down
9 changes: 5 additions & 4 deletions dpnp/random/dpnp_algo_random.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,8 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_beta_c_1out_t)(c_dpctl.DPCTLSy
const c_dpctl.DPCTLEventVectorRef) except +
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_binomial_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
void * ,
const int, const double,
const int,
const double,
const size_t,
const c_dpctl.DPCTLEventVectorRef) except +
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_chisquare_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
Expand Down Expand Up @@ -585,7 +586,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_chisquare(int df, size):
"""

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_CHISQUARE_EXT, param1_type, param1_type)
Expand Down Expand Up @@ -1329,7 +1330,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_cauchy(size):
"""

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_CAUCHY_EXT, param1_type, param1_type)
Expand Down Expand Up @@ -1364,7 +1365,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_exponential(size):
cdef fptr_dpnp_rng_standard_exponential_c_1out_t func

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT, param1_type, param1_type)
Expand Down
6 changes: 3 additions & 3 deletions dpnp/random/dpnp_iface_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ def chisquare(df, size=None):
-----------
Parameter ``df`` is supported as a scalar.
Otherwise, :obj:`numpy.random.chisquare(df, size)` samples are drawn.
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.

Examples
--------
Expand Down Expand Up @@ -1533,7 +1533,7 @@ def standard_cauchy(size=None):

Limitations
-----------
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.

Examples
--------
Expand Down Expand Up @@ -1562,7 +1562,7 @@ def standard_exponential(size=None):

Limitations
-----------
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.

Examples
--------
Expand Down
5 changes: 4 additions & 1 deletion tests/test_logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import dpnp

from .helper import get_all_dtypes
from .helper import get_all_dtypes, has_support_aspect64


@pytest.mark.parametrize("type", get_all_dtypes())
Expand Down Expand Up @@ -40,6 +40,9 @@ def test_all(type, shape):
assert_allclose(dpnp_res, np_res)


@pytest.mark.skipif(
not has_support_aspect64(), reason="Aborted on Iris Xe: SAT-5988"
)
@pytest.mark.parametrize("type", get_all_dtypes(no_bool=True, no_complex=True))
def test_allclose(type):
a = numpy.random.rand(10)
Expand Down
19 changes: 14 additions & 5 deletions tests/test_manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,29 @@

import dpnp

from .helper import (
get_all_dtypes,
get_complex_dtypes,
get_float_dtypes,
)

# Parametrization data for copyto tests: (input_object, target_dtype) pairs.
# Dtype lists are built from the device-aware helpers instead of hard-coded
# names, so dtypes unsupported on the current device (e.g. float64 on Iris Xe)
# are excluded automatically.
# NOTE(review): the pasted diff interleaved the pre-change hard-coded dtype
# lists with the helper-based ones; this is the reconstructed final version.
testdata = []
# boolean input can be cast to every non-complex dtype
testdata += [
    ([True, False, True], dtype)
    for dtype in get_all_dtypes(no_none=True, no_complex=True)
]
# signed integer input: exclude bool and complex targets
testdata += [
    ([1, -1, 0], dtype)
    for dtype in get_all_dtypes(no_none=True, no_bool=True, no_complex=True)
]
# fractional input only makes sense for floating targets
testdata += [([0.1, 0.0, -0.1], dtype) for dtype in get_float_dtypes()]
# complex input only for complex targets
testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in get_complex_dtypes()]


@pytest.mark.parametrize("in_obj,out_dtype", testdata)
@pytest.mark.parametrize("in_obj, out_dtype", testdata)
def test_copyto_dtype(in_obj, out_dtype):
if out_dtype == dpnp.complex64:
pytest.skip("SAT-6016: dpnp.copyto() do not work with complex64 dtype")
ndarr = numpy.array(in_obj)
expected = numpy.empty(ndarr.size, dtype=out_dtype)
numpy.copyto(expected, ndarr)
Expand Down
94 changes: 48 additions & 46 deletions tests/test_mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,72 +4,74 @@

import dpnp as inp

from .helper import get_float_dtypes


class TestMatMul(unittest.TestCase):
    """Compare ``dpnp.matmul`` results against ``numpy.matmul``.

    Each test now iterates over ``get_float_dtypes()`` instead of a
    hard-coded ``float64`` so the suite also runs on devices without
    double-precision support (e.g. Iris Xe).

    NOTE(review): the pasted diff interleaved the pre-change (float64-only)
    and post-change bodies; this is the reconstructed final version.
    """

    def test_matmul(self):
        """Square (2, 2) @ (2, 2) product matches numpy for each float dtype."""
        array_data = [1.0, 2.0, 3.0, 4.0]
        size = 2

        for dtype in get_float_dtypes():
            # DPNP result
            array1 = inp.reshape(
                inp.array(array_data, dtype=dtype), (size, size)
            )
            array2 = inp.reshape(
                inp.array(array_data, dtype=dtype), (size, size)
            )
            result = inp.matmul(array1, array2)

            # numpy reference
            array_1 = numpy.array(array_data, dtype=dtype).reshape((size, size))
            array_2 = numpy.array(array_data, dtype=dtype).reshape((size, size))
            expected = numpy.matmul(array_1, array_2)

            numpy.testing.assert_array_equal(expected, result)

    def test_matmul2(self):
        """Rectangular (3, 2) @ (2, 4) product matches numpy for each float dtype."""
        array_data1 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
        array_data2 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]

        for dtype in get_float_dtypes():
            # DPNP result
            array1 = inp.reshape(inp.array(array_data1, dtype=dtype), (3, 2))
            array2 = inp.reshape(inp.array(array_data2, dtype=dtype), (2, 4))
            result = inp.matmul(array1, array2)

            # numpy reference
            array_1 = numpy.array(array_data1, dtype=dtype).reshape((3, 2))
            array_2 = numpy.array(array_data2, dtype=dtype).reshape((2, 4))
            expected = numpy.matmul(array_1, array_2)

            numpy.testing.assert_array_equal(expected, result)

    def test_matmul3(self):
        """Large (513, 513) product written into a preallocated ``out`` array."""
        array_data1 = numpy.full((513, 513), 5)
        array_data2 = numpy.full((513, 513), 2)

        for dtype in get_float_dtypes():
            # fresh output buffer per dtype so results don't leak between runs
            out = numpy.empty((513, 513), dtype=dtype)

            # DPNP result, routed through the out= parameter
            array1 = inp.array(array_data1, dtype=dtype)
            array2 = inp.array(array_data2, dtype=dtype)
            out1 = inp.array(out, dtype=dtype)
            result = inp.matmul(array1, array2, out=out1)

            # numpy reference, also using out=
            array_1 = numpy.array(array_data1, dtype=dtype)
            array_2 = numpy.array(array_data2, dtype=dtype)
            expected = numpy.matmul(array_1, array_2, out=out)

            numpy.testing.assert_array_equal(expected, result)


if __name__ == "__main__":
Expand Down
Loading
Loading