Update tests to run on Iris Xe #1498

Merged (5 commits) on Jul 27, 2023
12 changes: 12 additions & 0 deletions dpnp/backend/kernels/dpnp_krnl_random.cpp
@@ -2988,9 +2988,13 @@ void func_map_init_random(func_map_t &fmap)
 
     fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_chisquare_default_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_FLT][eft_FLT] = {
+        eft_FLT, (void *)dpnp_rng_chisquare_default_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_chisquare_ext_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_FLT][eft_FLT] = {
+        eft_FLT, (void *)dpnp_rng_chisquare_ext_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_EXPONENTIAL][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_exponential_default_c<double>};
@@ -3136,15 +3140,23 @@ void func_map_init_random(func_map_t &fmap)
 
     fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_standard_cauchy_default_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_FLT][eft_FLT] = {
+        eft_FLT, (void *)dpnp_rng_standard_cauchy_default_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_standard_cauchy_ext_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_FLT][eft_FLT] = {
+        eft_FLT, (void *)dpnp_rng_standard_cauchy_ext_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_standard_exponential_default_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_FLT][eft_FLT] = {
+        eft_FLT, (void *)dpnp_rng_standard_exponential_default_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_DBL][eft_DBL] =
         {eft_DBL, (void *)dpnp_rng_standard_exponential_ext_c<double>};
+    fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_FLT][eft_FLT] =
+        {eft_FLT, (void *)dpnp_rng_standard_exponential_ext_c<float>};
 
     fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_GAMMA][eft_DBL][eft_DBL] = {
         eft_DBL, (void *)dpnp_rng_standard_gamma_default_c<double>};
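The hunks above register single-precision (`float`) kernels alongside the existing double-precision ones, so these RNG functions can dispatch on devices without fp64 support such as Iris Xe. Below is a minimal Python sketch of the same dtype-keyed dispatch idea; the names and signatures are illustrative stand-ins, not dpnp internals.

```python
import numpy

def rng_chisquare_double(df, size):
    # stand-in for dpnp_rng_chisquare_default_c<double>
    return numpy.random.chisquare(df, size).astype(numpy.float64)

def rng_chisquare_float(df, size):
    # stand-in for dpnp_rng_chisquare_default_c<float>
    return numpy.random.chisquare(df, size).astype(numpy.float32)

# fmap is analogous to this table: before the PR only the float64 row
# existed; the added eft_FLT entries play the role of the float32 row.
DISPATCH = {
    ("RNG_CHISQUARE", numpy.dtype(numpy.float64)): rng_chisquare_double,
    ("RNG_CHISQUARE", numpy.dtype(numpy.float32)): rng_chisquare_float,
}

def chisquare(df, size, dtype):
    return DISPATCH[("RNG_CHISQUARE", numpy.dtype(dtype))](df, size)
```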
9 changes: 5 additions & 4 deletions dpnp/random/dpnp_algo_random.pyx
@@ -96,7 +96,8 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_beta_c_1out_t)(c_dpctl.DPCTLSy
                                                                  const c_dpctl.DPCTLEventVectorRef) except +
 ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_binomial_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
                                                                      void * ,
-                                                                     const int, const double,
+                                                                     const int,
+                                                                     const double,
                                                                      const size_t,
                                                                      const c_dpctl.DPCTLEventVectorRef) except +
 ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_chisquare_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
@@ -585,7 +586,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_chisquare(int df, size):
     """
 
     # convert string type names (array.dtype) to C enum DPNPFuncType
-    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
+    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())
 
     # get the FPTR data structure
     cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_CHISQUARE_EXT, param1_type, param1_type)
@@ -1329,7 +1330,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_cauchy(size):
     """
 
     # convert string type names (array.dtype) to C enum DPNPFuncType
-    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
+    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())
 
     # get the FPTR data structure
     cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_CAUCHY_EXT, param1_type, param1_type)
@@ -1364,7 +1365,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_exponential(size):
     cdef fptr_dpnp_rng_standard_exponential_c_1out_t func
 
     # convert string type names (array.dtype) to C enum DPNPFuncType
-    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
+    cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())
 
     # get the FPTR data structure
     cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT, param1_type, param1_type)
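With the hard-coded `dpnp.float64` replaced by `dpnp.default_float_type()`, these wrappers now request whichever kernel matches the device. The sketch below shows how such a default can be derived from device aspects via dpctl; it mirrors the behavior, not the actual dpnp source.

```python
import dpctl
import numpy

def default_float_type(device=None):
    # fp64 is an optional SYCL aspect; Iris Xe integrated GPUs lack it,
    # so the sensible default there is float32.
    dev = device if device is not None else dpctl.select_default_device()
    return numpy.float64 if dev.has_aspect_fp64 else numpy.float32
```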
6 changes: 3 additions & 3 deletions dpnp/random/dpnp_iface_random.py
@@ -230,7 +230,7 @@ def chisquare(df, size=None):
     -----------
     Parameter ``df`` is supported as a scalar.
     Otherwise, :obj:`numpy.random.chisquare(df, size)` samples are drawn.
-    Output array data type is :obj:`dpnp.float64`.
+    Output array data type is default float type.
 
     Examples
     --------
@@ -1533,7 +1533,7 @@ def standard_cauchy(size=None):
 
     Limitations
     -----------
-    Output array data type is :obj:`dpnp.float64`.
+    Output array data type is default float type.
 
     Examples
     --------
@@ -1562,7 +1562,7 @@ def standard_exponential(size=None):
 
     Limitations
     -----------
-    Output array data type is :obj:`dpnp.float64`.
+    Output array data type is default float type.
 
     Examples
     --------
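A small usage check consistent with the updated docstrings, assuming a working dpnp install with a default SYCL device: the draws come back as float64 on fp64-capable hardware and as float32 on Iris Xe.

```python
import dpnp

x = dpnp.random.standard_cauchy(size=10)
# float64 on devices with fp64 support, float32 otherwise (e.g. Iris Xe)
print(x.dtype)
assert x.dtype == dpnp.default_float_type()
```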
5 changes: 4 additions & 1 deletion tests/test_logic.py
@@ -4,7 +4,7 @@
 
 import dpnp
 
-from .helper import get_all_dtypes
+from .helper import get_all_dtypes, has_support_aspect64
 
 
 @pytest.mark.parametrize("type", get_all_dtypes())
@@ -40,6 +40,9 @@ def test_all(type, shape):
     assert_allclose(dpnp_res, np_res)
 
 
+@pytest.mark.skipif(
+    not has_support_aspect64(), reason="Aborted on Iris Xe: SAT-5988"
+)
 @pytest.mark.parametrize("type", get_all_dtypes(no_bool=True, no_complex=True))
 def test_allclose(type):
     a = numpy.random.rand(10)
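The new skipif leans on the `has_support_aspect64` helper from `tests/helper.py`. A plausible sketch of such a helper is below; the real implementation may differ.

```python
import dpctl

def has_support_aspect64(device=None):
    """Return True if the default (or given) device supports float64."""
    dev = device if device is not None else dpctl.select_default_device()
    return dev.has_aspect_fp64
```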
18 changes: 11 additions & 7 deletions tests/test_manipulation.py
@@ -4,19 +4,23 @@
 
 import dpnp
 
+from .helper import has_support_aspect64
+
 testdata = []
 testdata += [
     ([True, False, True], dtype)
-    for dtype in ["float32", "float64", "int32", "int64", "bool"]
+    for dtype in ["float32", "int32", "int64", "bool"]
 ]
-testdata += [
-    ([1, -1, 0], dtype) for dtype in ["float32", "float64", "int32", "int64"]
-]
-testdata += [([0.1, 0.0, -0.1], dtype) for dtype in ["float32", "float64"]]
-testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in ["complex128"]]
+testdata += [([1, -1, 0], dtype) for dtype in ["float32", "int32", "int64"]]
+testdata += [([0.1, 0.0, -0.1], dtype) for dtype in ["float32"]]
+if has_support_aspect64():
+    testdata += [([True, False, True], dtype) for dtype in ["float64"]]
+    testdata += [([1, -1, 0], dtype) for dtype in ["float64"]]
+    testdata += [([0.1, 0.0, -0.1], dtype) for dtype in ["float64"]]
+    testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in ["complex128"]]
 
 
-@pytest.mark.parametrize("in_obj,out_dtype", testdata)
+@pytest.mark.parametrize("in_obj, out_dtype", testdata)
 def test_copyto_dtype(in_obj, out_dtype):
     ndarr = numpy.array(in_obj)
     expected = numpy.empty(ndarr.size, dtype=out_dtype)
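Since `testdata` is built at import time, gating the float64 and complex128 cases behind `has_support_aspect64()` drops them from the parametrization entirely on fp64-less devices instead of reporting a pile of skips. An illustrative reduction of the pattern, with the helper stubbed out:

```python
import pytest

def device_supports_fp64():
    # stand-in for tests.helper.has_support_aspect64(); False on Iris Xe
    return False

cases = ["float32", "int32", "int64"]
if device_supports_fp64():
    cases += ["float64", "complex128"]

@pytest.mark.parametrize("dtype", cases)
def test_cases_are_collected(dtype):
    # on Iris Xe only the first three cases are generated at collection
    assert dtype in cases
```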
28 changes: 14 additions & 14 deletions tests/test_mixins.py
@@ -12,19 +12,19 @@ def test_matmul(self):
 
         # DPNP
         array1 = inp.reshape(
-            inp.array(array_data, dtype=inp.float64), (size, size)
+            inp.array(array_data, dtype=inp.float32), (size, size)
         )
         array2 = inp.reshape(
-            inp.array(array_data, dtype=inp.float64), (size, size)
+            inp.array(array_data, dtype=inp.float32), (size, size)
         )
         result = inp.matmul(array1, array2)
         # print(result)
 
         # original
-        array_1 = numpy.array(array_data, dtype=numpy.float64).reshape(
+        array_1 = numpy.array(array_data, dtype=numpy.float32).reshape(
             (size, size)
         )
-        array_2 = numpy.array(array_data, dtype=numpy.float64).reshape(
+        array_2 = numpy.array(array_data, dtype=numpy.float32).reshape(
             (size, size)
         )
         expected = numpy.matmul(array_1, array_2)
@@ -40,14 +40,14 @@ def test_matmul2(self):
         array_data2 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
 
         # DPNP
-        array1 = inp.reshape(inp.array(array_data1, dtype=inp.float64), (3, 2))
-        array2 = inp.reshape(inp.array(array_data2, dtype=inp.float64), (2, 4))
+        array1 = inp.reshape(inp.array(array_data1, dtype=inp.float32), (3, 2))
+        array2 = inp.reshape(inp.array(array_data2, dtype=inp.float32), (2, 4))
         result = inp.matmul(array1, array2)
         # print(result)
 
         # original
-        array_1 = numpy.array(array_data1, dtype=numpy.float64).reshape((3, 2))
-        array_2 = numpy.array(array_data2, dtype=numpy.float64).reshape((2, 4))
+        array_1 = numpy.array(array_data1, dtype=numpy.float32).reshape((3, 2))
+        array_2 = numpy.array(array_data2, dtype=numpy.float32).reshape((2, 4))
         expected = numpy.matmul(array_1, array_2)
         # print(expected)
 
@@ -56,17 +56,17 @@
     def test_matmul3(self):
         array_data1 = numpy.full((513, 513), 5)
         array_data2 = numpy.full((513, 513), 2)
-        out = numpy.empty((513, 513), dtype=numpy.float64)
+        out = numpy.empty((513, 513), dtype=numpy.float32)
 
         # DPNP
-        array1 = inp.array(array_data1, dtype=inp.float64)
-        array2 = inp.array(array_data2, dtype=inp.float64)
-        out1 = inp.array(out, dtype=inp.float64)
+        array1 = inp.array(array_data1, dtype=inp.float32)
+        array2 = inp.array(array_data2, dtype=inp.float32)
+        out1 = inp.array(out, dtype=inp.float32)
         result = inp.matmul(array1, array2, out=out1)
 
         # original
-        array_1 = numpy.array(array_data1, dtype=numpy.float64)
-        array_2 = numpy.array(array_data2, dtype=numpy.float64)
+        array_1 = numpy.array(array_data1, dtype=numpy.float32)
+        array_2 = numpy.array(array_data2, dtype=numpy.float32)
         expected = numpy.matmul(array_1, array_2, out=out)
 
         numpy.testing.assert_array_equal(expected, result)
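Moving these matmul tests to float32 does not loosen them: the operands are small integers, so every element of the product (513 * 5 * 2 = 5130 in test_matmul3) is exactly representable in float32, which is exact for integers up to 2**24, and assert_array_equal remains valid. A quick standalone check:

```python
import numpy

a = numpy.full((513, 513), 5, dtype=numpy.float32)
b = numpy.full((513, 513), 2, dtype=numpy.float32)
c = numpy.matmul(a, b)
# each entry sums 513 products of 5 * 2, i.e. exactly 5130.0
assert (c == 5130.0).all()
```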