
duplicate: Implement syevd_batch/heevd_batch via syevd/heevd call #1867

Closed
wants to merge 13 commits
3 changes: 2 additions & 1 deletion .pre-commit-config.yaml
@@ -98,6 +98,7 @@ repos:
"-sn", # Don't display the score
"--disable=import-error",
"--disable=redefined-builtin",
"--disable=unused-wildcard-import"
"--disable=unused-wildcard-import",
"--disable=c-extension-no-member"
]
files: '^dpnp/(dpnp_iface.*|fft|linalg)'
133 changes: 130 additions & 3 deletions dpnp/backend/extensions/lapack/heevd.cpp
@@ -46,7 +46,7 @@ namespace mkl_lapack = oneapi::mkl::lapack;
namespace py = pybind11;
namespace type_utils = dpctl::tensor::type_utils;

typedef sycl::event (*heevd_impl_fn_ptr_t)(sycl::queue,
typedef sycl::event (*heevd_impl_fn_ptr_t)(sycl::queue &,
const oneapi::mkl::job,
const oneapi::mkl::uplo,
const std::int64_t,
@@ -59,7 +59,7 @@ static heevd_impl_fn_ptr_t heevd_dispatch_table[dpctl_td_ns::num_types]
[dpctl_td_ns::num_types];

template <typename T, typename RealT>
static sycl::event heevd_impl(sycl::queue exec_q,
static sycl::event heevd_impl(sycl::queue &exec_q,
const oneapi::mkl::job jobz,
const oneapi::mkl::uplo upper_lower,
const std::int64_t n,
@@ -132,7 +132,7 @@ static sycl::event heevd_impl(sycl::queue exec_q,
}

std::pair<sycl::event, sycl::event>
heevd(sycl::queue exec_q,
heevd(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
@@ -228,6 +228,133 @@ std::pair<sycl::event, sycl::event>
return std::make_pair(args_ev, heevd_ev);
}

std::pair<sycl::event, sycl::event>
heevd_batch(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
dpctl::tensor::usm_ndarray eig_vals,
const std::vector<sycl::event> &depends)
{
const int eig_vecs_nd = eig_vecs.get_ndim();
const int eig_vals_nd = eig_vals.get_ndim();

if (eig_vecs_nd != 3) {
throw py::value_error("Unexpected ndim=" + std::to_string(eig_vecs_nd) +
" of an output array with eigenvectors");
}
else if (eig_vals_nd != 2) {
throw py::value_error("Unexpected ndim=" + std::to_string(eig_vals_nd) +
" of an output array with eigenvalues");
}

const py::ssize_t *eig_vecs_shape = eig_vecs.get_shape_raw();
const py::ssize_t *eig_vals_shape = eig_vals.get_shape_raw();

if (eig_vecs_shape[1] != eig_vecs_shape[2]) {
throw py::value_error(
"The last two dimensions of 'eig_vecs' must be the same.");
}
else if (eig_vecs_shape[0] != eig_vals_shape[0] ||
eig_vecs_shape[1] != eig_vals_shape[1])
{
throw py::value_error(
"The shape of 'eig_vals' must be (batch_size, n), "
"where batch_size = " +
std::to_string(eig_vecs_shape[0]) +
" and n = " + std::to_string(eig_vecs_shape[1]));
}

size_t src_nelems(1);

for (int i = 0; i < eig_vecs_nd; ++i) {
src_nelems *= static_cast<size_t>(eig_vecs_shape[i]);
}

if (src_nelems == 0) {
// nothing to do
return std::make_pair(sycl::event(), sycl::event());
}

// check compatibility of execution queue and allocation queue
if (!dpctl::utils::queues_are_compatible(exec_q, {eig_vecs, eig_vals})) {
throw py::value_error(
"Execution queue is not compatible with allocation queues");
}

auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
if (overlap(eig_vecs, eig_vals)) {
throw py::value_error("Arrays with eigenvectors and eigenvalues are "
"overlapping segments of memory");
}

bool is_eig_vecs_c_contig = eig_vecs.is_c_contiguous();
bool is_eig_vals_c_contig = eig_vals.is_c_contiguous();
if (!is_eig_vecs_c_contig) {
throw py::value_error(
"An array with input matrix / output eigenvectors "
"must be C-contiguous");
}
else if (!is_eig_vals_c_contig) {
throw py::value_error(
"An array with output eigenvalues must be C-contiguous");
}

auto array_types = dpctl_td_ns::usm_ndarray_types();
int eig_vecs_type_id =
array_types.typenum_to_lookup_id(eig_vecs.get_typenum());
int eig_vals_type_id =
array_types.typenum_to_lookup_id(eig_vals.get_typenum());

heevd_impl_fn_ptr_t heevd_fn =
heevd_dispatch_table[eig_vecs_type_id][eig_vals_type_id];
if (heevd_fn == nullptr) {
throw py::value_error("No heevd implementation defined for a pair of "
"type for eigenvectors and eigenvalues");
}

char *eig_vecs_data = eig_vecs.get_data();
char *eig_vals_data = eig_vals.get_data();

const std::int64_t batch_size = eig_vecs_shape[0];
const std::int64_t n = eig_vecs_shape[1];
int vecs_elemsize = eig_vecs.get_elemsize();
int vals_elemsize = eig_vals.get_elemsize();

const oneapi::mkl::job jobz_val = static_cast<oneapi::mkl::job>(jobz);
const oneapi::mkl::uplo uplo_val =
static_cast<oneapi::mkl::uplo>(upper_lower);

std::vector<sycl::event> host_task_events;
std::vector<sycl::event> heevd_task_events;

host_task_events.reserve(batch_size);
heevd_task_events.reserve(batch_size);

// Release GIL to avoid serialization of host task
// submissions to the same queue in OneMKL
py::gil_scoped_release release;

for (std::int64_t i = 0; i < batch_size; ++i) {
char *eig_vecs_batch = eig_vecs_data + i * n * n * vecs_elemsize;
char *eig_vals_batch = eig_vals_data + i * n * vals_elemsize;

sycl::event heevd_ev =
heevd_fn(exec_q, jobz_val, uplo_val, n, eig_vecs_batch,
eig_vals_batch, host_task_events, depends);

heevd_task_events.push_back(heevd_ev);
}

sycl::event combine_ev = exec_q.submit(
[&](sycl::handler &cgh) { cgh.depends_on(heevd_task_events); });

sycl::event args_ev = dpctl::utils::keep_args_alive(
exec_q, {eig_vecs, eig_vals}, host_task_events);

return std::make_pair(args_ev, combine_ev);
}
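
The batched entry point above expects a C-contiguous `(batch_size, n, n)` array of Hermitian matrices plus a `(batch_size, n)` array for the eigenvalues, and runs `heevd` over every `n x n` slice while collecting the per-slice events into one combined event. A minimal sketch of the user-facing behaviour this is meant to back, assuming `dpnp.linalg.eigh` routes 3-D complex input to this batched routine (which is the intent of this change):

```python
# Sketch: batched Hermitian eigendecomposition through dpnp.linalg.eigh.
# Assumes eigh dispatches 3-D complex input to the new heevd_batch path.
import numpy as np
import dpnp as dp

batch_size, n = 4, 3
rng = np.random.default_rng(0)

# A C-contiguous stack of Hermitian matrices: a @ a^H is Hermitian for any a.
a = rng.standard_normal((batch_size, n, n)) + 1j * rng.standard_normal((batch_size, n, n))
h = dp.asarray(a @ a.conj().transpose(0, 2, 1))

w, v = dp.linalg.eigh(h)   # w: (batch_size, n) float64, v: (batch_size, n, n) complex128

# Each slice satisfies h[i] @ v[i] == v[i] * w[i] up to rounding
# (eigenvector columns scaled by their eigenvalues).
h_np, v_np, w_np = dp.asnumpy(h), dp.asnumpy(v), dp.asnumpy(w)
assert np.allclose(h_np @ v_np, v_np * w_np[:, None, :])
```
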

template <typename fnT, typename T, typename RealT>
struct HeevdContigFactory
{
10 changes: 9 additions & 1 deletion dpnp/backend/extensions/lapack/heevd.hpp
@@ -39,13 +39,21 @@ namespace ext
namespace lapack
{
extern std::pair<sycl::event, sycl::event>
heevd(sycl::queue exec_q,
heevd(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
dpctl::tensor::usm_ndarray eig_vals,
const std::vector<sycl::event> &depends = {});

extern std::pair<sycl::event, sycl::event>
heevd_batch(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
dpctl::tensor::usm_ndarray eig_vals,
const std::vector<sycl::event> &depends = {});

extern void init_heevd_dispatch_table(void);
} // namespace lapack
} // namespace ext
16 changes: 16 additions & 0 deletions dpnp/backend/extensions/lapack/lapack_py.cpp
@@ -146,6 +146,14 @@ PYBIND11_MODULE(_lapack_impl, m)
py::arg("eig_vecs"), py::arg("eig_vals"),
py::arg("depends") = py::list());

m.def("_heevd_batch", &lapack_ext::heevd_batch,
"Call `heevd` from OneMKL LAPACK library in a loop to return "
"the eigenvalues and eigenvectors of a batch of complex Hermitian "
"matrices",
py::arg("sycl_queue"), py::arg("jobz"), py::arg("upper_lower"),
py::arg("eig_vecs"), py::arg("eig_vals"),
py::arg("depends") = py::list());

m.def("_orgqr_batch", &lapack_ext::orgqr_batch,
"Call `_orgqr_batch` from OneMKL LAPACK library to return "
"the real orthogonal matrix Qi of the QR factorization "
@@ -183,6 +191,14 @@ PYBIND11_MODULE(_lapack_impl, m)
py::arg("eig_vecs"), py::arg("eig_vals"),
py::arg("depends") = py::list());

m.def("_syevd_batch", &lapack_ext::syevd_batch,
"Call `syevd` from OneMKL LAPACK library in a loop to return "
"the eigenvalues and eigenvectors of a batch of real symmetric "
"matrices",
py::arg("sycl_queue"), py::arg("jobz"), py::arg("upper_lower"),
py::arg("eig_vecs"), py::arg("eig_vals"),
py::arg("depends") = py::list());

m.def("_ungqr_batch", &lapack_ext::ungqr_batch,
"Call `_ungqr_batch` from OneMKL LAPACK library to return "
"the complex unitary matrices matrix Qi of the QR factorization "
136 changes: 133 additions & 3 deletions dpnp/backend/extensions/lapack/syevd.cpp
@@ -46,7 +46,7 @@ namespace mkl_lapack = oneapi::mkl::lapack;
namespace py = pybind11;
namespace type_utils = dpctl::tensor::type_utils;

typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue,
typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue &,
const oneapi::mkl::job,
const oneapi::mkl::uplo,
const std::int64_t,
@@ -58,7 +58,7 @@ typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue,
static syevd_impl_fn_ptr_t syevd_dispatch_vector[dpctl_td_ns::num_types];

template <typename T>
static sycl::event syevd_impl(sycl::queue exec_q,
static sycl::event syevd_impl(sycl::queue &exec_q,
const oneapi::mkl::job jobz,
const oneapi::mkl::uplo upper_lower,
const std::int64_t n,
@@ -130,7 +130,7 @@ static sycl::event syevd_impl(sycl::queue exec_q,
}

std::pair<sycl::event, sycl::event>
syevd(sycl::queue exec_q,
syevd(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
@@ -230,6 +230,136 @@ std::pair<sycl::event, sycl::event>
return std::make_pair(args_ev, syevd_ev);
}

std::pair<sycl::event, sycl::event>
syevd_batch(sycl::queue &exec_q,
const std::int8_t jobz,
const std::int8_t upper_lower,
dpctl::tensor::usm_ndarray eig_vecs,
dpctl::tensor::usm_ndarray eig_vals,
const std::vector<sycl::event> &depends)
{
const int eig_vecs_nd = eig_vecs.get_ndim();
const int eig_vals_nd = eig_vals.get_ndim();

if (eig_vecs_nd != 3) {
throw py::value_error("Unexpected ndim=" + std::to_string(eig_vecs_nd) +
" of an output array with eigenvectors");
}
else if (eig_vals_nd != 2) {
throw py::value_error("Unexpected ndim=" + std::to_string(eig_vals_nd) +
" of an output array with eigenvalues");
}

const py::ssize_t *eig_vecs_shape = eig_vecs.get_shape_raw();
const py::ssize_t *eig_vals_shape = eig_vals.get_shape_raw();

if (eig_vecs_shape[1] != eig_vecs_shape[2]) {
throw py::value_error(
"The last two dimensions of 'eig_vecs' must be the same.");
}
else if (eig_vecs_shape[0] != eig_vals_shape[0] ||
eig_vecs_shape[1] != eig_vals_shape[1])
{
throw py::value_error(
"The shape of 'eig_vals' must be (batch_size, n), "
"where batch_size = " +
std::to_string(eig_vecs_shape[0]) +
" and n = " + std::to_string(eig_vecs_shape[1]));
}

size_t src_nelems(1);

for (int i = 0; i < eig_vecs_nd; ++i) {
src_nelems *= static_cast<size_t>(eig_vecs_shape[i]);
}

if (src_nelems == 0) {
// nothing to do
return std::make_pair(sycl::event(), sycl::event());
}

// check compatibility of execution queue and allocation queue
if (!dpctl::utils::queues_are_compatible(exec_q, {eig_vecs, eig_vals})) {
throw py::value_error(
"Execution queue is not compatible with allocation queues");
}

auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
if (overlap(eig_vecs, eig_vals)) {
throw py::value_error("Arrays with eigenvectors and eigenvalues are "
"overlapping segments of memory");
}

bool is_eig_vecs_c_contig = eig_vecs.is_c_contiguous();
bool is_eig_vals_c_contig = eig_vals.is_c_contiguous();
if (!is_eig_vecs_c_contig) {
throw py::value_error(
"An array with input matrix / output eigenvectors "
"must be C-contiguous");
}
else if (!is_eig_vals_c_contig) {
throw py::value_error(
"An array with output eigenvalues must be C-contiguous");
}

auto array_types = dpctl_td_ns::usm_ndarray_types();
int eig_vecs_type_id =
array_types.typenum_to_lookup_id(eig_vecs.get_typenum());
int eig_vals_type_id =
array_types.typenum_to_lookup_id(eig_vals.get_typenum());

if (eig_vecs_type_id != eig_vals_type_id) {
throw py::value_error(
"Types of eigenvectors and eigenvalues are mismatched");
}

syevd_impl_fn_ptr_t syevd_fn = syevd_dispatch_vector[eig_vecs_type_id];
if (syevd_fn == nullptr) {
throw py::value_error("No syevd implementation defined for a type of "
"eigenvectors and eigenvalues");
}

char *eig_vecs_data = eig_vecs.get_data();
char *eig_vals_data = eig_vals.get_data();

const std::int64_t batch_size = eig_vecs_shape[0];
const std::int64_t n = eig_vecs_shape[1];
int elemsize = eig_vecs.get_elemsize();

const oneapi::mkl::job jobz_val = static_cast<oneapi::mkl::job>(jobz);
const oneapi::mkl::uplo uplo_val =
static_cast<oneapi::mkl::uplo>(upper_lower);

std::vector<sycl::event> host_task_events;
std::vector<sycl::event> syevd_task_events;

host_task_events.reserve(batch_size);
syevd_task_events.reserve(batch_size);

// Release GIL to avoid serialization of host task
// submissions to the same queue in OneMKL
py::gil_scoped_release release;

for (std::int64_t i = 0; i < batch_size; ++i) {
char *eig_vecs_batch = eig_vecs_data + i * n * n * elemsize;
char *eig_vals_batch = eig_vals_data + i * n * elemsize;

sycl::event syevd_ev =
syevd_fn(exec_q, jobz_val, uplo_val, n, eig_vecs_batch,
eig_vals_batch, host_task_events, depends);

syevd_task_events.push_back(syevd_ev);
}

sycl::event combine_ev = exec_q.submit(
[&](sycl::handler &cgh) { cgh.depends_on(syevd_task_events); });

sycl::event args_ev = dpctl::utils::keep_args_alive(
exec_q, {eig_vecs, eig_vals}, host_task_events);

return std::make_pair(args_ev, combine_ev);
}
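
Since `syevd_batch` just applies `syevd` to each contiguous `n x n` slice, its results should agree with running the single-matrix path in a Python loop. A consistency-check sketch along those lines, assuming `dpnp.linalg.eigh` takes the batched route for 3-D input and the single-matrix route for 2-D input (only eigenvalues are compared, since eigenvector signs can legitimately differ):

```python
# Sketch: batched syevd results should agree with a per-matrix Python loop.
import numpy as np
import dpnp as dp

batch_size, n = 5, 4
rng = np.random.default_rng(42)

a = rng.standard_normal((batch_size, n, n))
s = dp.asarray(a + a.transpose(0, 2, 1))   # stack of real symmetric matrices

w_batch, _ = dp.linalg.eigh(s)             # 3-D input: batched syevd path (assumed)

for i in range(batch_size):
    w_single, _ = dp.linalg.eigh(s[i])     # 2-D input: single-matrix syevd path
    # LAPACK returns eigenvalues in ascending order, so rows compare directly.
    assert np.allclose(dp.asnumpy(w_batch[i]), dp.asnumpy(w_single))
```
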

template <typename fnT, typename T>
struct SyevdContigFactory
{