Skip to content

Commit a8f8660

Browse files
author
zhangkaihuo
authored
Add sparse conversion api and sparse creation api (#40780)
1 parent f95f3a6 commit a8f8660

File tree

6 files changed

+216
-7
lines changed

6 files changed

+216
-7
lines changed

paddle/fluid/pybind/eager_functions.cc

Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,9 @@ limitations under the License. */
4040
#include "paddle/phi/common/data_type.h"
4141
#include "paddle/phi/core/compat/convert_utils.h"
4242
#include "paddle/phi/core/dense_tensor.h"
43+
#include "paddle/phi/core/sparse_coo_tensor.h"
44+
#include "paddle/phi/core/sparse_csr_tensor.h"
45+
4346
namespace paddle {
4447
namespace pybind {
4548

@@ -468,6 +471,90 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
468471
EAGER_CATCH_AND_THROW_RETURN_NULL
469472
}
470473

474+
// Pybind entry point: build a SparseCooTensor from positional Python args
// (non_zero_indices, non_zero_elements, dense_shape, stop_gradient).
static PyObject* eager_api_sparse_coo_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  // Both components must be backed by DenseTensor implementations before the
  // dynamic_pointer_cast below can succeed.
  PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero indices must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  const auto indices_impl =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
  const auto elements_impl =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  // TODO(zhangkaihuo): After create SparseTensor, call coalesced() to sort and
  // merge duplicate indices
  auto coo_impl = std::make_shared<phi::SparseCooTensor>(
      *indices_impl, *elements_impl, phi::make_ddim(dense_shape));

  paddle::experimental::Tensor tensor;
  tensor.set_impl(coo_impl);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);

  // Wire up autograd metadata; attach an accumulation node when the tensor
  // does not yet have a grad node.
  auto* autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") have not GradNode, add GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
512+
513+
// Pybind entry point: build a SparseCsrTensor from positional Python args
// (non_zero_crows, non_zero_cols, non_zero_elements, dense_shape,
// stop_gradient).
static PyObject* eager_api_sparse_csr_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  // All three CSR components must be backed by DenseTensor implementations
  // before the dynamic_pointer_casts below can succeed.
  PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the compressed non-zero rows must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero cols must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));

  const auto crows_impl =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
  const auto cols_impl =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
  const auto elements_impl =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  auto csr_impl = std::make_shared<phi::SparseCsrTensor>(
      *crows_impl, *cols_impl, *elements_impl, phi::make_ddim(dense_shape));

  paddle::experimental::Tensor tensor;
  tensor.set_impl(csr_impl);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);

  // Wire up autograd metadata; attach an accumulation node when the tensor
  // does not yet have a grad node.
  auto* autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") have not GradNode, add GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
557+
471558
PyMethodDef variable_functions[] = {
472559
// TODO(jiabin): Remove scale when we have final state tests
473560
{"scale", (PyCFunction)(void (*)(void))eager_api_scale,
@@ -490,6 +577,14 @@ PyMethodDef variable_functions[] = {
490577
{"read_next_tensor_list",
491578
(PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
492579
METH_VARARGS | METH_KEYWORDS, NULL},
580+
/**sparse functions**/
581+
{"sparse_coo_tensor",
582+
(PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
583+
METH_VARARGS | METH_KEYWORDS, NULL},
584+
{"sparse_csr_tensor",
585+
(PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
586+
METH_VARARGS | METH_KEYWORDS, NULL},
587+
/**sparse functions**/
493588
{NULL, NULL, 0, NULL}};
494589

495590
void BindFunctions(PyObject* module) {

paddle/fluid/pybind/eager_method.cc

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1097,6 +1097,49 @@ static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
10971097
EAGER_CATCH_AND_THROW_RETURN_NULL
10981098
}
10991099

1100+
// Pybind method: convert this tensor to a SparseCooTensor with `sparse_dim`
// sparse dimensions, propagating autograd flags onto the result.
static PyObject* tensor_method_to_sparse_coo(TensorObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  const int64_t sparse_dim = CastPyArg2AttrLong(PyTuple_GET_ITEM(args, 0), 0);
  auto coo_tensor = self->tensor.to_sparse_coo(sparse_dim);
  // Copy stop-gradient and persistable settings from the source tensor.
  auto* src_meta = egr::EagerUtils::autograd_meta(&self->tensor);
  auto* dst_meta = egr::EagerUtils::autograd_meta(&coo_tensor);
  dst_meta->SetStopGradient(src_meta->StopGradient());
  dst_meta->SetPersistable(src_meta->Persistable());
  return ToPyObject(coo_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
1114+
1115+
// Pybind method: convert this tensor to a SparseCsrTensor, propagating
// autograd flags onto the result.
static PyObject* tensor_method_to_sparse_csr(TensorObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto csr_tensor = self->tensor.to_sparse_csr();
  // Copy stop-gradient and persistable settings from the source tensor.
  auto* src_meta = egr::EagerUtils::autograd_meta(&self->tensor);
  auto* dst_meta = egr::EagerUtils::autograd_meta(&csr_tensor);
  dst_meta->SetStopGradient(src_meta->StopGradient());
  dst_meta->SetPersistable(src_meta->Persistable());
  return ToPyObject(csr_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
1128+
1129+
// Pybind method: convert this (sparse) tensor back to a DenseTensor,
// propagating autograd flags onto the result.
static PyObject* tensor_method_to_dense(TensorObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto dense_tensor = self->tensor.to_dense();
  // Copy stop-gradient and persistable settings from the source tensor.
  auto* src_meta = egr::EagerUtils::autograd_meta(&self->tensor);
  auto* dst_meta = egr::EagerUtils::autograd_meta(&dense_tensor);
  dst_meta->SetStopGradient(src_meta->StopGradient());
  dst_meta->SetPersistable(src_meta->Persistable());
  return ToPyObject(dense_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
1142+
11001143
static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
11011144
PyObject* kwargs) {
11021145
EAGER_TRY
@@ -1185,6 +1228,12 @@ PyMethodDef variable_methods[] = {
11851228
METH_VARARGS | METH_KEYWORDS, NULL},
11861229
{"is_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
11871230
METH_VARARGS | METH_KEYWORDS, NULL},
1231+
{"to_sparse_coo", (PyCFunction)(void (*)(void))tensor_method_to_sparse_coo,
1232+
METH_VARARGS | METH_KEYWORDS, NULL},
1233+
{"to_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_to_sparse_csr,
1234+
METH_VARARGS | METH_KEYWORDS, NULL},
1235+
{"to_dense", (PyCFunction)(void (*)(void))tensor_method_to_dense,
1236+
METH_VARARGS | METH_KEYWORDS, NULL},
11881237
/***the method of sparse tensor****/
11891238
{"_inplace_version", (PyCFunction)(void (*)(void))tensor__inplace_version,
11901239
METH_VARARGS | METH_KEYWORDS, NULL},

paddle/phi/api/include/tensor.h

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -518,6 +518,30 @@ class PADDLE_API Tensor final {
518518

519519
/* Part 10: Auto generated Tensor methods */
520520

521+
/* Part 11: Methods of converting SparseTensor and DenseTensor to each other
522+
*/
523+
/**
524+
* @brief Convert DenseTensor or SparseCsrTensor to SparseCooTensor
525+
*
526+
* @param sparse_dim, The number of sparse dimensions
527+
* @return Tensor
528+
*/
529+
Tensor to_sparse_coo(const int64_t sparse_dim) const;
530+
531+
/**
532+
* @brief Convert DenseTensor or SparseCooTensor to SparseCsrTensor
533+
*
534+
* @return Tensor
535+
*/
536+
Tensor to_sparse_csr() const;
537+
538+
/**
539+
* @brief Convert SparseCooTensor or SparseCsrTensor to DenseTensor
540+
*
541+
* @return Tensor
542+
*/
543+
Tensor to_dense() const;
544+
521545
private:
522546
/**
523547
* [ Why use abstract TensorImpl interface here? ]

paddle/phi/api/lib/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,4 +149,4 @@ cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw ph
149149
cc_library(sparse_api SRCS ${sparse_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
150150
cc_library(sparse_bw_api SRCS ${sparse_bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api sparse_api_custom_impl)
151151

152-
cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta)
152+
cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta sparse_api)

paddle/phi/api/lib/tensor_method.cc

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ limitations under the License. */
1919
#include "paddle/phi/core/compat/convert_utils.h"
2020
#include "paddle/phi/core/tensor_base.h"
2121

22+
#include "paddle/phi/api/include/sparse_api.h"
2223
#include "paddle/phi/api/lib/api_gen_utils.h"
2324
#include "paddle/phi/api/lib/kernel_dispatch.h"
2425
#include "paddle/phi/infermeta/unary.h"
@@ -183,5 +184,17 @@ void Tensor::copy_(const Tensor &src,
183184
}
184185
}
185186

187+
// Convert a DenseTensor or SparseCsrTensor into a SparseCooTensor with
// `sparse_dim` sparse dimensions. Thin forwarder to the sparse C++ API.
Tensor Tensor::to_sparse_coo(const int64_t sparse_dim) const {
  return experimental::sparse::to_sparse_coo(*this, sparse_dim);
}
190+
191+
// Convert a DenseTensor or SparseCooTensor into a SparseCsrTensor.
// Thin forwarder to the sparse C++ API.
Tensor Tensor::to_sparse_csr() const {
  return experimental::sparse::to_sparse_csr(*this);
}
194+
195+
// Convert a SparseCooTensor or SparseCsrTensor back into a DenseTensor.
// Thin forwarder to the sparse C++ API.
Tensor Tensor::to_dense() const {
  return experimental::sparse::to_dense(*this);
}
198+
186199
} // namespace experimental
187200
} // namespace paddle

python/paddle/fluid/tests/unittests/test_sparse_utils_op.py

Lines changed: 34 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,25 +17,53 @@
1717
import numpy as np
1818
import paddle
1919
from paddle import _C_ops
20+
from paddle.fluid import core
2021
from paddle.fluid.framework import _test_eager_guard
2122

2223

2324
class TestSparseUtils(unittest.TestCase):
25+
def test_create_sparse_coo_tensor(self):
    """Create a SparseCooTensor via core.eager.sparse_coo_tensor and
    check that the indices/elements round-trip unchanged."""
    with _test_eager_guard():
        non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
        non_zero_elements = [1, 2, 3, 4, 5]
        dense_shape = [3, 4]
        dense_indices = paddle.to_tensor(non_zero_indices)
        dense_elements = paddle.to_tensor(
            non_zero_elements, dtype='float32')
        stop_gradient = False
        coo = core.eager.sparse_coo_tensor(dense_indices, dense_elements,
                                           dense_shape, stop_gradient)
        # The original test only printed the tensor; assert on its
        # contents so a regression actually fails the test.
        assert np.array_equal(coo.non_zero_indices().numpy(),
                              non_zero_indices)
        assert np.array_equal(coo.non_zero_elements().numpy(),
                              non_zero_elements)
37+
38+
def test_create_sparse_csr_tensor(self):
    """Create a SparseCsrTensor via core.eager.sparse_csr_tensor and
    check that crows/cols/elements round-trip unchanged."""
    with _test_eager_guard():
        non_zero_crows = [0, 2, 3, 5]
        non_zero_cols = [1, 3, 2, 0, 1]
        non_zero_elements = [1, 2, 3, 4, 5]
        dense_shape = [3, 4]
        dense_crows = paddle.to_tensor(non_zero_crows)
        dense_cols = paddle.to_tensor(non_zero_cols)
        dense_elements = paddle.to_tensor(
            non_zero_elements, dtype='float32')
        stop_gradient = False
        csr = core.eager.sparse_csr_tensor(dense_crows, dense_cols,
                                           dense_elements, dense_shape,
                                           stop_gradient)
        # The original test only printed the tensor; assert on its
        # contents so a regression actually fails the test.
        assert np.array_equal(csr.non_zero_crows().numpy(), non_zero_crows)
        assert np.array_equal(csr.non_zero_cols().numpy(), non_zero_cols)
        assert np.array_equal(csr.non_zero_elements().numpy(),
                              non_zero_elements)
53+
2454
def test_to_sparse_coo(self):
    """Dense -> COO -> dense round trip preserves indices, values and
    the original dense content."""
    with _test_eager_guard():
        x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
        expected_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
        expected_elements = [1, 2, 3, 4, 5]
        dense_x = paddle.to_tensor(x)
        out = dense_x.to_sparse_coo(2)
        assert np.array_equal(out.non_zero_indices().numpy(),
                              expected_indices)
        assert np.array_equal(out.non_zero_elements().numpy(),
                              expected_elements)

        # Converting back must reproduce the original dense values.
        assert np.array_equal(out.to_dense().numpy(), x)
4068

4169
def test_to_sparse_csr(self):
@@ -45,14 +73,14 @@ def test_to_sparse_csr(self):
4573
non_zero_cols = [1, 3, 2, 0, 1]
4674
non_zero_elements = [1, 2, 3, 4, 5]
4775
dense_x = paddle.to_tensor(x)
48-
out = _C_ops.final_state_to_sparse_csr(dense_x)
76+
out = dense_x.to_sparse_csr()
4977
print(out)
5078
assert np.array_equal(out.non_zero_crows().numpy(), non_zero_crows)
5179
assert np.array_equal(out.non_zero_cols().numpy(), non_zero_cols)
5280
assert np.array_equal(out.non_zero_elements().numpy(),
5381
non_zero_elements)
5482

55-
dense_tensor = _C_ops.final_state_to_dense(out)
83+
dense_tensor = out.to_dense()
5684
assert np.array_equal(dense_tensor.numpy(), x)
5785

5886

0 commit comments

Comments
 (0)