@@ -38,6 +38,15 @@ static inline void CheckSparseIndexFormatType(SparseTensorFormat::type expected,
3838 ASSERT_EQ (expected, sparse_tensor.sparse_index ()->format_id ());
3939}
4040
41+ static inline void AssertCOOIndex (
42+ const std::shared_ptr<SparseCOOIndex::CoordsTensor>& sidx, const int64_t nth,
43+ const std::vector<int64_t >& expected_values) {
44+ int64_t n = static_cast <int64_t >(expected_values.size ());
45+ for (int64_t i = 0 ; i < n; ++i) {
46+ ASSERT_EQ (expected_values[i], sidx->Value ({nth, i}));
47+ }
48+ }
49+
4150TEST (TestSparseCOOTensor, CreationEmptyTensor) {
4251 std::vector<int64_t > shape = {2 , 3 , 4 };
4352 SparseTensorImpl<SparseCOOIndex> st1 (int64 (), shape);
@@ -84,13 +93,8 @@ TEST(TestSparseCOOTensor, CreationFromNumericTensor) {
8493 ASSERT_EQ ("" , st1.dim_name (1 ));
8594 ASSERT_EQ ("" , st1.dim_name (2 ));
8695
87- const int64_t * ptr = reinterpret_cast <const int64_t *>(st1.raw_data ());
88- for (int i = 0 ; i < 6 ; ++i) {
89- ASSERT_EQ (i + 1 , ptr[i]);
90- }
91- for (int i = 0 ; i < 6 ; ++i) {
92- ASSERT_EQ (i + 11 , ptr[i + 6 ]);
93- }
96+ const int64_t * raw_data = reinterpret_cast <const int64_t *>(st1.raw_data ());
97+ AssertNumericDataEqual (raw_data, {1 , 2 , 3 , 4 , 5 , 6 , 11 , 12 , 13 , 14 , 15 , 16 });
9498
9599 const auto & si = internal::checked_cast<const SparseCOOIndex&>(*st1.sparse_index ());
96100 ASSERT_EQ (std::string (" SparseCOOIndex" ), si.ToString ());
@@ -99,30 +103,11 @@ TEST(TestSparseCOOTensor, CreationFromNumericTensor) {
99103 ASSERT_EQ (std::vector<int64_t >({12 , 3 }), sidx->shape ());
100104 ASSERT_TRUE (sidx->is_column_major ());
101105
102- // (0, 0, 0) -> 1
103- ASSERT_EQ (0 , sidx->Value ({0 , 0 }));
104- ASSERT_EQ (0 , sidx->Value ({0 , 1 }));
105- ASSERT_EQ (0 , sidx->Value ({0 , 2 }));
106-
107- // (0, 0, 2) -> 2
108- ASSERT_EQ (0 , sidx->Value ({1 , 0 }));
109- ASSERT_EQ (0 , sidx->Value ({1 , 1 }));
110- ASSERT_EQ (2 , sidx->Value ({1 , 2 }));
111-
112- // (0, 1, 1) -> 3
113- ASSERT_EQ (0 , sidx->Value ({2 , 0 }));
114- ASSERT_EQ (1 , sidx->Value ({2 , 1 }));
115- ASSERT_EQ (1 , sidx->Value ({2 , 2 }));
116-
117- // (1, 2, 1) -> 15
118- ASSERT_EQ (1 , sidx->Value ({10 , 0 }));
119- ASSERT_EQ (2 , sidx->Value ({10 , 1 }));
120- ASSERT_EQ (1 , sidx->Value ({10 , 2 }));
121-
122- // (1, 2, 3) -> 16
123- ASSERT_EQ (1 , sidx->Value ({11 , 0 }));
124- ASSERT_EQ (2 , sidx->Value ({11 , 1 }));
125- ASSERT_EQ (3 , sidx->Value ({11 , 2 }));
106+ AssertCOOIndex (sidx, 0 , {0 , 0 , 0 });
107+ AssertCOOIndex (sidx, 1 , {0 , 0 , 2 });
108+ AssertCOOIndex (sidx, 2 , {0 , 1 , 1 });
109+ AssertCOOIndex (sidx, 10 , {1 , 2 , 1 });
110+ AssertCOOIndex (sidx, 11 , {1 , 2 , 3 });
126111}
127112
128113TEST (TestSparseCOOTensor, CreationFromTensor) {
@@ -147,43 +132,47 @@ TEST(TestSparseCOOTensor, CreationFromTensor) {
147132 ASSERT_EQ ("" , st1.dim_name (1 ));
148133 ASSERT_EQ ("" , st1.dim_name (2 ));
149134
150- const int64_t * ptr = reinterpret_cast <const int64_t *>(st1.raw_data ());
151- for (int i = 0 ; i < 6 ; ++i) {
152- ASSERT_EQ (i + 1 , ptr[i]);
153- }
154- for (int i = 0 ; i < 6 ; ++i) {
155- ASSERT_EQ (i + 11 , ptr[i + 6 ]);
156- }
135+ const int64_t * raw_data = reinterpret_cast <const int64_t *>(st1.raw_data ());
136+ AssertNumericDataEqual (raw_data, {1 , 2 , 3 , 4 , 5 , 6 , 11 , 12 , 13 , 14 , 15 , 16 });
157137
158138 const auto & si = internal::checked_cast<const SparseCOOIndex&>(*st1.sparse_index ());
159139 std::shared_ptr<SparseCOOIndex::CoordsTensor> sidx = si.indices ();
160140 ASSERT_EQ (std::vector<int64_t >({12 , 3 }), sidx->shape ());
161141 ASSERT_TRUE (sidx->is_column_major ());
162142
163- // (0, 0, 0) -> 1
164- ASSERT_EQ (0 , sidx->Value ({0 , 0 }));
165- ASSERT_EQ (0 , sidx->Value ({0 , 1 }));
166- ASSERT_EQ (0 , sidx->Value ({0 , 2 }));
167-
168- // (0, 0, 2) -> 2
169- ASSERT_EQ (0 , sidx->Value ({1 , 0 }));
170- ASSERT_EQ (0 , sidx->Value ({1 , 1 }));
171- ASSERT_EQ (2 , sidx->Value ({1 , 2 }));
172-
173- // (0, 1, 1) -> 3
174- ASSERT_EQ (0 , sidx->Value ({2 , 0 }));
175- ASSERT_EQ (1 , sidx->Value ({2 , 1 }));
176- ASSERT_EQ (1 , sidx->Value ({2 , 2 }));
177-
178- // (1, 2, 1) -> 15
179- ASSERT_EQ (1 , sidx->Value ({10 , 0 }));
180- ASSERT_EQ (2 , sidx->Value ({10 , 1 }));
181- ASSERT_EQ (1 , sidx->Value ({10 , 2 }));
182-
183- // (1, 2, 3) -> 16
184- ASSERT_EQ (1 , sidx->Value ({11 , 0 }));
185- ASSERT_EQ (2 , sidx->Value ({11 , 1 }));
186- ASSERT_EQ (3 , sidx->Value ({11 , 2 }));
143+ AssertCOOIndex (sidx, 0 , {0 , 0 , 0 });
144+ AssertCOOIndex (sidx, 1 , {0 , 0 , 2 });
145+ AssertCOOIndex (sidx, 2 , {0 , 1 , 1 });
146+ AssertCOOIndex (sidx, 10 , {1 , 2 , 1 });
147+ AssertCOOIndex (sidx, 11 , {1 , 2 , 3 });
148+ }
149+
150+ TEST (TestSparseCOOTensor, CreationFromNonContiguousTensor) {
151+ std::vector<int64_t > shape = {2 , 3 , 4 };
152+ std::vector<int64_t > values = {1 , 0 , 0 , 0 , 2 , 0 , 0 , 0 , 0 , 0 , 3 , 0 , 0 , 0 , 4 , 0 ,
153+ 5 , 0 , 0 , 0 , 6 , 0 , 0 , 0 , 0 , 0 , 11 , 0 , 0 , 0 , 12 , 0 ,
154+ 13 , 0 , 0 , 0 , 14 , 0 , 0 , 0 , 0 , 0 , 15 , 0 , 0 , 0 , 16 , 0 };
155+ std::vector<int64_t > strides = {192 , 64 , 16 };
156+ std::shared_ptr<Buffer> buffer = Buffer::Wrap (values);
157+ Tensor tensor (int64 (), buffer, shape, strides);
158+ SparseTensorImpl<SparseCOOIndex> st (tensor);
159+
160+ ASSERT_EQ (12 , st.non_zero_length ());
161+ ASSERT_TRUE (st.is_mutable ());
162+
163+ const int64_t * raw_data = reinterpret_cast <const int64_t *>(st.raw_data ());
164+ AssertNumericDataEqual (raw_data, {1 , 2 , 3 , 4 , 5 , 6 , 11 , 12 , 13 , 14 , 15 , 16 });
165+
166+ const auto & si = internal::checked_cast<const SparseCOOIndex&>(*st.sparse_index ());
167+ std::shared_ptr<SparseCOOIndex::CoordsTensor> sidx = si.indices ();
168+ ASSERT_EQ (std::vector<int64_t >({12 , 3 }), sidx->shape ());
169+ ASSERT_TRUE (sidx->is_column_major ());
170+
171+ AssertCOOIndex (sidx, 0 , {0 , 0 , 0 });
172+ AssertCOOIndex (sidx, 1 , {0 , 0 , 2 });
173+ AssertCOOIndex (sidx, 2 , {0 , 1 , 1 });
174+ AssertCOOIndex (sidx, 10 , {1 , 2 , 1 });
175+ AssertCOOIndex (sidx, 11 , {1 , 2 , 3 });
187176}
188177
189178TEST (TestSparseCSRMatrix, CreationFromNumericTensor2D) {
@@ -211,16 +200,10 @@ TEST(TestSparseCSRMatrix, CreationFromNumericTensor2D) {
211200 ASSERT_EQ ("" , st1.dim_name (1 ));
212201 ASSERT_EQ ("" , st1.dim_name (2 ));
213202
214- const int64_t * ptr = reinterpret_cast <const int64_t *>(st1.raw_data ());
215- for (int i = 0 ; i < 6 ; ++i) {
216- ASSERT_EQ (i + 1 , ptr[i]);
217- }
218- for (int i = 0 ; i < 6 ; ++i) {
219- ASSERT_EQ (i + 11 , ptr[i + 6 ]);
220- }
203+ const int64_t * raw_data = reinterpret_cast <const int64_t *>(st1.raw_data ());
204+ AssertNumericDataEqual (raw_data, {1 , 2 , 3 , 4 , 5 , 6 , 11 , 12 , 13 , 14 , 15 , 16 });
221205
222206 const auto & si = internal::checked_cast<const SparseCSRIndex&>(*st1.sparse_index ());
223-
224207 ASSERT_EQ (std::string (" SparseCSRIndex" ), si.ToString ());
225208 ASSERT_EQ (1 , si.indptr ()->ndim ());
226209 ASSERT_EQ (1 , si.indices ()->ndim ());
@@ -241,4 +224,40 @@ TEST(TestSparseCSRMatrix, CreationFromNumericTensor2D) {
241224 ASSERT_EQ (std::vector<int64_t >({0 , 2 , 1 , 3 , 0 , 2 , 1 , 3 , 0 , 2 , 1 , 3 }), indices_values);
242225}
243226
227+ TEST (TestSparseCSRMatrix, CreationFromNonContiguousTensor) {
228+ std::vector<int64_t > shape = {6 , 4 };
229+ std::vector<int64_t > values = {1 , 0 , 0 , 0 , 2 , 0 , 0 , 0 , 0 , 0 , 3 , 0 , 0 , 0 , 4 , 0 ,
230+ 5 , 0 , 0 , 0 , 6 , 0 , 0 , 0 , 0 , 0 , 11 , 0 , 0 , 0 , 12 , 0 ,
231+ 13 , 0 , 0 , 0 , 14 , 0 , 0 , 0 , 0 , 0 , 15 , 0 , 0 , 0 , 16 , 0 };
232+ std::vector<int64_t > strides = {64 , 16 };
233+ std::shared_ptr<Buffer> buffer = Buffer::Wrap (values);
234+ Tensor tensor (int64 (), buffer, shape, strides);
235+ SparseTensorImpl<SparseCSRIndex> st (tensor);
236+
237+ ASSERT_EQ (12 , st.non_zero_length ());
238+ ASSERT_TRUE (st.is_mutable ());
239+
240+ const int64_t * raw_data = reinterpret_cast <const int64_t *>(st.raw_data ());
241+ AssertNumericDataEqual (raw_data, {1 , 2 , 3 , 4 , 5 , 6 , 11 , 12 , 13 , 14 , 15 , 16 });
242+
243+ const auto & si = internal::checked_cast<const SparseCSRIndex&>(*st.sparse_index ());
244+ ASSERT_EQ (1 , si.indptr ()->ndim ());
245+ ASSERT_EQ (1 , si.indices ()->ndim ());
246+
247+ const int64_t * indptr_begin = reinterpret_cast <const int64_t *>(si.indptr ()->raw_data ());
248+ std::vector<int64_t > indptr_values (indptr_begin,
249+ indptr_begin + si.indptr ()->shape ()[0 ]);
250+
251+ ASSERT_EQ (7 , indptr_values.size ());
252+ ASSERT_EQ (std::vector<int64_t >({0 , 2 , 4 , 6 , 8 , 10 , 12 }), indptr_values);
253+
254+ const int64_t * indices_begin =
255+ reinterpret_cast <const int64_t *>(si.indices ()->raw_data ());
256+ std::vector<int64_t > indices_values (indices_begin,
257+ indices_begin + si.indices ()->shape ()[0 ]);
258+
259+ ASSERT_EQ (12 , indices_values.size ());
260+ ASSERT_EQ (std::vector<int64_t >({0 , 2 , 1 , 3 , 0 , 2 , 1 , 3 , 0 , 2 , 1 , 3 }), indices_values);
261+ }
262+
244263} // namespace arrow