@@ -1749,6 +1749,7 @@ def test_forward_batch_matmul():
     _test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
     _test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
 
+
 #######################################################################
 # SparseTensorDenseMatMul
 # ----------------------------------
@@ -1757,23 +1758,30 @@ def test_forward_batch_matmul():
 def _test_sparse_dense_matmul(indices, values, A_shape, B_shape, dtype, flip=False):
     """ One iteration of sparse_dense_matmul """
 
-    #TODO: Support adjoint options too
+    # TODO: Support adjoint options too
     for adjoint_a in [False]:
         for adjoint_b in [False]:
             with tf.Graph().as_default():
-                A_sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[4., 8.], dense_shape=A_shape)
+                A_sp = tf.sparse.SparseTensor(
+                    indices=[[0, 0], [1, 2]], values=[4.0, 8.0], dense_shape=A_shape
+                )
                 B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
 
                 if flip:
-                    result = tf.sparse.sparse_dense_matmul(B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                    result = tf.sparse.sparse_dense_matmul(
+                        B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b
+                    )
                 else:
-                    result = tf.sparse.sparse_dense_matmul(A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                    result = tf.sparse.sparse_dense_matmul(
+                        A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
+                    )
 
                 B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
 
-                #TODO: There is an issue in cuda scheduling for csr, work in progress
+                # TODO: There is an issue in cuda scheduling for csr, work in progress
                 compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
 
+
 def test_forward_sparse_dense_matmul():
     """ sparse_dense_matmul op test"""
     ###################################################################
@@ -1786,14 +1794,18 @@ def test_forward_sparse_dense_matmul():
     # [0, 0, 2, 0]
     # [0, 0, 0, 0]]
     #
-    #------------------------------------------------------------------
-
-    #TODO: False case for flip need to be supported
-    #_test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 4], [4, 3], "float32")
-    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 5], [4, 3], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 3], [3, 3], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [5, 5], [5, 5], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [9, 5], [7, 9], "float32", True)
+    # ------------------------------------------------------------------
+
+    # TODO: False case for flip need to be supported
+    # _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
+    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 5], [4, 3], "float32", True)
+    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
+    _test_sparse_dense_matmul(
+        [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
+    )
+    _test_sparse_dense_matmul(
+        [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
+    )
 
 
 #######################################################################
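
For context, a minimal standalone sketch of the flip=True path these tests exercise: a dense matrix multiplied by a sparse one. The shapes and values mirror the first _test_sparse_dense_matmul call above; the session/print wrapper is illustrative only, not part of the test suite, and it assumes a TensorFlow version whose tf.sparse.sparse_dense_matmul accepts the dense operand first (as the flip=True calls require).

    import numpy as np
    import tensorflow.compat.v1 as tf

    with tf.Graph().as_default():
        # Sparse A with dense shape [3, 5] and two non-zero entries.
        A_sp = tf.sparse.SparseTensor(
            indices=[[0, 0], [1, 2]], values=[4.0, 8.0], dense_shape=[3, 5]
        )
        B = tf.placeholder(shape=[4, 3], dtype="float32", name="B")
        # flip=True path: dense [4, 3] x sparse [3, 5] -> dense [4, 5].
        result = tf.sparse.sparse_dense_matmul(B, A_sp)

        with tf.Session() as sess:
            B_np = np.random.uniform(high=5.0, size=[4, 3]).astype("float32")
            print(sess.run(result, feed_dict={B: B_np}).shape)  # (4, 5)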