Skip to content

Commit b68b6b5

Browse files
ANSHUMAN TRIPATHY
authored and committed
Lint error resolved
1 parent 575617f commit b68b6b5

File tree

3 files changed

+35
-19
lines changed

3 files changed

+35
-19
lines changed

python/tvm/relay/frontend/tensorflow.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -889,6 +889,7 @@ def _impl(inputs, attr, params, mod):
889889

890890
return _impl
891891

892+
892893
def _sparse_tensor_dense_matmul():
893894
# Sparse utility from Numpy
894895
from scipy import sparse
@@ -906,7 +907,9 @@ def _impl(inputs, attr, params, mod):
906907
cols = [x[1] for x in indices_tensor]
907908

908909
# Create Numpy sparse Tensor(CSR)
909-
weight_sp = sparse.csr_matrix((values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist()))
910+
weight_sp = sparse.csr_matrix(
911+
(values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist())
912+
)
910913
weight_sp = sparse.csr_matrix(weight_sp.transpose())
911914

912915
weight_data = _expr.const(weight_sp.data, weight_sp.data.dtype)
@@ -924,6 +927,7 @@ def _impl(inputs, attr, params, mod):
924927

925928
return _impl
926929

930+
927931
def _identity():
928932
def _impl(inputs, attr, params, mod):
929933
return inputs[0]

python/tvm/topi/cuda/sparse.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -369,12 +369,12 @@ def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
369369
):
370370
if len(inputs[1].data.asnumpy().shape) == 1:
371371
sparse_matrix = sp.csr_matrix(
372-
(inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
373-
).tobsr()
374-
else :
372+
(inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
373+
).tobsr()
374+
else:
375375
sparse_matrix = sp.bsr_matrix(
376-
(inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
377-
)
376+
(inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
377+
)
378378
warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
379379
sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
380380
return relay.nn._make.sparse_dense_padded(

tests/python/frontend/tensorflow/test_forward.py

Lines changed: 25 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1749,6 +1749,7 @@ def test_forward_batch_matmul():
17491749
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
17501750
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
17511751

1752+
17521753
#######################################################################
17531754
# SparseTensorDenseMatMul
17541755
# ----------------------------------
@@ -1757,23 +1758,30 @@ def test_forward_batch_matmul():
17571758
def _test_sparse_dense_matmul(indices, values, A_shape, B_shape, dtype, flip=False):
17581759
""" One iteration of sparse_dense_matmul """
17591760

1760-
#TODO: Support adjoint options too
1761+
# TODO: Support adjoint options too
17611762
for adjoint_a in [False]:
17621763
for adjoint_b in [False]:
17631764
with tf.Graph().as_default():
1764-
A_sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[4., 8.], dense_shape=A_shape)
1765+
A_sp = tf.sparse.SparseTensor(
1766+
indices=[[0, 0], [1, 2]], values=[4.0, 8.0], dense_shape=A_shape
1767+
)
17651768
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
17661769

17671770
if flip:
1768-
result = tf.sparse.sparse_dense_matmul(B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
1771+
result = tf.sparse.sparse_dense_matmul(
1772+
B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b
1773+
)
17691774
else:
1770-
result = tf.sparse.sparse_dense_matmul(A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
1775+
result = tf.sparse.sparse_dense_matmul(
1776+
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
1777+
)
17711778

17721779
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
17731780

1774-
#TODO: There is an issue in cuda scheduling for csr, work in progress
1781+
# TODO: There is an issue in cuda scheduling for csr, work in progress
17751782
compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
17761783

1784+
17771785
def test_forward_sparse_dense_matmul():
17781786
""" sparse_dense_matmul op test"""
17791787
###################################################################
@@ -1786,14 +1794,18 @@ def test_forward_sparse_dense_matmul():
17861794
# [0, 0, 2, 0]
17871795
# [0, 0, 0, 0]]
17881796
#
1789-
#------------------------------------------------------------------
1790-
1791-
#TODO: False case for flip need to be supported
1792-
#_test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 4], [4, 3], "float32")
1793-
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 5], [4, 3], "float32", True)
1794-
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 3], [3, 3], "float32", True)
1795-
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [5, 5], [5, 5], "float32", True)
1796-
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [9, 5], [7, 9], "float32", True)
1797+
# ------------------------------------------------------------------
1798+
1799+
# TODO: False case for flip need to be supported
1800+
# _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
1801+
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 5], [4, 3], "float32", True)
1802+
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
1803+
_test_sparse_dense_matmul(
1804+
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
1805+
)
1806+
_test_sparse_dense_matmul(
1807+
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
1808+
)
17971809

17981810

17991811
#######################################################################

0 commit comments

Comments (0)