
Commit f69b399

[4] Review comment handled

Authored and committed by ANSHUMAN TRIPATHY on Mar 4, 2021
Parent: 58b0bf6
Showing 7 changed files with 43 additions and 63 deletions.
4 changes: 2 additions & 2 deletions python/tvm/relay/op/nn/nn.py
@@ -2165,10 +2165,10 @@ def sparse_add(dense_mat, sparse_mat):
     Parameters
     ----------
     dense_mat : tvm.relay.Expr
-        The input dense matrix for the matrix multiplication
+        The input dense matrix for the matrix addition
 
     sparse_mat : Union[namedtuple, Tuple[ndarray, ndarray, ndarray]].
-        The input sparse matrix for the matrix multiplication.
+        The input sparse matrix(CSR) for the matrix addition.
 
     Returns
     -------
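For reference, a minimal sketch of calling this API after the fix (illustrative only: the variable names, shapes, and 0.3 density are our assumptions, not part of the commit; it assumes relay.nn.sparse_add accepts the (data, indices, indptr) tuple form described in the docstring):

    import scipy.sparse as sp
    from tvm import relay

    M, N = 3, 4
    # Arbitrary CSR operand, used only to get realistic component shapes.
    y_sp = sp.random(M, N, density=0.3, format="csr", dtype="float32")

    dense = relay.var("dense", shape=(M, N), dtype="float32")
    data = relay.var("data", shape=y_sp.data.shape, dtype="float32")
    indices = relay.var("indices", shape=y_sp.indices.shape, dtype="int32")
    indptr = relay.var("indptr", shape=y_sp.indptr.shape, dtype="int32")

    # sparse_mat passed in the (data, indices, indptr) tuple form.
    out = relay.nn.sparse_add(dense, (data, indices, indptr))
    func = relay.Function([dense, data, indices, indptr], out)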
7 changes: 0 additions & 7 deletions python/tvm/relay/op/strategy/generic.py
@@ -822,13 +822,6 @@ def sparse_add_strategy(attrs, inputs, out_type, target):
     return strategy
 
 
-@generic_func
-def schedule_sparse_add(attrs, outs, target):
-    """schedule sparse_add"""
-    with target:
-        return topi.generic.schedule_sparse_add(outs)
-
-
 # sparse_transpose
 @generic_func
 def schedule_sparse_transpose(attrs, outs, target):
17 changes: 0 additions & 17 deletions python/tvm/topi/generic/nn.py
@@ -730,23 +730,6 @@ def schedule_sparse_transpose(outs):
     return _default_schedule(outs, False)
 
 
-def schedule_sparse_add(outs):
-    """Schedule for sparse_add
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-        The computation graph description of sparse_add
-        in the format of an array of tensors.
-
-    Returns
-    -------
-    sch: Schedule
-        The computation schedule for the op.
-    """
-    return _default_schedule(outs, False)
-
-
 def schedule_batch_matmul(outs):
     """Schedule for batch_matmul
16 changes: 8 additions & 8 deletions python/tvm/topi/nn/sparse.py
@@ -365,16 +365,16 @@ def sparse_add(dense_data, sparse_data, sparse_indices, sparse_indptr):
     Parameters
     ----------
     dense_data : tvm.te.Tensor
-        2-D with shape [M, N], float32
+        2-D with shape [M, N]
 
     sparse_data : tvm.te.Tensor
-        1-D with shape [nnz] (CSR) or
+        1-D with shape [nnz] (CSR)
 
     sparse_indices : tvm.te.Tensor
-        1-D with shape [nnz] (CSR) or
+        1-D with shape [nnz] (CSR)
 
     sparse_indptr : tvm.te.Tensor
-        1-D with shape [M + 1] (CSR) or
+        1-D with shape [M + 1] (CSR)
 
     Returns
     -------
@@ -398,11 +398,11 @@ def _csr_add_ir(dense_data, sparse_data, sparse_indices, sparse_indptr, out_data
 
     out_data_ptr = irb.buffer_ptr(out_data)
 
-    with irb.for_range(0, oshape[0], kind="serial", name="row") as row:
-        with irb.for_range(0, oshape[1], kind="serial", name="col") as col:
+    with irb.for_range(0, oshape[0], kind="vectorize", name="row") as row:
+        with irb.for_range(0, oshape[1], kind="parallel", name="col") as col:
             out_data_ptr[row, col] = dense_data_ptr[row, col]
 
-    with irb.for_range(0, oshape[0], kind="serial", name="row") as row:
+    with irb.for_range(0, oshape[0], kind="parallel", name="row") as row:
         offset = sparse_indptr_ptr[row]
         diff = sparse_indptr_ptr[row + 1] - sparse_indptr_ptr[row]
         with irb.for_range(0, diff, kind="serial", name="idx") as idx:
@@ -423,5 +423,5 @@ def _csr_add_ir(dense_data, sparse_data, sparse_indices, sparse_indptr, out_data
             sparse_indices_inp.dtype,
             sparse_indptr_inp.dtype,
         ],
-        name="out",
+        name="sparse_add_csr_output",
    )
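As a cross-check on the IR above, here is a plain-NumPy sketch of the same dense-plus-CSR addition (illustrative only; the helper name csr_add_ref is ours and does not appear in the commit):

    import numpy as np

    def csr_add_ref(dense, sparse_data, sparse_indices, sparse_indptr):
        # First loop nest above: initialize the output with the dense operand.
        out = dense.copy()
        # Second loop nest above: indptr[row]..indptr[row + 1] bounds each
        # row's nonzeros; add every stored value into its column.
        for row in range(dense.shape[0]):
            for idx in range(sparse_indptr[row], sparse_indptr[row + 1]):
                out[row, sparse_indices[idx]] += sparse_data[idx]
        return out

The topi test further below verifies exactly this result by comparing against X_np + Y_sp_np.todense().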
2 changes: 1 addition & 1 deletion src/relay/op/nn/sparse.cc
@@ -199,7 +199,7 @@ RELAY_REGISTER_OP("nn.sparse_transpose")
 // relay.nn.sparse_add
 bool SparseAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                   const TypeReporter& reporter) {
-  ICHECK_EQ(types.size(), 5);
+  ICHECK_EQ(types.size(), 5) << "expecting 4 inputs and 1 output.";
   const auto* dense_data = types[0].as<TensorTypeNode>();
   const auto* sparse_data = types[1].as<TensorTypeNode>();
   ICHECK(reporter->Assert(sparse_data->dtype == dense_data->dtype))
14 changes: 8 additions & 6 deletions tests/python/frontend/tensorflow/test_forward.py
@@ -2189,7 +2189,9 @@ def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
     # TODO(ANSHUMAN87): support both sparse input case
 
     with tf.Graph().as_default():
-        A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
+        A_sp = tf.sparse.SparseTensor(
+            indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
+        )
         B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
 
         # TODO(ANSHUMAN87): support user input threashold values
@@ -2216,11 +2218,11 @@ def test_sparse_add():
     #      [0, 0, 0, 0]]
     #
     # ------------------------------------------------------------------
-
-    _test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], "float32")
-    _test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], "float32", True)
-    _test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
-    _test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True)
+    for dtype_inp in ["float32", "float64", "int32"]:
+        _test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp)
+        _test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, True)
+        _test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp)
+        _test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, True)
 
 
 #######################################################################
46 changes: 24 additions & 22 deletions tests/python/topi/python/test_topi_sparse.py
@@ -527,28 +527,30 @@ def test_sparse_dense_padded_alter_op():
 
 
 def test_sparse_add_csr():
-    M, K, density = 3, 49, 0.2
-    X_np = np.random.randn(M, K).astype("float32")
-    Y_sp_np = sp.random(M, K, density=density, format="csr", dtype="float32")
-    Y_np = Y_sp_np.todense()
-    Z_np = X_np + Y_np
-
-    Y_data = te.placeholder(shape=Y_sp_np.data.shape, dtype=str(Y_sp_np.data.dtype))
-    Y_indices = te.placeholder(shape=Y_sp_np.indices.shape, dtype=str(Y_sp_np.indices.dtype))
-    Y_indptr = te.placeholder(shape=Y_sp_np.indptr.shape, dtype=str(Y_sp_np.indptr.dtype))
-    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
-    Z = topi.nn.sparse_add(X, Y_data, Y_indices, Y_indptr)
-    s = te.create_schedule(Z.op)
-    func = tvm.build(s, [X, Y_data, Y_indices, Y_indptr, Z])
-    Z_tvm = tvm.nd.array(np.zeros(Z_np.shape, dtype=Z_np.dtype))
-    func(
-        tvm.nd.array(X_np),
-        tvm.nd.array(Y_sp_np.data),
-        tvm.nd.array(Y_sp_np.indices),
-        tvm.nd.array(Y_sp_np.indptr),
-        Z_tvm,
-    )
-    tvm.testing.assert_allclose(Z_tvm.asnumpy(), Z_np, atol=1e-4, rtol=1e-4)
+    for indices_dtype in ["int32", "int64"]:
+        for data_dtype in ["float32", "float64"]:
+            M, K, density = 3, 49, 0.2
+            X_np = np.random.randn(M, K).astype(data_dtype)
+            Y_sp_np = sp.random(M, K, density=density, format="csr", dtype=data_dtype)
+            Y_np = Y_sp_np.todense()
+            Z_np = X_np + Y_np
+
+            Y_data = te.placeholder(shape=Y_sp_np.data.shape, dtype=data_dtype)
+            Y_indices = te.placeholder(shape=Y_sp_np.indices.shape, dtype=indices_dtype)
+            Y_indptr = te.placeholder(shape=Y_sp_np.indptr.shape, dtype=indices_dtype)
+            X = te.placeholder(shape=X_np.shape, dtype=data_dtype)
+            Z = topi.nn.sparse_add(X, Y_data, Y_indices, Y_indptr)
+            s = te.create_schedule(Z.op)
+            func = tvm.build(s, [X, Y_data, Y_indices, Y_indptr, Z])
+            Z_tvm = tvm.nd.array(np.zeros(Z_np.shape, dtype=Z_np.dtype))
+            func(
+                tvm.nd.array(X_np.astype(data_dtype)),
+                tvm.nd.array(Y_sp_np.data.astype(data_dtype)),
+                tvm.nd.array(Y_sp_np.indices.astype(indices_dtype)),
+                tvm.nd.array(Y_sp_np.indptr.astype(indices_dtype)),
+                Z_tvm,
+            )
+            tvm.testing.assert_allclose(Z_tvm.asnumpy(), Z_np, atol=1e-4, rtol=1e-4)
 
 
 if __name__ == "__main__":
