It looks like the native TVM implementation of nn.dense does not handle dynamic shapes correctly, while the same program built with external libs such as MKL or cuBLAS works fine. The following code reproduces the issue (the assertion fails on the plain "llvm" target):
```python
import tvm
from tvm import relay
from tvm.relay import create_executor, Any
import numpy as np

A = relay.var("A", shape=[Any(), Any()], dtype="float32")
B = relay.var("B", shape=[Any(), Any()], dtype="float32")
# dense(A, B^T) computes A @ B, so the result should match np.matmul(a, b)
C = relay.nn.dense(A, relay.transpose(B))
f = relay.Function([A, B], C)
mod = tvm.IRModule.from_expr(f)

# the mkl target passes; the native "llvm" target produces wrong results
for target in ["llvm -libs=mkl", "llvm"]:
    dev = tvm.device(target, 0)
    executor = create_executor(kind="vm", mod=mod, device=dev, target=target)
    a = np.random.uniform(size=[10, 10]).astype("float32")
    b = np.random.uniform(size=[10, 10]).astype("float32")
    res = executor.evaluate()(a, b).asnumpy()
    print(np.sum(res))
    ref = np.matmul(a, b)
    print(np.sum(ref))
    np.testing.assert_allclose(res, ref, rtol=1e-5)
```
Please note that nn.batch_matmul handles the same dynamic-shape case correctly without external libs:
```python
import tvm
from tvm import relay
from tvm.relay import create_executor, Any
import numpy as np

A = relay.var("A", shape=[1, Any(), Any()], dtype="float32")
B = relay.var("B", shape=[1, Any(), Any()], dtype="float32")
C = relay.nn.batch_matmul(A, relay.transpose(B, axes=[0, 2, 1]))
f = relay.Function([A, B], C)
mod = tvm.IRModule.from_expr(f)

for target in ["llvm"]:
    dev = tvm.device(target, 0)
    executor = create_executor(kind="vm", mod=mod, device=dev, target=target)
    a = np.random.uniform(size=[1, 10, 10]).astype("float32")
    b = np.random.uniform(size=[1, 10, 10]).astype("float32")
    res = executor.evaluate()(a, b).asnumpy()
    print(np.sum(res))
    ref = np.matmul(a, b)
    print(np.sum(ref))
    np.testing.assert_allclose(res, ref, rtol=1e-5)
```
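Since the batch_matmul path works, a possible interim workaround (just a sketch, not an official recommendation) is to emulate nn.dense through nn.batch_matmul by adding and removing a unit batch dimension; both ops contract over the last axis of their second input, so the semantics line up:

```python
import tvm
from tvm import relay
from tvm.relay import create_executor, Any
import numpy as np

# Workaround sketch: dense(A, W) computes A @ W^T, and batch_matmul(X, Y)
# computes X @ Y^T per batch, so wrapping both inputs in a unit batch dim
# and squeezing it out reproduces the dense result on the native llvm path.
A = relay.var("A", shape=[Any(), Any()], dtype="float32")
W = relay.var("W", shape=[Any(), Any()], dtype="float32")
C = relay.squeeze(
    relay.nn.batch_matmul(relay.expand_dims(A, axis=0),
                          relay.expand_dims(W, axis=0)),
    axis=[0])
f = relay.Function([A, W], C)
mod = tvm.IRModule.from_expr(f)

dev = tvm.device("llvm", 0)
executor = create_executor(kind="vm", mod=mod, device=dev, target="llvm")
a = np.random.uniform(size=[10, 10]).astype("float32")
w = np.random.uniform(size=[10, 10]).astype("float32")
res = executor.evaluate()(a, w).asnumpy()
np.testing.assert_allclose(res, np.matmul(a, w.T), rtol=1e-5)
```

This mirrors the batch_matmul repro above, so it should give correct results on the plain llvm target until the native nn.dense schedule is fixed.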