Skip to content

Commit

Permalink
Merge main
Browse files Browse the repository at this point in the history
  • Loading branch information
zxybazh committed Mar 30, 2021
2 parents fdfb93a + 9b43a64 commit e2b5c1a
Show file tree
Hide file tree
Showing 7 changed files with 23 additions and 9 deletions.
6 changes: 3 additions & 3 deletions tests/python/contrib/test_miopen.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,11 +63,11 @@ def verify():
X, W, (stride_h, stride_w), (pad_h, pad_w), (dilation_h, dilation_w)
)
s_ref = te.create_schedule(Y_ref.op)
<<<<<<< HEAD
f_ref = tvm.build(s_ref, [X, W, Y_ref], "rocm --host=llvm")
=======
f_ref = tvm.build(s_ref, [X, W, Y_ref], "rocm", target_host="llvm")
>>>>>>> main
y_ref = tvm.nd.array(np.random.uniform(-1, 1, yshape).astype(np.float32), dev)
f_ref(x, w, y_ref)
print("Max abs diff:", np.max(np.abs(y.asnumpy() - y_ref.asnumpy())))
tvm.testing.assert_allclose(y.asnumpy(), y_ref.asnumpy(), atol=1e-3)

verify()

Expand Down
4 changes: 1 addition & 3 deletions tests/python/frontend/tensorflow/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,9 +165,7 @@ def run_tvm_graph(
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
target = tvm.target.Target(target, target_host)
graph, lib, params = relay.build(
mod, target=target, params=params
)
graph, lib, params = relay.build(mod, target=target, params=params)
from tvm.contrib import graph_executor

m = graph_executor.create(graph, lib, dev)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,6 @@ def test_in_bounds_llvm():
fadd = tvm.build(s, [A, B, C], target=tgt, name="myadd")
dev = tvm.device(tgt.kind.name, 0)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=1024).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(1024, dtype=C.dtype), dev)
fadd(a, b, c)


@tvm.testing.requires_llvm
Expand Down
5 changes: 5 additions & 0 deletions tutorials/frontend/from_darknet.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,12 @@
# Import the graph to Relay
# -------------------------
# compile the model
<<<<<<< HEAD
target = tvm.target.Target("llvm", host="llvm")
=======
target = "llvm"
target_host = "llvm"
>>>>>>> main
dev = tvm.cpu(0)
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {"data": data.shape}
Expand Down
5 changes: 5 additions & 0 deletions tutorials/frontend/from_pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,12 @@
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
<<<<<<< HEAD
target = tvm.target.Target("llvm", host="llvm")
=======
target = "llvm"
target_host = "llvm"
>>>>>>> main
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
Expand Down
5 changes: 5 additions & 0 deletions tutorials/frontend/from_tensorflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,12 @@
# target = tvm.target.Target("cuda", host="llvm")
# layout = "NCHW"
# dev = tvm.gpu(0)
<<<<<<< HEAD
target = tvm.target.Target("llvm", host="llvm")
=======
target = "llvm"
target_host = "llvm"
>>>>>>> main
layout = None
dev = tvm.cpu(0)

Expand Down
4 changes: 4 additions & 0 deletions tutorials/get_started/tensor_expr_get_started.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,11 @@
# - fadd runs the actual computation.
# - asnumpy() copies the GPU array back to the CPU and we can use this to verify correctness
#
<<<<<<< HEAD
dev = tvm.device(tgt.kind.name, 0)
=======
dev = tvm.device(tgt, 0)
>>>>>>> main

n = 1024
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
Expand Down

0 comments on commit e2b5c1a

Please sign in to comment.