Commit 2f2170f4 by masahi, committed by Tianqi Chen

add helpful message to topi test (#592)

parent 20144de2
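The change inserts the same one-line progress message into each TOPI test's per-device check, so a log shows whether a target actually ran or was merely skipped. For reference, a minimal sketch of the pattern the added line slots into is shown below, modeled on the relu test touched by this commit and the TVM 0.x API visible in the diff (tvm.module.enabled, tvm.target.create, topi.generic.schedule_elemwise); the function name verify_relu_example and the device list are illustrative, not part of the commit.

import numpy as np
import tvm
import topi

def verify_relu_example(m, n):
    # Declare a simple elementwise op so there is something to schedule.
    A = tvm.placeholder((m, n), name="A")
    B = topi.nn.relu(A)

    def check_device(device):
        # Skip targets that are not built into this TVM installation.
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        # The line added by this commit: report which target actually runs.
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_elemwise(B)
        ctx = tvm.context(device, 0)
        a = tvm.nd.array(np.random.uniform(size=(m, n)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.zeros((m, n), dtype=B.dtype), ctx)
        f = tvm.build(s, [A, B], device, name="relu")
        f(a, b)
        np.testing.assert_allclose(b.asnumpy(), np.maximum(a.asnumpy(), 0), rtol=1e-5)

    for device in ["llvm", "cuda", "opencl", "metal", "rocm"]:
        check_device(device)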
@@ -12,6 +12,7 @@ def verify_broadcast_to_ele(in_shape, out_shape):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_broadcast(B)
         ctx = tvm.context(device, 0)
@@ -52,6 +53,7 @@ def verify_broadcast_binary_ele(lhs_shape, rhs_shape, typ="add"):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_broadcast(C)
         ctx = tvm.context(device, 0)
...
@@ -34,6 +34,7 @@ def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, p
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         ctx = tvm.context(device, 0)
         a = tvm.nd.array(a_np, ctx)
         w = tvm.nd.array(w_np, ctx)
...
@@ -33,6 +33,7 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s1 = topi.generic.schedule_conv2d_nchw([B])
             s2 = topi.generic.schedule_conv2d_nchw([C])
...
@@ -32,6 +32,7 @@ def verify_conv2d_transpose_nchw(batch, in_channel, in_size, num_filter, kernel,
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s1 = topi.generic.schedule_conv2d_transpose_nchw([B])
             s2 = topi.generic.schedule_conv2d_transpose_nchw([C])
...
@@ -32,6 +32,7 @@ def verify_dense(batch, in_dim, out_dim, use_bias=True):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_dense(D)
         ctx = tvm.context(device, 0)
...
@@ -27,6 +27,7 @@ def depthwise_conv2d_with_workload_nchw(batch, in_channel, in_height, channel_mu
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             # schedule
             s1 = topi.generic.schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
@@ -111,6 +112,7 @@ def depthwise_conv2d_with_workload_nhwc(batch, in_channel, in_height, channel_mu
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s1 = topi.generic.schedule_depthwise_conv2d_nhwc(DepthwiseConv2d)
...
@@ -35,6 +35,7 @@ def verify_depthwise_conv2d_back_input(batch, in_channel, in_h, channel_multipli
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         ctx = tvm.context(device, 0)
         # build the kernel
         f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)
...
@@ -35,6 +35,7 @@ def verify_depthwise_conv2d_back_weight(batch, in_channel, in_h, channel_multipl
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         ctx = tvm.context(device, 0)
         # build the kernel
         f = tvm.build(schedule, [Input, Out_grad, Weight_grad], device)
...
@@ -35,6 +35,7 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_pool(B)
         ctx = tvm.context(device, 0)
@@ -70,6 +71,7 @@ def verify_global_pool(n, c, h, w, pool_type):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_global_pool(B)
         ctx = tvm.context(device, 0)
...
@@ -50,6 +50,7 @@ def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum"):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_reduce(B)
         ctx = tvm.context(device, 0)
...
@@ -16,6 +16,7 @@ def verify_relu(m, n):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_elemwise(B)
         ctx = tvm.context(device, 0)
...
@@ -20,6 +20,7 @@ def verify_softmax(m, n):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_softmax(B)
         ctx = tvm.context(device, 0)
@@ -50,6 +51,7 @@ def verify_log_softmax(m, n):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_softmax(B)
         ctx = tvm.context(device, 0)
...
@@ -10,6 +10,7 @@ def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_broadcast(B)
         ctx = tvm.context(device, 0)
@@ -32,6 +33,7 @@ def verify_tranpose(in_shape, axes):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_injective(B)
         ctx = tvm.context(device, 0)
@@ -54,6 +56,7 @@ def verify_reshape(src_shape, dst_shape):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_injective(B)
         ctx = tvm.context(device, 0)
@@ -76,6 +79,7 @@ def verify_squeeze(src_shape, axis):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_injective(B)
         ctx = tvm.context(device, 0)
@@ -103,6 +107,7 @@ def verify_concatenate(shapes, axis):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_injective(out_tensor)
         ctx = tvm.context(device, 0)
@@ -125,6 +130,7 @@ def verify_split(src_shape, indices_or_sections, axis):
         if not tvm.module.enabled(device):
             print("Skip because %s is not enabled" % device)
             return
+        print("Running on target: %s" % device)
         with tvm.target.create(device):
             s = topi.generic.schedule_injective(tensor_l)
         ctx = tvm.context(device, 0)
...