Unverified commit d56829ea by Matthew Brookhart, committed by GitHub

Conv3D ONNX support and conv3D_ncdhw x86 schedules (#4949)

* Support 3d Convolution with the ONNX frontend

* add unit tests for conv3d in onnx frontend

respond to PR formatting requests

add x86 schedules to conv3d ncdhw test

fix a doc string format issue

refactor for changed upstream API

* first attempt at conv3d autotuning

add default schedule for conv3d_ncdhw

fill in autotvm integration

add a fallback for invalid schedules

fix fallback

fix reduction order to get SIMD working correctly
parent a6cb4b8d
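
For orientation, this change wires ONNX Conv nodes with 3-D kernels through to relay's conv3d. A minimal sketch of the newly supported path (shapes and names are illustrative, not taken from this commit):

import onnx
from onnx import helper, TensorProto
from tvm import relay

# Illustrative 3-D Conv: 5x5x5 input volume, 3x3x3 kernel, symmetric padding of 1.
x_shape, w_shape = (1, 1, 5, 5, 5), (1, 1, 3, 3, 3)
node = helper.make_node("Conv", inputs=["x", "W"], outputs=["y"],
                        kernel_shape=[3, 3, 3], pads=[1, 1, 1, 1, 1, 1],
                        strides=[1, 1, 1], dilations=[1, 1, 1])
graph = helper.make_graph(
    [node], "conv3d_example",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, x_shape),
            helper.make_tensor_value_info("W", TensorProto.FLOAT, w_shape)],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, x_shape)])
model = helper.make_model(graph, producer_name="conv3d_example")
# The frontend's dimension_picker (first hunk below) now resolves this node to conv3d.
mod, params = relay.frontend.from_onnx(model, shape={"x": x_shape, "W": w_shape})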
@@ -91,16 +91,18 @@ def get_numpy(tensor_proto):
     return to_array(tensor_proto)


-def dimension_picker(prefix, surfix=''):
+def dimension_picker(prefix, suffix=''):
     """Check that dimensions are supported."""
     def _impl(attr):
         kernel = attr['kernel_shape']
         if len(kernel) == 1:
-            return prefix + '1d' + surfix
+            return prefix + '1d' + suffix
         if len(kernel) == 2:
-            return prefix + '2d' + surfix
-        msg = 'Only 1D and 2D kernels are supported for operator {}.'
-        op_name = prefix + '1d/2d'
+            return prefix + '2d' + suffix
+        if len(kernel) == 3:
+            return prefix + '3d' + suffix
+        msg = 'Only 1D, 2D, and 3D kernels are supported for operator {}.'
+        op_name = prefix + '1d/2d/3d'
         raise tvm.error.OpAttributeInvalid(msg.format(op_name))

     return _impl
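
In effect, dimension_picker now maps the rank of an ONNX node's kernel_shape to an operator-name suffix. A short sketch of the behavior after this hunk (attribute dicts are illustrative):

picker = dimension_picker('conv')
assert picker({'kernel_shape': [3]}) == 'conv1d'
assert picker({'kernel_shape': [3, 3]}) == 'conv2d'
assert picker({'kernel_shape': [3, 3, 3]}) == 'conv3d'  # newly supported
# Any other rank still raises tvm.error.OpAttributeInvalid.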
@@ -155,11 +157,11 @@ def onnx_storage_order2layout(storage_order, dims=2):

 def dimension_constraint():
     def _dim_check(attrs):
-        if len(attrs['kernel_shape']) == 2 or len(attrs['kernel_shape']) == 1:
+        if len(attrs['kernel_shape']) in [1, 2, 3]:
             return True
         return False
-    return _dim_check, "Only 1d and 2d kernel supported."
+    return _dim_check, "Only 1d, 2d and 3d kernel supported."


 class OnnxOpConverter(object):
......
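
The companion predicate returned by dimension_constraint gates op conversion on the same set of kernel ranks; illustratively:

check, err_msg = dimension_constraint()
assert check({'kernel_shape': [3, 3, 3]})         # 3-D kernels now pass
assert not check({'kernel_shape': [3, 3, 3, 3]})  # higher ranks are rejected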
@@ -188,10 +188,9 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     if layout == "NCDHW":
-        logger.warning("conv3d with layout NCDHW is not optimized for x86.")
-        strategy.add_implementation(wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
-                                    wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
-                                    name="conv3d_ncdhw.generic")
+        strategy.add_implementation(wrap_compute_conv3d(topi.x86.conv3d_ncdhw),
+                                    wrap_topi_schedule(topi.x86.schedule_conv3d_ncdhw),
+                                    name="conv3d_ncdhw.x86")
     elif layout == "NDHWC":
         strategy.add_implementation(wrap_compute_conv3d(topi.x86.conv3d_ndhwc),
                                     wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc),
......
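
With the strategy hunk above, compiling an NCDHW conv3d for an x86 (llvm) target dispatches to the AutoTVM-tunable x86 implementation instead of the generic fallback. A rough relay-level sketch, assuming the relay API of roughly this era (shapes illustrative):

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 3, 8, 8, 8))   # NCDHW input
w = relay.var("w", shape=(8, 3, 3, 3, 3))   # OIDHW kernel
y = relay.nn.conv3d(x, w, channels=8, kernel_size=(3, 3, 3), padding=(1, 1, 1))
func = relay.Function([x, w], y)
# Lowering now selects conv3d_ncdhw.x86 rather than conv3d_ncdhw.generic.
lib = relay.build(tvm.IRModule.from_expr(func), target="llvm")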
@@ -1794,37 +1794,51 @@ def verify_conv(x_shape, w_shape, y_shape, padding, kernel_shape, strides, dilat

 def test_conv():
+    def repeat(N, D):
+        return tuple([N for _ in range(D)])
+    for D in [1, 2, 3]:
-    # Convolution with padding
-    # Conv2D
-    verify_conv((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 5, 5), [1, 1, 1, 1], [3, 3], [1, 1], [1, 1])
-    # Conv1D
-    verify_conv((1, 1, 5), (1, 1, 3), (1, 1, 5), [1, 1], [3], [1], [1])
+        # Convolution with padding
+        verify_conv((1, 1) + repeat(5, D),
+                    (1, 1) + repeat(3, D),
+                    (1, 1) + repeat(5, D),
+                    2 * repeat(1, D),
+                    repeat(3, D),
+                    repeat(1, D),
+                    repeat(1, D))
-    # Convolution without padding
-    # Conv2D
-    verify_conv((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 3, 3), [0, 0, 0, 0], [3, 3], [1, 1], [1, 1])
-    # Conv1D
-    verify_conv((1, 1, 5), (1, 1, 3), (1, 1, 3), [0, 0], [3], [1], [1])
+        # Convolution without padding
+        verify_conv((1, 1) + repeat(5, D),
+                    (1, 1) + repeat(3, D),
+                    (1, 1) + repeat(3, D),
+                    2 * repeat(0, D),
+                    repeat(3, D),
+                    repeat(1, D),
+                    repeat(1, D))
-    # Convolution with autopadding
-    verify_conv((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 5, 5),
-                None, [3, 3], [1, 1], [1, 1],
-                auto_pad="SAME_UPPER")
-    # Conv1D
-    verify_conv((1, 1, 5), (1, 1, 3), (1, 1, 5), None, [3], [1], [1], auto_pad="SAME_UPPER")
+        # Convolution with autopadding
+        verify_conv((1, 1) + repeat(5, D),
+                    (1, 1) + repeat(3, D),
+                    (1, 1) + repeat(5, D),
+                    None,
+                    repeat(3, D),
+                    repeat(1, D),
+                    repeat(1, D),
+                    auto_pad="SAME_UPPER")
-    # Convolution with non uniform stride
-    verify_conv((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 3, 3),
-                None, [3, 3], [2, 2], [1, 1],
-                auto_pad="SAME_UPPER")
-    # Conv1D
-    verify_conv((1, 1, 5), (1, 1, 3), (1, 1, 3), None, [3], [2], [1], auto_pad="SAME_UPPER")
+        # Convolution with non uniform stride
+        verify_conv((1, 1) + repeat(5, D),
+                    (1, 1) + repeat(3, D),
+                    (1, 1) + repeat(3, D),
+                    None,
+                    repeat(3, D),
+                    repeat(2, D),
+                    repeat(1, D),
+                    auto_pad="SAME_UPPER")
-    # Convolution with dilation
-    verify_conv((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 5, 5), [2, 2, 2, 2], [3, 3], [1, 1], [2, 2])
-    # Conv1D
-    verify_conv((1, 1, 5), (1, 1, 3), (1, 1, 5), [2, 2], [3], [1], [2])
+        # Convolution with dilation
+        verify_conv((1, 1) + repeat(5, D),
+                    (1, 1) + repeat(3, D),
+                    (1, 1) + repeat(5, D),
+                    2 * repeat(2, D),
+                    repeat(3, D),
+                    repeat(1, D),
+                    repeat(2, D))

 def verify_convtranspose(x_shape, w_shape, y_shape, p):
     node = onnx.helper.make_node("ConvTranspose",
......
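
The repeat helper is what lets a single loop over D in [1, 2, 3] replace the separate Conv1D/Conv2D cases and add Conv3D. What its expressions expand to at D = 3, for example:

def repeat(N, D):
    return tuple([N for _ in range(D)])

repeat(5, 3)           # (5, 5, 5)          -- spatial input dims
(1, 1) + repeat(3, 3)  # (1, 1, 3, 3, 3)    -- full kernel shape
2 * repeat(1, 3)       # (1, 1, 1, 1, 1, 1) -- symmetric ONNX-style pads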
@@ -30,6 +30,7 @@ from common import get_all_backend

 _conv3d_ncdhw_implement = {
     "generic": (topi.nn.conv3d_ncdhw, topi.generic.schedule_conv3d_ncdhw),
+    "cpu": (topi.x86.conv3d_ncdhw, topi.x86.schedule_conv3d_ncdhw),
     "gpu": (topi.cuda.conv3d_ncdhw, topi.cuda.schedule_conv3d_ncdhw),
 }
......
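
For context, the topi test looks up its (compute, schedule) pair from this table by target kind. A hypothetical helper (not part of this diff) showing the lookup pattern:

def select_implement(target_kind, table):
    # Fall back to the generic pair when no specialized entry exists.
    return table.get(target_kind, table["generic"])

fcompute, fschedule = select_implement("cpu", _conv3d_ncdhw_implement)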