Unverified Commit d56829ea by Matthew Brookhart Committed by GitHub

Conv3D ONNX support and conv3D_ncdhw x86 schedules (#4949)

* Support 3d Convolution with the ONNX frontend

* add unit tests for conv3d in onnx frontend

respond to PR formatting requests

add x86 schedules to conv3d ncdhw test

fix a doc string format issue

refactor for changed upstream API

* first attempt at conv3d autotuning

add default schedule for conv3d_ncdhw

fill in autotvm integration

add a fallback for invalid schedules

fix fallback

fix reduction order to get simd working correctly
parent a6cb4b8d
...@@ -91,16 +91,18 @@ def get_numpy(tensor_proto): ...@@ -91,16 +91,18 @@ def get_numpy(tensor_proto):
return to_array(tensor_proto) return to_array(tensor_proto)
def dimension_picker(prefix, suffix=''):
    """Check that dimensions are supported.

    Returns a closure that, given an ONNX attribute dict, maps the
    length of ``kernel_shape`` to an operator name such as
    ``prefix + '2d' + suffix``.  1-D, 2-D and 3-D kernels are supported.
    """
    def _impl(attr):
        kernel = attr['kernel_shape']
        if len(kernel) == 1:
            return prefix + '1d' + suffix
        if len(kernel) == 2:
            return prefix + '2d' + suffix
        if len(kernel) == 3:
            return prefix + '3d' + suffix
        # Anything other than a 1/2/3-D kernel is rejected.
        msg = 'Only 1D, 2D, and 3D kernels are supported for operator {}.'
        op_name = prefix + '1d/2d/3d'
        raise tvm.error.OpAttributeInvalid(msg.format(op_name))
    return _impl
...@@ -155,11 +157,11 @@ def onnx_storage_order2layout(storage_order, dims=2): ...@@ -155,11 +157,11 @@ def onnx_storage_order2layout(storage_order, dims=2):
def dimension_constraint():
    """Return an attribute-check predicate plus its failure message.

    The predicate accepts an attribute dict and is True when
    ``kernel_shape`` is 1-, 2- or 3-dimensional.
    """
    def _dim_check(attrs):
        if len(attrs['kernel_shape']) in [1, 2, 3]:
            return True
        return False
    return _dim_check, "Only 1d, 2d and 3d kernel supported."
class OnnxOpConverter(object): class OnnxOpConverter(object):
......
...@@ -188,10 +188,9 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target): ...@@ -188,10 +188,9 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target):
strategy = _op.OpStrategy() strategy = _op.OpStrategy()
layout = attrs.data_layout layout = attrs.data_layout
if layout == "NCDHW": if layout == "NCDHW":
logger.warning("conv3d with layout NCDHW is not optimized for x86.") strategy.add_implementation(wrap_compute_conv3d(topi.x86.conv3d_ncdhw),
strategy.add_implementation(wrap_compute_conv3d(topi.nn.conv3d_ncdhw), wrap_topi_schedule(topi.x86.schedule_conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw), name="conv3d_ncdhw.x86")
name="conv3d_ncdhw.generic")
elif layout == "NDHWC": elif layout == "NDHWC":
strategy.add_implementation(wrap_compute_conv3d(topi.x86.conv3d_ndhwc), strategy.add_implementation(wrap_compute_conv3d(topi.x86.conv3d_ndhwc),
wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc), wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc),
......
...@@ -1794,37 +1794,51 @@ def verify_conv(x_shape, w_shape, y_shape, padding, kernel_shape, strides, dilat ...@@ -1794,37 +1794,51 @@ def verify_conv(x_shape, w_shape, y_shape, padding, kernel_shape, strides, dilat
def test_conv():
    """Exercise ONNX Conv import for 1-D, 2-D and 3-D kernels."""
    def repeat(N, D):
        # Tuple of N repeated D times, e.g. repeat(3, 2) -> (3, 3).
        return tuple([N for _ in range(D)])
    for D in [1, 2, 3]:
        # Convolution with padding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    2 * repeat(1, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D))
        # Convolution without padding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(3, D),
                    2 * repeat(0, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D))
        # Convolution with autopadding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    None,
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D),
                    auto_pad="SAME_UPPER")
        # Convolution with non uniform stride
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(3, D),
                    None,
                    repeat(3, D),
                    repeat(2, D),
                    repeat(1, D),
                    auto_pad="SAME_UPPER")
        # Convolution with dilation
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    2 * repeat(2, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(2, D))
def verify_convtranspose(x_shape, w_shape, y_shape, p): def verify_convtranspose(x_shape, w_shape, y_shape, p):
node = onnx.helper.make_node("ConvTranspose", node = onnx.helper.make_node("ConvTranspose",
......
...@@ -30,6 +30,7 @@ from common import get_all_backend ...@@ -30,6 +30,7 @@ from common import get_all_backend
# Target name -> (compute, schedule) pair for conv3d in NCDHW layout;
# the "cpu" entry routes to the new x86-specific implementation.
_conv3d_ncdhw_implement = {
    "generic": (topi.nn.conv3d_ncdhw, topi.generic.schedule_conv3d_ncdhw),
    "cpu": (topi.x86.conv3d_ncdhw, topi.x86.schedule_conv3d_ncdhw),
    "gpu": (topi.cuda.conv3d_ncdhw, topi.cuda.schedule_conv3d_ncdhw),
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment