Commit 2fa0eca1 authored by Siva, committed by Tianqi Chen

[NNVM][ONNX] Slice, Floor, Ceil, Clip and MatMul support for frontend #1297 (#1371)

parent 3d010ed5
......@@ -446,6 +446,47 @@ class Unsqueeze(OnnxOpConverter):
            inputs[0] = _sym.expand_dims(inputs[0], axis=axes, num_newaxis=1)
        return inputs[0]

class Slice(OnnxOpConverter):
    """ Operator converter for Slice.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if isinstance(attr['starts'], int):
            attr['starts'] = (attr['starts'],)
            attr['ends'] = (attr['ends'],)

        try:
            # Update the starts and ends according to axes if required.
            if isinstance(attr['axes'], int):
                attr['axes'] = (attr['axes'],)
            if (max(attr['axes']) + 1) != len(attr['axes']):
                new_axes = []
                new_starts = []
                new_ends = []
                pop_index = 0
                for i in range(max(attr['axes']) + 1):
                    if i in attr['axes']:
                        new_axes.append(i)
                        new_starts.append(attr['starts'][pop_index])
                        new_ends.append(attr['ends'][pop_index])
                        pop_index += 1
                    else:
                        new_axes.append(i)
                        new_starts.append(0)
                        new_ends.append(np.iinfo(np.int32).max)
                attr['axes'] = new_axes
                attr['starts'] = new_starts
                attr['ends'] = new_ends
        except KeyError:
            pass

        return AttrCvt(op_name='strided_slice',
                       transforms={'starts': 'begin',
                                   'ends': 'end'},
                       ignores=['axes'])(inputs, attr)

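For illustration only (not part of this commit): a standalone sketch of the padding step above, assuming hypothetical attributes axes=(0, 2), starts=(1, 0), ends=(3, 4) on a 3-D input. Axis 1 is not mentioned, so it is filled with a full-range slice before AttrCvt renames starts/ends to begin/end for strided_slice.

import numpy as np

# Hypothetical ONNX Slice attributes: slice axes 0 and 2 only, skip axis 1.
attr = {'axes': (0, 2), 'starts': (1, 0), 'ends': (3, 4)}

new_axes, new_starts, new_ends, pop_index = [], [], [], 0
for i in range(max(attr['axes']) + 1):
    if i in attr['axes']:
        # Axis explicitly sliced: take its start/end as given.
        new_axes.append(i)
        new_starts.append(attr['starts'][pop_index])
        new_ends.append(attr['ends'][pop_index])
        pop_index += 1
    else:
        # Axis not mentioned: keep the full range (0 .. INT32_MAX).
        new_axes.append(i)
        new_starts.append(0)
        new_ends.append(np.iinfo(np.int32).max)

print(new_axes)    # [0, 1, 2]
print(new_starts)  # [1, 0, 0]
print(new_ends)    # [3, 2147483647, 4]
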
# compatible operators that do NOT require any conversion.
_identity_list = []
......@@ -477,7 +518,7 @@ def _get_convert_map(opset):
        'SpatialBN': BatchNorm.get_converter(opset),

        # defs/generator
        # 'Constant' # Implemented
        # 'RandomUniform'
        # 'RandomNormal'
        # 'RandomUniformLike'
......@@ -493,8 +534,8 @@ def _get_convert_map(opset):
        'Neg': Renamer('negative'),
        'Abs': Absolute.get_converter(opset),
        'Reciprocal': Reciprocal.get_converter(opset),
        'Floor': Renamer('floor'),
        'Ceil': Renamer('ceil'),
        'Sqrt': Renamer('sqrt'),
        'Relu': Renamer('relu'),
        'LeakyRelu': Renamer('leaky_relu'),
......@@ -511,7 +552,7 @@ def _get_convert_map(opset):
        # 'Min' : this is the elemwise minimum
        'Sum': Sum.get_converter(opset),
        # 'Mean'
        'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
        # softmax default axis is different in onnx
        'Softmax': AttrCvt('softmax', {'axis': ('axis', 1)}),
        'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
......@@ -519,7 +560,7 @@ def _get_convert_map(opset):
        'Softsign': Softsign.get_converter(opset),
        'SoftPlus': SoftPlus.get_converter(opset),
        'Gemm': Gemm.get_converter(opset),
        'MatMul': Renamer('matmul'),

        # defs/nn
        'AveragePool': AveragePool.get_converter(opset),
......@@ -550,7 +591,7 @@ def _get_convert_map(opset):
        'Reshape': Reshape.get_converter(opset),
        'Concat': Renamer('concatenate'),
        'Split': AttrCvt('split', {'split': 'indices_or_sections'}),
        'Slice': Slice.get_converter(opset),
        'Transpose': AttrCvt('transpose', {'perm': 'axes'}),
        # 'Gather'
        'Squeeze': Renamer('squeeze'),
......
......@@ -23,14 +23,15 @@ def get_tvm_output(model, x, target, ctx, out_shape, dtype='float32'):
    return out.asnumpy()

def get_caffe2_output(model, x, dtype='float32'):
    import caffe2.python.onnx.backend
    prepared_backend = caffe2.python.onnx.backend.prepare(model)
    W = {model.graph.input[0].name: x.astype(dtype)}
    c2_out = prepared_backend.run(W)[0]
    return c2_out

def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
    dtype = 'float32'
    x = np.random.uniform(size=data_shape)
    model = onnx.load(graph_file)
......@@ -144,6 +145,101 @@ def test_unsqueeze():
    np.testing.assert_allclose(out_shape, tvm_out.shape)

def _test_slice_iteration(indata, outdata, starts, ends, axes=None):
    if axes:
        y = helper.make_node("Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends)
    else:
        y = helper.make_node("Slice", ['in'], ['out'], starts=starts, ends=ends)
    graph = helper.make_graph([y],
                              'slice_test',
                              inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='slice_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        np.testing.assert_allclose(outdata, tvm_out)
def test_slice():
    x = np.random.randn(20, 10, 5).astype(np.float32)
    _test_slice_iteration(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1))
    _test_slice_iteration(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4))
    _test_slice_iteration(x, x[:, 1:1000], (1), (1000), (1))
    _test_slice_iteration(x, x[:, 0:-1], (0), (-1), (1))
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):
    indata = np.random.uniform(size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)
    y = helper.make_node(opname, ['in'], ['out'], **kwargs)
    graph = helper.make_graph([y],
                              opname + '_test',
                              inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name=opname + '_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype)
        np.testing.assert_allclose(outdata, tvm_out)
def test_floor():
    _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, 'float32', 'Floor', {})

def test_ceil():
    _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, 'float32', 'Ceil', {})

def test_clip():
    _test_onnx_op_elementwise((2, 4, 5, 6),
                              np.clip,
                              {'a_min': -1.0, 'a_max': 1.0},
                              'float32',
                              'Clip',
                              {'min': -1.0, 'max': 1.0})
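As a sanity check of the Clip mapping registered above (illustration only, hypothetical values): ONNX's min/max attributes become NNVM's a_min/a_max, which matches the NumPy clip semantics used as the reference here.

import numpy as np

x = np.array([-2.0, -0.5, 0.5, 2.0], dtype='float32')
# ONNX Clip(min=-1.0, max=1.0) should agree with the NumPy reference.
print(np.clip(x, a_min=-1.0, a_max=1.0))  # [-1.  -0.5  0.5  1. ]
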
def test_matmul():
    a_shape = (4, 3)
    b_shape = (3, 4)
    out_shape = (4, 4)
    a_array = np.random.uniform(size=a_shape).astype('float32')
    b_array = np.random.uniform(size=b_shape).astype('float32')
    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    graph = helper.make_graph([mul_node],
                              "matmul_test",
                              inputs=[helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
                                      helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
    model = helper.make_model(graph, producer_name='matmul_test')
    for target, ctx in ctx_list():
        new_sym, params = nnvm.frontend.from_onnx(model)
        input_name = model.graph.input[0].name
        input_name1 = model.graph.input[1].name
        shape_dict = {input_name: a_array.shape, input_name1: b_array.shape}
        dtype_dict = {input_name: 'float32', input_name1: 'float32'}
        graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, dtype_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input(input_name, tvm.nd.array(a_array.astype('float32')))
        m.set_input(input_name1, tvm.nd.array(b_array.astype('float32')))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, 'float32'))
        np.testing.assert_allclose(np.matmul(a_array, b_array), tvm_out.asnumpy(), rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
    # verify_super_resolution_example()
    # verify_squeezenet1_1()
......@@ -153,3 +249,8 @@ if __name__ == '__main__':
    test_reshape_like()
    test_squeeze()
    test_unsqueeze()
    test_slice()
    test_floor()
    test_ceil()
    test_clip()
    test_matmul()