Commit a9e0567d by Siju, committed by Yizhi Liu

[FRONTEND][ONNX] HardSigmoid, Min, Max, Mean ops support (#1645)

parent a03c60ba
@@ -529,6 +529,53 @@ class LRN(OnnxOpConverter):
        return _sym.lrn(inputs[0], size=nsize, axis=axis,
                        alpha=alpha, beta=beta, bias=bias)

class Maximum(OnnxOpConverter):
    """ Operator converter for Maximum.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        _max = inputs[0]
        for i in range(1, len(inputs)):
            _max = AttrCvt(op_name='broadcast_max')([_max, inputs[i]], {})
        return _max
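
The variadic ONNX Max is lowered here as a left fold of pairwise broadcast_max ops over the input list. A minimal NumPy sketch of the same fold, for intuition only (fold_max is a hypothetical helper, not part of this change):

import numpy as np
from functools import reduce

def fold_max(tensors):
    # Chain pairwise maxima, mirroring the converter's loop over inputs.
    return reduce(np.maximum, tensors)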

class Minimum(OnnxOpConverter):
    """ Operator converter for Minimum.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        _min = inputs[0]
        for i in range(1, len(inputs)):
            _min = AttrCvt(op_name='broadcast_min')([_min, inputs[i]], {})
        return _min

class Mean(OnnxOpConverter):
    """ Operator converter for Mean.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        count = len(inputs)
        _sum = inputs[0]
        for i in range(1, count):
            _sum = AttrCvt(op_name='broadcast_add')([_sum, inputs[i]], {})
        return _sum / count
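
Mean follows the same fold pattern: the inputs are accumulated with chained broadcast_add and the result is divided by the input count. An illustrative NumPy equivalent (fold_mean is a hypothetical name):

import numpy as np

def fold_mean(tensors):
    total = tensors[0]
    for t in tensors[1:]:
        total = total + t            # mirrors the chained broadcast_add
    return total / len(tensors)      # scalar division by the input count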

class HardSigmoid(OnnxOpConverter):
    """ Operator converter for HardSigmoid.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        alpha = attr.get('alpha', 0.2)
        beta = attr.get('beta', 0.5)
        transformX = (inputs[0] * alpha) + beta
        attr = {'a_min': 0, 'a_max': 1}
        return AttrCvt(op_name='clip')([transformX], attr)
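
HardSigmoid computes y = clip(alpha * x + beta, 0, 1); the ONNX defaults alpha=0.2 and beta=0.5 match the attr.get fallbacks above. A reference NumPy sketch of the same semantics:

import numpy as np

def hard_sigmoid(x, alpha=0.2, beta=0.5):
    # Piecewise-linear approximation of sigmoid, clipped to [0, 1].
    return np.clip(x * alpha + beta, 0.0, 1.0)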
# compatible operators that do NOT require any conversion.
_identity_list = []
@@ -557,7 +604,6 @@ def _get_convert_map(opset):
        # 'MeanVarianceNormalization'
        # 'Crop'
        # 'Embedding'
        # 'Upsample'
        'Upsample': Upsample.get_converter(opset),
        'SpatialBN': BatchNorm.get_converter(opset),
@@ -591,11 +637,11 @@ def _get_convert_map(opset):
        'Pow': Renamer('broadcast_pow'),
        'PRelu': Prelu.get_converter(opset),
        'Sigmoid': Renamer('sigmoid'),
        # 'HardSigmoid'
        # 'Max' : this is the elemwise maximum
        # 'Min' : this is the elemwise minimum
        'HardSigmoid': HardSigmoid.get_converter(opset),
        'Max': Maximum.get_converter(opset),
        'Min': Minimum.get_converter(opset),
        'Sum': Sum.get_converter(opset),
        # 'Mean'
        'Mean': Mean.get_converter(opset),
        'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
        # softmax default axis is different in onnx
        'Softmax': AttrCvt('softmax', {'axis': ('axis', 1)}),
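
Once registered in the convert map, these converters run whenever an ONNX graph containing the ops is imported through the NNVM frontend. A minimal usage sketch, assuming a serialized model file model.onnx (the file name is hypothetical):

import onnx
import nnvm

onnx_model = onnx.load('model.onnx')
# The converters registered above are invoked during this import.
sym, params = nnvm.frontend.from_onnx(onnx_model)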
@@ -426,6 +426,127 @@ def test_upsample():
    _test_upsample_nearest()
    _test_upsample_bilinear()

def verify_min(input_dim):
    dtype = 'float32'
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)
    b_np = np.min((a_np1, a_np2, a_np3), axis=0)
    min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph([min_node],
                              "Min_test",
                              inputs=[helper.make_tensor_value_info("a_np1",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np2",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np3",
                                          TensorProto.FLOAT, list(input_dim))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(b_np.shape))])
    model = helper.make_model(graph, producer_name='Min_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)

def test_forward_min():
    verify_min((1, 3, 20, 20))
    verify_min((20, 20))

def verify_max(input_dim):
    dtype = 'float32'
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)
    b_np = np.max((a_np1, a_np2, a_np3), axis=0)
    max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph([max_node],
                              "Max_test",
                              inputs=[helper.make_tensor_value_info("a_np1",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np2",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np3",
                                          TensorProto.FLOAT, list(input_dim))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(b_np.shape))])
    model = helper.make_model(graph, producer_name='Max_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)

def test_forward_max():
    verify_max((1, 3, 20, 20))
    verify_max((20, 20))

def verify_mean(input_dim):
    dtype = 'float32'
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)
    b_np = np.mean((a_np1, a_np2, a_np3), axis=0)
    mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph([mean_node],
                              "Mean_test",
                              inputs=[helper.make_tensor_value_info("a_np1",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np2",
                                          TensorProto.FLOAT, list(input_dim)),
                                      helper.make_tensor_value_info("a_np3",
                                          TensorProto.FLOAT, list(input_dim))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(b_np.shape))])
    model = helper.make_model(graph, producer_name='Mean_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)

def test_forward_mean():
    verify_mean((1, 3, 20, 20))
    verify_mean((20, 20))

def verify_hardsigmoid(input_dim, alpha, beta):
    dtype = 'float32'
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    b_np = np.clip(a_np1 * alpha + beta, 0, 1)
    hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta)
    graph = helper.make_graph([hardsigmoid_node],
                              "HardSigmoid_test",
                              inputs=[helper.make_tensor_value_info("a_np1",
                                          TensorProto.FLOAT, list(input_dim))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(b_np.shape))])
    model = helper.make_model(graph, producer_name='HardSigmoid_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)

def test_forward_hardsigmoid():
    verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
    verify_hardsigmoid((20, 20), 0.3, 0.4)

if __name__ == '__main__':
    # verify_super_resolution_example()
@@ -445,3 +566,7 @@ if __name__ == '__main__':
    test_gather()
    test_lrn()
    test_upsample()
    test_forward_min()
    test_forward_max()
    test_forward_mean()
    test_forward_hardsigmoid()
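
The new tests can be exercised by running the test module directly; the path below assumes the NNVM test layout of the TVM repository at the time and should be treated as an assumption:

python nnvm/tests/python/frontend/onnx/test_forward.py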