Commit 881a78b3 by Yizhi Liu, committed by Haichen Shen

[Relay][Frontend] CoreML Support (#2476)

* [Relay][Frontend] Add CoreML Support

* pip install six in CI

* remove triggering nnvm coreml test

* set opt_level=2 for nnvm coreml test case
parent 5a30a22c
# install libraries for python package on ubuntu
pip2 install nose pylint numpy nose-timer cython decorator scipy tornado typing antlr4-python2-runtime attrs
pip3 install nose pylint numpy nose-timer cython decorator scipy tornado typed_ast pytest mypy orderedset antlr4-python3-runtime attrs
pip2 install nose pylint six numpy nose-timer cython decorator scipy tornado typing antlr4-python2-runtime attrs
pip3 install nose pylint six numpy nose-timer cython decorator scipy tornado typed_ast pytest mypy orderedset antlr4-python3-runtime attrs
......@@ -68,14 +68,15 @@ def ConvolutionLayerParams(op, insym, symtab):
else:
pos = [insym, weights]
if op.isDeconvolution:
ret = _sym.conv2d_transpose(*pos, **params)
else:
ret = _sym.conv2d(*pos, **params)
# consume padding layer
if symtab.in_padding:
params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]), symtab.paddings)]
symtab.clear_padding()
if op.isDeconvolution:
ret = _sym.conv2d_transpose(*pos, **params)
else:
ret = _sym.conv2d(*pos, **params)
return ret
def BatchnormLayerParams(op, insym, symtab):
......
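Side note on the hunk above: the padding merge has to run before the conv2d/conv2d_transpose node is constructed, because `params` is only read at construction time; merging a pending padding layer afterwards had no effect. A small illustrative sketch of the merge arithmetic (the values here are hypothetical, only the zip/sum pattern mirrors the commit):

# Illustrative sketch of the padding merge in ConvolutionLayerParams above.
params = {'padding': [1, 1]}   # padding already derived from the conv layer
pending = [2, 2]               # amounts recorded by a preceding padding layer
params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]), pending)]
assert params['padding'] == [3, 3]
# only after this merge is the conv2d/conv2d_transpose node built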
import urllib
from six.moves import urllib
import os
from PIL import Image
import numpy as np
......@@ -7,7 +7,7 @@ def download(url, path, overwrite=False):
if os.path.exists(path) and not overwrite:
return
print('Downloading {} to {}.'.format(url, path))
urllib.URLopener().retrieve(url, path)
urllib.request.urlretrieve(url, path)
def get_mobilenet():
url = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel'
......
......@@ -15,9 +15,9 @@ import coremltools as cm
import model_zoo
def get_tvm_output(symbol, x, params, target, ctx,
out_shape=(1000,), input_name='image', dtype='float32'):
out_shape=(1, 1000), input_name='image', dtype='float32'):
shape_dict = {input_name : x.shape}
with nnvm.compiler.build_config(opt_level=3):
with nnvm.compiler.build_config(opt_level=2):
graph, lib, params = nnvm.compiler.build(symbol, target, shape_dict, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
......@@ -28,7 +28,7 @@ def get_tvm_output(symbol, x, params, target, ctx,
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
def test_model_checkonly(model_file, model_name=''):
def run_model_checkonly(model_file, model_name=''):
model = cm.models.MLModel(model_file)
sym, params = nnvm.frontend.from_coreml(model)
x = model_zoo.get_cat_image()
......@@ -38,11 +38,11 @@ def test_model_checkonly(model_file, model_name=''):
def test_mobilenet_checkonly():
model_file = model_zoo.get_mobilenet()
test_model_checkonly(model_file, 'mobilenet')
run_model_checkonly(model_file, 'mobilenet')
def test_resnet50_checkonly():
model_file = model_zoo.get_resnet50()
test_model_checkonly(model_file, 'resnet50')
run_model_checkonly(model_file, 'resnet50')
def run_tvm_graph(graph_def, input_data, input_name, output_shape, output_dtype='float32'):
""" Generic function to compile on nnvm and execute on tvm """
......
......@@ -231,7 +231,7 @@ class Function(Expr):
_make.Function, params, body, ret_type, type_params, attrs)
def __call__(self, *args):
"""Invoke the gobal function.
"""Invoke the global function.
Parameters
----------
......
......@@ -11,3 +11,4 @@ from .mxnet import from_mxnet
from .keras import from_keras
from .onnx import from_onnx
from .tflite import from_tflite
from .coreml import from_coreml
......@@ -240,6 +240,7 @@ class ExprTable(object):
self.exprs = {}
self.params = {}
self.const_ctr = 1
self.in_padding = False
def new_const(self, value, shape=None, dtype="float32"):
name = "_param_%d" % (self.const_ctr)
......@@ -257,6 +258,13 @@ class ExprTable(object):
assert isinstance(expr, _expr.Expr)
self.exprs[name] = expr
def set_padding(self, paddings):
self.paddings = paddings
self.in_padding = True
def clear_padding(self):
self.in_padding = False
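The `in_padding` flag added above implements a small hand-off: a padding layer records its amounts on the expression table, and the next convolution or pooling layer consumes and clears them. A minimal sketch of that flow, using only the methods defined here (the padding values are illustrative):

# Minimal sketch of the padding hand-off protocol.
from tvm.relay.frontend.common import ExprTable

etab = ExprTable()
etab.set_padding([1, 1])          # what _PaddingLayerParams does
if etab.in_padding:               # what the conv/pool converters check
    merged = [sum(x) for x in zip([0, 0], etab.paddings)]
    etab.clear_padding()          # consumed exactly once
assert not etab.in_padding and merged == [1, 1]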
class AttrCvt(object):
"""Common attribute conveter. An AttrConverter instance is a callable:
......
# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import numpy as np
from .. import ir_pass
from .. import expr as _expr
from .. import op as _op
from ... import nd as _nd
from ..._ffi import base as _base
from .common import ExprTable
__all__ = ['from_coreml']
def _NeuralNetworkImageScaler(op, inexpr, etab):
# this changes the symbol
biases = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
bias = etab.new_const(biases)
ret = _op.multiply(inexpr, _expr.const(op.channelScale, dtype='float32'))
ret = _op.add(ret, bias)
return ret
def _NeuralNetworkMeanImage(op, inexpr, etab):
# this changes the symbol
ret = _op.subtract(inexpr, _expr.const(op.meanImage, dtype='float32'))
return ret
def _ConvolutionLayerParams(op, inexpr, etab):
"""Convolution layer params."""
weights = etab.new_const(np.array(list(op.weights.floatValue)).reshape(
tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))))
dilation = list(op.dilationFactor)
if not dilation:
dilation = [1, 1]
params = {'channels':op.outputChannels,
'kernel_size':list(op.kernelSize),
'strides':list(op.stride),
'dilation': dilation,
'groups':op.nGroups}
if op.WhichOneof('ConvolutionPaddingType') == 'valid':
valid = op.valid
padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j, "Asymmetry padding not supported"
if padding:
params['padding'] = padding
elif op.WhichOneof('ConvolutionPaddingType') == 'same':
kernel = params['kernel_size']
pad_h = kernel[0] - 1
pad_w = kernel[1] - 1
pad_t = pad_h // 2
pad_l = pad_w // 2
pad_b = pad_h - pad_t
pad_r = pad_w - pad_l
assert pad_t == pad_b and pad_l == pad_r, "Asymmetric padding not supported"
params['padding'] = [pad_t, pad_l]
else:
raise NotImplementedError("Valid/Same convolution padding implemented")
# consume padding layer
if etab.in_padding:
params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]), etab.paddings)]
etab.clear_padding()
if op.isDeconvolution:
ret = _op.nn.conv2d_transpose(data=inexpr, weight=weights, **params)
else:
ret = _op.nn.conv2d(data=inexpr, weight=weights, **params)
if op.hasBias:
biases = etab.new_const(list(op.bias.floatValue))
ret = _op.nn.bias_add(ret, biases)
return ret
def _BatchnormLayerParams(op, inexpr, etab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise NotImplementedError("instance normalization not implemented")
else:
params = {'gamma':etab.new_const(list(op.gamma.floatValue)),
'beta':etab.new_const(list(op.beta.floatValue)),
'moving_mean':etab.new_const(list(op.mean.floatValue)),
'moving_var': etab.new_const(list(op.variance.floatValue)),
'epsilon': op.epsilon}
result, moving_mean, moving_var = _op.nn.batch_norm(data=inexpr, **params)
return result
def _ActivationParams(op, inexpr, etab):
"""Get activation parameters"""
whichActivation = op.WhichOneof('NonlinearityType')
par = getattr(op, whichActivation)
if whichActivation == 'linear':
alpha = _expr.const(par.alpha, dtype='float32')
beta = _expr.const(par.beta, dtype='float32')
return _op.add(_op.multiply(inexpr, alpha), beta)
elif whichActivation == 'ReLU':
return _op.nn.relu(inexpr)
elif whichActivation == 'leakyReLU':
return _op.nn.leaky_relu(inexpr, alpha=par.alpha)
elif whichActivation == 'thresholdedReLU':
alpha_tensor = _op.full_like(inexpr, fill_value=_expr.const(par.alpha, dtype='float32'))
return _op.multiply(inexpr, _op.greater(inexpr, alpha_tensor).astype('float32'))
elif whichActivation == 'PReLU':
return _op.nn.prelu(inexpr, alpha=_expr.const(par.alpha, dtype='float32'))
elif whichActivation == 'tanh':
return _op.tanh(inexpr)
elif whichActivation == 'scaledTanh':
alpha = _expr.const(par.alpha, dtype='float32')
beta = _expr.const(par.beta, dtype='float32')
return _op.multiply(_op.tanh(_op.multiply(inexpr, beta)), alpha)
elif whichActivation == 'sigmoid':
return _op.sigmoid(inexpr)
elif whichActivation == 'sigmoidHard':
alpha = _expr.const(par.alpha, dtype='float32')
beta = _expr.const(par.beta, dtype='float32')
transformX = (alpha * inexpr) + beta
return _op.clip(transformX, a_min=0., a_max=1.)
elif whichActivation == 'ELU':
return _op.multiply(_op.add(_op.exp(inexpr), _expr.const(-1, dtype='float32')),
_expr.const(par.alpha, dtype='float32'))
elif whichActivation == 'softsign':
return inexpr / (_expr.const(1, dtype='float32') + (
_op.nn.relu(inexpr) + _op.nn.relu(_op.negative(inexpr))))
elif whichActivation == 'softplus':
return _op.log(_op.add(_op.exp(inexpr), _expr.const(1, dtype='float32')))
elif whichActivation == 'parametricSoftplus':
alpha = list(par.alpha.floatValue)
beta = list(par.beta.floatValue)
if len(alpha) == 1:
return _op.multiply(_op.log(_op.add(_op.exp(inexpr),
_expr.const(beta[0], dtype='float32'))),
_expr.const(alpha[0], dtype='float32'))
alpha = np.array(alpha).reshape((len(alpha), 1, 1))
beta = np.array(beta).reshape((len(beta), 1, 1))
alpha_expr = etab.new_const(alpha)
beta_expr = etab.new_const(beta)
return _op.multiply(_op.log(_op.add(_op.exp(inexpr), beta_expr)), alpha_expr)
else:
raise NotImplementedError('%s not implemented' % whichActivation)
def _ScaleLayerParams(op, inexpr, etab):
"""Scale layer params."""
scale = etab.new_const(np.array(list(op.scale.floatValue)).reshape(
tuple(list(op.shapeScale) + [1, 1])))
ret = _op.multiply(inexpr, scale)
if op.hasBias:
bias = etab.new_const(np.array(list(op.bias.floatValue)).reshape(
tuple(list(op.shapeBias) + [1, 1])))
ret = _op.add(ret, bias)
return ret
def _PoolingLayerParams(op, inexpr, etab):
"""get pooling parameters"""
if op.globalPooling:
if op.type == 0:
return _op.nn.global_max_pool2d(inexpr)
elif op.type == 1:
return _op.nn.global_avg_pool2d(inexpr)
else:
raise NotImplementedError("Only max and average pooling implemented")
else:
params = {'pool_size':list(op.kernelSize),
'strides':list(op.stride)}
if op.WhichOneof('PoolingPaddingType') == 'valid':
valid = op.valid
padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
params['padding'] = padding
elif op.WhichOneof('PoolingPaddingType') == 'includeLastPixel':
# I don't know if this is correct
valid = op.includeLastPixel
padding = list(valid.paddingAmounts)
params['padding'] = padding
params['ceil_mode'] = True
else:
raise NotImplementedError("Other convolution padding not implemented")
# consume padding layer
if etab.in_padding:
params['padding'] = [sum(x) for x in zip(
params.get('padding', [0, 0]), etab.paddings)]
etab.clear_padding()
if op.type == 0:
return _op.nn.max_pool2d(inexpr, **params)
elif op.type == 1:
return _op.nn.avg_pool2d(inexpr, **params)
else:
raise NotImplementedError("Only max and average pooling implemented")
def _SoftmaxLayerParams(op, inexpr, etab):
return _op.nn.softmax(_op.nn.batch_flatten(inexpr))
def _InnerProductLayerParams(op, inexpr, etab):
weights = etab.new_const(np.array(op.weights.floatValue).reshape(
(op.outputChannels, op.inputChannels)))
out = _op.nn.dense(data=inexpr, weight=weights, units=op.outputChannels)
if op.hasBias:
bias = etab.new_const(np.array(op.bias.floatValue))
out = _op.nn.bias_add(out, bias)
return out
def _AddLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
ret = inexpr[0]
for i in range(1, len(inexpr)):
ret = _op.add(ret, inexpr[i])
if op.alpha > 0:
ret = _op.add(ret, _expr.const(op.alpha, dtype='float32'))
return ret
def _MultiplyLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
ret = inexpr[0]
for i in range(1, len(inexpr)):
ret = _op.multiply(ret, inexpr[i])
if op.alpha != 1:
ret = _op.multiply(ret, _expr.const(op.alpha, dtype='float32'))
return ret
def _ConcatLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
if op.sequenceConcat:
raise NotImplementedError("Sequence Concat not supported")
ret = _op.concatenate(inexpr, axis=1)
return ret
def _FlattenLayerParams(op, inexpr, etab):
if op.mode == 1:
inexpr = _op.transpose(_op.reshape(inexpr, newshape=(0, 0, -1)), axes=(0, 2, 1))
return _op.nn.batch_flatten(inexpr)
def _PaddingLayerParams(op, inexpr, etab):
"""Hacking for padding layer params."""
if op.WhichOneof('PaddingType') == 'constant':
constant = op.constant
if constant.value != 0:
raise NotImplementedError("Padding value {} not supported.".format(constant.value))
padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
etab.set_padding(padding)
else:
raise NotImplementedError("Only constant padding is supported now.")
return inexpr
def _PermuteLayerParams(op, inexpr, etab):
axes = tuple(op.axis)
return _op.transpose(inexpr, axes=axes)
def _UpsampleLayerParams(op, inexpr, etab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise NotImplementedError("Upsampling only supported with same \
height and width scaling factor.")
interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
return _op.nn.upsampling(inexpr, scale=op.scalingFactor[0], method=interpolationMode)
def _L2NormalizeLayerParams(op, inexpr, etab):
return _op.nn.l2_normalize(inexpr, eps=op.epsilon, axis=[1])
def _LRNLayerParams(op, inexpr, etab):
par = {}
par['size'] = op.localSize
par['bias'] = op.k
par['alpha'] = op.alpha
par['beta'] = op.beta
par['axis'] = 1 # default layout is nchw
return _op.nn.lrn(data=inexpr, **par)
def _AverageLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
count = len(inexpr)
_sum = inexpr[0]
for i in range(1, count):
_sum = _op.add(_sum, inexpr[i])
return _sum / _expr.const(count, dtype='float32')
def _MaxLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
_max = inexpr[0]
for i in range(1, len(inexpr)):
_max = _op.maximum(_max, inexpr[i])
return _max
def _MinLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
_min = inexpr[0]
for i in range(1, len(inexpr)):
_min = _op.minimum(_min, inexpr[i])
return _min
_convert_map = {
'NeuralNetworkMeanImage': _NeuralNetworkMeanImage,
'NeuralNetworkImageScaler': _NeuralNetworkImageScaler,
'ConvolutionLayerParams': _ConvolutionLayerParams,
'BatchnormLayerParams': _BatchnormLayerParams,
'ActivationParams': _ActivationParams,
'ScaleLayerParams': _ScaleLayerParams,
'PoolingLayerParams': _PoolingLayerParams,
'SoftmaxLayerParams': _SoftmaxLayerParams,
'InnerProductLayerParams': _InnerProductLayerParams,
'AddLayerParams': _AddLayerParams,
'MultiplyLayerParams': _MultiplyLayerParams,
'FlattenLayerParams': _FlattenLayerParams,
'ConcatLayerParams': _ConcatLayerParams,
'PaddingLayerParams': _PaddingLayerParams,
'PermuteLayerParams': _PermuteLayerParams,
'UpsampleLayerParams': _UpsampleLayerParams,
'L2NormalizeLayerParams': _L2NormalizeLayerParams,
'LRNLayerParams': _LRNLayerParams,
'AverageLayerParams': _AverageLayerParams,
'MaxLayerParams': _MaxLayerParams,
'MinLayerParams': _MinLayerParams,
}
def coreml_op_to_relay(op, inname, outname, etab):
"""Convert coreml layer to a Relay expression and update the expression table.
Parameters
----------
op : CoreML protobuf object
The CoreML layer (or preprocessor) to convert.
inname : str or list of str
Name of the input Relay expression.
outname : str
Name of the output Relay expression.
etab : relay.frontend.common.ExprTable
The global expression table to be updated.
"""
classname = type(op).__name__
if classname not in _convert_map:
raise NotImplementedError("%s is not supported" % (classname))
if isinstance(inname, _base.string_types):
insym = etab.get_expr(inname)
else:
insym = [etab.get_expr(i) for i in inname]
ret = _convert_map[classname](op, insym, etab)
if outname:
etab.set_expr(outname, ret)
if classname != 'PaddingLayerParams':
assert not etab.in_padding, "Previous padding not consumed by conv/pool"
def from_coreml(model, shape=None):
"""Convert from coreml model into Relay Function.
Parameters
----------
model : coremltools.models.MLModel
The CoreML model (NeuralNetworkClassifier, NeuralNetwork, or NeuralNetworkRegressor).
shape : dict of str to int list/tuple, optional
The input shapes
Returns
-------
func : tvm.relay.Function
Compatible relay Function.
params : dict of str to tvm.NDArray
The parameter dict to be used by Relay.
"""
try:
import coremltools as cm
except ImportError:
raise ImportError('The coremltools package must be installed')
assert isinstance(model, cm.models.MLModel)
spec = model.get_spec()
modeltype = spec.WhichOneof('Type')
assert modeltype in ['neuralNetworkClassifier', 'neuralNetwork', 'neuralNetworkRegressor']
cc = getattr(spec, modeltype)
etab = ExprTable()
for i in spec.description.input:
input_shape = shape[i.name] if shape is not None and i.name in shape else None
etab.set_expr(i.name, _expr.var(i.name, shape=input_shape))
for pp in cc.preprocessing:
whichpp = pp.WhichOneof('preprocessor')
ppmethod = getattr(pp, whichpp)
# the NeuralNetworkImageScaler doesn't seem to have a featureName?
if whichpp == 'scaler':
for i in spec.description.input:
coreml_op_to_relay(ppmethod, i.name, i.name, etab)
else:
coreml_op_to_relay(ppmethod, pp.featureName, pp.featureName, etab)
for l in cc.layers:
layertype = l.WhichOneof('layer')
layerop = getattr(l, layertype)
assert len(l.output) == 1
if len(l.input) == 1:
coreml_op_to_relay(layerop, l.input[0], l.output[0], etab)
else:
coreml_op_to_relay(layerop, list(l.input), l.output[0], etab)
outexpr = [etab.get_expr(o.name) if o.name in etab.exprs else _expr.var(o.name)
for o in spec.description.output]
# for now return first output
outexpr = outexpr[0]
func = _expr.Function(ir_pass.free_vars(outexpr), outexpr)
params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
return func, params
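A minimal end-to-end sketch of the new frontend follows. The model file name, input name, and shapes mirror the test changes below (model_zoo downloads 'mobilenet.mlmodel' and feeds a (1, 3, 224, 224) 'image' input); the 'llvm' target and CPU context are assumptions for illustration:

# Hedged usage sketch of relay.frontend.from_coreml (not part of the commit).
import numpy as np
import coremltools as cm
import tvm
from tvm import relay
from tvm.contrib import graph_runtime

mlmodel = cm.models.MLModel('mobilenet.mlmodel')   # e.g. from model_zoo.get_mobilenet()
func, params = relay.frontend.from_coreml(mlmodel, shape={'image': (1, 3, 224, 224)})
with relay.build_module.build_config(opt_level=3):
    graph, lib, params = relay.build(func, 'llvm', params=params)
m = graph_runtime.create(graph, lib, tvm.cpu(0))
m.set_input('image', tvm.nd.array(np.random.uniform(size=(1, 3, 224, 224)).astype('float32')))
m.set_input(**params)
m.run()
out = m.get_output(0, tvm.nd.empty((1, 1000), 'float32')).asnumpy()
print('prediction id:', np.argmax(out))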
......@@ -625,7 +625,7 @@ def keras_op_to_relay(inexpr, keras_layer, outname, etab):
etab.set_expr(name, out)
def from_keras(model, shape_dict):
def from_keras(model, shape=None):
"""Convert keras model to relay Function.
Parameters
......@@ -633,8 +633,8 @@ def from_keras(model, shape_dict):
model : keras.engine.training.Model
The keras model to be converted.
shape_dict : dict of str to int list/tuple
Input shapes of the model.
shape: dict of str to int list/tuple
Input shapes of the model, optional
Returns
-------
......@@ -642,7 +642,7 @@ def from_keras(model, shape_dict):
Compatible relay Function.
params : dict of str to tvm.NDArray
The parameter dict to be used by relay.
The parameter dict to be used by Relay.
"""
try:
import keras
......@@ -659,8 +659,8 @@ def from_keras(model, shape_dict):
for keras_layer in model.layers:
if isinstance(keras_layer, keras.engine.InputLayer):
input_name = keras_layer.name
shape = shape_dict[input_name] if input_name in shape_dict else None
etab.set_expr(input_name, _expr.var(input_name, shape=shape))
input_shape = shape[input_name] if shape is not None and input_name in shape else None
etab.set_expr(input_name, _expr.var(input_name, shape=input_shape))
else:
inbound_nodes = keras_layer.inbound_nodes if hasattr(keras_layer, 'inbound_nodes') \
else keras_layer._inbound_nodes if hasattr(keras_layer, '_inbound_nodes') \
......
from six.moves import urllib
import os
from PIL import Image
import numpy as np
def download(url, path, overwrite=False):
if os.path.exists(path) and not overwrite:
return
print('Downloading {} to {}.'.format(url, path))
urllib.request.urlretrieve(url, path)
def get_mobilenet():
url = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel'
dst = 'mobilenet.mlmodel'
real_dst = os.path.abspath(os.path.join(os.path.dirname(__file__), dst))
download(url, real_dst)
return os.path.abspath(real_dst)
def get_resnet50():
url = 'https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel'
dst = 'resnet50.mlmodel'
real_dst = os.path.abspath(os.path.join(os.path.dirname(__file__), dst))
download(url, real_dst)
return os.path.abspath(real_dst)
def get_cat_image():
url = 'https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png'
dst = 'cat.png'
real_dst = os.path.abspath(os.path.join(os.path.dirname(__file__), dst))
download(url, real_dst)
img = Image.open(real_dst).resize((224, 224))
img = np.transpose(img, (2, 0, 1))[np.newaxis, :]
return np.asarray(img)
\ No newline at end of file
import numpy as np
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import datatypes
import tvm
from tvm.contrib import graph_runtime
import topi
import topi.testing
from tvm import relay
from tvm.relay.testing.config import ctx_list
import coremltools as cm
import model_zoo
def get_tvm_output(func, x, params, target, ctx,
out_shape=(1, 1000), input_name='image', dtype='float32'):
with relay.build_module.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input(input_name, tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
def run_model_checkonly(model_file, model_name='', input_name='image'):
model = cm.models.MLModel(model_file)
x = model_zoo.get_cat_image()
shape_dict = {input_name : x.shape}
func, params = relay.frontend.from_coreml(model, shape_dict)
for target, ctx in ctx_list():
tvm_output = get_tvm_output(func, x, params, target, ctx)
print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
def test_mobilenet_checkonly():
model_file = model_zoo.get_mobilenet()
run_model_checkonly(model_file, 'mobilenet')
def test_resnet50_checkonly():
model_file = model_zoo.get_resnet50()
run_model_checkonly(model_file, 'resnet50')
def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shape, output_dtype='float32'):
""" Generic function to compile on relay and execute on tvm """
if isinstance(input_data, list):
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_name):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype
else:
shape_dict = {input_name: input_data.shape}
dtype_dict = {input_name: input_data.dtype}
func, params = relay.frontend.from_coreml(coreml_model, shape_dict)
with relay.build_module.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=params)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_name):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
else:
m.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, s in enumerate(output_shape):
tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
else:
tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
return tvm_output.asnumpy()
def verify_AddLayerParams(input_dim, alpha=2):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.add(a_np1, a_np2) + alpha
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Add',
alpha=alpha,
input_names=['input1', 'input2'],
output_name='output',
mode='ADD')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_AddLayerParams():
verify_AddLayerParams((1, 2, 2), 0)
verify_AddLayerParams((1, 2, 2), 1)
verify_AddLayerParams((1, 3, 3), 2)
def verify_MultiplyLayerParams(input_dim, alpha):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.multiply(a_np1, a_np2) * alpha
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Mul',
alpha=alpha,
input_names=['input1', 'input2'],
output_name='output',
mode='MULTIPLY')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_MultiplyLayerParams():
verify_MultiplyLayerParams((1, 2, 2), 0)
verify_MultiplyLayerParams((1, 2, 2), 1)
verify_MultiplyLayerParams((1, 3, 3), 2)
def verify_ConcatLayerParams(input1_dim, input2_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input1_dim).astype(dtype)
a_np2 = np.random.uniform(size=input2_dim).astype(dtype)
b_np = np.concatenate((a_np1, a_np2), axis=1)
inputs = [('input1', datatypes.Array(*input1_dim)),
('input2', datatypes.Array(*input2_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Concate',
input_names=['input1', 'input2'],
output_name='output',
mode='CONCAT')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_ConcatLayerParams():
verify_ConcatLayerParams((1, 1, 2, 2), (1, 2, 2, 2))
verify_ConcatLayerParams((1, 2, 4, 4), (1, 3, 4, 4))
def verify_UpsampleLayerParams(input_dim, scale, mode):
dtype = "float32"
a_np = np.full(input_dim, 1, dtype=dtype)
if mode == 'NN':
b_np = topi.testing.upsampling_python(a_np, scale)
else:
new_h = input_dim[2] * scale
new_w = input_dim[3] * scale
b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(input, output)
builder.add_upsample(name='Upsample',
scaling_factor_h=scale,
scaling_factor_w=scale,
mode=mode,
input_name='input',
output_name='output')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_UpsampleLayerParams():
verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'NN')
verify_UpsampleLayerParams((1, 4, 6, 6), 3, 'BILINEAR')
def verify_l2_normalize(input_dim, eps):
dtype = "float32"
a_np = np.random.uniform(size=input_dim).astype(dtype)
b_np = topi.testing.l2_normalize_python(a_np, eps, 1)
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(input, output)
builder.add_l2_normalize(name='L2', epsilon=eps, input_name='input', output_name='output')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_l2_normalize():
verify_l2_normalize((1, 3, 20, 20), 0.001)
def verify_lrn(input_dim, size, bias, alpha, beta):
dtype = "float32"
axis=1
a_np = np.random.uniform(size=input_dim).astype(dtype)
b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(input, output)
builder.add_lrn(name='LRN',
input_name='input',
output_name='output',
alpha=alpha,
beta=beta,
k=bias,
local_size=size)
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_lrn():
verify_lrn((1, 3, 10, 20), 3, 1.0, 1.0, 0.5)
def verify_average(input_dim1, input_dim2, axis=0):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim1).astype(dtype)
a_np2 = np.random.uniform(size=input_dim2).astype(dtype)
b_np = np.mean((a_np1, a_np2), axis=axis)
inputs = [('input1', datatypes.Array(*input_dim1)),
('input2', datatypes.Array(*input_dim2))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='MEAN',
input_names=['input1', 'input2'],
output_name='output',
mode='AVE')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_average():
verify_average((1, 3, 20, 20), (1, 3, 20, 20))
verify_average((3, 20, 20), (1, 3, 20, 20))
verify_average((20, 20), (1, 3, 20, 20))
def verify_max(input_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.max((a_np1, a_np2, a_np3), axis=0)
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim)),
('input3', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Max',
input_names=['input1', 'input2', 'input3'],
output_name='output',
mode='MAX')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2, a_np3],
['input1', 'input2', 'input3'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_max():
verify_max((1, 3, 20, 20))
verify_max((20, 20))
def verify_min(input_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.min((a_np1, a_np2, a_np3), axis=0)
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim)),
('input3', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Min',
input_names=['input1', 'input2', 'input3'],
output_name='output',
mode='MIN')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, target, ctx, [a_np1, a_np2, a_np3],
['input1', 'input2', 'input3'], b_np.shape, dtype)
tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_min():
verify_min((1, 3, 20, 20))
verify_min((20, 20))
if __name__ == '__main__':
test_forward_AddLayerParams()
test_forward_ConcatLayerParams()
test_forward_MultiplyLayerParams()
test_forward_UpsampleLayerParams()
test_forward_l2_normalize()
test_forward_lrn()
test_forward_average()
test_forward_max()
test_forward_min()
test_mobilenet_checkonly()
test_resnet50_checkonly()
......@@ -27,6 +27,9 @@ python3 -m nose -v nnvm/tests/python/frontend/keras || exit -1
echo "Running nnvm Tensorflow frontend test..."
python3 -m nose -v nnvm/tests/python/frontend/tensorflow || exit -1
echo "Running nnvm CoreML frontend test..."
python3 -m nose -v nnvm/tests/python/frontend/coreml || exit -1
echo "Running relay MXNet frontend test..."
python3 -m nose -v tests/python/frontend/mxnet || exit -1
......@@ -36,6 +39,9 @@ python3 -m nose -v tests/python/frontend/keras || exit -1
echo "Running relay ONNX frondend test..."
python3 -m nose -v tests/python/frontend/onnx || exit -1
echo "Running relay CoreML frondend test..."
python3 -m nose -v tests/python/frontend/coreml || exit -1
echo "Running nnvm to relay frontend test..."
python3 -m nose -v tests/python/frontend/nnvm_to_relay || exit -1
......