Commit e1827173 by Alexander Pivovarov Committed by Tianqi Chen

Add mod support in relay.build (#3424)

parent 18a53a41
......@@ -18,6 +18,7 @@
Construct the necessary state for the TVM graph runtime
from a Relay expression.
"""
import warnings
import numpy as np
from tvm import expr as tvm_expr
......@@ -27,6 +28,7 @@ from . import _build_module
from . import ir_pass
from . import ty as _ty
from . import expr as _expr
from .module import Module as _Module
from .backend import interpreter as _interpreter
from .backend.vm import VMExecutor
......@@ -137,14 +139,14 @@ class BuildModule(object):
return ret
def build(func, target=None, target_host=None, params=None):
def build(mod, target=None, target_host=None, params=None):
"""Helper function that builds a Relay function to run on TVM graph
runtime.
Parameters
----------
func: relay.Function
The function to build.
mod : relay.Module
The module to build. Using relay.Function is deprecated.
target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context
name) to str/tvm.target.Target, optional
......@@ -175,6 +177,17 @@ def build(func, target=None, target_host=None, params=None):
params : dict
The parameters of the final graph.
"""
if isinstance(mod, _Module):
func = mod[mod.entry_func]
elif isinstance(mod, _expr.Function):
func = mod
warnings.warn(
"Please use input parameter mod (tvm.relay.module.Module) "
"instead of deprecated parameter func (tvm.relay.expr.Function)",
DeprecationWarning)
else:
raise ValueError("Type of input parameter mod must be tvm.relay.module.Module")
target = _update_target(target)
if isinstance(target_host, (str, _target.Target)):
......@@ -192,8 +205,7 @@ def build(func, target=None, target_host=None, params=None):
with tophub_context:
bld_mod = BuildModule()
graph_json, mod, params = bld_mod.build(func, target, target_host,
params)
graph_json, mod, params = bld_mod.build(func, target, target_host, params)
return graph_json, mod, params
......
......@@ -43,7 +43,7 @@ def get_tvm_output(model,
mod, params = relay.frontend.from_caffe2(
model.init_net, model.predict_net, shape_dict, dtype_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
......
......@@ -73,7 +73,7 @@ def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shap
mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
with relay.transform.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
......
......@@ -55,7 +55,7 @@ def _get_tvm_output(net, data, build_dtype='float32', states=None):
mod, params = relay.frontend.from_darknet(net, data.shape, dtype)
target = 'llvm'
shape_dict = {'data': data.shape}
graph, library, params = relay.build(mod[mod.entry_func],
graph, library, params = relay.build(mod,
target,
params=params)
......
......@@ -44,7 +44,7 @@ def verify_keras_frontend(keras_model, need_transpose=True):
shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)}
mod, params = relay.frontend.from_keras(keras_model, shape_dict)
with relay.transform.build_config(opt_level=2):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target,
params=params)
m = graph_runtime.create(graph, lib, ctx)
......
......@@ -66,7 +66,7 @@ def verify_mxnet_frontend_impl(mx_symbol,
arg_params=args,
aux_params=auxs)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
......
......@@ -47,7 +47,7 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output
mod, params = relay.frontend.from_onnx(graph_def, shape_dict)
with relay.build_config(opt_level=1):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target,
params=params)
......
......@@ -64,7 +64,7 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1,
shape=shape_dict,
outputs=out_names)
with relay.build_config(opt_level=opt_level):
graph, lib, params = relay.build(mod[mod.entry_func], target, target_host, params)
graph, lib, params = relay.build(mod, target, target_host, params)
ctx = tvm.context(target, 0)
from tvm.contrib import graph_runtime
......@@ -1487,7 +1487,7 @@ def test_forward_ptb():
'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h':'float32'}
target = 'llvm'
with relay.build_config(opt_level=0):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target,
params=params)
from tvm.contrib import graph_runtime
......
......@@ -71,9 +71,7 @@ def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target
shape_dict=shape_dict,
dtype_dict=dtype_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func],
target,
params=params)
graph, lib, params = relay.build(mod, target, params=params)
ctx = tvm.context(target, 0)
from tvm.contrib import graph_runtime
......
......@@ -263,7 +263,7 @@ shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target=target,
graph, lib, params = relay.build(mod, target=target,
target_host=target_host, params=params)
# After `relay.build`, you will get three return values: graph,
......
......@@ -78,7 +78,7 @@ block = model_zoo.get_model(model_name, pretrained=True)
def build(target):
mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
return graph, lib, params
######################################################################
......
......@@ -89,7 +89,7 @@ mod, params = relay.frontend.from_caffe2(resnet50.init_net, resnet50.predict_net
# target x86 CPU
target = 'llvm'
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
......
......@@ -71,7 +71,7 @@ shape_dict = {'image': x.shape}
mod, params = relay.frontend.from_coreml(mlmodel, shape_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target,
params=params)
......
......@@ -95,7 +95,7 @@ data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {'data': data.shape}
print("Compiling the model...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target=target,
target_host=target_host,
params=params)
......
......@@ -140,7 +140,7 @@ print("Tensorflow protobuf imported to relay frontend.")
# lib: target library which can be deployed on target with TVM runtime.
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func],
graph, lib, params = relay.build(mod,
target=target,
target_host=target_host,
params=params)
......
......@@ -145,7 +145,7 @@ mod, params = relay.frontend.from_tflite(tflite_model,
# target x86 CPU
target = "llvm"
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod[mod.entry_func], target, params=params)
graph, lib, params = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment