Commit 49fb6e85 by Lianmin Zheng, committed by Tianqi Chen

[NNVM][KERAS] Fix keras model converter and improve tutorial (#1716)

parent ef39eac3
@@ -180,6 +180,9 @@ def _convert_convolution(insym, keras_layer, symtab):
         in_w = keras_layer.input_shape[2]
         pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
         pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
-        insym = _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
+        if pad_t == pad_b and pad_l == pad_r:
+            params['padding'] = (pad_t, pad_l)
+        else:
+            insym = _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
         raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
...
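Note on the hunk above: whether the conv op's own `padding` attribute suffices depends on how `_get_pad_pair` splits SAME padding. A minimal sketch of such a helper (assumed behaviour, not the verbatim frontend code) is:

```python
# Sketch of a SAME-padding helper in the spirit of _get_pad_pair: pad so that
# out = ceil(in / stride), putting the extra pixel on the bottom/right side
# when the total padding is odd.
def get_pad_pair(in_size, kernel, stride):
    out_size = (in_size + stride - 1) // stride
    total_pad = max((out_size - 1) * stride + kernel - in_size, 0)
    pad_before = total_pad // 2
    pad_after = total_pad - pad_before
    return pad_before, pad_after

# get_pad_pair(224, 7, 2) -> (2, 3): asymmetric, needs an explicit pad op.
# get_pad_pair(224, 3, 1) -> (1, 1): symmetric, conv2d's `padding` attribute is enough.
```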
@@ -169,14 +169,16 @@ task = autotvm.task.create(conv2d_no_batching,
                            target='cuda')
 print(task.config_space)

-# use local gpu, measure 10 times for every config to reduce variance
+# Use local gpu, measure 10 times for every config to reduce variance
 # The timeout of compiling a program is 10 seconds, the timeout for running is 4 seconds
 measure_option = autotvm.measure_option(
     builder=autotvm.LocalBuilder(),
     runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
 )

-# begin tuning, log records to file `conv2d.log`
+# Begin tuning, log records to file `conv2d.log`
+# During tuning we will also try many invalid configs, so you are expected to
+# see many error reports. As long as you can see non-zero GFLOPS, it is okay.
 tuner = autotvm.tuner.XGBTuner(task)
 tuner.tune(n_trial=20,
            measure_option=measure_option,
...
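After tuning, the records in `conv2d.log` are typically applied to build the final kernel. A sketch under the tutorial's setup (the argument values and the exact signature of `conv2d_no_batching` are assumptions taken from the surrounding tutorial):

```python
import tvm
from tvm import autotvm

# Pick the best record from the tuning log and build with it
# (sketch; conv2d_no_batching is the tutorial's autotvm template).
with autotvm.apply_history_best('conv2d.log'):
    with tvm.target.create('cuda'):
        s, arg_bufs = conv2d_no_batching(1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1))
        func = tvm.build(s, arg_bufs)
```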
@@ -94,8 +94,7 @@ m.set_input(**params)
 # execute
 m.run()
 # get outputs
-_, oshape = compiler.graph_util.infer_shape(graph, shape={"data": dshape})
-tvm_output = m.get_output(0, tvm.nd.empty(tuple(oshape[0]), dtype))
+tvm_output = m.get_output(0)

 ######################################################################
...
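This change recurs throughout the commit: the graph runtime already knows each output's shape and dtype, so `get_output` no longer needs a preallocated buffer. A short sketch of the two styles:

```python
# Old style: the caller must know the output shape and dtype in advance.
out_old = m.get_output(0, tvm.nd.empty(oshape, dtype))   # oshape/dtype assumed known

# New style: the runtime allocates the buffer and returns a tvm.nd.NDArray.
out_new = m.get_output(0)
prob = out_new.asnumpy()[0]   # convert to numpy, drop the batch dimension
```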
@@ -8,9 +8,11 @@ This article is an introductory tutorial to deploy CoreML models with NNVM.
 For us to begin with, coremltools module is required to be installed.

 A quick solution is to install via pip
-```bash
-pip install -U coremltools --user
-```
+
+.. code-block:: bash
+
+    pip install -U coremltools --user
+
 or please refer to official site
 https://github.com/apple/coremltools
 """
@@ -65,7 +67,8 @@ x = image[np.newaxis, :]
 import nnvm.compiler
 target = 'cuda'
 shape_dict = {'image': x.shape}
-graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
+with nnvm.compiler.build_config(opt_level=2, add_pass=['AlterOpLayout']):
+    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)

 ######################################################################
 # Execute on TVM
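For reference, the execution step that the next hunk modifies looks roughly like this in the tutorial (a sketch; the input name 'image' matches `shape_dict` above, and `x` is the preprocessed image):

```python
import tvm
from tvm.contrib import graph_runtime

# Create the graph runtime on GPU and feed the inputs.
ctx = tvm.gpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
m.set_input('image', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
```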
@@ -81,14 +84,13 @@ m.set_input(**params)
 # execute
 m.run()
 # get outputs
-output_shape = (1000,)
-tvm_output = m.get_output(0, tvm.nd.empty(output_shape, dtype)).asnumpy()
-top1 = np.argmax(tvm_output)
+tvm_output = m.get_output(0)
+top1 = np.argmax(tvm_output.asnumpy()[0])

 #####################################################################
 # Look up synset name
 # -------------------
-# Look up prdiction top 1 index in 1000 class synset.
+# Look up prediction top 1 index in 1000 class synset.
 synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
                       '4d0b62f3d01426887599d4f7ede23ee5/raw/',
                       '596b27d23537e5a1b5751d2b0481ef172f58b539/',
...
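The synset lookup that follows is truncated in the hunk; it typically finishes along these lines (a sketch; the local file name is illustrative and the gist referenced above is assumed to store a `{class_id: name}` dict literal):

```python
from tvm.contrib.download import download

# Download the synset file and map the top-1 id to a human-readable class name.
synset_name = 'synset.txt'
download(synset_url, synset_name)
with open(synset_name) as f:
    synset = eval(f.read())
print('Top-1 id:', top1, 'class name:', synset[top1])
```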
@@ -21,14 +21,13 @@ import nnvm.testing.darknet
 import matplotlib.pyplot as plt
 import numpy as np
 import tvm
-import os
 import sys

 from ctypes import *
 from tvm.contrib.download import download
 from nnvm.testing.darknet import __darknetffi__

-#Model name
+# Model name
 MODEL_NAME = 'yolo'

 ######################################################################
@@ -79,30 +78,13 @@ print("Compiling the model...")
 with nnvm.compiler.build_config(opt_level=2):
     graph, lib, params = nnvm.compiler.build(sym, target, shape, dtype, params)

-#####################################################################
-# Save the JSON
-# -------------
-def save_lib():
-    #Save the graph, params and .so to the current directory
-    print("Saving the compiled output...")
-    path_name = 'nnvm_darknet_' + model_name
-    path_lib = path_name + '_deploy_lib.so'
-    lib.export_library(path_lib)
-    with open(path_name
-              + "deploy_graph.json", "w") as fo:
-        fo.write(graph.json())
-    with open(path_name
-              + "deploy_param.params", "wb") as fo:
-        fo.write(nnvm.compiler.save_param_dict(params))
-#save_lib()
-
 ######################################################################
 # Load a test image
 # --------------------------------------------------------------------
 test_image = 'dog.jpg'
 print("Loading the test image...")
 img_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + \
-          test_image +'?raw=true'
+          test_image + '?raw=true'
 download(img_url, test_image)

 data = nnvm.testing.darknet.load_image(test_image, net.w, net.h)
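The deleted `save_lib` helper can still be handy when deploying the compiled model outside the tutorial. A cleaned-up sketch of the same idea (output file names are illustrative):

```python
import nnvm.compiler

def save_compiled_output(graph, lib, params, prefix='nnvm_darknet_' + MODEL_NAME):
    """Save the compiled graph, library and params to the current directory
    (a sketch reproducing the intent of the removed save_lib helper)."""
    lib.export_library(prefix + '_deploy_lib.so')
    with open(prefix + '_deploy_graph.json', 'w') as fo:
        fo.write(graph.json())
    with open(prefix + '_deploy_param.params', 'wb') as fo:
        fo.write(nnvm.compiler.save_param_dict(params))
```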
@@ -124,9 +106,9 @@ print("Running the test image...")
 m.run()
 # get outputs
 out_shape = (net.outputs,)
-tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
+tvm_out = m.get_output(0).asnumpy().flatten()

-#do the detection and bring up the bounding boxes
+# do the detection and bring up the bounding boxes
 thresh = 0.24
 hier_thresh = 0.5
 img = nnvm.testing.darknet.load_image_color(test_image)
@@ -134,16 +116,18 @@ _, im_h, im_w = img.shape
 probs = []
 boxes = []
 region_layer = net.layers[net.n - 1]
-boxes, probs = nnvm.testing.yolo2_detection.get_region_boxes(region_layer, im_w, im_h, net.w, net.h,
-                                                             thresh, probs, boxes, 1, tvm_out)
+boxes, probs = nnvm.testing.yolo2_detection.get_region_boxes(
+    region_layer, im_w, im_h, net.w, net.h,
+    thresh, probs, boxes, 1, tvm_out)
-boxes, probs = nnvm.testing.yolo2_detection.do_nms_sort(boxes, probs,
-                                                        region_layer.w*region_layer.h*region_layer.n, region_layer.classes, 0.3)
+boxes, probs = nnvm.testing.yolo2_detection.do_nms_sort(
+    boxes, probs,
+    region_layer.w*region_layer.h*region_layer.n, region_layer.classes, 0.3)

 coco_name = 'coco.names'
-coco_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + coco_name +'?raw=true'
+coco_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + coco_name + '?raw=true'
 font_name = 'arial.ttf'
-font_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + font_name +'?raw=true'
+font_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + font_name + '?raw=true'
 download(coco_url, coco_name)
 download(font_url, font_name)
@@ -152,7 +136,8 @@ with open(coco_name) as f:
 names = [x.strip() for x in content]

-nnvm.testing.yolo2_detection.draw_detections(img, region_layer.w*region_layer.h*region_layer.n,
-                                             thresh, boxes, probs, names, region_layer.classes)
+nnvm.testing.yolo2_detection.draw_detections(
+    img, region_layer.w*region_layer.h*region_layer.n,
+    thresh, boxes, probs, names, region_layer.classes)
 plt.imshow(img.transpose(1, 2, 0))
 plt.show()
@@ -9,12 +9,12 @@ For us to begin with, keras should be installed.
 Tensorflow is also required since it's used as the default backend of keras.

 A quick solution is to install via pip
-```
-pip install -U keras --user
-```
-```
-pip install -U tensorflow --user
-```
+
+.. code-block:: bash
+
+    pip install -U keras --user
+    pip install -U tensorflow --user
+
 or please refer to official site
 https://keras.io/#installation
 """
@@ -45,7 +45,7 @@ weights_url = ''.join(['https://github.com/fchollet/deep-learning-models/release
 weights_file = 'resnet50_weights.h5'
 download(weights_url, weights_file)
 keras_resnet50 = keras.applications.resnet50.ResNet50(include_top=True, weights=None,
-                                                      input_shape=(224,224,3), classes=1000)
+                                                      input_shape=(224, 224, 3), classes=1000)
 keras_resnet50.load_weights('resnet50_weights.h5')

 ######################################################################
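For context, the tutorial feeds this model an image preprocessed roughly as follows (a sketch; the image file name is an illustrative assumption, and NNVM expects NCHW layout while Keras uses NHWC):

```python
from PIL import Image
from keras.applications.resnet50 import preprocess_input
import numpy as np

# Load a 224x224 test image and prepare it for both Keras and NNVM.
img = Image.open('cat.png').resize((224, 224))
data = np.array(img)[np.newaxis, :].astype('float32')
data = preprocess_input(data).transpose([0, 3, 1, 2])   # NHWC -> NCHW
```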
@@ -75,7 +75,7 @@ sym, params = nnvm.frontend.from_keras(keras_resnet50)
 # compile the model
 target = 'cuda'
 shape_dict = {'input_1': data.shape}
-with nnvm.compiler.build_config(opt_level=2):
+with nnvm.compiler.build_config(opt_level=3):
     graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)

 ######################################################################
@@ -91,14 +91,13 @@ m.set_input(**params)
 # execute
 m.run()
 # get outputs
-out_shape = (1000,)
-tvm_out = m.get_output(0, tvm.nd.empty(out_shape, 'float32')).asnumpy()
-top1_tvm = np.argmax(tvm_out)
+tvm_out = m.get_output(0)
+top1_tvm = np.argmax(tvm_out.asnumpy()[0])

 #####################################################################
 # Look up synset name
 # -------------------
-# Look up prdiction top 1 index in 1000 class synset.
+# Look up prediction top 1 index in 1000 class synset.
 synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
                       '4d0b62f3d01426887599d4f7ede23ee5/raw/',
                       '596b27d23537e5a1b5751d2b0481ef172f58b539/',
...
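A natural sanity check at this point (sketch form) is to run the original Keras model on the same input and compare top-1 ids:

```python
# Keras expects NHWC input, so transpose the NCHW tensor back before predicting
# (a sketch; `data` is the tensor prepared earlier in the tutorial).
keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
top1_keras = np.argmax(keras_out)
print('TVM top-1 id: {}, Keras top-1 id: {}'.format(top1_tvm, top1_keras))
```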
@@ -10,9 +10,11 @@ This article is an introductory tutorial to deploy mxnet models with NNVM.
 For us to begin with, mxnet module is required to be installed.

 A quick solution is
-```
-pip install mxnet --user
-```
+
+.. code-block:: bash
+
+    pip install mxnet --user
+
 or please refer to offical installation guide.
 https://mxnet.incubator.apache.org/versions/master/install/index.html
 """
@@ -70,7 +72,8 @@ sym = nnvm.sym.softmax(sym)
 import nnvm.compiler
 target = 'cuda'
 shape_dict = {'data': x.shape}
-graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
+with nnvm.compiler.build_config(opt_level=3):
+    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)

 ######################################################################
 # Execute the portable graph on TVM
@@ -86,8 +89,8 @@ m.set_input(**params)
 # execute
 m.run()
 # get outputs
-tvm_output = m.get_output(0, tvm.nd.empty((1000,), dtype))
-top1 = np.argmax(tvm_output.asnumpy())
+tvm_output = m.get_output(0)
+top1 = np.argmax(tvm_output.asnumpy()[0])
 print('TVM prediction top-1:', top1, synset[top1])

 ######################################################################
...
@@ -8,9 +8,11 @@ This article is an introductory tutorial to deploy ONNX models with NNVM.
 For us to begin with, onnx module is required to be installed.

 A quick solution is to install protobuf compiler, and
-```bash
-pip install onnx --user
-```
+
+.. code-block:: bash
+
+    pip install onnx --user
+
 or please refer to offical site.
 https://github.com/onnx/onnx
 """
@@ -69,7 +71,8 @@ target = 'cuda'
 # assume first input name is data
 input_name = sym.list_input_names()[0]
 shape_dict = {input_name: x.shape}
-graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
+with nnvm.compiler.build_config(opt_level=3):
+    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)

 ######################################################################
 # Execute on TVM
...
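For reference, the `sym` and `params` compiled above come from the ONNX frontend; a sketch of the loading step earlier in this tutorial (the model file name is illustrative):

```python
import onnx
import nnvm

# Load the ONNX model and convert it to an NNVM symbol plus parameters.
onnx_model = onnx.load('super_resolution.onnx')
sym, params = nnvm.frontend.from_onnx(onnx_model)
```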
@@ -5,9 +5,7 @@ This article is an introductory tutorial to deploy tensorflow models with TVM.
 For us to begin with, tensorflow python module is required to be installed.

-A quick solution is to install tensorflow from
-https://www.tensorflow.org/install
+Please refer to https://www.tensorflow.org/install
 """

 # tvm and nnvm
...
@@ -49,8 +49,8 @@ image_shape = (3, 224, 224)
 data_shape = (batch_size,) + image_shape
 out_shape = (batch_size, num_class)

-net, params = nnvm.testing.resnet.get_workload(layers=18,
-                                               batch_size=batch_size, image_shape=image_shape)
+net, params = nnvm.testing.resnet.get_workload(
+    layers=18, batch_size=batch_size, image_shape=image_shape)
 print(net.debug_str())

 ######################################################################
@@ -117,7 +117,7 @@ print(out.asnumpy().flatten()[0:10])
 from tvm.contrib import util

 temp = util.tempdir()
-path_lib = temp.relpath("deploy_lib.so")
+path_lib = temp.relpath("deploy_lib.tar")
 lib.export_library(path_lib)
 with open(temp.relpath("deploy_graph.json"), "w") as fo:
     fo.write(graph.json())
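The `loaded_json`, `loaded_lib` and `loaded_params` used in the next hunk are read back from these saved files; roughly as follows (a sketch; the params file name is assumed to mirror the tutorial's save step):

```python
# Read the compiled artifacts back for the re-deployment step below.
loaded_json = open(temp.relpath("deploy_graph.json")).read()
loaded_lib = tvm.module.load(path_lib)
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
```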
@@ -136,6 +136,4 @@ input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
 module = graph_runtime.create(loaded_json, loaded_lib, tvm.gpu(0))
 module.load_params(loaded_params)
 module.run(data=input_data)
-out = module.get_output(0, out=tvm.nd.empty(out_shape))
+out = module.get_output(0).asnumpy()
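A quick way to sanity-check the reloaded module is to inspect a few output values, in the same style as the earlier in-memory run (a sketch):

```python
# Print the first few output values of the reloaded module.
out_deployed = module.get_output(0).asnumpy()
print(out_deployed.flatten()[0:10])
```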