Commit 8fd84719 (unverified)
Authored Mar 12, 2019 by Tianqi Chen; committed by GitHub on Mar 12, 2019
[DOCS] Phase out nnvm tutorials (#2783)
Parent: 5847ed3a
Showing 24 changed files with 8 additions and 150 deletions (+8, -150)
docs/conf.py                                  +1  -3
nnvm/tutorials/.gitignore                     +0  -0
nnvm/tutorials/README.txt                     +0  -0
nnvm/tutorials/deploy_model_on_mali_gpu.py    +0  -0
nnvm/tutorials/deploy_model_on_rasp.py        +0  -0
nnvm/tutorials/deploy_ssd_mxnet.py            +0  -0
nnvm/tutorials/from_coreml.py                 +0  -0
nnvm/tutorials/from_darknet.py                +0  -0
nnvm/tutorials/from_mxnet.py                  +0  -0
nnvm/tutorials/from_mxnet_to_webgl.py         +0  -0
nnvm/tutorials/from_onnx.py                   +0  -0
nnvm/tutorials/from_tensorflow.py             +0  -0
nnvm/tutorials/get_started.py                 +0  -0
nnvm/tutorials/nlp/from_darknet_rnn.py        +0  -0
nnvm/tutorials/nlp/keras_s2s_translate.py     +0  -0
nnvm/tutorials/tune_nnvm_arm.py               +0  -0
nnvm/tutorials/tune_nnvm_cuda.py              +0  -0
nnvm/tutorials/tune_nnvm_mobile_gpu.py        +0  -0
nnvm/tutorials/tune_nnvm_x86.py               +0  -0
nnvm/tutorials/using_external_lib.py          +0  -0
nnvm/tutorials/web/resnet.html                +0  -0
tutorials/frontend/deploy_ssd_gluoncv.py      +1  -3
tutorials/nnvm_quick_start.py                 +0  -139
tutorials/tensor_expr_get_started.py          +6  -5
docs/conf.py

...
@@ -194,10 +194,8 @@ subsection_order = ExplicitOrder(
     '../tutorials/optimize',
     '../tutorials/autotvm',
     '../tutorials/dev',
     '../tutorials/vta',
     '../tutorials/topi',
-    '../tutorials/deployment',
-    '../tutorials/nnvm'])
+    '../tutorials/deployment'])

 def generate_doxygen_xml(app):
     """Run the doxygen make commands if we're on the ReadTheDocs server"""
...
tutorials/nnvm/.gitignore → nnvm/tutorials/.gitignore (file moved)
tutorials/nnvm/README.txt → nnvm/tutorials/README.txt (file moved)
tutorials/nnvm/deploy_model_on_mali_gpu.py → nnvm/tutorials/deploy_model_on_mali_gpu.py (file moved)
tutorials/nnvm/deploy_model_on_rasp.py → nnvm/tutorials/deploy_model_on_rasp.py (file moved)
tutorials/nnvm/deploy_ssd_mxnet.py → nnvm/tutorials/deploy_ssd_mxnet.py (file moved)
tutorials/nnvm/from_coreml.py → nnvm/tutorials/from_coreml.py (file moved)
tutorials/nnvm/from_darknet.py → nnvm/tutorials/from_darknet.py (file moved)
tutorials/nnvm/from_mxnet.py → nnvm/tutorials/from_mxnet.py (file moved)
tutorials/nnvm/from_mxnet_to_webgl.py → nnvm/tutorials/from_mxnet_to_webgl.py (file moved)
tutorials/nnvm/from_onnx.py → nnvm/tutorials/from_onnx.py (file moved)
tutorials/nnvm/from_tensorflow.py → nnvm/tutorials/from_tensorflow.py (file moved)
tutorials/nnvm/get_started.py → nnvm/tutorials/get_started.py (file moved)
tutorials/nnvm/nlp/from_darknet_rnn.py → nnvm/tutorials/nlp/from_darknet_rnn.py (file moved)
tutorials/nnvm/nlp/keras_s2s_translate.py → nnvm/tutorials/nlp/keras_s2s_translate.py (file moved)
tutorials/nnvm/tune_nnvm_arm.py → nnvm/tutorials/tune_nnvm_arm.py (file moved)
tutorials/nnvm/tune_nnvm_cuda.py → nnvm/tutorials/tune_nnvm_cuda.py (file moved)
tutorials/nnvm/tune_nnvm_mobile_gpu.py → nnvm/tutorials/tune_nnvm_mobile_gpu.py (file moved)
tutorials/nnvm/tune_nnvm_x86.py → nnvm/tutorials/tune_nnvm_x86.py (file moved)
tutorials/nnvm/using_external_lib.py → nnvm/tutorials/using_external_lib.py (file moved)
tutorials/nnvm/web/resnet.html → nnvm/tutorials/web/resnet.html (file moved)
tutorials/frontend/deploy_ssd_gluoncv.py

...
@@ -9,9 +9,7 @@ We will use GluonCV pre-trained SSD model and convert it to Relay IR
 import tvm

 from matplotlib import pyplot as plt
-from nnvm import compiler
-from nnvm.frontend import from_mxnet
-from nnvm.testing.config import ctx_list
+from tvm.relay.testing.config import ctx_list
 from tvm import relay
 from tvm.contrib import graph_runtime
 from gluoncv import model_zoo, data, utils
...
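The import swap above reflects the broader migration this commit belongs to: model conversion in this tutorial now goes through relay.frontend.from_mxnet rather than nnvm.frontend.from_mxnet. For orientation, a minimal sketch of the Relay conversion path, assuming the relay frontend API of this period; the model name, input shape, and target below are illustrative assumptions, not content from this diff:

# Sketch only: converting a GluonCV SSD model via the Relay frontend.
# Model name, input shape, and target are illustrative assumptions.
import tvm
from tvm import relay
from gluoncv import model_zoo

block = model_zoo.get_model("ssd_512_resnet50_v1_voc", pretrained=True)
dshape = (1, 3, 512, 512)

# Replaces nnvm.frontend.from_mxnet: returns a Relay function plus params.
net, params = relay.frontend.from_mxnet(block, shape={"data": dshape})

with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(net, "llvm", params=params)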
tutorials/nnvm_quick_start.py deleted (100644 → 0)

"""
.. _tutorial-nnvm-quick-start:

Quick Start Tutorial for Compiling Deep Learning Models
=======================================================
**Author**: `Yao Wang <https://github.com/kevinthesun>`_

This example shows how to build a neural network with NNVM python frontend and
generate runtime library for Nvidia GPU with TVM.
Notice that you need to build TVM with cuda and llvm enabled.
"""

######################################################################
# Overview for Supported Hardware Backend of TVM
# ----------------------------------------------
# The image below shows hardware backend currently supported by TVM:
#
# .. image:: https://github.com/dmlc/web-data/raw/master/tvm/tutorial/tvm_support_list.png
#    :align: center
#    :scale: 100%
#
# In this tutorial, we'll choose cuda and llvm as target backends.
# To begin with, let's import NNVM and TVM.

import numpy as np

import nnvm.compiler
import nnvm.testing
import tvm
from tvm.contrib import graph_runtime

######################################################################
# Define Neural Network in NNVM
# -----------------------------
# First, let's define a neural network with nnvm python frontend.
# For simplicity, we'll use pre-defined resnet-18 network in NNVM.
# Parameters are initialized with Xavier initializer.
# NNVM also supports other model formats such as MXNet, CoreML, ONNX and
# Tensorflow.
#
# In this tutorial, we assume we will do inference on our device
# and the batch size is set to be 1. Input images are RGB color
# images of size 224 * 224. We can call the :any:`nnvm.symbol.debug_str`
# to show the network structure.

batch_size = 1
num_class = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
out_shape = (batch_size, num_class)

net, params = nnvm.testing.resnet.get_workload(
    num_layers=18, batch_size=batch_size, image_shape=image_shape)
print(net.debug_str())

######################################################################
# Compilation
# -----------
# Next step is to compile the model using the NNVM/TVM pipeline.
# Users can specify the optimization level of the compilation.
# Currently this value can be 0 to 3. The optimization passes include
# operator fusion, pre-computation, layout transformation and so on.
#
# :any:`nnvm.compiler.build` returns three components: the execution graph in
# json format, the TVM module library of compiled functions specifically
# for this graph on the target hardware, and the parameter blobs of
# the model. During the compilation, NNVM does the graph-level
# optimization while TVM does the tensor-level optimization, resulting
# in an optimized runtime module for model serving.
#
# We'll first compile for Nvidia GPU. Behind the scene, `nnvm.compiler.build`
# first does a number of graph-level optimizations, e.g. pruning, fusing, etc.,
# then registers the operators (i.e. the nodes of the optimized graphs) to
# TVM implementations to generate a `tvm.module`.
# To generate the module library, TVM will first transfer the High level IR
# into the lower intrinsic IR of the specified target backend, which is CUDA
# in this example. Then the machine code will be generated as the module library.

opt_level = 3
target = tvm.target.cuda()
with nnvm.compiler.build_config(opt_level=opt_level):
    graph, lib, params = nnvm.compiler.build(
        net, target, shape={"data": data_shape}, params=params)

#####################################################################
# Run the generate library
# ------------------------
# Now we can create graph runtime and run the module on Nvidia GPU.

# create random input
ctx = tvm.gpu()
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
# create module
module = graph_runtime.create(graph, lib, ctx)
# set input and parameters
module.set_input("data", data)
module.set_input(**params)
# run
module.run()
# get output
out = module.get_output(0, tvm.nd.empty(out_shape))
# convert to numpy
out.asnumpy()

# Print first 10 elements of output
print(out.asnumpy().flatten()[0:10])

######################################################################
# Save and Load Compiled Module
# -----------------------------
# We can also save the graph, lib and parameters into files and load them
# back in deploy environment.

####################################################
# save the graph, lib and params into separate files
from tvm.contrib import util

temp = util.tempdir()
path_lib = temp.relpath("deploy_lib.tar")
lib.export_library(path_lib)
with open(temp.relpath("deploy_graph.json"), "w") as fo:
    fo.write(graph.json())
with open(temp.relpath("deploy_param.params"), "wb") as fo:
    fo.write(nnvm.compiler.save_param_dict(params))
print(temp.listdir())

####################################################
# load the module back.
loaded_json = open(temp.relpath("deploy_graph.json")).read()
loaded_lib = tvm.module.load(path_lib)
loaded_params = bytearray(
    open(temp.relpath("deploy_param.params"), "rb").read())
input_data = tvm.nd.array(
    np.random.uniform(size=data_shape).astype("float32"))

module = graph_runtime.create(loaded_json, loaded_lib, ctx)
module.load_params(loaded_params)
module.run(data=input_data)
out = module.get_output(0).asnumpy()
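For reference, the quick-start workflow deleted above maps directly onto Relay, the pipeline these docs are moving toward. The following is a minimal sketch assuming the tvm.relay API of this period (relay.testing.resnet.get_workload, relay.build_config, relay.build); it is illustrative only and not part of this diff:

# Sketch only: Relay equivalent of the deleted NNVM quick start.
# Assumes the tvm.relay API of this period; not part of this commit.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime

batch_size = 1
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape

# relay.testing.resnet mirrors nnvm.testing.resnet
net, params = relay.testing.resnet.get_workload(
    num_layers=18, batch_size=batch_size, image_shape=image_shape)

# relay.build plays the role of nnvm.compiler.build; input shapes are
# carried by the Relay function itself, so no shape dict is needed.
target = tvm.target.cuda()
with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(net, target, params=params)

# The graph runtime interface is unchanged.
ctx = tvm.gpu()
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = graph_runtime.create(graph, lib, ctx)
module.set_input("data", data)
module.set_input(**params)
module.run()
print(module.get_output(0).asnumpy().flatten()[0:10])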
tutorials/get_started.py → tutorials/tensor_expr_get_started.py

 """
-Get Started with TVM
-====================
+Get Started with Tensor Expression
+==================================
 **Author**: `Tianqi Chen <https://tqchen.github.io>`_

-This is an introduction tutorial to TVM.
-TVM is a domain specific language for efficient kernel construction.
+This is an introduction tutorial to Tensor expression language in TVM.
+TVM uses a domain specific tensor expression for efficient kernel construction.

-In this tutorial, we will demonstrate the basic workflow in TVM.
+In this tutorial, we will demonstrate the basic workflow to use
+the tensor expression language.
 """
 from __future__ import absolute_import, print_function
...
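To make the renamed tutorial's subject concrete: the tensor expression language declares a computation symbolically, schedules it, and then compiles it for a target. A minimal vector-add sketch in that spirit, assuming the top-level API of this period (tvm.placeholder, tvm.compute, tvm.create_schedule, tvm.build); illustrative only:

# Sketch only: a vector-add kernel in the tensor expression language.
import numpy as np
import tvm

n = tvm.var("n")                     # symbolic vector length
A = tvm.placeholder((n,), name="A")  # declare inputs
B = tvm.placeholder((n,), name="B")
# declare the computation rule; nothing runs yet
C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")

s = tvm.create_schedule(C.op)        # default schedule
fadd = tvm.build(s, [A, B, C], "llvm", name="fadd")  # compile for CPU

# run the compiled kernel on random data
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=1024).astype(B.dtype), ctx)
c = tvm.nd.array(np.zeros(1024, dtype=C.dtype), ctx)
fadd(a, b, c)
np.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())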