wenyuanbo / tic · Commits

Commit 77869913
Authored by Ashutosh Parkhi on Nov 21, 2018; committed by Tianqi Chen on Nov 21, 2018.
tensorflow frontend supports user given outputs (#1913)
Parent: 2c231b5a

Showing 2 changed files, with 91 additions and 46 deletions:

  nnvm/python/nnvm/frontend/tensorflow.py (+11, -6)
  nnvm/tests/python/frontend/tensorflow/test_forward.py (+80, -40)
nnvm/python/nnvm/frontend/tensorflow.py (view file @ 77869913)
@@ -1039,7 +1039,7 @@ class GraphProto(object):
         self._num_param = 0
         self._num_rnn_layer = False
 
-    def from_tensorflow(self, graph, layout="NHWC", shape=None):
+    def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
         """Construct nnvm nodes from tensorflow graph definition - GraphDef.
 
         Follow the tensorflow graph definition to parse and convert it to NNVM.

@@ -1086,6 +1086,7 @@ class GraphProto(object):
             raise NotImplementedError( \
                 "The following operators are not implemented: {}".format(missing_operators))
 
+        final_op = None
         # Parse the nodes to re-create TF graph using Symbol API of NNVM
         for node in graph.node:
             # Tensorflow doesn't have seperate list for params extraction.

@@ -1165,6 +1166,7 @@ class GraphProto(object):
                 # Assuming only one output.
                 self._nodes[node.name] = op
+                final_op = op
 
                 # Infer shapes if passed explicitely
                 node_output = self._nodes[node.name]

@@ -1175,13 +1177,16 @@ class GraphProto(object):
                     _, out_shapes = graph_util.infer_shape(g, **shape_dict)
                     self._output_shapes[node.name] = out_shapes
 
-        # Assume the final node is the output node
-        out = node_output
+        out = []
+        if outputs is None:
+            out.append(final_op)
+        else:
+            out = [self._nodes[out_name] for out_name in outputs]
 
         #Add the RNN outputs also with 'head' nodes of the nnvm graph
         if self._num_rnn_layer:
             out_rnn = _sym.concatenate(*self._out_rnn, axis=0)
-            out = [out, out_rnn]
+            out.append(out_rnn)
 
         if isinstance(out, list):
             out = _sym.Group(out)

@@ -1378,7 +1383,7 @@ class GraphProto(object):
         return inputs
 
-def from_tensorflow(graph, layout="NHWC", shape=None):
+def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None):
    """ Load tensorflow graph which is a python tensorflow graph object into nnvm graph.
    The companion parameters will be handled automatically.

@@ -1396,5 +1401,5 @@ def from_tensorflow(graph, layout="NHWC", shape=None):
        Dict of converted parameters stored in tvm.ndarray format
    """
    g = GraphProto()
-    sym, params = g.from_tensorflow(graph, layout, shape)
+    sym, params = g.from_tensorflow(graph, layout, shape, outputs)
    return sym, params
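The net effect of this file's changes: callers of nnvm.frontend.from_tensorflow can name the output nodes explicitly instead of relying on the last node in the GraphDef. A minimal sketch of the new usage (the toy graph and the node names 'add1'/'sub1' are illustrative, not from this commit):

    import tensorflow as tf
    import nnvm

    # Toy graph with two candidate outputs (names are hypothetical).
    with tf.Graph().as_default() as graph:
        a = tf.placeholder(tf.float32, shape=[2, 2], name='a')
        b = tf.placeholder(tf.float32, shape=[2, 2], name='b')
        tf.add(a, b, name='add1')
        tf.subtract(a, b, name='sub1')
        graph_def = graph.as_graph_def(add_shapes=True)

    # Before this commit only the final node was treated as the output;
    # now both nodes can be requested by name (node names, without the ':0' port).
    sym, params = nnvm.frontend.from_tensorflow(
        graph_def, layout="NHWC",
        shape={'a': (2, 2), 'b': (2, 2)},
        outputs=['add1', 'sub1'])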
nnvm/tests/python/frontend/tensorflow/test_forward.py (view file @ 77869913)
@@ -26,8 +26,15 @@ import nnvm.testing.tf
 #######################################################################
 # Generic run functions for TVM & tensorflow
 # ------------------------------------------
-def run_tvm_graph(graph_def, input_data, input_node, num_output=1, target='llvm'):
+def convert_to_list(x):
+    if not isinstance(x, list):
+        x = [x]
+    return x
+
+def run_tvm_graph(graph_def, input_data, input_node, num_output=1, target='llvm', out_names=None):
     """ Generic function to compile on nnvm and execute on tvm """
+    input_data = convert_to_list(input_data)
+    input_node = convert_to_list(input_node)
 
     layout = None
     if target == "cuda":

@@ -43,8 +50,8 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1, target='llvm'
     else:
         shape_dict = {input_node: input_data.shape}
         dtype_dict = {input_node: input_data.dtype}
 
-    sym, params = nnvm.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)
+    sym, params = nnvm.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict, outputs=out_names)
     graph, lib, params = nnvm.compiler.build(sym, target=target, target_host=target_host, shape=shape_dict,
                                              dtype=dtype_dict, params=params)

@@ -52,37 +59,34 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1, target='llvm'
     from tvm.contrib import graph_runtime
     m = graph_runtime.create(graph, lib, ctx)
     # set inputs
-    if isinstance(input_data, list):
-        for i, e in enumerate(input_node):
-            m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
-    else:
-        m.set_input(input_node, tvm.nd.array(input_data.astype(input_data.dtype)))
+    for i, e in enumerate(input_node):
+        m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
     m.set_input(**params)
     # execute
     m.run()
     # get outputs
-    if num_output > 1:
-        tvm_output_list = []
-        for i in range(0, num_output):
-            tvm_output = m.get_output(i)
-            tvm_output_list.append(tvm_output.asnumpy())
-        return tvm_output_list
-    else:
-        tvm_output = m.get_output(0)
-        return tvm_output.asnumpy()
+    assert out_names is None or num_output == len(out_names), "out_names: {} num_output: {}".format(
+        out_names, num_output)
+    tvm_output_list = []
+    for i in range(0, num_output):
+        tvm_output = m.get_output(i)
+        tvm_output_list.append(tvm_output.asnumpy())
+    return tvm_output_list
 
 def run_tf_graph(sess, input_data, input_node, output_node):
     """ Generic function to execute tensorflow """
+    input_data = convert_to_list(input_data)
+    input_node = convert_to_list(input_node)
+    output_node = convert_to_list(output_node)
 
-    tensor = sess.graph.get_tensor_by_name(output_node)
+    tensor = [0] * len(output_node)
+    for i in range(len(output_node)):
+        tensor[i] = sess.graph.get_tensor_by_name(output_node[i])
 
-    if isinstance(input_data, list):
-        input_dict = {}
-        for i, e in enumerate(input_node):
-            input_dict[e] = input_data[i]
-    else:
-        input_dict = {input_node: input_data}
+    input_dict = {}
+    for i, e in enumerate(input_node):
+        input_dict[e] = input_data[i]
 
     output_data = sess.run(tensor, input_dict)
     return output_data
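The convert_to_list helper is what lets both run functions accept either a single value or a list of values. Its behavior, restated outside the diff for clarity (the assertions are illustrative):

    def convert_to_list(x):
        # Wrap a non-list argument so callers can pass either a single
        # item or a list, and downstream code can always iterate.
        if not isinstance(x, list):
            x = [x]
        return x

    assert convert_to_list('input:0') == ['input:0']
    assert convert_to_list(['in1:0', 'in2:0']) == ['in1:0', 'in2:0']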
@@ -91,14 +95,16 @@ def run_tf_graph(sess, input_data, input_node, output_node):
 def compare_tf_with_tvm(in_data, in_name, out_name, init_global_variables=False, no_gpu=False):
     """Generic function to generate and compare tensorflow and TVM output"""
-    out_node = out_name.split(':')[0] if ":" in out_name else out_name
+    out_name = convert_to_list(out_name)
+    out_node = [0] * len(out_name)
+    for i in range(len(out_name)):
+        out_node[i] = out_name[i].split(':')[0] if ":" in out_name[i] else out_name[i]
 
-    if isinstance(in_name, list):
-        in_node = [0] * len(in_name)
-        for i in range(len(in_name)):
-            in_node[i] = in_name[i].split(':')[0] if ":" in in_name[i] else in_name[i]
-    else:
-        in_node = in_name.split(':')[0] if ":" in in_name else in_name
+    in_data = convert_to_list(in_data)
+    in_name = convert_to_list(in_name)
+    in_node = [0] * len(in_name)
+    for i in range(len(in_name)):
+        in_node[i] = in_name[i].split(':')[0] if ":" in in_name[i] else in_name[i]
 
     with tf.Session() as sess:
         if init_global_variables:
@@ -106,9 +112,8 @@ def compare_tf_with_tvm(in_data, in_name, out_name, init_global_variables=False,
         final_graph_def = tf.graph_util.convert_variables_to_constants(
             sess,
             sess.graph.as_graph_def(add_shapes=True),
-            [out_node],
+            out_node,
             )
-
         tf_output = run_tf_graph(sess, in_data, in_name, out_name)
 
         for device in ["llvm", "cuda"]:
@@ -120,7 +125,10 @@ def compare_tf_with_tvm(in_data, in_name, out_name, init_global_variables=False,
                 continue
 
             tvm_output = run_tvm_graph(final_graph_def, in_data, in_node, target=device)
-            tvm.testing.assert_allclose(tf_output, tvm_output, atol=1e-5, rtol=1e-5)
+            # since the names from tensorflow and nnvm runs are not exactly same,
+            # first len(tf_output) will be compared
+            for i in range(len(tf_output)):
+                tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
 
         sess.close()
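With both run functions normalized to lists, compare_tf_with_tvm now handles single- and multi-output graphs with the same call shape; for example (tensor names and data variables hypothetical):

    # Single output, as before:
    compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Relu:0')
    # Multiple inputs and outputs, compared element-wise:
    compare_tf_with_tvm([data1, data2], ['in1:0', 'in2:0'], ['out1:0', 'out2:0'])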
@@ -260,6 +268,7 @@ def test_forward_reshape():
     _test_reshape(np.arange(6), [-1])
 
+#######################################################################
 #######################################################################
 # Squeeze
 # -------
@@ -509,6 +518,35 @@ def test_forward_multi_input():
                   ['in1:0', 'in2:0', 'in3:0', 'in4:0'], 'out:0')
 
 #######################################################################
+# Multi Output to Graph
+# ---------------------
+def test_forward_multi_output():
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(tf.int32, shape=[3, 3], name='in1')
+        in2 = tf.placeholder(tf.int32, shape=[3, 3], name='in2')
+        in3 = tf.placeholder(tf.int32, shape=[3, 3], name='in3')
+        in4 = tf.placeholder(tf.int32, shape=[3, 3], name='in4')
+
+        out1 = tf.add(in1, in2, name='out1')
+        out2 = tf.subtract(in3, in4, name='out2')
+        in_data = np.arange(9, dtype='int32').reshape([3, 3])
+        in_data = [in_data] * 4
+        in_name = ['in1:0', 'in2:0', 'in3:0', 'in4:0']
+        out_name = ['out1:0', 'out2:0']
+        out_node = [out.strip(':0') for out in out_name]
+        in_node = [inp.strip(':0') for inp in in_name]
+
+        with tf.Session() as sess:
+            final_graph_def = tf.graph_util.convert_variables_to_constants(
+                sess, sess.graph.as_graph_def(add_shapes=True), out_node,)
+            tf_output = run_tf_graph(sess, in_data, in_name, out_name)
+            tvm_output = run_tvm_graph(final_graph_def, in_data, in_node, target='llvm',
+                                       out_names=out_node, num_output=2)
+            for i in range(len(tf_output)):
+                tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
+
+#######################################################################
 # Resize Bilinear
 # ---------------
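One caveat in the new test_forward_multi_output above: str.strip(':0') strips a set of characters from both ends, not the literal suffix ':0'. It happens to be safe for names like 'out1:0' and 'in1:0', but it would corrupt a name such as 'out10:0' (yielding 'out1'). A suffix-safe variant, illustrative only and not part of this commit:

    def drop_port(name):
        # 'out1:0' -> 'out1'; leaves names without a port untouched and
        # never eats trailing '0' characters from the node name itself.
        return name.split(':')[0] if ':' in name else name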
@@ -580,7 +618,7 @@ def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
         out_state_c = np.reshape(out_state_tup[0], (batch_size, num_hidden))
         out_state_h = np.reshape(out_state_tup[1], (batch_size, num_hidden))
         tvm_out = [out, out_state_c, out_state_h]
-        tvm.testing.assert_allclose(tf_out, tvm_out, rtol=1e-3, atol=1e-3)
+        tvm.testing.assert_allclose(tf_out[0], tvm_out[0], rtol=1e-3, atol=1e-3)
 
 def test_forward_lstm():
     '''test LSTM block cell'''
@@ -653,7 +691,7 @@ def test_forward_inception_v3():
     with tf.Session() as sess:
         tf_output = run_tf_graph(sess, data, 'input:0', 'InceptionV3/Predictions/Reshape_1:0')
         tvm_output = run_tvm_graph(graph_def, data, 'input')
-        tvm.testing.assert_allclose(tf_output, tvm_output, rtol=1e-5, atol=1e-5)
+        tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
 
 #######################################################################
 # Inception V1
@@ -689,7 +727,7 @@ def test_forward_inception_v1():
     with tf.Session() as sess:
         tf_output = run_tf_graph(sess, data, 'DecodeJpeg/contents:0', 'softmax:0')
         tvm_output = run_tvm_graph(graph_def, tvm_data, 'DecodeJpeg/contents')
-        tvm.testing.assert_allclose(tf_output, tvm_output, rtol=1e-5, atol=1e-5)
+        tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
 
 #######################################################################
 # Mobilenet
@@ -712,7 +750,7 @@ def test_forward_mobilenet():
         graph_def = nnvm.testing.tf.AddShapesToGraphDef(sess, out_node)
         tf_output = run_tf_graph(sess, data, 'input:0', out_node + ':0')
         tvm_output = run_tvm_graph(graph_def, data, 'input')
-        tvm.testing.assert_allclose(np.squeeze(tvm_output), np.squeeze(tf_output), rtol=1e-5, atol=1e-5)
+        tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5)
 
 #######################################################################
 # ResnetV2
@@ -731,7 +769,7 @@ def test_forward_resnetv2():
     with tf.Session() as sess:
         tf_output = run_tf_graph(sess, data, 'input_tensor:0', out_node + ':0')
         tvm_output = run_tvm_graph(graph_def, data, 'input_tensor', tf_output.shape, 'float32')
-        tvm.testing.assert_allclose(np.squeeze(tvm_output), np.squeeze(tf_output), rtol=1e-5, atol=1e-5)
+        tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5)
 
 #######################################################################
 # PTB
@@ -797,6 +835,7 @@ def test_forward_ptb():
             state_output = model.get_output(1, tvm.nd.empty(out_state_shape,
                                                             "float32")).asnumpy()
+
         sample = nnvm.testing.tf.pick_from_weight(tvm_output[0])
 
         return sample, state_output
 
     for x in data:
@@ -942,7 +981,7 @@ def test_forward_leaky_relu():
     with tf.Graph().as_default():
         in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
         tf.nn.leaky_relu(in1, alpha=0.4)
-        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'LeakyRelu:0')
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'LeakyRelu/mul:0')
 
 def test_forward_elu():
     ishape = (1, 3, 10, 10)
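The expected tensor in the leaky-relu test changes from 'LeakyRelu:0' to 'LeakyRelu/mul:0', presumably because the TensorFlow version used here lowers tf.nn.leaky_relu to primitive ops (a multiply feeding a maximum) rather than a single fused node. A quick way to see which names a graph actually exposes (a sketch, not part of the commit):

    import tensorflow as tf

    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, shape=[4], name='x')
        tf.nn.leaky_relu(x, alpha=0.4)
        # Print every node name in the GraphDef, e.g. 'LeakyRelu/mul'.
        print([n.name for n in g.as_graph_def().node])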
@@ -1042,6 +1081,7 @@ if __name__ == '__main__':
     # General
     test_forward_multi_input()
+    test_forward_multi_output()
     test_forward_variable()
 
     # End to End