Project: wenyuanbo / tic

Commit ef39eac3, authored Sep 13, 2018 by Siju, committed by Tianqi Chen on Sep 13, 2018.
[NNVM]Tensorflow and Onnx basic ops (#1666)
Parent: b25c15de
Showing 4 changed files with 212 additions and 2 deletions:

    nnvm/python/nnvm/frontend/onnx.py                         +22   -2
    nnvm/python/nnvm/frontend/tensorflow.py                   +20   -0
    nnvm/tests/python/frontend/onnx/test_forward.py           +106  -0
    nnvm/tests/python/frontend/tensorflow/test_forward.py     +64   -0
nnvm/python/nnvm/frontend/onnx.py
@@ -577,6 +577,26 @@ class HardSigmoid(OnnxOpConverter):
         attr = {'a_min': 0, 'a_max': 1}
         return AttrCvt(op_name='clip')([transformX], attr)
 
+class ArgMax(OnnxOpConverter):
+    """ Operator converter for ArgMax.
+    """
+    @classmethod
+    def _impl_v1(cls, inputs, attr, params):
+        axis = attr.get('axis', 0)
+        keepdims = attr.get('keepdims', True)
+        attr = {'axis': axis, 'keepdims': keepdims}
+        return AttrCvt(op_name='argmax')(inputs, attr)
+
+class ArgMin(OnnxOpConverter):
+    """ Operator converter for ArgMin.
+    """
+    @classmethod
+    def _impl_v1(cls, inputs, attr, params):
+        axis = attr.get('axis', 0)
+        keepdims = attr.get('keepdims', True)
+        attr = {'axis': axis, 'keepdims': keepdims}
+        return AttrCvt(op_name='argmin')(inputs, attr)
+
 # compatible operators that do NOT require any conversion.
 _identity_list = []
@@ -673,8 +693,8 @@ def _get_convert_map(opset):
         # 'ReduceMean'
         # 'ReduceProd'
         # 'ReduceLogSumExp'
-        # 'ArgMax'
-        # 'ArgMin'
+        'ArgMax': ArgMax.get_converter(opset),
+        'ArgMin': ArgMin.get_converter(opset),
 
         # defs/tensor
         'Cast': Cast.get_converter(opset),
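Both converters forward ONNX's documented defaults (axis=0, keepdims=1) to NNVM's argmax/argmin, so keepdims only decides whether the reduced axis survives as a length-1 dimension. A small NumPy illustration of the two output shapes (not part of the commit; the array shape is arbitrary):

    import numpy as np

    data = np.random.uniform(-10, 10, (3, 4, 4)).astype(np.int32)

    # keepdims=0: the reduced axis disappears from the result.
    flat = np.argmax(data, axis=1)        # shape (3, 4)

    # keepdims=1: the reduced axis is kept with length 1, which is
    # what the _argmax_numpy helper in the tests below reproduces.
    kept = np.expand_dims(flat, 1)        # shape (3, 1, 4)

    assert flat.shape == (3, 4)
    assert kept.shape == (3, 1, 4)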
nnvm/python/nnvm/frontend/tensorflow.py
@@ -650,6 +650,7 @@ def _pad(name):
                        ignores=['Tpaddings'],)(new_inputs, attr)
     return _impl
 
+
 def _transpose():
     def _impl(inputs, attr, params):
         # If perm is not specified, axes is left empty,
@@ -680,6 +681,19 @@ def _range():
         return _sym.Variable(name=name, shape=params[name].shape)
     return _impl
 
+def _elu():
+    def _impl(inputs, attr, params):
+        alpha = 1.0
+        return -alpha * _sym.relu(1 - _sym.exp(inputs[0])) + _sym.relu(inputs[0])
+    return _impl
+
+def _selu():
+    def _impl(inputs, attr, params):
+        alpha = 1.6732632423543772848170429916717
+        gamma = 1.0507009873554804934193349852946
+        return gamma * (-alpha * _sym.relu(1 - _sym.exp(inputs[0])) + _sym.relu(inputs[0]))
+    return _impl
+
 # compatible operators that do NOT require any conversion.
 _identity_list = []
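NNVM's symbol API has relu and exp but no dedicated elu/selu symbol, so both converters rewrite ELU through the identity elu(x) = -alpha * relu(1 - exp(x)) + relu(x): for x > 0 the relu(1 - exp(x)) term vanishes and the expression reduces to x, while for x <= 0 it reduces to alpha * (exp(x) - 1), the usual piecewise definition. A standalone NumPy check of that identity (not part of the commit):

    import numpy as np

    def relu(x):
        return np.maximum(x, 0.0)

    def elu_reference(x, alpha=1.0):
        # Standard piecewise ELU.
        return np.where(x > 0, x, alpha * (np.exp(x) - 1))

    def elu_rewritten(x, alpha=1.0):
        # The relu/exp formulation used by _elu() above.
        return -alpha * relu(1 - np.exp(x)) + relu(x)

    x = np.random.uniform(-5, 5, size=(1, 3, 10, 10)).astype(np.float32)
    np.testing.assert_allclose(elu_reference(x), elu_rewritten(x),
                               rtol=1e-5, atol=1e-6)

    # SELU is the same expression scaled by gamma, with fixed constants.
    alpha, gamma = 1.6732632423543772, 1.0507009873554805
    selu_ref = gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1))
    selu_rw = gamma * (-alpha * relu(1 - np.exp(x)) + relu(x))
    np.testing.assert_allclose(selu_ref, selu_rw, rtol=1e-5, atol=1e-6)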
@@ -695,12 +709,15 @@ _convert_map = {
     'BatchNormWithGlobalNormalization'  : _batch_norm(),
     'BiasAdd'                           : _bias_add(),
     'Cast'                              : _cast(),
+    'Ceil'                              : AttrCvt('ceil'),
     'CheckNumerics'                     : _check_numerics(),
     'Concat'                            : _concat(),
     'ConcatV2'                          : _concatV2(),
     'Conv2D'                            : _conv('conv'),
     'DecodeJpeg'                        : _decode_image(),
+    'Elu'                               : _elu(),
     'ExpandDims'                        : _expand_dims(),
+    'Floor'                             : AttrCvt('floor'),
     'Identity'                          : _identity(),
     'MatMul'                            : _matmul(),
     'MaxPool'                           : _pooling('max_pool'),
@@ -712,9 +729,11 @@ _convert_map = {
     'Sum'                               : _sum(),
     'Square'                            : _square(),
     'Pack'                              : _pack(),
+    'LeakyRelu'                         : AttrCvt('leaky_relu'),
     'Relu'                              : AttrCvt('relu'),
     'Reshape'                           : _reshape(),
     'ResizeBilinear'                    : _resize_bilinear(),
+    'Selu'                              : _selu(),
     'Softmax'                           : AttrCvt('softmax', {'axis': ('axis', 1)}),
     'Rsqrt'                             : _rsqrt(),
     'Squeeze'                           : _squeeze(),
@@ -732,6 +751,7 @@ _convert_map = {
     'Range'                             : _range(),
     'Rank'                              : _rank(),
     'Transpose'                         : _transpose(),
+    'Tanh'                              : AttrCvt('tanh'),
 }
 
 # _convert_map_rnn defines maps of rnn operator name to
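These table entries are what the TensorFlow frontend consults while walking a frozen GraphDef; an op imports only if its name appears here, and entries like AttrCvt('ceil') are plain renames onto the NNVM symbol of the same arity. A minimal usage sketch, assuming the TF 1.x API and the nnvm package of this era (the graph and names are illustrative, not from the commit):

    import tensorflow as tf
    import nnvm.frontend

    # Build a tiny graph that uses one of the newly mapped ops.
    with tf.Graph().as_default() as graph:
        x = tf.placeholder(tf.float32, shape=(1, 3, 10, 10), name='x')
        tf.ceil(x, name='out')
        graph_def = graph.as_graph_def()

    # Convert the TensorFlow GraphDef into an NNVM symbol plus params.
    sym, params = nnvm.frontend.from_tensorflow(graph_def)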
nnvm/tests/python/frontend/onnx/test_forward.py
@@ -548,6 +548,111 @@ def test_forward_hardsigmoid():
     verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
     verify_hardsigmoid((20, 20), 0.3, 0.4)
 
+def verify_argmin(input_dim, axis=None, keepdims=None):
+    def _argmin_numpy(data, axis=0, keepdims=True):
+        result = np.argmin(data, axis=axis)
+        if (keepdims == 1):
+            result = np.expand_dims(result, axis)
+        return result.astype(data.dtype)
+    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
+    if keepdims is None and axis is None:
+        b_np = _argmin_numpy(a_np1)
+        node = onnx.helper.make_node('ArgMin',
+                                     inputs=['a_np1'],
+                                     outputs=['out'])
+    elif axis is None:
+        b_np = _argmin_numpy(a_np1, keepdims=keepdims)
+        node = onnx.helper.make_node('ArgMin',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     keepdims=keepdims)
+    elif keepdims is None:
+        b_np = _argmin_numpy(a_np1, axis=axis)
+        node = onnx.helper.make_node('ArgMin',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     axis=axis)
+    else:
+        b_np = _argmin_numpy(a_np1, axis=axis, keepdims=keepdims)
+        node = onnx.helper.make_node('ArgMin',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     axis=axis,
+                                     keepdims=keepdims)
+    graph = helper.make_graph([node],
+                              "argmin_test",
+                              inputs=[helper.make_tensor_value_info("a_np1",
+                                                                    TensorProto.INT32,
+                                                                    list(a_np1.shape))],
+                              outputs=[helper.make_tensor_value_info("out",
+                                                                     TensorProto.INT32,
+                                                                     list(b_np.shape))])
+    model = helper.make_model(graph, producer_name='argmin_test')
+    for target, ctx in ctx_list():
+        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
+        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
+
+def verify_argmax(input_dim, axis=None, keepdims=None):
+    def _argmax_numpy(data, axis=0, keepdims=True):
+        result = np.argmax(data, axis=axis)
+        if (keepdims == 1):
+            result = np.expand_dims(result, axis)
+        return result.astype(data.dtype)
+    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
+    if keepdims is None and axis is None:
+        b_np = _argmax_numpy(a_np1)
+        node = onnx.helper.make_node('ArgMax',
+                                     inputs=['a_np1'],
+                                     outputs=['out'])
+    elif axis is None:
+        b_np = _argmax_numpy(a_np1, keepdims=keepdims)
+        node = onnx.helper.make_node('ArgMax',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     keepdims=keepdims)
+    elif keepdims is None:
+        b_np = _argmax_numpy(a_np1, axis=axis)
+        node = onnx.helper.make_node('ArgMax',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     axis=axis)
+    else:
+        b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims)
+        node = onnx.helper.make_node('ArgMax',
+                                     inputs=['a_np1'],
+                                     outputs=['out'],
+                                     axis=axis,
+                                     keepdims=keepdims)
+    graph = helper.make_graph([node],
+                              "argmax_test",
+                              inputs=[helper.make_tensor_value_info("a_np1",
+                                                                    TensorProto.INT32,
+                                                                    list(a_np1.shape))],
+                              outputs=[helper.make_tensor_value_info("out",
+                                                                     TensorProto.INT32,
+                                                                     list(b_np.shape))])
+    model = helper.make_model(graph, producer_name='argmax_test')
+    for target, ctx in ctx_list():
+        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
+        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
+
+def test_forward_arg_min_max():
+    '''Verify argmin and argmax'''
+    verify_argmin([3, 4, 4])
+    verify_argmax([3, 4, 4])
+    verify_argmin([3, 4, 4], axis=1)
+    verify_argmax([3, 4, 4], axis=0)
+    verify_argmin([3, 4, 4], keepdims=0)
+    verify_argmax([3, 4, 4], keepdims=1)
+    for axis in [0, 1, 2]:
+        for keepdims in [True, False]:
+            verify_argmin([3, 4, 4], axis, keepdims)
+            verify_argmax([3, 4, 4], axis, keepdims)
+
 if __name__ == '__main__':
     # verify_super_resolution_example()
     # verify_squeezenet1_1()
@@ -570,3 +675,4 @@ if __name__ == '__main__':
     test_forward_max()
     test_forward_mean()
     test_forward_hardsigmoid()
+    test_forward_arg_min_max()
nnvm/tests/python/frontend/tensorflow/test_forward.py
@@ -877,6 +877,63 @@ def test_forward_transpose():
     _test_forward_transpose((2, 3, 4), (0, 1, 2))
     _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
 
+def test_forward_ceil():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.ceil(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Ceil:0')
+
+def test_forward_floor():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.floor(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Floor:0')
+
+def test_forward_relu():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.nn.relu(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Relu:0')
+
+def test_forward_leaky_relu():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.nn.leaky_relu(in1, alpha=0.4)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'LeakyRelu:0')
+
+def test_forward_elu():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.nn.elu(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Elu:0')
+
+def test_forward_selu():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.nn.selu(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Selu:0')
+
+def test_forward_tanh():
+    ishape = (1, 3, 10, 10)
+    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
+    with tf.Graph().as_default():
+        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
+        tf.nn.tanh(in1)
+        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Tanh:0')
+
+
 #######################################################################
 # Main
 # ----
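The string pairs passed to compare_tf_with_tvm rely on TensorFlow's automatic op naming: the unnamed placeholder becomes 'Placeholder', each op gets its type name, and ':0' selects the op's first output tensor. A standalone TF 1.x sketch of that same lookup (not part of the commit):

    import numpy as np
    import tensorflow as tf

    inp = np.random.uniform(size=(1, 3, 10, 10)).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp.shape, dtype=inp.dtype)  # auto-named 'Placeholder'
        tf.ceil(in1)                                            # auto-named 'Ceil'
        with tf.Session() as sess:
            # Fetch and feed by tensor name, as the test helper does.
            out = sess.run('Ceil:0', feed_dict={'Placeholder:0': inp})
    assert np.array_equal(out, np.ceil(inp))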
@@ -905,3 +962,10 @@ if __name__ == '__main__':
     test_forward_ptb()
     test_forward_lrn()
     test_forward_l2_normalize()
+    test_forward_ceil()
+    test_forward_floor()
+    test_forward_relu()
+    test_forward_leaky_relu()
+    test_forward_elu()
+    test_forward_selu()
+    test_forward_tanh()