wenyuanbo / tic / Commits

Commit 53511bf1, authored Mar 28, 2019 by Mark Rogers, committed by Tianqi Chen on Mar 28, 2019
Unified error handling in NNVM and Relay frontends (#2828)
parent e3206aa8

Showing 15 changed files with 471 additions and 371 deletions (+471 -371)
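Every file in this commit follows the same pattern: generic Python exceptions (NotImplementedError, RuntimeError, TypeError, ValueError) raised by the NNVM and Relay frontends are replaced with the operator-level error classes under tvm.error, using standardized messages that name the operator, the attribute, and the frontend. A minimal before/after sketch of that pattern, under stated assumptions (the converter name, the SUPPORTED_OPS set, and the attribute dict are illustrative placeholders, not taken from this diff):

import tvm

SUPPORTED_OPS = {'conv2d', 'dense'}  # placeholder set for illustration

# Before: ad-hoc generic exception.
def convert_op_old(op_name, attrs):
    if op_name not in SUPPORTED_OPS:
        raise NotImplementedError("{} not implemented".format(op_name))

# After: unified tvm.error classes with the standardized wording used throughout this commit.
def convert_op_new(op_name, attrs):
    if op_name not in SUPPORTED_OPS:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(op_name))
    if attrs.get('layout') not in ('NCHW', 'NHWC'):
        raise tvm.error.OpAttributeInvalid(
            'Value {} in attribute "layout" of operator {} is not valid.'.format(
                attrs.get('layout'), op_name))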
nnvm/python/nnvm/frontend/caffe2.py        +6   -7
nnvm/python/nnvm/frontend/common.py        +17  -1
nnvm/python/nnvm/frontend/coreml.py        +21  -13
nnvm/python/nnvm/frontend/darknet.py       +78  -96
nnvm/python/nnvm/frontend/keras.py         +39  -31
nnvm/python/nnvm/frontend/mxnet.py         +99  -111
nnvm/python/nnvm/frontend/onnx.py          +4   -3
nnvm/python/nnvm/frontend/tensorflow.py    +19  -10
python/tvm/relay/frontend/caffe2.py        +11  -8
python/tvm/relay/frontend/coreml.py        +22  -11
python/tvm/relay/frontend/keras.py         +48  -28
python/tvm/relay/frontend/mxnet.py         +51  -22
python/tvm/relay/frontend/onnx.py          +11  -5
python/tvm/relay/frontend/tensorflow.py    +22  -9
python/tvm/relay/frontend/tflite.py        +23  -16
nnvm/python/nnvm/frontend/caffe2.py

@@ -3,7 +3,7 @@
 from __future__ import absolute_import as _abs
 import tvm
 from nnvm import symbol as _sym
-from nnvm.frontend.common import get_nnvm_op, Renamer, AttrConverter as AttrCvt
+from .common import get_nnvm_op
 from .onnx_caffe2_utils import dimension_picker, dimension_constraint, infer_channels, revert_caffe2_pad
 from . import onnx

@@ -73,8 +73,8 @@ class Caffe2OpConverter(object):
         if hasattr(cls, '_impl'):
             return getattr(cls, '_impl')
-        raise NotImplementedError('{} not implemented'.format(
-            cls.__name__))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))

 _caffe2_internal_args = {

@@ -176,8 +176,7 @@ class Concat(Caffe2OpConverter):
                 return 1
             if order == 'NHWC':
                 return 3
-            raise RuntimeError(
-                "Unsupported storage order: {} in caffe2".format(order))
+            raise tvm.error.OpAttributeInvalid(
+                'Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))

         return AttrCvt(
             op_name='concatenate',

@@ -427,8 +426,8 @@ class Caffe2NetDef(object):
             # Add a sanitizing step to convert all byte strings in args to strings
             sym = convert_map[op_type](inputs, args, self._params)
         else:
-            raise NotImplementedError(
-                "Operator {} not implemented.".format(op_type))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Caffe2.'.format(op_type))
         return sym
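The Caffe2OpConverter hunk above shows the class-based dispatch the Caffe2 frontend uses: each operator is a converter class exposing a classmethod _impl, and a lookup on a class with no _impl now raises the unified OpNotImplemented error. A hedged sketch of that pattern, assuming the lookup method is named get_converter and using an illustrative operator class that is not part of this diff:

import tvm

class Caffe2OpConverter(object):
    """Base class for a per-operator converter (sketch of the pattern in this file)."""
    @classmethod
    def get_converter(cls):
        # Concrete converters define a classmethod `_impl`; if it is missing,
        # the frontend raises the unified OpNotImplemented error.
        if hasattr(cls, '_impl'):
            return getattr(cls, '_impl')
        raise tvm.error.OpNotImplemented(
            'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))

class Relu(Caffe2OpConverter):  # illustrative operator, not from the diff
    @classmethod
    def _impl(cls, inputs, args, params):
        return inputs[0]  # placeholder body

convert = Relu.get_converter()  # returns Relu._impl; an undefined operator would raise instead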
nnvm/python/nnvm/frontend/common.py

@@ -7,9 +7,25 @@ from .._base import string_types
 def get_nnvm_op(op_name):
     op = getattr(_sym, op_name)
     if not op:
-        raise RuntimeError("Unable to map op_name {} to nnvm.sym".format(op_name))
+        raise OpNotImplemented('Operator {} is not supported.'.format(op))
     return op

+def required_attr(attr, key, op_name):
+    assert isinstance(attr, dict)
+    if key not in attr:
+        raise OpAttributeRequired(
+            'Required attribute {} not found in operator {}'.format(key, op_name))
+    return attr[key]
+
+def parse_tshape(tshape):
+    """Parse tshape in string."""
+    return [int(x.strip()) for x in tshape.strip('()').split(',')]
+
+def parse_bool_str(attr, key, default='False'):
+    """Parse bool string to boolean."""
+    return attr.get(key, default).strip().lower() in ['true', '1', 't', 'y', 'yes']
+
 class Renamer(object):
     """A simply renamer for operators.
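common.py now carries the attribute helpers (required_attr, parse_tshape, parse_bool_str) that the Darknet and MXNet frontends below previously duplicated as private _darknet_* and _parse_* functions. A small usage sketch, assuming the definitions added above are importable from nnvm.frontend.common (the attribute dict is made up for illustration):

from nnvm.frontend.common import required_attr, parse_tshape, parse_bool_str

attrs = {'kernel': '(3, 3)', 'no_bias': 'True'}  # hypothetical converter input

kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))  # -> [3, 3]
use_bias = not parse_bool_str(attrs, 'no_bias')                  # -> False
missing = required_attr(attrs, 'num_filter', 'conv2d')           # raises OpAttributeRequired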
nnvm/python/nnvm/frontend/coreml.py

@@ -2,11 +2,10 @@
 """CoreML frontend."""
 from __future__ import absolute_import as _abs
 import numpy as np
 import tvm
+from .common import SymbolTable
 from .. import symbol as _sym
 from .._base import string_types
-from .common import SymbolTable

 __all__ = ['from_coreml']

@@ -83,7 +82,8 @@ def BatchnormLayerParams(op, insym, symtab):
     """Get layer of batchnorm parameter"""
     # this changes the symbol
     if op.instanceNormalization:
-        raise NotImplementedError("instance normalization not implemented")
+        msg = 'Operator "instance normalization" is not supported in frontend CoreML.'
+        raise tvm.error.OpNotImplemented(msg)
     else:
         params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
                   'beta':symtab.new_const(list(op.beta.floatValue)),

@@ -136,7 +136,8 @@ def ActivationParams(op, insym, symtab):
         betasym = symtab.new_const(beta)
         return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
             _sym.exp(insym), betasym)), alphasym)
-    raise NotImplementedError('%s not implemented' % whichActivation)
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend CoreML.'.format(whichActivation))

 def ScaleLayerParams(op, insym, symtab):
     """Scale layer params."""

@@ -158,7 +159,8 @@ def PoolingLayerParams(op, insym, symtab):
             return _sym.global_max_pool2d(insym)
         if op.type == 1:
             return _sym.global_avg_pool2d(insym)
-        raise NotImplementedError("Only max and average pooling implemented")
+        raise tvm.error.OpNotImplemented(
+            'Operator pooling (not max or average) is not supported in frontend CoreML.')
     else:
         params = {'pool_size':list(op.kernelSize),

@@ -178,7 +180,8 @@ def PoolingLayerParams(op, insym, symtab):
             params['padding'] = padding
             params['ceil_mode'] = True
         else:
-            raise NotImplementedError("Other convolution padding not implemented")
+            msg = 'Value {} in attribute PoolingPaddingType of operator Pooling is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(op.WhichOneof('PoolingPaddingType')))

         # consume padding layer
         if symtab.in_padding:

@@ -190,7 +193,8 @@ def PoolingLayerParams(op, insym, symtab):
             return _sym.max_pool2d(insym, **params)
         if op.type == 1:
             return _sym.avg_pool2d(insym, **params)
-        raise NotImplementedError("Only max and average pooling implemented")
+        msg = 'Operator pooling (not max or average) is not supported in frontend CoreML.'
+        raise tvm.error.OpNotImplemented(msg)

 def SoftmaxLayerParams(op, insym, symtab):
     return _sym.softmax(_sym.flatten(insym))

@@ -229,7 +233,8 @@ def ConcatLayerParams(op, insyms, symtab):
     if not isinstance(insyms, list):
         insyms = [insyms]
     if op.sequenceConcat:
-        raise NotImplementedError("Sequence Concat not supported")
+        raise tvm.error.OpNotImplemented(
+            'Operator Sequence Concat is not supported in frontend CoreML.')
     ret = _sym.concatenate(*insyms, axis=1)
     return ret

@@ -243,14 +248,16 @@ def PaddingLayerParams(op, insym, symtab):
     if op.WhichOneof('PaddingType') == 'constant':
         constant = op.constant
         if constant.value != 0:
-            raise NotImplementedError("Padding value {} not supported.".format(constant.value))
+            msg = 'Value {} in attribute "padding value" of operator Padding is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(constant.value))
         padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
         padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
         for i, j in zip(padding, padding2):
             assert i == j
         symtab.set_padding(padding)
     else:
-        raise NotImplementedError("Only constant padding is supported now.")
+        raise tvm.error.OpNotImplemented(
+            'Operator "non-constant padding" is not supported in frontend CoreML.')
     return insym

 def PermuteLayerParams(op, insym, symtab):

@@ -259,8 +266,8 @@ def PermuteLayerParams(op, insym, symtab):
 def UpsampleLayerParams(op, insym, symtab):
     if op.scalingFactor[0] != op.scalingFactor[1]:
-        raise NotImplementedError("Upsampling only supported with same \
-            height and width scaling factor.")
+        raise tvm.error.OpAttributeInvalid(
+            'Height and width scaling factors of Upsample operator must be equal.')
     interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
     return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)

@@ -341,7 +348,8 @@ def coreml_op_to_nnvm(op, inname, outname, symtab):
     """
     classname = type(op).__name__
     if classname not in _convert_map:
-        raise NotImplementedError("%s is not supported" % (classname))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend CoreML.'.format(classname))
     if isinstance(inname, string_types):
         insym = symtab.get_var(inname)
     else:
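Because every frontend in this commit now raises subclasses of tvm.error (OpNotImplemented, OpAttributeInvalid, OpAttributeRequired, OpAttributeUnimplemented), callers can handle unsupported models uniformly instead of catching a mix of NotImplementedError, RuntimeError, TypeError, and ValueError. A hedged sketch of what that looks like on the caller side; the from_coreml call and the model object are stand-ins, not part of this diff:

import tvm
from nnvm.frontend import from_coreml  # any of the frontends touched by this commit

def try_import(model):
    try:
        return from_coreml(model)
    except tvm.error.OpNotImplemented as err:
        print('model uses an unsupported operator:', err)
    except tvm.error.OpAttributeInvalid as err:
        print('model has an operator attribute TVM cannot handle:', err)
    return None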
nnvm/python/nnvm/frontend/darknet.py

@@ -6,6 +6,7 @@ from __future__ import absolute_import as _abs
 import numpy as np
 import tvm
 from .. import symbol as _sym
+from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str

 class LAYERTYPE(object):
     """Darknet LAYERTYPE Class constant."""

@@ -57,45 +58,12 @@ class ACTIVATION(object):
 __all__ = ['from_darknet']

-def _darknet_get_nnvm_op(op_name):
-    """Get the nnvm operation from opname, raise error if not supported."""
-    op = getattr(_sym, op_name)
-    if not op:
-        raise RuntimeError("Not to map op_name {} to nnvm.sym".format(op_name))
-    return op
-
-def _darknet_required_attr(attr, key):
-    """Check the attribute exists and return if exists, if not return error."""
-    assert isinstance(attr, dict)
-    if key not in attr:
-        raise AttributeError("Required attribute {} not found.".format(key))
-    return attr[key]
-
-def _darknet_raise_not_supported(attr, op='nnvm'):
-    """Raise error if any operation is not supported."""
-    err = "{} is not supported in {}.".format(attr, op)
-    raise NotImplementedError(err)
-
-def _darknet_warn_not_used(attr, op='nnvm'):
-    """Raise warning if any operation not supported."""
-    import warnings
-    err = "{} is ignored in {}.".format(attr, op)
-    warnings.warn(err)
-
-def _darknet_parse_tshape(tshape):
-    """Parse tshape in string."""
-    return [int(x.strip()) for x in tshape.strip('()').split(',')]
-
-def _darknet_parse_bool_str(attr, key, default='False'):
-    """Parse bool string to boolean."""
-    return attr.get(key, default).strip().lower() in \
-        ['true', '1', 't', 'y', 'yes']
-
 def _darknet_maxpooling(inputs, attrs):
     """Process the max pool 2d operation."""
-    kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'maxpool'))
     if len(kernel) != 1:
-        _darknet_raise_not_supported('non-2d kernel', 'pool_2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels for Max Pooling are not supported in frontend Darknet.')
     op_name, new_attrs = 'max_pool2d', {}
     strides = int(attrs.get('stride', (1, 1)))

@@ -107,13 +75,14 @@ def _darknet_maxpooling(inputs, attrs):
     if extra_pad_size:
         pad_width = ((0, 0), (0, 0), (0, extra_pad_size), (0, extra_pad_size))
         inputs = _sym.pad(*inputs, pad_width=pad_width, pad_value=np.finfo(np.float32).min)
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_avgpooling(inputs, attrs):
     """Process the average pool 2d operation."""
-    kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'avgpool'))
     if len(kernel) != 1:
-        _darknet_raise_not_supported('non-2d kernel', 'pool_2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels for Average Pooling are not supported in frontend Darknet.')
     op_name, new_attrs = 'avg_pool2d', {}
     strides = int(attrs.get('stride', (1, 1)))

@@ -122,7 +91,7 @@ def _darknet_avgpooling(inputs, attrs):
     new_attrs['strides'] = str((strides, strides))
     new_attrs['padding'] = str((pads, pads))
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_batch_norm(inputs, attrs):
     """Process the batchnormalization operation."""

@@ -131,21 +100,23 @@ def _darknet_batch_norm(inputs, attrs):
     new_attrs['epsilon'] = attrs.get('eps', 0.000001)
     new_attrs['center'] = True
     new_attrs['scale'] = True
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_conv2d(inputs, attrs):
     """Process the convolution 2d operation."""
-    kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
     if len(kernel) != 1:
-        _darknet_raise_not_supported('non 2d kernel', 'conv2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels for Conv2D are unsupported in frontend Darknet.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        _darknet_raise_not_supported('layout: ' + layout, 'conv2d')
+        raise tvm.error.OpAttributeInvalid(
+            'Value {} in attribute "layout" of operator Conv2D is not valid.'.format(layout))
     strides = int(attrs.get('stride', (1, 1)))
     pads = int(attrs.get('pad', (0, 0)))
     op_name, new_attrs = 'conv2d', {}
-    new_attrs['channels'] = _darknet_required_attr(attrs, 'num_filter')
+    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d')
     new_attrs['kernel_size'] = [kernel[0], kernel[0]]
     new_attrs['strides'] = (strides, strides)
     new_attrs['padding'] = (pads, pads)

@@ -157,13 +128,13 @@ def _darknet_conv2d(inputs, attrs):
     else:
         new_attrs['use_bias'] = True
     out_name = {}
-    sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
+    sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     out_name[0] = sym.list_output_names()[0].replace('_output', '')
     if attrs.get('use_batchNorm', False) is True:
         op_name, new_attrs = 'batch_norm', {}
         new_attrs['epsilon'] = 0.000001
-        sym = _darknet_get_nnvm_op(op_name)(*sym, **new_attrs)
+        sym = get_nnvm_op(op_name)(*sym, **new_attrs)
         out_name[1] = sym.list_output_names()[0].replace('_output', '')
     if 'activation' in attrs:
         new_attrs = {}

@@ -176,15 +147,18 @@ def _darknet_conv2d(inputs, attrs):
 def _darknet_conv2d_transpose(inputs, attrs):
     """Process the convolution 2d transpose operation."""
     if 'target_shape' in attrs:
-        _darknet_raise_not_supported('target_shape', 'conv2d_transpose')
-    kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
     if len(kernel) != 2:
-        _darknet_raise_not_supported('non-2d kernel', 'conv2d_transpose')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported in operator Conv2D-transpose.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        _darknet_raise_not_supported('layout: ' + layout, 'conv2d_transpose')
+        msg = 'Value {} in attribute "layout" of operator Conv2D-transpose is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(layout))
     op_name, new_attrs = 'conv2d_transpose', {}
-    new_attrs['channels'] = _darknet_required_attr(attrs, 'num_filter')
+    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
     new_attrs['kernel_size'] = kernel
     new_attrs['strides'] = attrs.get('stride', (1, 1))
     new_attrs['output_padding'] = attrs.get('adj', (0, 0))

@@ -192,8 +166,8 @@ def _darknet_conv2d_transpose(inputs, attrs):
     new_attrs['dilation'] = attrs.get('dilate', (1, 1))
     new_attrs['groups'] = attrs.get('num_group', 1)
     new_attrs['layout'] = layout
-    new_attrs['use_bias'] = not _darknet_parse_bool_str(attrs, 'no_bias')
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_shortcut(inputs, attrs):
     """Process the shortcut operation."""

@@ -219,7 +193,7 @@ def _darknet_shortcut(inputs, attrs):
                            pad_value=0.)
     new_inputs = _as_list([input_0, input_1])
-    sym = _darknet_get_nnvm_op(op_name)(*new_inputs, **new_attrs)
+    sym = get_nnvm_op(op_name)(*new_inputs, **new_attrs)
     out_name = sym.list_output_names()[0].replace('_output', '')
     if 'activation' in attrs:
         new_attrs['activation'] = attrs['activation']

@@ -229,17 +203,17 @@ def _darknet_shortcut(inputs, attrs):
 def _darknet_dense(inputs, attrs):
     """Process the dense operation."""
     op_name, new_attrs = 'dense', {}
-    new_attrs['units'] = _darknet_required_attr(attrs, 'num_hidden')
+    new_attrs['units'] = required_attr(attrs, 'num_hidden', 'dense')
     out_name = {}
     new_attrs['use_bias'] = attrs.get('use_bias', False)
     if attrs.get('use_flatten', False) is True:
         inputs[0] = _sym.flatten(inputs[0])
-    sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
+    sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     out_name[0] = sym.list_output_names()[0].replace('_output', '')
     if 'use_batchNorm' in attrs:
         op_name, new_attrs = 'batch_norm', {}
         new_attrs['epsilon'] = 0.000001
-        sym = _darknet_get_nnvm_op(op_name)(*sym, **new_attrs)
+        sym = get_nnvm_op(op_name)(*sym, **new_attrs)
         out_name[1] = sym.list_output_names()[0].replace('_output', '')
     if 'activation' in attrs:
         new_attrs = {}

@@ -251,28 +225,29 @@ def _darknet_dropout(inputs, attrs):
     """Process the dropout operation, its a blank operation."""
     op_name, new_attrs = 'dropout', {}
     new_attrs['rate'] = attrs.get('p', 0.5)
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_reshape(inputs, attrs):
     """Process the reshape operation."""
-    if _darknet_parse_bool_str(attrs, 'reverse'):
-        _darknet_raise_not_supported('reverse', 'reshape')
+    if parse_bool_str(attrs, 'reverse'):
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "reverse" is not supported in operator Reshape.')
     op_name, new_attrs = 'reshape', {}
-    new_attrs['shape'] = _darknet_required_attr(attrs, 'shape')
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_upsampling(inputs, attrs):
     """Process the upsampling operation."""
     op_name, new_attrs = 'upsampling', {}
     new_attrs['scale'] = attrs.get('scale', 1)
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_l2normalize(inputs, attrs):
     """Process the l2 normalization operation."""
     op_name, new_attrs = 'l2_normalize', {}
     new_attrs['eps'] = attrs.get('eps', 0)
     new_attrs['axis'] = attrs.get('axis', 1)
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_softmax_output(inputs, attrs):
     """Process the softmax operation."""

@@ -280,25 +255,25 @@ def _darknet_softmax_output(inputs, attrs):
     if temperature != 1:
         inputs[0] = inputs[0] / float(temperature)
     op_name, new_attrs = 'softmax', {}
-    if _darknet_parse_bool_str(attrs, 'multi_output'):
+    if parse_bool_str(attrs, 'multi_output'):
         new_attrs['axis'] = 1
     if attrs.get('use_flatten', False) is True:
         inputs[0] = _sym.flatten(inputs[0])
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_route(inputs, attrs):
     """Process the route operation, which is equivalent to concat."""
     op_name = 'concatenate'
     new_attrs = {'axis': attrs.get('dim', 1)}
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_reorg(inputs, attrs):
     """Process the reorg operation."""
     op_name, new_attrs = 'yolo_reorg', {}
     if 'stride' in attrs:
         new_attrs = {'stride': attrs.get('stride', 1)}
-    return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None
+    return get_nnvm_op(op_name)(*inputs, **new_attrs), None

 def _darknet_region(inputs, attrs):
     """Process the region operation."""

@@ -344,7 +319,7 @@ def _darknet_yolo(inputs, attrs):
 def _darknet_activations(inputs, attrs):
     """Process the activation function."""
-    act = _darknet_required_attr(attrs, 'activation')
+    act = required_attr(attrs, 'activation', 'activations')
     if ACTIVATION.LOGISTIC == act:
         act_type = 'sigmoid'
     elif ACTIVATION.RELU == act:

@@ -358,22 +333,24 @@ def _darknet_activations(inputs, attrs):
     elif ACTIVATION.ELU == act:
         act_type = 'elu'
     else:
-        _darknet_raise_not_supported('act: ' + act)
+        raise tvm.error.OpNotImplemented(
+            'Operator act: {} is not supported in framework Darknet.'.format(act))

     if act_type in ['relu', 'tanh']:
         op_name, new_attrs = act_type, {}
-        sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     elif act_type in ['leaky_relu']:
         op_name, new_attrs = act_type, {}
         new_attrs['alpha'] = attrs.get('slope', 0.1)
-        sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     elif act_type in ['elu']:
         sym = -1 * _sym.relu(1 - _sym.exp(*inputs)) + _sym.relu(*inputs)
     elif act_type in ['sigmoid']:
         op_name, new_attrs = act_type, {}
-        sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     else:
-        _darknet_raise_not_supported('act_type: ' + act_type)
+        raise tvm.error.OpNotImplemented(
+            'Operator act: {} is not supported in framework Darknet.'.format(act))
     return sym, None

 def _darknet_op_not_support(inputs, attrs):

@@ -436,7 +413,8 @@ def _darknet_convert_symbol(op_name, inputs, attrs):
     if op_name in _DARKNET_CONVERT_MAP:
         sym, out_name = _DARKNET_CONVERT_MAP[op_name](inputs, attrs)
     else:
-        _darknet_raise_not_supported('Operator type ' + str(op_name))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend Darknet.'.format(op_name))
     if out_name is None:
         out_name = sym.list_output_names()[0].replace('_output', '')
     return out_name, sym

@@ -482,8 +460,10 @@ class GraphProto(object):
         if layer.nweights == 0:
             return
-        if (layer.n * layer.c * layer.size * layer.size) != layer.nweights:
-            raise RuntimeError("layer weights size not matching with n c h w")
+        if layer.n * layer.c * layer.size * layer.size != layer.nweights:
+            msg = 'nweights ({}) != n * c * h * w ({}) in operator {}'
+            msg = msg.format(layer.nweights, layer.n * layer.c * layer.size ** 2, opname)
+            raise tvm.error.OpAttributeInvalid(msg)
         shape = (layer.n, layer.c, layer.size, layer.size)
         weights = self._read_memory_buffer(shape, layer.weights)

@@ -663,8 +643,8 @@ class GraphProto(object):
             pass
         else:
-            err = "Darknet layer type {} is not supported in nnvm.".format(layer.type)
-            raise NotImplementedError(err)
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Darknet.'.format(layer.type))
         return attr

@@ -761,7 +741,7 @@ class GraphProto(object):
             op_name, new_attrs = 'elemwise_add', {}
             new_inputs = _as_list([sym, state])
-            state = _darknet_get_nnvm_op(op_name)(*new_inputs, **new_attrs)
+            state = get_nnvm_op(op_name)(*new_inputs, **new_attrs)
             self._outs.append(state)
             output_layer = layer.output_layer

@@ -786,7 +766,7 @@ class GraphProto(object):
             op_name, new_attrs = 'elemwise_add', {}
             new_inputs = _as_list([sym, state])
-            state = _darknet_get_nnvm_op(op_name)(*new_inputs, **new_attrs)
+            state = get_nnvm_op(op_name)(*new_inputs, **new_attrs)
             self._outs.append(state)
             output_layer = layer.output_layer

@@ -797,7 +777,8 @@ class GraphProto(object):
         elif LAYERTYPE.LSTM == layer.type:
             if layer.steps > 1:
-                raise NotImplementedError("Currently support only single step GRU")
+                raise tvm.error.OpAttributeInvalid(
+                    'Number of steps {} of RNN is not valid.'.format(layer.steps))
             op_name_add = 'elemwise_add'
             op_name_mul = 'elemwise_mul'

@@ -819,16 +800,16 @@ class GraphProto(object):
             sym_uo = self._get_darknet_rnn_attrs(layer.uo, input_sym)
             new_inputs = _as_list([sym_wf, sym_uf])
-            add_f = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_f = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             new_inputs = _as_list([sym_wi, sym_ui])
-            add_i = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_i = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             new_inputs = _as_list([sym_wg, sym_ug])
-            add_g = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_g = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             new_inputs = _as_list([sym_wo, sym_uo])
-            add_o = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_o = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             act_attr['activation'] = ACTIVATION.LOGISTIC
             act_f, _ = _darknet_activations(_as_list(add_f), act_attr)

@@ -843,19 +824,19 @@ class GraphProto(object):
             act_o, _ = _darknet_activations(_as_list(add_o), act_attr)
             new_inputs = _as_list([act_i, act_g])
-            mul_t = _darknet_get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
+            mul_t = get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
             new_inputs = _as_list([act_f, c_state])
-            c_state = _darknet_get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
+            c_state = get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
             new_inputs = _as_list([mul_t, c_state])
-            c_state = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            c_state = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             act_attr['activation'] = ACTIVATION.TANH
             h_state, _ = _darknet_activations(_as_list(c_state), act_attr)
             new_inputs = _as_list([act_o, h_state])
-            h_state = _darknet_get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
+            h_state = get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
             self._outs = self._outs + [c_state, h_state]
             sym = h_state
             self._sym_array[layer_num] = sym

@@ -863,7 +844,8 @@ class GraphProto(object):
         elif LAYERTYPE.GRU == layer.type:
             if layer.steps > 1:
-                raise NotImplementedError("Currently support only single step GRU")
+                raise tvm.error.OpAttributeInvalid(
+                    'Number of steps {} is not valid in RNN.'.format(layer.steps))
             op_name_add = 'elemwise_add'
             op_name_mul = 'elemwise_mul'

@@ -881,10 +863,10 @@ class GraphProto(object):
             sym_uh = self._get_darknet_rnn_attrs(layer.uh, input_sym)
             new_inputs = _as_list([sym_uz, sym_wz])
-            add_z = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_z = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             new_inputs = _as_list([sym_ur, sym_wr])
-            add_r = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            add_r = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             act_attr['activation'] = ACTIVATION.LOGISTIC
             act_z, _ = _darknet_activations(_as_list(add_z), act_attr)

@@ -893,12 +875,12 @@ class GraphProto(object):
             act_r, _ = _darknet_activations(_as_list(add_r), act_attr)
             new_inputs = _as_list([act_r, state])
-            forgot = _darknet_get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
+            forgot = get_nnvm_op(op_name_mul)(*new_inputs, **attrs)
             sym_wh = self._get_darknet_rnn_attrs(layer.wh, forgot)
             new_inputs = _as_list([sym_uh, sym_wh])
-            h_state = _darknet_get_nnvm_op(op_name_add)(*new_inputs, **attrs)
+            h_state = get_nnvm_op(op_name_add)(*new_inputs, **attrs)
             if layer.tanh == 1:
                 act_attr['activation'] = ACTIVATION.TANH
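The GraphProto hunk above replaces the bare RuntimeError on a weight-count mismatch with an OpAttributeInvalid whose message reports both sides of the nweights == n * c * size * size check. A tiny worked sketch of that validation, with made-up layer numbers; the Layer container below is an illustrative stand-in, not the Darknet struct the frontend actually receives:

import tvm
from collections import namedtuple

Layer = namedtuple('Layer', 'n c size nweights')  # stand-in for the Darknet layer struct
layer = Layer(n=32, c=16, size=3, nweights=4000)  # 32 * 16 * 3 * 3 = 4608, so this layer is invalid

if layer.n * layer.c * layer.size * layer.size != layer.nweights:
    msg = 'nweights ({}) != n * c * h * w ({}) in operator {}'
    msg = msg.format(layer.nweights, layer.n * layer.c * layer.size ** 2, 'conv2d')
    raise tvm.error.OpAttributeInvalid(msg)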
nnvm/python/nnvm/frontend/keras.py

@@ -74,7 +74,8 @@ def _convert_activation(insym, keras_layer, _):
     if act_type == 'hard_sigmoid':
         transformX = (0.2 * insym) + 0.5
         return _sym.clip(transformX, a_min=0, a_max=1)
-    raise TypeError("Unsupported activation type : {}".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend Keras.'.format(act_type))

 def _convert_advanced_activation(insym, keras_layer, symtab):

@@ -100,7 +101,8 @@ def _convert_advanced_activation(insym, keras_layer, symtab):
         theta = keras_layer.theta if hasattr(keras_layer, "theta") else 1.0
         theta_tensor = _sym.full_like(insym[0], fill_value=float(theta))
         return _sym.elemwise_mul(insym[0], _sym.greater(insym[0], theta_tensor, out_type="float32"))
-    raise TypeError("Unsupported advanced activation type : {}".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend Keras.'.format(act_type))

 def _convert_merge(insym, keras_layer, _):

@@ -113,12 +115,9 @@ def _convert_merge(insym, keras_layer, _):
             ret = _sym.elemwise_sub(ret, insym[i])
         elif merge_type == 'Multiply':
             ret = _sym.elemwise_mul(ret, insym[i])
-        elif merge_type == 'Average':
-            raise NotImplementedError('Average merge not implemented')
-        elif merge_type == 'Maximum':
-            raise NotImplementedError('Maximum merge not implemented')
         else:
-            raise TypeError("Unsupported merge type : {}".format(merge_type))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} Merge is not supported in frontend Keras.'.format(merge_type))
     return ret

@@ -135,7 +134,8 @@ def _convert_dense(insym, keras_layer, symtab):
     if input_dim > 2:
         input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
         if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
-            raise ValueError("Cannot flatten the inputs with shape.", input_shape, " for dense.")
+            msg = 'Value {} in attribute "input_shape" of operator Dense is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(input_shape))
         insym = _sym.squeeze(insym, axis=0)
     out = _sym.dense(data=insym, **params)
     # defuse activation

@@ -199,7 +199,8 @@ def _convert_convolution(insym, keras_layer, symtab):
         else:
             insym = _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        msg = 'Value {} in attribute "padding" of operator Convolution is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(keras_layer.padding))
     if is_deconv:
         out = _sym.conv2d_transpose(data=insym, **params)
     else:

@@ -240,7 +241,8 @@ def _convert_separable_convolution(insym, keras_layer, symtab):
         insym = _sym.pad(data=insym, pad_width=(
             (0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        msg = 'Value {} in attribute "padding" of operator Separable Convolution is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(keras_layer.padding))
     depthconv = _sym.conv2d(data=insym, **params0)
     # pointwise conv
     weight1 = weightList[1].transpose([3, 2, 0, 1])

@@ -294,13 +296,15 @@ def _convert_pooling(insym, keras_layer, symtab):
         pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
         params['padding'] = [pad_t, pad_l, pad_b, pad_r]
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        msg = 'Value {} in attribute "padding" of operator Pooling is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(keras_layer.padding))
     if pool_type == 'MaxPooling2D':
         return _sym.max_pool2d(insym, **params)
     if pool_type == 'AveragePooling2D':
         # TODO: in keras, padded zeros are not calculated
         return _sym.avg_pool2d(insym, **params)
-    raise TypeError("Unsupported pooling type : {}".format(keras_layer))
+    msg = 'Value {} in attribute "padding" of operator Pooling is not valid.'
+    raise tvm.error.OpAttributeInvalid(msg.format(keras_layer.padding))

 def _convert_upsample(insym, keras_layer, _):

@@ -312,30 +316,30 @@ def _convert_upsample(insym, keras_layer, _):
     elif upsample_type == "UpSampling2D":
         h, w = keras_layer.size
         if h != w:
-            raise TypeError("Unsupported upsampling type with different axes size : {}"
-                            .format(keras_layer.size))
+            raise tvm.error.OpAttributeInvalid(
+                'Upsample height ({}) must equal width ({})'.format(h, w))
         params = {'scale': h}
     elif upsample_type == "UpSampling3D":
         h, w, d = keras_layer.size
         if h != w or w != d:
-            raise TypeError("Unsupported upsampling type with different axes size : {}"
-                            .format(keras_layer.size))
+            raise tvm.error.OpAttributeInvalid(
+                'Upsample height ({}), width ({}), and depth ({}) must be equal.'.format(h, w, d))
         params = {'scale': h}
     else:
-        raise TypeError("Unsupported upsampling type : {}".format(upsample_type))
+        msg = 'Operator {} is not supported in frontend Keras.'
+        raise tvm.error.OpNotImplemented(msg.format(upsample_type))
     return _sym.upsampling(insym, **params)

 def _convert_cropping(insym, keras_layer, _):
     _check_data_format(keras_layer)
     crop_type = type(keras_layer).__name__
-    if crop_type == "Cropping1D":
-        raise NotImplementedError("Cropping1D not implemented")
-    elif crop_type == "Cropping2D":
+    if crop_type == "Cropping2D":
         (_, in_h, in_w, _) = keras_layer.input_shape
         ((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
     else:
-        raise TypeError("Unrecognized cropping type : {}".format(crop_type))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend Keras.'.format(crop_type))
     int32_max = np.iinfo(np.int32).max
     return _sym.strided_slice(insym, begin=[0, 0, crop_t, crop_l],
                               end=[int32_max, int32_max, in_h-crop_b, in_w-crop_r])

@@ -379,13 +383,13 @@ def _convert_padding(insym, keras_layer, _):
                 top, bottom = padding[0]
                 left, right = padding[1]
             else:
-                raise ValueError("Unrecognized padding option: {}".format(str(padding)))
+                msg = 'Value {} in attribute "padding" of operator {} is not valid.'
+                raise tvm.error.OpAttributeInvalid(msg.format(str(padding), padding_type))
         else:
-            raise ValueError("Unrecognized padding option: {}".format(str(padding)))
-    elif padding_type == 'ZeroPadding1D':
-        raise NotImplementedError("ZeroPadding1D not implemented")
+            msg = 'Value {} in attribute "padding" of operator {} is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(str(padding), padding_type))
     else:
-        raise ValueError("Unrecognized padding type: {}".format(padding_type))
+        raise tvm.error.OpNotImplemented('Operator {} is not supported in frontend Keras.')
     return _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))

@@ -592,8 +596,10 @@ _convert_map = {
 def _check_unsupported_layers(model):
     for layer in model.layers:
-        if type(layer).__name__ not in _convert_map:
-            raise ValueError("Keras layer {} not supported.".format(type(layer).__name__))
+        op_name = type(layer).__name__
+        if op_name not in _convert_map:
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Keras.'.format(op_name))

 def _as_list(arr):
     """Force being a list, ignore if already is."""

@@ -618,9 +624,11 @@ def keras_op_to_nnvm(insym, keras_layer, outname, symtab):
     symtab : nnvm.frontend.common.SymbolTable
         The global symbol table to be updated
     """
-    if type(keras_layer).__name__ not in _convert_map:
-        raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__)))
-    outs = _convert_map[type(keras_layer).__name__](insym, keras_layer, symtab)
+    op_name = type(keras_layer).__name__
+    if op_name not in _convert_map:
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend Keras.'.format(op_name))
+    outs = _convert_map[op_name](insym, keras_layer, symtab)
     outs = _as_list(outs)
     for t_idx, out in enumerate(outs):
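The last two Keras hunks show the dictionary-dispatch pattern the frontends share: the layer class name is looked up in _convert_map, and a miss now raises tvm.error.OpNotImplemented instead of ValueError or NotImplementedError. A hedged, self-contained sketch of that pattern; the converter table and signature below are toy stand-ins, not the real _convert_map or the frontend's full function:

import tvm

# Toy converter table keyed by layer class name, mirroring _convert_map.
_convert_map = {
    'Dense': lambda insym, layer, symtab: ('dense', insym),
}

def keras_op_to_nnvm(insym, keras_layer, symtab):
    op_name = type(keras_layer).__name__
    if op_name not in _convert_map:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Keras.'.format(op_name))
    return _convert_map[op_name](insym, keras_layer, symtab)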
nnvm/python/nnvm/frontend/mxnet.py
View file @ 53511bf1
...
@@ -4,51 +4,25 @@ from __future__ import absolute_import as _abs
 import json
 import tvm
 from .. import symbol as _sym
+from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str

 __all__ = ['from_mxnet']

-def _get_nnvm_op(op_name):
-    op = getattr(_sym, op_name)
-    if not op:
-        raise RuntimeError("Unable to map op_name {} to nnvm.sym".format(op_name))
-    return op
-
-def _required_attr(attr, key):
-    assert isinstance(attr, dict)
-    if key not in attr:
-        raise AttributeError("Required attribute {} not found.".format(key))
-    return attr[key]
-
-def _raise_not_supported(attr, op='nnvm'):
-    err = "{} is not supported in {}.".format(attr, op)
-    raise NotImplementedError(err)
-
-def _warn_not_used(attr, op='nnvm'):
-    import warnings
-    err = "{} is ignored in {}.".format(attr, op)
-    warnings.warn(err)
-
-def _parse_tshape(tshape):
-    """Parse tshape in string."""
-    return [int(x.strip()) for x in tshape.strip('()').split(',')]
-
-def _parse_bool_str(attr, key, default='False'):
-    """Parse bool string to boolean."""
-    return attr.get(key, default).strip().lower() in ['true', '1', 't', 'y', 'yes']
-
 def _rename(new_name):
     def impl(inputs, attrs):
-        return _get_nnvm_op(new_name)(*inputs, **attrs)
+        return get_nnvm_op(new_name)(*inputs, **attrs)
     return impl

 def _pooling(inputs, attrs):
-    kernel = _parse_tshape(_required_attr(attrs, 'kernel'))
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'pooling'))
     if len(kernel) != 2:
-        _raise_not_supported('non-2d kernel', 'pool_2d')
-    global_pool = 'global' if _parse_bool_str(attrs, 'global_pool') else ''
-    pool_type = _required_attr(attrs, 'pool_type')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported for Pool2D.')
+    global_pool = 'global' if parse_bool_str(attrs, 'global_pool') else ''
+    pool_type = required_attr(attrs, 'pool_type', 'pooling')
     if pool_type not in ['avg', 'max']:
-        _raise_not_supported('non-avg/max', 'pool2d')
+        raise tvm.error.OpNotImplemented(
+            'Only max and average pooling are supported in frontend MXNet.')
     op_name, new_attrs = '_'.join([global_pool, pool_type, 'pool2d']).strip('_'), {}
     # new_attrs['layout'] = 'NCHW'
     if not global_pool:
...
@@ -58,42 +32,47 @@ def _pooling(inputs, attrs):
         new_attrs['ceil_mode'] = (attrs.get('pooling_convention', 'valid') == 'full')
         if pool_type == 'avg':
             new_attrs['count_include_pad'] = attrs.get('count_include_pad', True)
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _batch_norm(inputs, attrs):
-    if _parse_bool_str(attrs, 'output_mean_var'):
-        _raise_not_supported('output_mean_var', 'batch_norm')
-    # if _parse_bool_str(attrs, 'fix_gamma'):
+    if parse_bool_str(attrs, 'output_mean_var'):
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "output_mean_var" is not supported in operator batch_norm.')
+    # if parse_bool_str(attrs, 'fix_gamma'):
     #     _warn_not_used('fix_gamma', 'batch_norm')
-    if _parse_bool_str(attrs, 'use_global_stats'):
-        _warn_not_used('use_global_stats', 'batch_norm')
-    # if _parse_bool_str(attrs, 'momentum'):
+    if parse_bool_str(attrs, 'use_global_stats'):
+        from warnings import warn
+        warn('Attribute "use_global_stats" is ignored in operator batch_norm.')
+    # if parse_bool_str(attrs, 'momentum'):
     #     _warn_not_used('momentum', 'batch_norm')
     op_name, new_attrs = 'batch_norm', {}
     new_attrs['axis'] = attrs.get('axis', 1)
     new_attrs['epsilon'] = attrs.get('eps', 0.001)
     new_attrs['center'] = True
-    new_attrs['scale'] = not _parse_bool_str(attrs, 'fix_gamma', default="False")
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['scale'] = not parse_bool_str(attrs, 'fix_gamma', default="False")
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _concat(inputs, attrs):
     op_name = 'concatenate'
     new_attrs = {'axis': attrs.get('dim', 1)}
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _conv2d(inputs, attrs):
-    kernel = _parse_tshape(_required_attr(attrs, 'kernel'))
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
     if len(kernel) != 2:
-        _raise_not_supported('non 2d kernel', 'conv2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported for operator Conv2D.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        _raise_not_supported('layout: ' + layout, 'conv2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Layout {} is not supported in operator Conv2D.'.format(layout))
     if 'kernel_layout' in attrs:
         kernel_layout = attrs['kernel_layout']
     else:
         kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
     op_name, new_attrs = 'conv2d', {}
-    new_attrs['channels'] = _required_attr(attrs, 'num_filter')
+    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d')
     new_attrs['kernel_size'] = kernel
     new_attrs['strides'] = attrs.get('stride', (1, 1))
     new_attrs['padding'] = attrs.get('pad', (0, 0))
...
@@ -102,23 +81,26 @@ def _conv2d(inputs, attrs):
     new_attrs['layout'] = layout
     new_attrs['kernel_layout'] = kernel_layout
     new_attrs['use_bias'] = attrs.get('no_bias', 'False').strip() == 'False'
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _conv2d_transpose(inputs, attrs):
     if 'target_shape' in attrs:
-        _raise_not_supported('target_shape', 'conv2d_transpose')
-    kernel = _parse_tshape(_required_attr(attrs, 'kernel'))
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
+    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
     if len(kernel) != 2:
-        _raise_not_supported('non-2d kernel', 'conv2d_transpose')
+        raise tvm.error.OpAttributeInvalid(
+            'Non-2D kernels are not supported in Conv2D-transpose.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        _raise_not_supported('layout: ' + layout, 'conv2d_transpose')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Layout {} is not supported in operator Conv2D-transpose.')
     if 'kernel_layout' in attrs:
         kernel_layout = attrs['kernel_layout']
     else:
         kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
     op_name, new_attrs = 'conv2d_transpose', {}
-    new_attrs['channels'] = _required_attr(attrs, 'num_filter')
+    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
     new_attrs['kernel_size'] = kernel
     new_attrs['strides'] = attrs.get('stride', (1, 1))
     new_attrs['output_padding'] = attrs.get('adj', (0, 0))
...
@@ -127,67 +109,70 @@ def _conv2d_transpose(inputs, attrs):
     new_attrs['groups'] = attrs.get('num_group', 1)
     new_attrs['layout'] = layout
     new_attrs['kernel_layout'] = kernel_layout
-    new_attrs['use_bias'] = not _parse_bool_str(attrs, 'no_bias')
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _dense(inputs, attrs):
     import mxnet as mx
     op_name, new_attrs = 'dense', {}
-    new_attrs['units'] = _required_attr(attrs, 'num_hidden')
-    new_attrs['use_bias'] = not _parse_bool_str(attrs, 'no_bias')
+    new_attrs['units'] = required_attr(attrs, 'num_hidden', 'dense')
+    new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
     try:
         _ = mx.sym.FullyConnected(mx.sym.var('x'), num_hidden=1, flatten=True)
         has_flatten = True
     except mx.base.MXNetError:
         # no flatten attribute in old mxnet
         has_flatten = False
-    use_flatten = _parse_bool_str(attrs, 'flatten', 'True')
+    use_flatten = parse_bool_str(attrs, 'flatten', 'True')
     if has_flatten and use_flatten:
         inputs[0] = _sym.flatten(inputs[0])
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _dropout(inputs, attrs):
     op_name, new_attrs = 'dropout', {}
     new_attrs['rate'] = attrs.get('p', 0.5)
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _leaky_relu(inputs, attrs):
-    act_type = _required_attr(attrs, 'act_type')
+    act_type = required_attr(attrs, 'act_type', 'leaky_relu')
     if act_type in ['leaky', 'prelu']:
         op_name, new_attrs = act_type, {}
         if act_type == 'leaky':
             new_attrs['alpha'] = attrs.get('slope', 0.25)
-        sym = _get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     elif act_type == 'elu':
         slope = attrs.get('slope', 0.25)
         sym = -slope * _sym.relu(1 - _sym.exp(*inputs)) + _sym.relu(*inputs)
     elif act_type == 'rrelu':
-        lower_bound = float(_required_attr(attrs, 'lower_bound'))
-        upper_bound = float(_required_attr(attrs, 'upper_bound'))
+        lower_bound = float(required_attr(attrs, 'lower_bound', 'leaky_relu'))
+        upper_bound = float(required_attr(attrs, 'upper_bound', 'leaky_relu'))
         slope = (lower_bound + upper_bound) / 2.0
         op_name, new_attrs = 'leaky_relu', {'alpha': str(slope)}
-        sym = _get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     else:
-        _raise_not_supported('act_type: ' + act_type)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend MXNet.'.format(act_type))
     return sym

 def _activations(inputs, attrs):
-    act_type = _required_attr(attrs, 'act_type')
+    act_type = required_attr(attrs, 'act_type', 'activations')
     if act_type in ['relu', 'sigmoid', 'tanh']:
         op_name, new_attrs = act_type, {}
-        sym = _get_nnvm_op(op_name)(*inputs, **new_attrs)
+        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     elif act_type == 'softrelu':
         sym = _sym.log((1 + _sym.exp(*inputs)))
     else:
-        _raise_not_supported('act_type: ' + act_type)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend MXNet.'.format(act_type))
     return sym

 def _reshape(inputs, attrs):
-    if _parse_bool_str(attrs, 'reverse'):
-        _raise_not_supported('reverse', 'reshape')
+    if parse_bool_str(attrs, 'reverse'):
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "reverse" is not supported in operator Reshape.')
     op_name, new_attrs = 'reshape', {}
-    new_attrs['shape'] = _required_attr(attrs, 'shape')
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _slice(inputs, attrs):
     begin = attrs.get('begin', None)
...
@@ -200,60 +185,60 @@ def _slice(inputs, attrs):
     new_attrs = {'begin': begin, 'end': end}
     if stride is not None:
         new_attrs['stride'] = stride
-    return _get_nnvm_op('strided_slice')(inputs[0], **new_attrs)
+    return get_nnvm_op('strided_slice')(inputs[0], **new_attrs)

 def _split(inputs, attrs):
     op_name, new_attrs = 'split', {}
     axis = attrs.get('axis', 1)
-    new_attrs['indices_or_sections'] = _required_attr(attrs, 'num_outputs')
+    new_attrs['indices_or_sections'] = required_attr(attrs, 'num_outputs', 'split')
     new_attrs['axis'] = axis
-    outputs = _get_nnvm_op(op_name)(*inputs, **new_attrs)
-    if _parse_bool_str(attrs, 'squeeze_axis'):
+    outputs = get_nnvm_op(op_name)(*inputs, **new_attrs)
+    if parse_bool_str(attrs, 'squeeze_axis'):
         squeeze_attrs = {'axis': axis}
-        outputs = _sym.Group([_get_nnvm_op('squeeze')(o, **squeeze_attrs) for o in outputs])
+        outputs = _sym.Group([get_nnvm_op('squeeze')(o, **squeeze_attrs) for o in outputs])
     return outputs

 def _softmax_activation(inputs, attrs):
     op_name, new_attrs = 'softmax', {}
     mode = attrs.get('mode', 'instance')
     new_attrs['axis'] = 0 if mode == 'instance' else 1
-    return _get_nnvm_op(op_name)(inputs[0], **new_attrs)
+    return get_nnvm_op(op_name)(inputs[0], **new_attrs)

 def _softmax_output(inputs, attrs):
     op_name, new_attrs = 'softmax', {}
-    if _parse_bool_str(attrs, 'multi_output'):
+    if parse_bool_str(attrs, 'multi_output'):
         new_attrs['axis'] = 1
-    return _get_nnvm_op(op_name)(inputs[0], **new_attrs)
+    return get_nnvm_op(op_name)(inputs[0], **new_attrs)

 def _upsampling(inputs, attrs):
     scale = attrs.get('scale')
     new_attrs = {'scale': int(scale)}
-    return _get_nnvm_op('upsampling')(inputs[0], **new_attrs)
+    return get_nnvm_op('upsampling')(inputs[0], **new_attrs)

 def _clip(inputs, attrs):
     op_name, new_attrs = "clip", {}
-    new_attrs['a_min'] = _required_attr(attrs, 'a_min')
-    new_attrs['a_max'] = _required_attr(attrs, 'a_max')
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['a_min'] = required_attr(attrs, 'a_min', 'clip')
+    new_attrs['a_max'] = required_attr(attrs, 'a_max', 'clip')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _contrib_multibox_detection(inputs, attrs):
-    clip = _parse_bool_str(attrs, 'clip', default='True')
+    clip = parse_bool_str(attrs, 'clip', default='True')
     threshold = attrs.get('threshold') or 0.01
     nms_threshold = attrs.get('nms_threshold') or 0.5
-    force_suppress = _parse_bool_str(attrs, 'force_suppress', default='False')
+    force_suppress = parse_bool_str(attrs, 'force_suppress', default='False')
     variances = tuple([float(x.strip()) for x in attrs.get('variances').strip('()').split(',')]) \
         if attrs.get('variances') is not None else (0.1, 0.1, 0.2, 0.2)
     nms_topk = attrs.get('nms_topk') or -1
     new_attrs0 = {'clip': clip, 'threshold': float(threshold), 'variances': variances}
     new_attrs1 = {'return_indices': False, 'iou_threshold': float(nms_threshold),
                   'force_suppress': force_suppress, 'top_k': int(nms_topk)}
-    data, valid_count = _get_nnvm_op('multibox_transform_loc')(inputs[0], inputs[1],
-                                                               inputs[2], **new_attrs0)
-    return _get_nnvm_op('non_max_suppression')(data, valid_count, **new_attrs1)
+    data, valid_count = get_nnvm_op('multibox_transform_loc')(inputs[0], inputs[1],
+                                                              inputs[2], **new_attrs0)
+    return get_nnvm_op('non_max_suppression')(data, valid_count, **new_attrs1)

 def _elemwise_sum(inputs, _):
     new_attrs = {'num_args': len(inputs)}
-    return _get_nnvm_op('elemwise_sum')(*inputs, **new_attrs)
+    return get_nnvm_op('elemwise_sum')(*inputs, **new_attrs)

 def _crop_like(inputs, attrs):
     new_attrs = {}
...
@@ -261,20 +246,22 @@ def _crop_like(inputs, attrs):
         tuple([float(x.strip()) for x in attrs.get('offsets').strip('()').split(',')]) \
         if attrs.get('offsets') is not None else (0, 0)
     if offsets != (0, 0):
-        raise RuntimeError("Currently only supports offsets to be zero.")
-    center_crop = _parse_bool_str(attrs, 'center_crop', default="False")
+        raise tvm.error.OpAttributeInvalid(
+            'crop_like offsets must equal (0,0).')
+    center_crop = parse_bool_str(attrs, 'center_crop', default="False")
     if center_crop:
-        raise RuntimeError("center crop is not supported.")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Center crop is not supported in operator crop_like.')
     if len(inputs) < 2:
         raise RuntimeError("Only support crop_like pattern.")
     new_attrs["axis"] = [2, 3]
-    return _get_nnvm_op('slice_like')(inputs[0], inputs[1], **new_attrs)
+    return get_nnvm_op('slice_like')(inputs[0], inputs[1], **new_attrs)

 def _expand_dims(inputs, attrs):
     op_name, new_attrs = 'expand_dims', {}
-    new_attrs['axis'] = _required_attr(attrs, 'axis')
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['axis'] = required_attr(attrs, 'axis', 'expand_dims')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _lrn(inputs, attrs):
     op_name, new_attrs = 'lrn', {}
...
@@ -283,36 +270,36 @@ def _lrn(inputs, attrs):
     new_attrs['bias'] = attrs.get('knorm', 2)
     # NCHW format and normalization along channel axis
     new_attrs['axis'] = 1
-    new_attrs['size'] = _required_attr(attrs, 'nsize')
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['size'] = required_attr(attrs, 'nsize', 'lrn')
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _minimum(inputs, attrs):
-    return _get_nnvm_op('broadcast_min')(*inputs, **attrs)
+    return get_nnvm_op('broadcast_min')(*inputs, **attrs)

 def _maximum(inputs, attrs):
-    return _get_nnvm_op('broadcast_max')(*inputs, **attrs)
+    return get_nnvm_op('broadcast_max')(*inputs, **attrs)

 def _ones(_, attrs):
     op_name = 'ones'
-    return _get_nnvm_op(op_name)(**attrs)
+    return get_nnvm_op(op_name)(**attrs)

 def _zeros(_, attrs):
     op_name = 'zeros'
-    return _get_nnvm_op(op_name)(**attrs)
+    return get_nnvm_op(op_name)(**attrs)

 def _argmax(inputs, attrs):
     op_name, new_attrs = 'argmax', {}
     new_attrs['dtype'] = 'float32'
     new_attrs['axis'] = attrs.get('axis', 0)
-    new_attrs['keepdims'] = _parse_bool_str(attrs, 'keepdims', default="False")
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 def _argmin(inputs, attrs):
     op_name, new_attrs = 'argmin', {}
     new_attrs['dtype'] = 'float32'
     new_attrs['axis'] = attrs.get('axis', 0)
-    new_attrs['keepdims'] = _parse_bool_str(attrs, 'keepdims', default="False")
-    return _get_nnvm_op(op_name)(*inputs, **new_attrs)
+    new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
+    return get_nnvm_op(op_name)(*inputs, **new_attrs)

 _identity_list = ['__add_scalar__', '__add_symbol__', '__div_scalar__',
                   '__div_symbol__', '__mul_scalar__', '__mul_symbol__',
...
@@ -406,12 +393,13 @@ def _convert_symbol(op_name, inputs, attrs,
     identity_list = identity_list if identity_list else _identity_list
     convert_map = convert_map if convert_map else _convert_map
     if op_name in identity_list:
-        op = _get_nnvm_op(op_name)
+        op = get_nnvm_op(op_name)
         sym = op(*inputs, **attrs)
     elif op_name in convert_map:
         sym = convert_map[op_name](inputs, attrs)
     else:
-        _raise_not_supported('Operator: ' + op_name)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend MXNet.'.format(op_name))
     return sym

 def _as_list(arr):
...
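The module-local helpers removed above (_required_attr, _parse_tshape, _parse_bool_str, ...) move into the shared nnvm.frontend.common module, and the call sites now pass an extra operator name so error messages can say which operator was being converted. The sketch below reconstructs what those shared helpers plausibly look like from the removed local versions and the new call signatures; the actual nnvm/python/nnvm/frontend/common.py may differ in detail, including the exact error messages.

# Sketch of the shared attribute helpers, reconstructed from the removed
# module-local versions; not a verbatim copy of nnvm.frontend.common.
import tvm

def required_attr(attr, key, op_name):
    """Return attr[key] or raise a structured error naming the operator."""
    assert isinstance(attr, dict)
    if key not in attr:
        raise tvm.error.OpAttributeRequired(
            'Required attribute "{}" not found in operator {}.'.format(key, op_name))
    return attr[key]

def parse_tshape(tshape):
    """Parse a shape string such as "(1, 3, 224, 224)" into a list of ints."""
    return [int(x.strip()) for x in tshape.strip('()').split(',')]

def parse_bool_str(attr, key, default='False'):
    """Parse a boolean-valued string attribute."""
    return attr.get(key, default).strip().lower() in ['true', '1', 't', 'y', 'yes']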
nnvm/python/nnvm/frontend/onnx.py
View file @ 53511bf1
...
@@ -397,7 +397,8 @@ class Upsample(OnnxOpConverter):
         elif mode == b'linear':
             method = "BILINEAR"
         else:
-            raise ValueError("Invalid ONNX upsample mode: {}".format(mode))
+            raise tvm.error.OpAttributeInvalid(
+                'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
         return _sym.upsampling(inputs[0], scale=int(scales[-1]), method=method, layout='NCHW')
...
@@ -922,8 +923,8 @@ class GraphProto(object):
             elif op_name in convert_map:
                 sym = convert_map[op_name](inputs, attrs, self._params)
             else:
-                raise NotImplementedError(
-                    "Operator {} not implemented.".format(op_name))
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} is not supported in frontend ONNX.')
         return sym

     def _fix_outputs(self, op_name, outputs):
...
nnvm/python/nnvm/frontend/tensorflow.py
View file @ 53511bf1
...
@@ -68,7 +68,8 @@ def _dimension_picker(prefix, surfix=''):
         kernel = attr['kernel_shape']
         if len(kernel) == 2:
             return prefix + '2d' + surfix
-        raise NotImplementedError("Only 2d kernel supported.")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported for operator {}.'.format(prefix))
     return _impl

 def _dimension_constraint():
...
@@ -129,7 +130,8 @@ def _pooling(name):
             attr['kernel_shape'] = (attr['ksize'][2], attr['ksize'][3])
             attr['strides'] = (attr['strides'][2], attr['strides'][3])
         else:
-            raise TypeError("Unsupported data_format type : {}".format(attr['data_format']))
+            msg = 'Value {} in attribute "data_format" of operator Pooling is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))
         if attr['_target_layout'] == "NCHW" and attr['data_format'] == "NHWC":
             tmp_shape = attr['_input_shapes'][inputs[0]]
...
@@ -158,7 +160,8 @@ def _pooling(name):
             attr['padding'] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]
         else:
-            raise TypeError("Unsupported padding type : {}".format(attr['padding']))
+            msg = 'Value {} in attribute "padding" of operator Pooling is not valid.'
+            raise tvm.error.OpAttributeUnimplemented(msg.format(attr['padding']))
         if name == "avg_pool":
             attr['count_include_pad'] = False
...
@@ -232,7 +235,8 @@ def _conv(opname):
             attr['dilations'] = (attr['dilations'][2], attr['dilations'][3])
             attr['strides'] = (attr['strides'][2], attr['strides'][3])
         else:
-            raise TypeError("Unsupported data format type : {}".format(attr['data_format']))
+            msg = 'Value {} in attribute "data_format" of operator Conv is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))
         if opname == 'depthwise':
...
@@ -276,7 +280,8 @@ def _conv(opname):
             attr['padding'] = [0, 0]
         else:
-            raise TypeError("Unsupported padding type : {}".format(attr['padding']))
+            msg = 'Value {} in attribute "padding" of operator Conv is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))
         if 'kernel_layout' not in attr:
             if opname == 'conv':
...
@@ -432,7 +437,8 @@ def _reshape():
                     op_name="reshape",
                     extras={'shape': tuple(params_new[0].asnumpy().flatten())},
                     ignores=['Tshape'])(inputs, attr)
-            raise RuntimeError("Reshape with dynamic shape input not supported yet.")
+            raise tvm.error.OpAttributeUnimplemented(
+                'Attribute "dynamic shape" of operator Reshape is not supported.')
     return _impl

 def _bias_add():
...
@@ -736,7 +742,8 @@ def _pad(name):
         if padlist_key in params:
             padlist = params.pop(padlist_key).asnumpy()
         else:
-            raise RuntimeError("Required parameter {} not fount.".format(padlist_key))
+            raise tvm.error.OpAttributeRequired(
+                'Required attribute "{}" not found in operator Pad.'.format(padlist_key))
         paddings = tuple([tuple(l) for l in padlist])
         attr['pad_width'] = paddings
         attr['pad_value'] = 0
...
@@ -1188,8 +1195,9 @@ class GraphProto(object):
         missing_operators = self._parse_import_prerequisites(graph)
         if missing_operators:
-            raise NotImplementedError( \
-                "The following operators are not implemented: {}".format(missing_operators))
+            msg = 'The following operators are not supported in frontend TensorFlow: {}'
+            ops = str(list(missing_operators)).strip('[,]')
+            raise tvm.error.OpNotImplemented(msg.format(ops))

         for node in graph.node:
             if node.op == 'Placeholder':
...
@@ -1529,7 +1537,8 @@ class GraphProto(object):
                                              self._params, graph,
                                              convert_map_rnn)
             else:
-                raise NotImplementedError("Operator {} not implemented.".format(op_name))
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} is not supported in frontend TensorFlow.'.format(op_name))
         return sym

     def _fix_extranodes(self, op_name, attr, inputs):
...
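The practical effect of these TensorFlow frontend changes is that an import of a graph containing unsupported operators or attribute values now fails with a specific tvm.error type rather than a generic NotImplementedError or TypeError, so callers can distinguish the failure modes. A minimal caller-side sketch, assuming the usual nnvm.frontend.from_tensorflow entry point and an already-loaded GraphDef in `graph_def`:

# Sketch: catching the structured frontend errors from the caller's side.
import tvm
import nnvm.frontend

def import_graph(graph_def):
    try:
        return nnvm.frontend.from_tensorflow(graph_def)
    except tvm.error.OpNotImplemented as err:
        # Raised when an operator has no converter at all.
        print('Unsupported operator:', err)
    except tvm.error.OpAttributeInvalid as err:
        # Raised when an attribute value (e.g. data_format) is not valid.
        print('Invalid attribute value:', err)
    return None, None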
python/tvm/relay/frontend/caffe2.py
View file @ 53511bf1
 # pylint: disable=import-self, invalid-name, line-too-long, unused-argument
 """Caffe2 frontend"""
 from __future__ import absolute_import as _abs
+import tvm
 from .. import ir_pass
 from .. import expr as _expr
 from .. import op as _op
...
@@ -15,7 +16,8 @@ def dimension_picker(prefix, surfix=''):
         kernel = attr['kernel_shape']
         if len(kernel) == 2:
             return prefix + '2d' + surfix
-        raise NotImplementedError("Only 2d kernel supported.")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported for operator {}2d'.format(prefix))
     return _impl
...
@@ -27,7 +29,8 @@ def revert_caffe2_pad(pads):
     elif len(pads) == 2:
         pass
     else:
-        raise ValueError("Invalid caffe2 type padding: {}".format(pads))
+        raise tvm.error.OpAttributeInvalid(
+            'Number of pads must equal 2 or 4.')
     return pads
...
@@ -103,8 +106,8 @@ class Caffe2OpConverter(object):
         if hasattr(cls, '_impl'):
             return getattr(cls, '_impl')
-        raise NotImplementedError('{} not implemented'.format(
-            cls.__name__))
+        raise tvm.error.OpNotInplemented(
+            'Operator {} is not supported in frontend Caffe2.'.format(cls.__name__))

 _caffe2_internal_args = [
...
@@ -224,8 +227,8 @@ class Concat(Caffe2OpConverter):
                 return 1
             if order == 'NHWC':
                 return 3
-            raise RuntimeError(
-                "Unsupported storage order: {} in caffe2".format(order))
+            raise tvm.error.OpAttributeUnimplemented(
+                'Order {} is not supported in operator Concat.'.format(order))

         return AttrCvt(
             op_name='concatenate',
...
@@ -517,8 +520,8 @@ class Caffe2NetDef(object):
             # Add a sanitizing step to convert all byte strings in args to strings
             func = convert_map[op_type](inputs, args, self._params)
         else:
-            raise NotImplementedError(
-                "Operator {} not implemented.".format(op_type))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Caffe2.'.format(op_type))
         return func
...
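The Caffe2 hunks show the split this commit draws between the new error classes: OpNotImplemented for whole operators the frontend cannot convert, OpAttributeUnimplemented for attribute values the converter does not handle yet, and OpAttributeInvalid for values that are malformed regardless of support. The sketch below illustrates that decision; the checks and the check_pads helper are made up for illustration and are not the real Caffe2 converter logic.

# Sketch only: picking among the error classes introduced in this commit.
import tvm

def check_pads(op_type, pads):
    if op_type not in ('Conv', 'MaxPool', 'AveragePool'):
        # No converter exists for this operator at all.
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(op_type))
    if len(pads) not in (2, 4):
        # The attribute itself is malformed.
        raise tvm.error.OpAttributeInvalid('Number of pads must equal 2 or 4.')
    if any(p < 0 for p in pads):
        # Valid in the source framework, just not handled here yet.
        raise tvm.error.OpAttributeUnimplemented(
            'Negative padding is not supported in operator {}.'.format(op_type))
    return pads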
python/tvm/relay/frontend/coreml.py
View file @ 53511bf1
 # pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
 """CoreML frontend."""
 from __future__ import absolute_import as _abs
+import tvm
 import numpy as np
 from .. import ir_pass
 from .. import expr as _expr
...
@@ -81,7 +82,8 @@ def _BatchnormLayerParams(op, inexpr, etab):
     """Get layer of batchnorm parameter"""
     # this changes the symbol
     if op.instanceNormalization:
-        raise NotImplementedError("instance normalization not implemented")
+        raise tvm.error.OpNotImplemented(
+            'Operator "instance normalization" is not supported in frontend CoreML.')
     else:
         params = {'gamma': etab.new_const(list(op.gamma.floatValue)),
                   'beta': etab.new_const(list(op.beta.floatValue)),
...
@@ -142,7 +144,8 @@ def _ActivationParams(op, inexpr, etab):
         alpha_expr = etab.new_const(alpha)
         beta_expr = etab.new_const(beta)
         return _op.multiply(_op.log(_op.add(_op.exp(inexpr), beta_expr)), alpha_expr)
-    raise NotImplementedError('%s not implemented' % whichActivation)
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend CoreML.'.format(whichActivation))

 def _ScaleLayerParams(op, inexpr, etab):
...
@@ -164,7 +167,8 @@ def _PoolingLayerParams(op, inexpr, etab):
             return _op.nn.global_max_pool2d(inexpr)
         if op.type == 1:
             return _op.nn.global_avg_pool2d(inexpr)
-        raise NotImplementedError("Only max and average pooling implemented")
+        raise tvm.error.OpNotImplemented(
+            'Only Max and Average Pooling are supported in frontend CoreML.')

     else:
         params = {'pool_size': list(op.kernelSize),
...
@@ -184,7 +188,9 @@ def _PoolingLayerParams(op, inexpr, etab):
             params['padding'] = padding
             params['ceil_mode'] = True
         else:
-            raise NotImplementedError("Other convolution padding not implemented")
+            msg = 'PoolingPaddingType {} is not supported in operator Pooling.'
+            op_name = op.WhichOneof('PoolingPaddingType')
+            raise tvm.error.OpAttributeUnimplemented(msg.format(op_name))

         # consume padding layer
         if etab.in_padding:
...
@@ -196,7 +202,8 @@ def _PoolingLayerParams(op, inexpr, etab):
             return _op.nn.max_pool2d(inexpr, **params)
         if op.type == 1:
             return _op.nn.avg_pool2d(inexpr, **params)
-        raise NotImplementedError("Only max and average pooling implemented")
+        raise tvm.error.OpNotImplemented(
+            'Only Max and Average Pooling are supported in CoreML.')

 def _SoftmaxLayerParams(op, inexpr, etab):
...
@@ -239,7 +246,8 @@ def _ConcatLayerParams(op, inexpr, etab):
     if not isinstance(inexpr, list):
         inexpr = [inexpr]
     if op.sequenceConcat:
-        raise NotImplementedError("Sequence Concat not supported")
+        raise tvm.error.OpNotImplemented(
+            'Operator Sequence Concat is not supported in frontend CoreML.')
     ret = _op.concatenate(inexpr, axis=1)
     return ret
...
@@ -255,14 +263,16 @@ def _PaddingLayerParams(op, inexpr, etab):
     if op.WhichOneof('PaddingType') == 'constant':
         constant = op.constant
         if constant.value != 0:
-            raise NotImplementedError("Padding value {} not supported.".format(constant.value))
+            raise tvm.error.OpAttributeUnimplemented(
+                '{} is not supported in operator Padding.'.format(constant.value))
         padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
         padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
         for i, j in zip(padding, padding2):
             assert i == j
         etab.set_padding(padding)
     else:
-        raise NotImplementedError("Only constant padding is supported now.")
+        raise tvm.error.OpNotImplemented(
+            'Non-constant padding is not supported in frontend CoreML.')
     return inexpr
...
@@ -273,8 +283,8 @@ def _PermuteLayerParams(op, inexpr, etab):

 def _UpsampleLayerParams(op, inexpr, etab):
     if op.scalingFactor[0] != op.scalingFactor[1]:
-        raise NotImplementedError("Upsampling only supported with same \
-            height and width scaling factor.")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Upsample height and width must be equal.')
     interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
     return _op.nn.upsampling(inexpr, scale=op.scalingFactor[0], method=interpolationMode)
...
@@ -364,7 +374,8 @@ def coreml_op_to_relay(op, inname, outname, etab):
     """
     classname = type(op).__name__
     if classname not in _convert_map:
-        raise NotImplementedError("%s is not supported" % (classname))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend CoreML.'.format(classname))
     if isinstance(inname, _base.string_types):
         insym = etab.get_expr(inname)
     else:
...
python/tvm/relay/frontend/keras.py
View file @ 53511bf1
...
@@ -2,6 +2,7 @@
 """Keras frontend."""
 from __future__ import absolute_import as _abs
 import sys
+import tvm
 import numpy as np
 from .. import ir_pass
 from .. import expr as _expr
...
@@ -91,7 +92,8 @@ def _convert_activation(inexpr, keras_layer, _):
         x = (_expr.const(0.2, dtype='float32') * inexpr) + _expr.const(0.5, dtype='float32')
         return _op.clip(x, a_min=0., a_max=1.)
-    raise TypeError("Unsupported activation type : {}".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend Keras.'.format(act_type))

 def _convert_advanced_activation(inexpr, keras_layer, etab):
...
@@ -118,7 +120,8 @@ def _convert_advanced_activation(inexpr, keras_layer, etab):
         return _op.multiply(inexpr, _op.greater(inexpr, \
             _expr.const(theta, dtype='float32')).astype('float32'))
-    raise TypeError("Unsupported advanced activation type : {}".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend Keras.'.format(act_type))

 def _convert_merge(inexpr, keras_layer, _):
...
@@ -136,7 +139,8 @@ def _convert_merge(inexpr, keras_layer, _):
             ret = _op.add(ret, inexpr[i])
         ret = ret / _expr.const(len(inexpr), dtype='float32')
     else:
-        raise TypeError("Unsupported merge type : {}".format(merge_type))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend Keras.'.format(merge_type))
     return ret
...
@@ -150,7 +154,8 @@ def _convert_dense(inexpr, keras_layer, etab):
     if input_dim > 2:
         input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
         if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
-            raise ValueError("Cannot flatten the inputs with shape.", input_shape, " for dense.")
+            raise tvm.error.OpAttributeInvalid(
+                'Input shape {} is not valid for operator Dense.'.format(input_shape))
         inexpr = _op.squeeze(inexpr, axis=0)
     out = _op.nn.dense(data=inexpr, **params)
     if keras_layer.use_bias:
...
@@ -214,7 +219,9 @@ def _convert_convolution(inexpr, keras_layer, etab):
         inexpr = _op.nn.pad(data=inexpr, pad_width=(
             (0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        msg = 'Padding with {} is not supported for operator Convolution ' \
+              'in frontend Keras.'
+        raise tvm.error.OpAttributeUnimplemented(msg.format(keras_layer.padding))
     if is_deconv:
         out = _op.nn.conv2d_transpose(data=inexpr, **params)
     else:
...
@@ -260,7 +267,10 @@ def _convert_separable_convolution(inexpr, keras_layer, etab):
         inexpr = _op.nn.pad(data=inexpr, pad_width=(
             (0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        msg = 'Padding with {} is not supported for operator Separable ' \
+              'Convolution in frontend Keras.'
+        raise tvm.error.OpAttributeUnimplemented(msg.format(keras_layer.padding))
     depthconv = _op.nn.conv2d(data=inexpr, **params0)
     # pointwise conv
     weight1 = weightList[1].transpose([3, 2, 0, 1])
...
@@ -313,13 +323,15 @@ def _convert_pooling(inexpr, keras_layer, etab):
         pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
         params['padding'] = [pad_t, pad_l, pad_b, pad_r]
     else:
-        raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
+        raise tvm.error.OpAttributeUnimplemented(
+            'Padding with {} is not supported in operator Pooling.'.format(keras_layer.padding))
     if pool_type == 'MaxPooling2D':
         return _op.nn.max_pool2d(inexpr, **params)
     if pool_type == 'AveragePooling2D':
         params['count_include_pad'] = False
         return _op.nn.avg_pool2d(inexpr, **params)
-    raise TypeError("Unsupported pooling type : {}".format(keras_layer))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported for frontend Keras.'.format(keras_layer))

 def _convert_upsample(inexpr, keras_layer, _):
...
@@ -331,8 +343,8 @@ def _convert_upsample(inexpr, keras_layer, _):
     elif upsample_type == 'UpSampling2D':
         h, w = keras_layer.size
         if h != w:
-            raise TypeError("Unsupported upsampling type with different axes size : {}"
-                            .format(keras_layer.size))
+            raise tvm.error.OpAttributeInvalid(
+                'Height must equal width for operator Upsample.')
         params = {'scale': h}
         if hasattr(keras_layer, 'interpolation'):
...
@@ -345,24 +357,24 @@ def _convert_upsample(inexpr, keras_layer, _):
     elif upsample_type == 'UpSampling3D':
         h, w, d = keras_layer.size
         if h != w or w != d:
-            raise TypeError("Unsupported upsampling type with different axes size : {}"
-                            .format(keras_layer.size))
+            raise tvm.error.OpAttributeInvalid(
+                'Height, width, and depth must all be equal for operator Upsample.')
         params = {'scale': h}
     else:
-        raise TypeError("Unsupported upsampling type : {}".format(upsample_type))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported for frontend Keras.'.format(upsample_type))
     return _op.nn.upsampling(inexpr, **params)

 def _convert_cropping(inexpr, keras_layer, _):
     _check_data_format(keras_layer)
     crop_type = type(keras_layer).__name__
-    if crop_type == 'Cropping1D':
-        raise NotImplementedError("Cropping1D not implemented")
-    elif crop_type == 'Cropping2D':
+    if crop_type == 'Cropping2D':
         (_, in_h, in_w, _) = keras_layer.input_shape
         ((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
     else:
-        raise TypeError("Unrecognized cropping type : {}".format(crop_type))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported for frontend Keras.'.format(crop_type))
     int32_max = np.iinfo(np.int32).max
     return _op.strided_slice(inexpr, begin=[0, 0, crop_t, crop_l], \
         end=[int32_max, int32_max, in_h-crop_b, in_w-crop_r])
...
@@ -407,14 +419,18 @@ def _convert_padding(inexpr, keras_layer, _):
                 top, bottom = padding[0]
                 left, right = padding[1]
             else:
-                raise ValueError("Unrecognized padding option: {}".format(str(padding)))
+                msg = 'Value {} in attribute "padding" of operator Padding ' \
+                      'is not valid.'
+                raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
         else:
-            raise ValueError("Unrecognized padding option: {}".format(str(padding)))
-    elif padding_type == 'ZeroPadding1D':
-        raise NotImplementedError("ZeroPadding1D not implemented")
+            msg = 'Value {} in attribute "padding" of operator Padding is ' \
+                  'not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
     else:
-        raise ValueError("Unrecognized padding type: {}".format(padding_type))
-    return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))
+        msg = 'Operator {} is not supported in frontend Keras.'
+        raise tvm.error.OpNotImplemented(msg.format(padding_type))
+    return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))

 def _convert_concat(inexpr, keras_layer, _):
...
@@ -601,8 +617,10 @@ _convert_map = {

 def _check_unsupported_layers(model):
     for layer in model.layers:
-        if type(layer).__name__ not in _convert_map:
-            raise ValueError("Keras layer {} not supported.".format(type(layer).__name__))
+        op_name = type(layer).__name__
+        if op_name not in _convert_map:
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Keras.'.format(op_name))

 def keras_op_to_relay(inexpr, keras_layer, outname, etab):
...
@@ -622,9 +640,11 @@ def keras_op_to_relay(inexpr, keras_layer, outname, etab):
     etab : relay.frontend.common.ExprTable
         The global expression table to be updated.
     """
-    if type(keras_layer).__name__ not in _convert_map:
-        raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__)))
-    outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab)
+    op_name = type(keras_layer).__name__
+    if op_name not in _convert_map:
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported for frontend Keras.'.format(op_name))
+    outs = _convert_map[op_name](inexpr, keras_layer, etab)
     outs = _as_list(outs)
     for t_idx, out in enumerate(outs):
         name = outname + ":" + str(t_idx)
...
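Because _check_unsupported_layers and the per-layer converters above now raise tvm.error.OpNotImplemented, a caller can treat an unsupported Keras layer as a recoverable condition rather than a crash. A brief sketch, under the assumption that relay.frontend.from_keras is the entry point and `model` is an already-built Keras model with a matching shape dictionary:

# Sketch: surfacing unsupported Keras layers as tvm.error.OpNotImplemented.
import tvm
from tvm import relay

def try_import(model, shape_dict):
    try:
        return relay.frontend.from_keras(model, shape=shape_dict)
    except tvm.error.OpNotImplemented as err:
        # The message names the offending layer class.
        print('Keras import failed:', err)
        return None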
python/tvm/relay/frontend/mxnet.py
View file @
53511bf1
...
@@ -3,10 +3,12 @@
...
@@ -3,10 +3,12 @@
from
__future__
import
absolute_import
as
_abs
from
__future__
import
absolute_import
as
_abs
import
json
import
json
import
tvm
from
..
import
ir_pass
from
..
import
ir_pass
from
..
import
expr
as
_expr
from
..
import
expr
as
_expr
from
..
import
op
as
_op
from
..
import
op
as
_op
from
...
import
nd
as
_nd
from
...
import
nd
as
_nd
from
.common
import
StrAttrsDict
from
.common
import
StrAttrsDict
from
.nnvm_common
import
_rename
,
_binop_scalar
,
_rbinop_scalar
,
_reduce
from
.nnvm_common
import
_rename
,
_binop_scalar
,
_rbinop_scalar
,
_reduce
from
.nnvm_common
import
_arg_reduce
,
_init_op
,
_softmax_op
,
_cast
from
.nnvm_common
import
_arg_reduce
,
_init_op
,
_softmax_op
,
_cast
...
@@ -41,7 +43,8 @@ def _get_channel_axis(layout, op_name):
...
@@ -41,7 +43,8 @@ def _get_channel_axis(layout, op_name):
return
1
return
1
if
layout
==
"NHWC"
:
if
layout
==
"NHWC"
:
return
3
return
3
raise
RuntimeError
(
"layout: {} is not supported in {}"
.
format
(
layout
,
op_name
))
raise
tvm
.
error
.
OpAttributeInvalid
(
'Value {} in attribute "layout" of operator {} is not valid.'
.
format
(
layout
,
op_name
))
def
_mx_activations
(
inputs
,
attrs
):
def
_mx_activations
(
inputs
,
attrs
):
...
@@ -61,7 +64,8 @@ def _mx_activations(inputs, attrs):
             return _op.add(_op.log(_op.add(one, exp_neg_abs_x)),
                            _op.nn.relu(x))
         return _stable_softrelu(inputs[0])
-    raise RuntimeError("Do not support act_type: {}".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported for frontend MXNet.'.format(act_type))


 def _mx_compare(new_op, wrapper):
...
@@ -74,7 +78,8 @@ def _mx_compare(new_op, wrapper):
 def _mx_conv2d(inputs, attrs):
     kernel_size = attrs.get_int_tuple("kernel")
     if len(kernel_size) != 2:
-        raise RuntimeError("non-2d kernel is not supported in conv2d")
+        raise tvm.error.OpAttributeInvalid(
+            'Non-2D kernels are not supported for operator Conv2D.')
     data_layout = attrs.get_str("layout", "NCHW")
     channel_axis = _get_channel_axis(data_layout, "conv2d")
...
@@ -102,10 +107,12 @@ def _mx_conv2d(inputs, attrs):
 def _mx_conv2d_transpose(inputs, attrs):
     if "target_shape" in attrs.attrs:
-        raise RuntimeError("target_shape is not supported in conv2d_transpose")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "target_shape" is not supported for operator Conv2D-transpose.')
     kernel_size = attrs.get_int_tuple("kernel")
     if len(kernel_size) != 2:
-        raise RuntimeError("non-2d kernel is not supported in conv2d")
+        raise tvm.error.OpAttributeInvalid(
+            'Non-2D kernels are not supported for operator Conv2D-transpose.')
     data_layout = attrs.get_str("layout", "NCHW")
     channel_axis = _get_channel_axis(data_layout, "conv2d_transpose")
...
@@ -140,7 +147,8 @@ def _mx_pooling(inputs, attrs):
     def _pool2d(new_op, is_avg):
         kernel_size = attrs.get_int_tuple("kernel")
         if len(kernel_size) != 2:
-            raise RuntimeError("non-2d kernel is not supported in pool2d")
+            raise tvm.error.OpAttributeInvalid(
+                'Only 2D kernels are supported for operator Pool2D.')
         new_attrs = {}
         new_attrs["pool_size"] = kernel_size
         new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
...
@@ -158,7 +166,8 @@ def _mx_pooling(inputs, attrs):
         if global_pool:
             return _op.nn.global_avg_pool2d(inputs[0])
         return _pool2d(_op.nn.avg_pool2d, True)
-    raise RuntimeError("Do not support pool_type:{}".format(pool_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} Pooling is not supported for frontend MXNet.'.format(pool_type.capitalize()))


 def _mx_dropout(inputs, attrs):
...
@@ -172,7 +181,8 @@ def _mx_BlockGrad(inputs, attrs): #pylint: disable=unused-argument
 def _mx_batch_norm(inputs, attrs):
     if attrs.get_bool("output_mean_var", False):
-        raise RuntimeError("batch_norm do not support output_mean_var")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "output_mean_var" is not supported for operator Batch Norm.')
     if attrs.get_bool("use_global_stats", False):
         _warn_not_used("use_global_stats", "batch_norm")
     new_attrs = {}
...
@@ -188,10 +198,18 @@ def _mx_slice(inputs, attrs):
     begin = attrs.get_int_tuple('begin', None)
     end = attrs.get_int_tuple('end', None)
     stride = attrs.get_int_tuple('step', None)
-    if begin is None or end is None:
-        raise RuntimeError("begin and end are required parameters.")
-    if None in begin or None in end:
-        raise RuntimeError("None in begin or end is not supported yet.")
+    if begin is None:
+        raise tvm.error.OpAttributeRequired(
+            'Attribute "begin" not found in operator Slice.')
+    if end is None:
+        raise tvm.error.OpAttributeRequired(
+            'Attribute "end" not found in operator Slice.')
+    if None in begin:
+        raise tvm.error.OpAttributeInvalid(
+            'Value None in attribute "begin" of operator Slice is not valid.')
+    if None in end:
+        raise tvm.error.OpAttributeInvalid(
+            'Value None in attribute "end" of operator Slice is not valid.')
     new_attrs = {'begin': begin, 'end': end}
     if stride is not None:
         new_attrs['strides'] = stride
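One effect of splitting the Slice checks above: a missing attribute now raises tvm.error.OpAttributeRequired, while an unusable value raises tvm.error.OpAttributeInvalid, so the two failure modes can be handled separately. A small, hypothetical illustration (the validate helper is not part of the frontend):

import tvm

def validate(begin, end):
    if begin is None:
        raise tvm.error.OpAttributeRequired(
            'Attribute "begin" not found in operator Slice.')
    if None in end:
        raise tvm.error.OpAttributeInvalid(
            'Value None in attribute "end" of operator Slice is not valid.')

for begin, end in [(None, (3,)), ((0,), (None,))]:
    try:
        validate(begin, end)
    except tvm.error.OpAttributeRequired as err:
        print('missing attribute:', err)
    except tvm.error.OpAttributeInvalid as err:
        print('invalid attribute:', err)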
...
@@ -295,7 +313,8 @@ def _mx_leaky_relu(inputs, attrs):
         upper_bound = attrs.get_float("upper_bound")
         alpha = (lower_bound + upper_bound) / 2.0
         return _op.nn.leaky_relu(inputs[0], alpha=alpha)
-    raise RuntimeError("act_type: {} is not supported".format(act_type))
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported for frontend MXNet.'.format(act_type))


 def _mx_make_power(power):
...
@@ -389,7 +408,9 @@ def _mx_batch_dot(inputs, attrs):
     transpose_a = attrs.get_bool("transpose_a", False)
     transpose_b = attrs.get_bool("transpose_b", False)
     if transpose_a is True:
-        raise RuntimeError("batch_dot: only support transpose_a=False")
+        msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' \
+              'is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(transpose_a))
     if transpose_b is False:
         b = _op.transpose(b, axes=[0, 2, 1])
     return _op.batch_matmul(a, b)
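A side note on the message style used in the batch_dot hunk (and in several later ones): the text is split across source lines with a backslash continuation, and the adjacent string literals are concatenated by Python before .format is applied. A two-line illustration:

msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' \
      'is not valid.'
print(msg.format(True))
# -> Value True in attribute "transpose_a" of operator batch_dot is not valid.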
...
@@ -398,7 +419,8 @@ def _mx_batch_dot(inputs, attrs):
 def _mx_arange(inputs, attrs):
     assert len(inputs) == 0
     if attrs.get_int("repeat", 1) != 1:
-        raise RuntimeError("arange doesn't support repeat")
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "repeat" is not supported in operator arange.')
     new_attrs = {}
     new_attrs["start"] = attrs.get_float("start", 0)
     new_attrs["stop"] = attrs.get_float("stop")
...
@@ -482,15 +504,20 @@ def _mx_box_nms(inputs, attrs):
     in_format = attrs.get_str('in_format', 'corner')
     out_format = attrs.get_str('out_format', 'corner')
     if coord_start != 2:
-        raise RuntimeError('coord_start %s is not supported.' % coord_start)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "coord_start" must equal 2 for operator box_nms.')
     if score_index != 1:
-        raise RuntimeError('score_index %s is not supported.' % score_index)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "score_index" must equal 1 for operator box_nms.')
     if id_index != -1 and int(id_index) != 0:
-        raise RuntimeError('id_index %s is not supported.' % id_index)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "id_index" must equal either -1 or 0 for operator box_nms.')
     if in_format != 'corner':
-        raise RuntimeError('in_format %s is not supported.' % in_format)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "in_format" must equal "corner" for operator box_nms.')
     if out_format != 'corner':
-        raise RuntimeError('out_format %s is not supported.' % out_format)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "out_format" must equal "corner" for operator box_nms.')

     ret = _op.vision.get_valid_counts(inputs[0], score_threshold=valid_thresh)
     nms_out = _op.vision.non_max_suppression(ret[1],
...
@@ -508,7 +535,8 @@ def _mx_l2_normalize(inputs, attrs):
     new_attrs = {}
     mode = attrs.get_str('mode', 'instance')
     if mode != 'channel':
-        raise RuntimeError('mode %s is not supported.' % mode)
+        raise tvm.error.OpAttributeInvalid(
+            'Value of attribute "mode" must equal "channel" for operator l2_normalize.')
     new_attrs['eps'] = attrs.get_float('eps', 1e-10)
     new_attrs['axis'] = [1]
     return _op.nn.l2_normalize(inputs[0], **new_attrs)
...
@@ -771,7 +799,8 @@ def _from_mxnet_impl(symbol, shape_dict, dtype_info):
                 raise RuntimeError("unexpected type %s" % type(res))
             node_map[nid] = res
         else:
-            raise RuntimeError("{} is not supported in relay frontend".format(op_name))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend MXNet.'.format(op_name))

     outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]]
     outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
...
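With the final hunk above, an MXNet operator that has no Relay converter now surfaces as tvm.error.OpNotImplemented instead of a plain RuntimeError. A hedged sketch of what that looks like at the call site (it assumes an MXNet Symbol named sym and its input shape are already available; this snippet is not part of the commit):

import tvm
from tvm import relay

try:
    net, params = relay.frontend.from_mxnet(sym, shape={'data': (1, 3, 224, 224)})
except tvm.error.OpNotImplemented as err:
    # e.g. "Operator SomeCustomOp is not supported in frontend MXNet."
    print(err)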
python/tvm/relay/frontend/onnx.py
View file @
53511bf1
...
@@ -3,6 +3,7 @@
 from __future__ import absolute_import as _abs
 import logging
+import tvm
 import numpy as np
 from ... import nd as _nd
 from .. import ir_pass
...
@@ -18,7 +19,9 @@ def dimension_picker(prefix, surfix=''):
         kernel = attr['kernel_shape']
         if len(kernel) == 2:
             return prefix + '2d' + surfix
-        raise NotImplementedError("Only 2d kernel supported.")
+        msg = 'Only 2D kernels are supported for operator {}.'
+        op_name = prefix + '2d'
+        raise tvm.error.OpAttributeInvalid(msg.format(op_name))

     return _impl
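dimension_picker returns a closure that maps an operator's kernel_shape attribute to an op-name suffix and, with this change, rejects non-2D kernels with OpAttributeInvalid. A simplified standalone sketch of the same idea ('surfix' is kept as spelled in the source; this is not the frontend code itself):

import tvm

def dimension_picker(prefix, surfix=''):
    def _impl(attr):
        kernel = attr['kernel_shape']
        if len(kernel) == 2:
            return prefix + '2d' + surfix
        raise tvm.error.OpAttributeInvalid(
            'Only 2D kernels are supported for operator {}.'.format(prefix + '2d'))
    return _impl

print(dimension_picker('conv')({'kernel_shape': [3, 3]}))  # -> conv2d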
...
@@ -29,7 +32,8 @@ def revert_caffe2_pad(pads):
     elif len(pads) == 2:
         pass
     else:
-        raise ValueError("Invalid caffe2 type padding: {}".format(pads))
+        raise tvm.error.OpAttributeInvalid(
+            'Number of pads must be either 2 or 4.')
     return pads


 def dimension_constraint():
...
@@ -461,7 +465,8 @@ class Upsample(OnnxOpConverter):
         elif mode == b'linear':
             method = "BILINEAR"
         else:
-            raise ValueError("Invalid ONNX upsample mode: {}".format(mode))
+            raise tvm.error.OpAttributeInvalid(
+                'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
         attr = {'scale': int(scales[-1]), 'method': method, 'layout': 'NCHW'}
         return AttrCvt('upsampling')(inputs, attr)
...
@@ -718,8 +723,9 @@ class ConstantFill(OnnxOpConverter):
             shape = params[get_name(inputs[0])].asnumpy()
         else:
             if 'extra_shape' in attr:
-                raise ImportError(
-                    "Extra Shape not supported with fill_like")
+                raise tvm.error.OpAttributeInvalid('Attribute "extra_shape" not '
+                                                   'supported with "fill_like" for '
+                                                   'operator ConstantFill.')
             return _op.full_like(inputs[0], inputs[1])

         if 'extra_shape' in attr:
...
python/tvm/relay/frontend/tensorflow.py
View file @
53511bf1
...
@@ -27,7 +27,8 @@ def _get_relay_op(op_name):
         op = getattr(_op.image, op_name)
     if not op:
-        raise RuntimeError("Unable to map op_name {} to relay".format(op_name))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported for frontend TensorFlow.'.format(op_name))
     return op


 class AttrCvt(object):
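For orientation, _get_relay_op walks a few Relay namespaces before giving up; with this hunk the give-up path raises OpNotImplemented. A simplified, hypothetical version of that lookup (not the exact frontend code):

import tvm
from tvm.relay import op as _op

def lookup_relay_op(op_name):
    for mod in (_op, _op.nn, _op.image):
        op = getattr(mod, op_name, None)
        if op is not None:
            return op
    raise tvm.error.OpNotImplemented(
        'Operator {} is not supported for frontend TensorFlow.'.format(op_name))

print(lookup_relay_op('conv2d'))  # resolves to the Relay conv2d operator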
...
@@ -99,7 +100,8 @@ class AttrCvt(object):
         new_attrs = {}
         for k in attrs.keys():
             if k in self._excludes:
-                raise NotImplementedError("Attribute {} not supported yet.".format(k))
+                raise tvm.error.OpAttributeUnimplemented(
+                    'Attribute {} in operator {} is not supported.'.format(k, op_name))
             elif k in self._disables:
                 logging.warning("Attribute %s is disabled in relay.%s", k, op_name)
             elif k in self._ignores:
...
@@ -148,7 +150,8 @@ class AttrCvt(object):
         """Wrapper for getting required attributes."""
         assert isinstance(attr, dict)
         if key not in attr:
-            raise AttributeError("Required attribute {} not found.".format(key))
+            raise tvm.error.OpAttributeRequired(
+                'Attribute {} not found in operator {}'.format(key, self._op_name))
         return attr[key]


 def _get_pad_pair(input1d, kernel1d, stride1d):
...
@@ -178,7 +181,8 @@ def _dimension_picker(prefix, surfix=''):
         kernel = attr['kernel_shape']
         if len(kernel) == 2:
             return prefix + '2d' + surfix
-        raise NotImplementedError("Only 2d kernel supported.")
+        raise tvm.error.OpAttributeInvalid(
+            'Only 2D kernels are supported for operator {}'.format(prefix + '2d'))
     return _impl


 def _dimension_constraint():
...
@@ -238,7 +242,9 @@ def _pooling(name):
             attr['kernel_shape'] = (attr['ksize'][2], attr['ksize'][3])
             attr['strides'] = (attr['strides'][2], attr['strides'][3])
         else:
-            raise TypeError("Unsupported data_format type : {}".format(attr['data_format']))
+            msg = 'Value {} of attribute "data_format" of operator Pooling ' \
+                  'is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attrs['data_format']))

         if attr['_target_layout'] == "NCHW" and attr['data_format'] == "NHWC":
             tmp_shape = attr['_input_shapes'][inputs[0]]
...
@@ -267,7 +273,9 @@ def _pooling(name):
             attr['padding'] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]
         else:
-            raise TypeError("Unsupported padding type : {}".format(attr['padding']))
+            msg = 'Value {} in attribute "padding" of operator Pooling is ' \
+                  'not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))

         if name == "avg_pool":
             attr['count_include_pad'] = False
...
@@ -341,7 +349,9 @@ def _conv(opname):
             attr['dilations'] = (attr['dilations'][2], attr['dilations'][3])
             attr['strides'] = (attr['strides'][2], attr['strides'][3])
         else:
-            raise TypeError("Unsupported data format type : {}".format(attr['data_format']))
+            msg = 'Value {} in attribute "data_format" of operator Conv is ' \
+                  'not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))

         if opname == 'depthwise':
...
@@ -386,7 +396,9 @@ def _conv(opname):
             attr['padding'] = [0, 0]
         else:
-            raise TypeError("Unsupported padding type : {}".format(attr['padding']))
+            msg = 'Value {} in attribute "padding" of operator Conv is not ' \
+                  'valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))

         if 'kernel_layout' not in attr:
             if opname == 'conv':
...
@@ -791,7 +803,8 @@ def _pad(name):
         if padlist_key in params:
             padlist = params.pop(padlist_key).asnumpy()
         else:
-            raise RuntimeError("Required parameter {} not fount.".format(padlist_key))
+            raise tvm.error.OpAttributeRequired(
+                'Attribute {} not found in operator Pad.'.format(padlist_key))
         paddings = tuple([tuple(l) for l in padlist])
         attr['pad_width'] = paddings
         attr['pad_value'] = 0
...
python/tvm/relay/frontend/tflite.py
View file @
53511bf1
...
@@ -3,6 +3,7 @@
 from __future__ import absolute_import as _abs
 import math
 import numpy as np
+import tvm
 from .. import ir_pass
 from .. import expr as _expr
 from .. import op as _op
...
@@ -59,8 +60,10 @@ class OperatorConverter(object):
                 unsupported_ops_set.add(op_code_str)

         if unsupported_ops_set:
-            raise NotImplementedError("Unsupported Ops: %s" % (
-                ','.join(unsupported_ops_set)))
+            msg = 'The following operators are not supported in frontend ' \
+                  'TFLite: {}'
+            ops = str(list(unsupported_ops_set)).strip('[,]')
+            raise tvm.error.OpNotImplemented(msg.format(ops))

     def convert_op_to_relay(self):
         """Convert TFLite ops to relay ops"""
...
@@ -205,8 +208,8 @@ class OperatorConverter(object):
             # finally convert back if necessary
             in_expr = _op.transpose(in_expr, axes=(0, 2, 3, 1))
         else:
-            raise NotImplementedError("Not support input shape length {} of reshape : "
-                                      .format(str(input_shape_length)))
+            msg = 'Input shape length {} for operator Reshape is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(input_shape_length))

         out = _op.reshape(in_expr, newshape=tuple(target_shape))
...
@@ -223,8 +226,8 @@ class OperatorConverter(object):
         elif len(target_shape) == 4:
             out = _op.transpose(out, axes=(0, 3, 1, 2))
         else:
-            raise NotImplementedError("Not support to reshape to shape length {}: "
-                                      .format(str(len(target_shape))))
+            raise tvm.error.OpAttributeInvalid(
+                'Length of target shape must be between 1 and 5 for operator Reshape.')

         return out
...
@@ -330,8 +333,8 @@ class OperatorConverter(object):
             # finally convert back if necessary
             in_expr = _op.transpose(in_expr, axes=(0, 2, 3, 1))
         else:
-            raise NotImplementedError("Not support input shape length {} of squeeze : "
-                                      .format(str(input_shape_length)))
+            msg = 'Input shape length {} for operator Squeeze is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(input_shape_length))

         out = _op.squeeze(in_expr, axis=tuple(squeeze_axis))
...
@@ -348,8 +351,8 @@ class OperatorConverter(object):
         elif output_shape_length == 4:
             out = _op.transpose(out, axes=(0, 3, 1, 2))
         else:
-            raise NotImplementedError("Not support to squeeze to length {} : "
-                                      .format(str(output_shape_length)))
+            msg = 'Output shape length {} for operator Squeeze is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(output_shape_length))

         return out
...
@@ -369,8 +372,8 @@ class OperatorConverter(object):
         if fused_activation_fn == ActivationFunctionType.TANH:
             return _op.tanh(in_expr)
         fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
-        raise NotImplementedError("Unsupported fused activation fn {}"
-                                  .format(fused_activation_fn_str))
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported for frontend TFLite.'.format(fused_activation_fn_str))

     def convert_conv(self, op, conv_type):
         """convolution implementation."""
...
@@ -409,7 +412,8 @@ class OperatorConverter(object):
             assert depth_multiplier == 1, "TF frontend have transformed it be 1 " \
                 "no matter original value be set by 0.25, 0.5 or any else"
         else:
-            raise ValueError("Not support conv type: {}".format(conv_type))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported for frontend TFLite.'.format(conv_type))

         stride_h = conv_options.StrideH()
         stride_w = conv_options.StrideW()
...
@@ -466,7 +470,8 @@ class OperatorConverter(object):
                                           (pad_top, pad_bottom),
                                           (pad_left, pad_right)))
         else:
-            raise NotImplementedError("Not support padding format: {}".format(padding))
+            raise tvm.error.OpAttributeUnimplemented(
+                'Padding format {} is not supported for operator Conv.'.format(padding))

         out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
...
@@ -529,14 +534,16 @@ class OperatorConverter(object):
             pad_left, pad_right = get_pad_value(input_w, filter_w, stride_w)
             params['padding'] = [pad_top, pad_left, pad_bottom, pad_right]
         else:
-            raise NotImplementedError("Not support padding format: {}".format(padding))
+            raise tvm.error.OpAttributeUnimplemented(
+                'Padding format {} for operator Pool2D is not supported.'.format(padding))

         if pool_type == "average":
             out = _op.nn.avg_pool2d(in_expr, **params)
         elif pool_type == "max":
             out = _op.nn.max_pool2d(in_expr, **params)
         else:
-            raise ValueError("Not support pool type: {}".format(pool_type))
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported for frontend TFLite.'.format(pool_type + ' pool'))

         # If we have fused activations
         if fused_activation_fn != ActivationFunctionType.NONE:
...