wenyuanbo / tic · Commits

Commit c855882a
Authored Jul 17, 2019 by zhengdi; committed by Yao Wang, Jul 16, 2019
[FRONTEND][TENSORFLOW] Some bug fixes for tensorflow NCHW data_format (#3514)
Parent: b6dc7826
Showing 3 changed files, with 90 additions and 16 deletions:

- nnvm/python/nnvm/frontend/tensorflow.py (+14, -3)
- python/tvm/relay/frontend/tensorflow.py (+9, -3)
- tests/python/frontend/tensorflow/test_forward.py (+67, -10)
nnvm/python/nnvm/frontend/tensorflow.py
```diff
@@ -205,8 +205,12 @@ def _conv(opname):
         # NCHW Layout require weights transpose
         if attr['data_format'] == 'NCHW':
             tmp_shape = attr['_input_shapes'][inputs[1]]
-            tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]
-            inputs[1] = _sym.transpose(inputs[1], axes=(3, 2, 0, 1))
+            if opname == 'conv':
+                tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]
+                inputs[1] = _sym.transpose(inputs[1], axes=(3, 2, 0, 1))
+            else:
+                tmp_shape = [tmp_shape[ii] for ii in (2, 3, 0, 1)]
+                inputs[1] = _sym.transpose(inputs[1], axes=(2, 3, 0, 1))
             attr['_input_shapes'][inputs[1]] = tmp_shape
 
         input_shape = attr['_input_shapes'][inputs[0]]
```
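The reason for the two transpose orders: TensorFlow stores Conv2D weights as (H, W, in_channels, out_channels) but DepthwiseConv2dNative weights as (H, W, in_channels, channel_multiplier), so the NCHW path needs axes (3, 2, 0, 1) for the former and (2, 3, 0, 1) for the latter. A minimal numpy sketch of the resulting shapes (an illustration, not part of the commit):

```python
import numpy as np

conv_w = np.zeros((3, 3, 19, 32))        # Conv2D weights: (H, W, I, O)
depthwise_w = np.zeros((3, 3, 19, 1))    # DepthwiseConv2dNative weights: (H, W, C, M)

# axes=(3, 2, 0, 1): (H, W, I, O) -> (O, I, H, W), the layout conv2d expects for NCHW
assert conv_w.transpose(3, 2, 0, 1).shape == (32, 19, 3, 3)

# axes=(2, 3, 0, 1): (H, W, C, M) -> (C, M, H, W), so the conv order no longer applies
assert depthwise_w.transpose(2, 3, 0, 1).shape == (19, 1, 3, 3)
```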
```diff
@@ -238,12 +242,12 @@ def _conv(opname):
             attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])
             attr['strides'] = (attr['strides'][1], attr['strides'][2])
         elif attr['data_format'] == 'NCHW':
-            depth_mult, _, kernel_h, kernel_w = weights_shape
+            _, depth_mult, kernel_h, kernel_w = weights_shape
             attr['kernel_shape'] = (weights_shape[2], weights_shape[3])
             if opname == 'conv':
                 attr['channels'] = weights_shape[0]
             else:
-                attr['channels'] = input_shape[0] * depth_mult
+                attr['channels'] = input_shape[1] * depth_mult
 
             if attr['channels'] < 0:
                 attr['channels'] *= -1
```
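With the depthwise weights now transposed to (C, M, H, W), the channel multiplier sits at index 1 of weights_shape, and the output channel count for an NCHW input comes from input_shape[1] rather than the batch dimension at index 0. A small sanity check with assumed shapes (not taken from the commit):

```python
input_shape = (4, 176, 8, 8)      # NCHW: channels live at index 1
weights_shape = (176, 1, 3, 3)    # depthwise weights after the (2, 3, 0, 1) transpose

_, depth_mult, kernel_h, kernel_w = weights_shape
channels = input_shape[1] * depth_mult
assert (kernel_h, kernel_w, channels) == (3, 3, 176)
```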
```diff
@@ -256,6 +260,9 @@ def _conv(opname):
         if opname == 'depthwise':
+            if depth_mult > 1:
+                raise tvm.error.OpNotImplemented('depth_mult > 1 of operator DepthwiseConv2dNative'
+                                                 ' is not supported.')
             attr['groups'] = attr['channels']
 
         # Fix padding
```
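The guard exists because the frontend lowers depthwise convolution to a grouped conv2d with groups equal to the channel count, which only matches TensorFlow's semantics when the channel multiplier is 1; a larger multiplier would require C * M output channels with only C groups. A rough illustration with assumed numbers (not code from the commit):

```python
for in_channels, depth_mult in [(176, 1), (176, 2)]:
    channels = in_channels * depth_mult   # what DepthwiseConv2dNative produces
    supported = depth_mult == 1           # the new check rejects anything else
    print(in_channels, depth_mult, channels, 'supported' if supported else 'rejected')
```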
```diff
@@ -459,7 +466,11 @@ def _reshape():
 def _bias_add():
     def _impl(inputs, attr, params):
-        return _sym.broadcast_add(inputs[0], inputs[1])
+        if attr['data_format'].decode("utf-8") == 'NCHW':
+            bias = _sym.reshape(inputs[1], newshape=(1, -1, 1, 1))
+        else:
+            bias = inputs[1]
+        return _sym.broadcast_add(inputs[0], bias)
     return _impl
 
 def _squeeze():
```
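For NCHW inputs a plain broadcast_add no longer works, because numpy-style broadcasting would try to align the length-C bias with the trailing width axis instead of the channel axis; reshaping the bias to (1, -1, 1, 1) restores per-channel addition. A numpy sketch of the behaviour this change relies on (an illustration, not code from the commit):

```python
import numpy as np

x = np.zeros((4, 176, 8, 8))               # NCHW activations
bias = np.arange(176, dtype='float32')     # one value per channel

# x + bias would raise: shapes (4, 176, 8, 8) and (176,) do not broadcast.
y = x + bias.reshape(1, -1, 1, 1)          # aligns the bias with axis 1 (channels)
assert y.shape == (4, 176, 8, 8)
assert (y[0, 5] == bias[5]).all()          # every spatial position gets its channel's bias
```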
python/tvm/relay/frontend/tensorflow.py
```diff
@@ -361,8 +361,12 @@ def _conv(opname):
         # NCHW Layout require weights transpose
         if attr['data_format'] == 'NCHW':
             tmp_shape = attr['_input_shapes'][inputs[1]]
-            tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]
-            inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))
+            if opname == 'conv':
+                tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]
+                inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))
+            else:
+                tmp_shape = [tmp_shape[ii] for ii in (2, 3, 0, 1)]
+                inputs[1] = _op.transpose(inputs[1], axes=(2, 3, 0, 1))
             attr['_input_shapes'][inputs[1]] = tmp_shape
 
         input_shape = attr['_input_shapes'][inputs[0]]
```
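The Relay frontend gets the same fix, expressed with _op.transpose instead of _sym.transpose. A standalone sketch of the emitted transpose at the Relay level (assumes a reasonably recent TVM install; the variable names are made up for illustration):

```python
import tvm
from tvm import relay

w = relay.var('w', shape=(3, 3, 19, 1))       # TF depthwise weights: (H, W, C, M)
wt = relay.transpose(w, axes=(2, 3, 0, 1))    # -> (C, M, H, W)
mod = tvm.IRModule.from_expr(relay.Function([w], wt))
mod = relay.transform.InferType()(mod)
print(mod['main'].body.checked_type)          # Tensor[(19, 1, 3, 3), float32]
```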
```diff
@@ -394,12 +398,12 @@ def _conv(opname):
             attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])
             attr['strides'] = (attr['strides'][1], attr['strides'][2])
         elif attr['data_format'] == 'NCHW':
-            depth_mult, _, kernel_h, kernel_w = weights_shape
+            _, depth_mult, kernel_h, kernel_w = weights_shape
             attr['kernel_shape'] = (weights_shape[2], weights_shape[3])
             if opname == 'conv':
                 attr['channels'] = weights_shape[0]
             else:
-                attr['channels'] = input_shape[0] * depth_mult
+                attr['channels'] = input_shape[1] * depth_mult
 
             if attr['channels'] < 0:
                 attr['channels'] *= -1
```
```diff
@@ -411,8 +415,10 @@ def _conv(opname):
                   'not valid.'
             raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))
 
         if opname == 'depthwise':
+            if depth_mult > 1:
+                raise tvm.error.OpNotImplemented('depth_mult > 1 of operator DepthwiseConv2dNative'
+                                                 ' is not supported.')
             attr['groups'] = attr['channels']
 
         # Fix padding
```
tests/python/frontend/tensorflow/test_forward.py
```diff
@@ -223,7 +223,7 @@ def test_forward_pooling():
 # Convolution
 # -----------
-def _test_convolution(tensor_in_sizes, filter_in_sizes,
+def _test_convolution(opname, tensor_in_sizes, filter_in_sizes,
                       dilations, strides, padding, data_format):
     """ One iteration of convolution with given shapes and attributes """
```
```diff
@@ -244,6 +244,7 @@ def _test_convolution(tensor_in_sizes, filter_in_sizes,
             strides = [1, 1] + strides
             dilations = [1, 1] + dilations
 
-        nn_ops.conv2d(in_data,
-                      in_filter,
-                      strides=strides,
+        if opname == 'conv':
+            nn_ops.conv2d(in_data,
+                          in_filter,
+                          strides=strides,
```
```diff
@@ -253,18 +254,74 @@ def _test_convolution(tensor_in_sizes, filter_in_sizes,
-        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
-                            'Placeholder:0', 'Conv2D:0')
+            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
+                                'Placeholder:0', 'Conv2D:0')
+        else:
+            nn_ops.depthwise_conv2d_native(in_data,
+                                           in_filter,
+                                           strides=strides,
+                                           dilations=dilations,
+                                           padding=padding,
+                                           data_format=data_format)
+
+            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
+                                'Placeholder:0', 'DepthwiseConv2dNative:0')
 
 
 def test_forward_convolution():
     if is_gpu_available():
-        _test_convolution([4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NCHW')
-        _test_convolution([4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NCHW')
-        _test_convolution([4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NCHW')
-        _test_convolution([4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NCHW')
+        _test_convolution('conv', [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NCHW')
+        _test_convolution('conv', [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NCHW')
+        _test_convolution('conv', [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NCHW')
+        _test_convolution('conv', [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NCHW')
+        _test_convolution('depthwise', [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], 'SAME', 'NCHW')
+        _test_convolution('depthwise', [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], 'VALID', 'NCHW')
+        _test_convolution('depthwise', [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], 'SAME', 'NCHW')
+        _test_convolution('depthwise', [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], 'VALID', 'NCHW')
 
-    _test_convolution([4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NHWC')
-    _test_convolution([4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NHWC')
-    _test_convolution([4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NHWC')
-    _test_convolution([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC')
+    _test_convolution('conv', [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NHWC')
+    _test_convolution('conv', [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NHWC')
+    _test_convolution('conv', [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NHWC')
+    _test_convolution('conv', [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC')
+    _test_convolution('depthwise', [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], 'SAME', 'NHWC')
+    _test_convolution('depthwise', [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], 'VALID', 'NHWC')
+    _test_convolution('depthwise', [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], 'SAME', 'NHWC')
+    _test_convolution('depthwise', [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], 'VALID', 'NHWC')
 
+
+#######################################################################
+# BiasAdd
+# -----------
+def _test_biasadd(tensor_in_sizes, data_format):
+    """ One iteration of biasadd with given shapes and attributes """
+
+    total_size_1 = 1
+    for s in tensor_in_sizes:
+        total_size_1 *= s
+    tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == 'NCHW' else [tensor_in_sizes[3]]
+    total_size_2 = tensor_bias_sizes[0]
+    # Initializes the input tensor with array containing incrementing
+    # numbers from 1.
+    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
+    bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
+
+    with tf.Graph().as_default():
+        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
+        in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype='float32')
+        nn_ops.bias_add(in_data, in_bias, data_format=data_format)
+
+        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
+                            'Placeholder:0', 'BiasAdd:0')
+
+
+def test_forward_biasadd():
+    if is_gpu_available():
+        _test_biasadd([4, 176, 8, 8], 'NCHW')
+        _test_biasadd([1, 100, 1, 1], 'NCHW')
+        _test_biasadd([4, 19, 17, 17], 'NCHW')
+        _test_biasadd([4, 124, 3, 3], 'NCHW')
+
+    _test_biasadd([4, 8, 8, 176], 'NHWC')
+    _test_biasadd([1, 1, 1, 100], 'NHWC')
+    _test_biasadd([4, 17, 17, 19], 'NHWC')
+    _test_biasadd([4, 3, 3, 124], 'NHWC')
 
 #######################################################################
 # SpaceToBatchND
```
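Assuming a TVM checkout with TensorFlow available, the touched test entry points can be run in isolation with pytest; the invocation below is only a suggestion, not part of the commit:

```python
import pytest

# Selects test_forward_convolution and test_forward_biasadd from the updated file.
pytest.main(['tests/python/frontend/tensorflow/test_forward.py',
             '-k', 'forward_convolution or forward_biasadd'])
```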