Commit 7196c791, authored Aug 15, 2017 by Tianqi Chen, committed via GitHub on Aug 15, 2017

[TOPI] Isolate padding option, improve decl of depthwise/conv2d/pool (#332)

parent abccd9cd

Showing 12 changed files with 221 additions and 147 deletions:
python/tvm/expr.py                                 +6    -0
python/tvm/make.py                                 +5    -4
topi/python/topi/__init__.py                       +1    -0
topi/python/topi/nn/convolution.py                 +45   -94
topi/python/topi/nn/dilate.py                      +18   -19
topi/python/topi/nn/pad.py                         +104  -0
topi/python/topi/nn/pooling.py                     +18   -19
topi/python/topi/nn/softmax.py                     +5    -6
topi/python/topi/util.py                           +16   -0
topi/recipe/conv/depthwise_conv2d_test.py          +1    -1
topi/tests/python/test_topi_depthwise_conv2d.py    +1    -1
topi/tests/python/test_topi_dilate.py              +1    -3
python/tvm/expr.py (+6 -0)

@@ -50,6 +50,12 @@ class ExprOp(object):
     def __rtruediv__(self, other):
         return self.__rdiv__(other)
 
+    def __floordiv__(self, other):
+        return self.__div__(other)
+
+    def __rfloordiv__(self, other):
+        return self.__rdiv__(other)
+
     def __mod__(self, other):
         return _make.Mod(self, other)
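The added operators make floor division usable on symbolic expressions under Python 3, where // calls __floordiv__ instead of falling back to __div__. A minimal sketch of the resulting behavior, assuming the era's tvm.var constructor:

    import tvm

    x = tvm.var("x")
    y = tvm.var("y")
    # __floordiv__ simply delegates to __div__, so both spellings
    # build the same division node on index expressions.
    a = x / y
    b = x // y
    c = 16 // x   # __rfloordiv__ delegates to __rdiv__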
python/tvm/make.py (+5 -4)

@@ -52,10 +52,11 @@ def static_cast(dtype, expr):
     """
     target_type = TVMType(dtype)
     src_type = TVMType(expr.dtype)
     if target_type.type_code == src_type.type_code \
-       and src_type.bits == target_type.bits \
-       and src_type.lanes == 1 \
-       and target_type.lanes > 1:
-        return Broadcast(expr, target_type.lanes)
+       and src_type.bits == target_type.bits:
+        if src_type.lanes == target_type.lanes:
+            return expr
+        elif src_type.lanes == 1 and target_type.lanes > 1:
+            return Broadcast(expr, target_type.lanes)
     return Cast(dtype, expr)
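The rewritten branch makes static_cast a no-op for an exact type match, and keeps the scalar-to-vector case as a Broadcast rather than a Cast. A hedged sketch of the three paths, assuming the function is reachable as tvm.make.static_cast in this version:

    import tvm
    from tvm import make

    x = tvm.var("x", dtype="float32")
    make.static_cast("float32", x)    # same code/bits/lanes: returns x unchanged
    make.static_cast("float32x4", x)  # scalar to 4-lane vector: Broadcast(x, 4)
    make.static_cast("int32", x)      # different type: falls through to Cast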
topi/python/topi/__init__.py (+1 -0)

@@ -15,3 +15,4 @@ from .broadcast import *
 from . import nn
 from . import cuda
 from . import testing
+from . import util
topi/python/topi/nn/convolution.py (+45 -94)

-# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
+# pylint: disable=invalid-name, unused-variable, too-many-locals
 """Convolution operators"""
 from __future__ import absolute_import as _abs
 import tvm
-import numpy as np
-from ..util import get_const_tuple
+from ..util import simplify
+from .pad import pad, _spatial2d_pad_option
 
 
-@tvm.tag_scope(tag="conv2d_nchw")
 def conv2d_nchw(Input, Filter, stride, padding):
     """Convolution operator in HWCN layout.
...
@@ -31,45 +30,33 @@ def conv2d_nchw(Input, Filter, stride, padding):
     """
     assert isinstance(stride, int) or len(stride) == 2
     assert isinstance(padding, int) or padding in ['VALID', 'SAME']
-    batch, in_channel, in_height, in_width = get_const_tuple(Input.shape)
-    num_filter, channel, kernel_h, kernel_w = get_const_tuple(Filter.shape)
+    batch, in_channel, in_height, in_width = Input.shape
+    num_filter, channel, kernel_h, kernel_w = Filter.shape
     if isinstance(stride, int):
         stride_h = stride_w = stride
     else:
         stride_h, stride_w = stride
     # compute the padding size
-    if isinstance(padding, int):
-        pad_h = pad_w = padding * 2
-    elif padding == 'VALID':
-        pad_h = 0
-        pad_w = 0
-    else:  # 'SAME'
-        pad_h = kernel_h - 1
-        pad_w = kernel_w - 1
-    pad_top = int(np.ceil(float(pad_h) / 2))
-    pad_left = int(np.ceil(float(pad_w) / 2))
+    pad_top, pad_left, pad_down, pad_right = _spatial2d_pad_option(
+        padding, (kernel_h, kernel_w))
     # compute the output shape
     out_channel = num_filter
-    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
-    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
+    out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1)
+    out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1)
     # compute graph
-    temp = tvm.compute(
-        (batch, in_channel, in_height + pad_h, in_width + pad_w),
-        lambda nn, cc, yy, xx: tvm.select(
-            tvm.all(yy >= pad_top, yy - pad_top < in_height,
-                    xx >= pad_left, xx - pad_left < in_width),
-            Input[nn, cc, yy - pad_top, xx - pad_left], tvm.const(0.)),
-        name='temp')
+    pad_before = [0, 0, pad_top, pad_left]
+    pad_after = [0, 0, pad_down, pad_right]
+    temp = pad(Input, pad_before, pad_after, name="pad_temp")
     rc = tvm.reduce_axis((0, in_channel), name='rc')
     ry = tvm.reduce_axis((0, kernel_h), name='ry')
     rx = tvm.reduce_axis((0, kernel_w), name='rx')
     return tvm.compute(
         (batch, out_channel, out_height, out_width),
         lambda nn, ff, yy, xx: tvm.sum(
             temp[nn, rc, yy * stride_h + ry, xx * stride_w + rx] * Filter[ff, rc, ry, rx],
-            axis=[rc, ry, rx]))
+            axis=[rc, ry, rx]), tag="conv2d_nchw")
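With the padding logic isolated, a caller of the new declaration only chooses an option and the pad stage is generated by topi.nn.pad behind the scenes. A usage sketch (the shapes and the 'SAME' choice are illustrative, not from this commit):

    import tvm
    import topi

    Input = tvm.placeholder((1, 64, 56, 56), name='Input')   # NCHW
    Filter = tvm.placeholder((64, 64, 3, 3), name='Filter')  # OIHW
    # padding may be an int, a (ph, pw) pair, 'VALID', or 'SAME'
    Output = topi.nn.conv2d_nchw(Input, Filter, stride=1, padding='SAME')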
 
-@tvm.tag_scope(tag="conv2d_hwcn")
 def conv2d_hwcn(Input, Filter, stride, padding):
     """Convolution operator in HWCN layout.
...
@@ -93,36 +80,22 @@ def conv2d_hwcn(Input, Filter, stride, padding):
         4-D with shape [out_height, out_width, out_channel, batch]
     """
     assert isinstance(stride, int) or len(stride) == 2
-    assert isinstance(padding, int) or padding in ['VALID', 'SAME']
-    in_height, in_width, in_channel, batch = get_const_tuple(Input.shape)
-    kernel_h, kernel_w, channel, num_filter = get_const_tuple(Filter.shape)
+    in_height, in_width, in_channel, batch = Input.shape
+    kernel_h, kernel_w, channel, num_filter = Filter.shape
     if isinstance(stride, int):
         stride_h = stride_w = stride
     else:
         stride_h, stride_w = stride
     # compute the padding size
-    if isinstance(padding, int):
-        pad_h = pad_w = padding * 2
-    elif padding == 'VALID':
-        pad_h = 0
-        pad_w = 0
-    else:  # 'SAME'
-        pad_h = kernel_h - 1
-        pad_w = kernel_w - 1
-    pad_top = int(np.ceil(float(pad_h) / 2))
-    pad_left = int(np.ceil(float(pad_w) / 2))
+    pad_top, pad_left, pad_down, pad_right = _spatial2d_pad_option(
+        padding, (kernel_h, kernel_w))
     # compute the output shape
     out_channel = num_filter
-    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
-    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
+    out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1)
+    out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1)
     # compute graph
-    PaddedInput = tvm.compute(
-        (in_height + pad_h, in_width + pad_w, in_channel, batch),
-        lambda yy, xx, cc, nn: tvm.select(
-            tvm.all(yy >= pad_top, yy - pad_top < in_height,
-                    xx >= pad_left, xx - pad_left < in_width),
-            Input[yy - pad_top, xx - pad_left, cc, nn], tvm.const(0.)),
-        name='PaddedInput')
+    pad_before = [pad_top, pad_left, 0, 0]
+    pad_after = [pad_down, pad_right, 0, 0]
+    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
     rc = tvm.reduce_axis((0, in_channel), name='rc')
     ry = tvm.reduce_axis((0, kernel_h), name='ry')
     rx = tvm.reduce_axis((0, kernel_w), name='rx')
...
@@ -131,12 +104,11 @@ def conv2d_hwcn(Input, Filter, stride, padding):
         lambda yy, xx, ff, nn: tvm.sum(
             PaddedInput[yy * stride_h + ry, xx * stride_w + rx, rc, nn] * Filter[ry, rx, rc, ff],
             axis=[ry, rx, rc]),
-        name='Conv2dOutput')
+        name="Conv2dOutput", tag="conv2d_hwcn")
     return Output
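conv2d_hwcn follows the same pattern; since this layout puts the spatial axes first, the pad widths land on the leading dimensions ([pad_top, pad_left, 0, 0]). An illustrative sketch:

    import tvm
    import topi

    Input = tvm.placeholder((56, 56, 64, 1), name='Input')   # HWCN
    Filter = tvm.placeholder((3, 3, 64, 64), name='Filter')  # HWIO
    Output = topi.nn.conv2d_hwcn(Input, Filter, stride=1, padding='SAME')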
 
-@tvm.tag_scope(tag="depthwise_conv2d")
-def depthwise_conv2d(Input, Filter, Stride, padding):
+def depthwise_conv2d(Input, Filter, stride, padding):
     """Depthwise convolution operator.
 
     Parameters
...
@@ -147,8 +119,8 @@ def depthwise_conv2d(Input, Filter, Stride, padding):
     Filter : tvm.Tensor
         4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]
 
-    Stride : tvm.Tensor
-        1-D of size 2
+    stride : tuple of two ints
+        The spatial stride along height and width
 
     padding : str
         'VALID' or 'SAME'
...
@@ -158,49 +130,28 @@ def depthwise_conv2d(Input, Filter, Stride, padding):
     Output : tvm.Tensor
         4-D with shape [batch, out_channel, out_height, out_width]
     """
-    in_shape = get_const_tuple(Input.shape)
-    batch = in_shape[0]
-    in_channel = in_shape[1]
-    in_height = in_shape[2]
-    in_width = in_shape[3]
-    filter_shape = get_const_tuple(Filter.shape)
-    filter_channel = filter_shape[0]
-    channel_multiplier = filter_shape[1]
-    filter_height = filter_shape[2]
-    filter_width = filter_shape[3]
-    stride_h = Stride.asnumpy()[0]
-    stride_w = Stride.asnumpy()[1]
-    # calculate output shape
-    if padding == 'VALID':
-        out_channel = in_channel * channel_multiplier
-        out_height = (in_height - filter_height) // stride_h + 1
-        out_width = (in_width - filter_width) // stride_w + 1
-        pad_along_height = 0
-        pad_along_width = 0
-    if padding == 'SAME':
-        out_channel = in_channel * channel_multiplier
-        out_height = np.int(np.ceil(float(in_height) / float(stride_h)))
-        out_width = np.int(np.ceil(float(in_width) / float(stride_w)))
-        pad_along_height = np.int(np.max((out_height - 1) * stride_h + filter_height - in_height, 0))
-        pad_along_width = np.int(np.max((out_width - 1) * stride_w + filter_width - in_width, 0))
-        height_after_pad = in_height + pad_along_height
-        width_after_pad = in_width + pad_along_width
-        pad_top = np.int(np.ceil(float(pad_along_height) / 2))
-        pad_left = np.int(np.ceil(float(pad_along_width) / 2))
+    batch, in_channel, in_height, in_width = Input.shape
+    filter_channel, channel_multiplier, filter_height, filter_width = Filter.shape
+    stride_h, stride_w = stride
+    pad_top, pad_left, pad_down, pad_right = _spatial2d_pad_option(
+        padding, (filter_height, filter_width))
+    out_channel = simplify(in_channel * channel_multiplier)
+    out_height = simplify((in_height - filter_height + pad_top + pad_down) // stride_h + 1)
+    out_width = simplify((in_width - filter_width + pad_left + pad_right) // stride_w + 1)
     # padding stage
-    PaddedInput = tvm.compute(
-        (batch, in_channel, height_after_pad, width_after_pad),
-        lambda b, c, i, j: tvm.select(
-            tvm.all(i >= pad_top, i - pad_top < in_height,
-                    j >= pad_left, j - pad_left < in_width),
-            Input[b, c, i - pad_top, j - pad_left], tvm.const(0.0)),
-        name="PaddedInput")
+    pad_before = [0, 0, pad_top, pad_left]
+    pad_after = [0, 0, pad_down, pad_right]
+    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
     # depthconv stage
     di = tvm.reduce_axis((0, filter_height), name='di')
     dj = tvm.reduce_axis((0, filter_width), name='dj')
     Output = tvm.compute(
         (batch, out_channel, out_height, out_width),
         lambda b, c, i, j: tvm.sum(
-            PaddedInput[b, c/channel_multiplier, i*stride_h + di, j*stride_w + dj] * Filter[c/channel_multiplier, c%channel_multiplier, di, dj],
+            (PaddedInput[b, c/channel_multiplier, i*stride_h + di, j*stride_w + dj] *
+             Filter[c/channel_multiplier, c%channel_multiplier, di, dj]),
             axis=[di, dj]),
-        name='DepthwiseConv2d')
+        name='DepthwiseConv2d', tag="depthwise_conv2d")
     return Output
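The visible call-site change is that stride is now a plain pair of ints instead of a tvm.nd.array, as the updated tests below confirm. An illustrative sketch of the new signature:

    import tvm
    import topi

    Input = tvm.placeholder((1, 32, 112, 112), name='Input')
    Filter = tvm.placeholder((32, 1, 3, 3), name='Filter')  # channel_multiplier = 1
    Output = topi.nn.depthwise_conv2d(Input, Filter, stride=[1, 1], padding='SAME')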
topi/python/topi/nn/dilate.py (+18 -19)

@@ -6,35 +6,39 @@ from .. import util
 
 @tvm.tag_scope(tag="dilation")
-def dilate(Input, strides):
-    """Dilate Input with zeros.
+def dilate(data, strides, name="DilatedInput"):
+    """Dilate data with zeros.
 
     Parameters
     ----------
-    Input : tvm.Tensor
+    data : tvm.Tensor
         n-D, can be any layout.
 
     strides : list / tuple of n ints
         Dilation stride on each dimension, 1 means no dilation.
 
+    name : str, optional
+        The name prefix operators generated
+
     Returns
     -------
     Output : tvm.Tensor
-        n-D, the same layout as Input.
+        n-D, the same layout as data.
     """
-    n = len(Input.shape)
-    assert len(strides) == n, \
-        "Input dimension and strides size dismatch : %d vs %d" % (n, len(strides))
-    output_size = ()
-    for i in range(n):
-        output_size += (tvm.ir_pass.Simplify((Input.shape[i] - 1) * strides[i] + 1),)
+    n = len(data.shape)
+    if len(strides) != n:
+        raise ValueError("data dimension and strides size dismatch : %d vs %d" % (
+            n, len(strides)))
+    out_shape = tuple(
+        tvm.ir_pass.Simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))
 
-    def _dilate(data, *indices):
+    def _dilate(*indices):
         not_zero = []
         index_tuple = []
         for i in range(n):
             if not util.equal_const_int(strides[i], 1):
                 index_tuple.append(indices[i] / strides[i])
                 not_zero.append((indices[i] % strides[i]).equal(0))
             else:
                 index_tuple.append(indices[i])
...
@@ -43,9 +47,4 @@ def dilate(Input, strides):
             return tvm.select(not_zero, data(*index_tuple), tvm.const(0.0, data.dtype))
         return data(*index_tuple)
 
-    Output = tvm.compute(
-        output_size,
-        lambda *indices: _dilate(Input, *indices),
-        name='DilatedInput')
-    return Output
+    return tvm.compute(out_shape, _dilate, name=name)
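dilate keeps its semantics, producing (d_i - 1) * stride_i + 1 elements along every axis i, but now raises ValueError on a rank mismatch instead of asserting, and accepts a name prefix. An illustrative sketch:

    import tvm
    import topi

    A = tvm.placeholder((1, 8, 8, 16), name='A')
    # Elements whose index is not a multiple of the stride are zero;
    # (1, 8, 8, 16) with strides [1, 2, 2, 1] dilates to (1, 15, 15, 16).
    D = topi.nn.dilate(A, [1, 2, 2, 1], name="DilatedA")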
topi/python/topi/nn/pad.py (new file, +104 -0)

"""Pad the data by constant value """
from __future__ import absolute_import as _abs
import tvm
from ..util import equal_const_int


def _spatial2d_pad_option(padding, kernel):
    """Common code to get the pad option

    Parameters
    ----------
    padding : int or str
        Padding size, or ['VALID', 'SAME']

    kernel : tuple of int
        Conv kernel size

    Returns
    -------
    pad_top : int
        Padding size on top

    pad_left : int
        Padding size on left

    pad_down : int
        Padding size on down.

    pad_right : int
        Padding size on right.
    """
    # compute the padding size
    if isinstance(padding, (tuple, list)):
        pad_h = padding[0] * 2
        pad_w = padding[1] * 2
    elif isinstance(padding, int):
        pad_h = pad_w = padding * 2
    elif padding == "VALID":
        pad_h = 0
        pad_w = 0
    elif padding == "SAME":
        pad_h = kernel[0] - 1
        pad_w = kernel[1] - 1
    else:
        raise ValueError("Unknown padding option %s" % padding)
    pad_top = (pad_h + 1) // 2
    pad_left = (pad_w + 1) // 2
    return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left


@tvm.tag_scope(tag="pad")
def pad(data, pad_before, pad_after=None, pad_value=0.0, name="PadInput"):
    """Dilate Input with zeros.

    Parameters
    ----------
    data : tvm.Tensor
        n-D input, can be any layout.

    pad_before : list / tuple of n ints
        Pad width on each dimension to pad the before the axis begin.

    pad_after : list / tuple of n ints, optional
        Pad width each dimension to pad the after the axis end.

    pad_value : float, optional
        The value to be padded.

    name : str, optional
        The name prefix operators generated

    Returns
    -------
    Output : tvm.Tensor
        n-D, the same layout as Input.
    """
    n = len(data.shape)
    pad_after = pad_after if pad_after else pad_before
    if len(pad_before) != n:
        raise ValueError("Input dimension and pad_before dismatch : %d vs %d" % (
            n, len(pad_before)))
    if len(pad_after) != n:
        raise ValueError("Input dimension and pad_after dismatch : %d vs %d" % (
            n, len(pad_before)))
    out_shape = tuple(
        tvm.ir_pass.Simplify(
            (data.shape[i] + pad_before[i] + pad_after[i])) for i in range(n))
    pad_value = (pad_value if isinstance(pad_value, tvm.expr.Expr)
                 else tvm.const(pad_value, data.dtype))

    def _pad(*indices):
        not_zero = []
        index_tuple = []
        for i in range(n):
            if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):
                index_tuple.append(indices[i])
            else:
                index_tuple.append(indices[i] - pad_before[i])
                not_zero.append(indices[i] >= pad_before[i])
                not_zero.append(indices[i] < data.shape[i] + pad_before[i])
        if not_zero:
            not_zero = tvm.all(*not_zero)
            return tvm.select(not_zero, data(*index_tuple), pad_value)
        return data(*index_tuple)

    return tvm.compute(out_shape, _pad, name=name)
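Worked through by hand: _spatial2d_pad_option('SAME', (3, 3)) gives pad_h = pad_w = 2 and therefore (1, 1, 1, 1), and an odd total puts the extra row on top because pad_top = (pad_h + 1) // 2. A sketch of both helpers (the direct module path is assumed, since the diff does not show topi/nn/__init__.py re-exporting them):

    import tvm
    from topi.nn.pad import pad, _spatial2d_pad_option

    _spatial2d_pad_option('SAME', (3, 3))  # (1, 1, 1, 1)
    _spatial2d_pad_option(2, (3, 3))       # (2, 2, 2, 2)

    A = tvm.placeholder((1, 3, 32, 32), name='A')
    # pad_after defaults to pad_before; pad_value defaults to 0.0
    B = pad(A, [0, 0, 1, 1], name="PaddedA")  # shape (1, 3, 34, 34)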
topi/python/topi/nn/pooling.py (+18 -19)

 """TVM operator pooling compute."""
 from __future__ import absolute_import
 import tvm
+from .. import util
+from .pad import pad, _spatial2d_pad_option
 
 
-@tvm.tag_scope(tag='max_pool')
-def max_pool(data, kernel, stride, pad):
+def max_pool(data, kernel, stride, padding):
     """Perform max pooling on the data
 
     Parameters
...
@@ -17,7 +18,7 @@ def max_pool(data, kernel, stride, pad):
     stride : list/tuple of two ints
         Stride size, or [stride_height, stride_width]
 
-    pad : list/tuple of two ints
+    padding : list/tuple of two ints
         Pad size, or [pad_height, pad_width]
 
     Returns
...
@@ -26,29 +27,27 @@ def max_pool(data, kernel, stride, pad):
         4-D with shape [batch, channel, out_height, out_width]
     """
     assert len(data.shape) == 4, "only support 4-dim pooling"
-    assert len(stride.shape) == 2, "only support 2-dim stride"
-    assert len(pad.shape) == 2, "only support 2-dim pad"
+    assert len(stride) == 2, "only support 2-dim stride"
     kernel_height, kernel_width = kernel
     stride_height, stride_width = stride
-    pad_height, pad_width = pad
     batch, channel, height, width = data.shape
-    padded_height = height + 2 * pad_height
-    padded_width = width + 2 * pad_width
-    out_height = (height + 2 * pad_height - kernel_height) / stride_height + 1
-    out_width = (width + 2 * pad_width - kernel_width) / stride_width + 1
+    pad_top, pad_left, pad_down, pad_right = _spatial2d_pad_option(
+        padding, (kernel_height, kernel_width))
+    pad_before = [0, 0, pad_top, pad_left]
+    pad_after = [0, 0, pad_down, pad_right]
+    temp = pad(data, pad_before, pad_after, name="pad_temp", pad_value=tvm.min_value("float32"))
+    out_height = util.simplify((height - kernel_height + pad_top + pad_down) // stride_height + 1)
+    out_width = util.simplify((width - kernel_width + pad_left + pad_right) // stride_width + 1)
     dheight = tvm.reduce_axis((0, kernel_height))
     dwidth = tvm.reduce_axis((0, kernel_width))
 
-    temp = tvm.compute((batch, channel, padded_height, padded_width), lambda i, c, h, w: \
-        tvm.select(
-            tvm.make.Or(tvm.make.Or((h < pad_height), (h >= height + pad_height)),
-                        tvm.make.Or((w < pad_width), (w >= width + pad_width))),
-            tvm.min_value('float32'),
-            data[i, c, h - pad_height, w - pad_width]), name='temp')
-
-    return tvm.compute((batch, channel, out_height, out_width), lambda i, c, h, w: \
-        tvm.max(temp[i, c, h * stride_height + dheight, w * stride_width + dwidth], axis=[dheight, dwidth]))
+    return tvm.compute(
+        (batch, channel, out_height, out_width),
+        lambda i, c, h, w:
+        tvm.max(temp[i, c, h * stride_height + dheight, w * stride_width + dwidth],
+                axis=[dheight, dwidth]),
+        tag="max_pool")
 
 
 @tvm.tag_scope(tag='global_avg_pool')
...
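The pad stage keeps filling with tvm.min_value("float32"), so padded cells never win the max reduction; what changes for callers is the renamed padding argument, which now accepts the shared option set. An illustrative sketch:

    import tvm
    import topi

    data = tvm.placeholder((1, 16, 32, 32), name='data')
    # padding accepts the same options as conv2d: int, (ph, pw), 'VALID', 'SAME'
    out = topi.nn.max_pool(data, kernel=(2, 2), stride=(2, 2), padding='VALID')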
topi/python/topi/nn/softmax.py (+5 -6)

@@ -19,9 +19,8 @@ def softmax(x):
     assert len(x.shape) == 2, "only support 2-dim softmax"
     m, n = x.shape
     k = tvm.reduce_axis((0, n), name='k')
-    max_elem = tvm.compute((m, ), lambda i: \
-        tvm.max(x[i, k]), axis=k)
-    expsum = tvm.compute((m, ), lambda i: \
-        tvm.sum(tvm.exp(x[i, k] - max_elem[i]), axis=k))
-    return tvm.compute(x.shape, lambda i, j: \
-        tvm.exp(x[i, j] - max_elem[i]) / expsum[i])
+    max_elem = tvm.compute((m, ), lambda i: tvm.max(x[i, k], axis=k))
+    expsum = tvm.compute(
+        (m, ), lambda i: tvm.sum(tvm.exp(x[i, k] - max_elem[i]), axis=k))
+    return tvm.compute(
+        x.shape, lambda i, j: tvm.exp(x[i, j] - max_elem[i]) / expsum[i])
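The cleaned-up declaration is the standard numerically stable softmax: subtracting the row maximum before exponentiating leaves the result unchanged while keeping exp in range. A quick numpy check of that identity:

    import numpy as np

    x = np.array([1000.0, 1001.0, 1002.0])
    # np.exp(x) alone overflows to inf; shifting by the max does not
    e = np.exp(x - x.max())
    print(e / e.sum())  # [0.09003057 0.24472847 0.66524096]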
topi/python/topi/util.py (+16 -0)

@@ -63,3 +63,19 @@ def get_const_tuple(in_tuple):
             raise ValueError("Element of input tuple should be const int")
         out_tuple = out_tuple + (elem.value, )
     return out_tuple
+
+
+def simplify(expr):
+    """Simplify the expression if it is Expr, directly return if it is int.
+
+    Parameters
+    ----------
+    expr : Expr or int
+        The input.
+
+    Returns
+    -------
+    out : Expr or int
+        The simplified output
+    """
+    return tvm.ir_pass.Simplify(expr) if isinstance(expr, tvm.expr.Expr) else expr
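The new helper is what lets the declarations above accept both constant and symbolic shapes: ints pass through untouched and Exprs are run through the simplifier. A small sketch:

    import tvm
    from topi.util import simplify

    simplify(7)            # plain int: returned as-is
    n = tvm.var("n")
    simplify((n + 0) * 1)  # Expr: handed to tvm.ir_pass.Simplify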
topi/recipe/conv/depthwise_conv2d_test.py (+1 -1)

@@ -49,7 +49,7 @@ def test_depthwise_conv2d():
     # Placeholder
     Input = tvm.placeholder((batch, in_channel, in_height, in_width), name='Input')
     Filter = tvm.placeholder((filter_channel, channel_multiplier, filter_height, filter_width), name='Filter')
-    Stride = tvm.nd.array(np.array([stride_h, stride_w]))
+    Stride = [stride_h, stride_w]
     Scale = tvm.placeholder((in_channel * channel_multiplier,), name='Scale')
     Shift = tvm.placeholder((in_channel * channel_multiplier,), name='Shift')
     # Declare
topi/tests/python/test_topi_depthwise_conv2d.py (+1 -1)

@@ -13,7 +13,7 @@ def depthwise_conv2d_with_workload(batch, in_channel, in_height, channel_multipl
     # placeholder
     Input = tvm.placeholder((batch, in_channel, in_height, in_width), name='Input')
     Filter = tvm.placeholder((filter_channel, channel_multiplier, filter_height, filter_width), name='Filter')
-    Stride = tvm.nd.array(np.array([stride_h, stride_w]))
+    Stride = [stride_h, stride_w]
     Scale = tvm.placeholder((in_channel * channel_multiplier,), name='Scale')
     Shift = tvm.placeholder((in_channel * channel_multiplier,), name='Shift')
     # declare
topi/tests/python/test_topi_dilate.py (+1 -3)

@@ -14,9 +14,7 @@ def test_dilate():
         input_np = np.random.uniform(size=input_size).astype(Input.dtype)
         output_np = topi.testing.dilate_python(input_np, strides)
         input_tvm = tvm.nd.array(input_np, ctx=ctx)
-        output_size = ()
-        for i in range(len(input_size)):
-            output_size += (tvm.ir_pass.Simplify(Output.shape[i]).value,)
+        output_size = topi.util.get_const_tuple(Output.shape)
         output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), ctx=ctx)
         f = tvm.build(schedule, [Input, Output], target)
         f(input_tvm, output_tvm)