wenyuanbo / tic · Commits · a5d7bdab

Commit a5d7bdab (Unverified), authored Mar 22, 2020 by Haichen Shen, committed by GitHub on Mar 22, 2020
Adjust strategy plevel to achieve expected performance by default (#5118)
Parent: d2bc94d9
Showing 6 changed files with 19 additions and 21 deletions:

  python/tvm/relay/op/strategy/arm_cpu.py   +3  -3
  python/tvm/relay/op/strategy/bifrost.py   +1  -1
  python/tvm/relay/op/strategy/cuda.py      +3  -3
  python/tvm/relay/op/strategy/mali.py      +1  -1
  python/tvm/relay/op/strategy/rocm.py      +8  -10
  python/tvm/relay/op/strategy/x86.py       +3  -3
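For readers outside the TVM codebase: every implementation registered on an OpStrategy via strategy.add_implementation(compute, schedule, name=..., plevel=...) carries a priority level. When compiling without AutoTVM tuning records, Relay falls back to the applicable implementation with the highest plevel, so these constants decide which kernel runs by default; this commit re-balances them. The sketch below is a minimal standalone model of that fallback rule, not TVM's actual compile-engine code (the Impl record and select_implementation helper here are hypothetical):

# Minimal model of plevel-based fallback selection (illustrative only).
from collections import namedtuple

Impl = namedtuple("Impl", ["name", "plevel"])

def select_implementation(impls, tuned_names=frozenset()):
    # Implementations backed by tuning records win outright; otherwise
    # the highest plevel decides the default.
    tuned = [impl for impl in impls if impl.name in tuned_names]
    candidates = tuned if tuned else list(impls)
    return max(candidates, key=lambda impl: impl.plevel)

# With the plevels from this commit, NNPACK winograd is the arm_cpu
# default, and the generic winograd kernel only wins once it has been tuned:
impls = [Impl("conv2d_nchw_winograd.arm_cpu", 5),
         Impl("conv2d_nchw_winograd_nnpack.arm_cpu", 15)]
print(select_implementation(impls).name)
# -> conv2d_nchw_winograd_nnpack.arm_cpu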
python/tvm/relay/op/strategy/arm_cpu.py

@@ -67,13 +67,13 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target):
                 wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd),
                 wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd),
                 name="conv2d_nchw_winograd.arm_cpu",
-                plevel=15)
+                plevel=5)
             if "nnpack" in target.libs and pt == 1 and pb == 1 and pl == 1 and pr == 1:
                 strategy.add_implementation(
                     wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd_nnpack),
                     wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack),
                     name="conv2d_nchw_winograd_nnpack.arm_cpu",
-                    plevel=13)
+                    plevel=15)
         elif re.match(r"OIHW\d*o", kernel_layout):
             strategy.add_implementation(
                 wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_spatial_pack),
@@ -177,7 +177,7 @@ def conv2d_winograd_without_weight_transfrom_strategy_arm_cpu(attrs, inputs, out_type, target):
             wrap_topi_schedule(
                 topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack_without_weight_transform),
             name="conv2d_nchw_winograd_nnpack_withou_weight_transform.arm_cpu",
-            plevel=5)
+            plevel=15)
     else:
         raise RuntimeError("Unsupported kernel shape: {}".format(kernel.shape))
 else:
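The winograd_nnpack branch above only fires when NNPACK is requested through the target's libs list and the padding is 1 on every side (pt, pb, pl, pr). As a hedged sketch of how "nnpack" ends up in target.libs, using the target API of this era (tvm.target.create; the aarch64 triple is just an example, not part of this commit):

import tvm

# "-libs=nnpack" is what populates target.libs for the check in the diff.
target = tvm.target.create(
    "llvm -device=arm_cpu -target=aarch64-linux-gnu -libs=nnpack")
print("nnpack" in target.libs)  # True -> the winograd_nnpack path is eligible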
python/tvm/relay/op/strategy/bifrost.py

@@ -50,7 +50,7 @@ def conv2d_strategy_bifrost(attrs, inputs, out_type, target):
                 wrap_compute_conv2d(topi.bifrost.conv2d_nchw_winograd),
                 wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_winograd),
                 name="conv2d_nchw_winograd.bifrost",
-                plevel=15)
+                plevel=5)
         elif re.match(r"OIHW\d*o", kernel_layout):
             strategy.add_implementation(
                 wrap_compute_conv2d(topi.bifrost.conv2d_nchw_spatial_pack),
python/tvm/relay/op/strategy/cuda.py

@@ -135,7 +135,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
                 wrap_compute_conv2d(topi.cuda.conv2d_cudnn, True),
                 wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn),
                 name="conv2d_cudnn.cuda",
-                plevel=5)
+                plevel=15)
         elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
             if layout == "NCHW":
                 assert kernel_layout == "OIHW"
@@ -295,13 +295,13 @@ def dense_strategy_cuda(attrs, inputs, out_type, target):
             wrap_compute_dense(topi.cuda.dense_large_batch),
             wrap_topi_schedule(topi.cuda.schedule_dense_large_batch),
             name="dense_large_batch.cuda",
-            plevel=15)
+            plevel=5)
     if target.target_name == "cuda" and "cublas" in target.libs:
         strategy.add_implementation(
             wrap_compute_dense(topi.cuda.dense_cublas),
             wrap_topi_schedule(topi.cuda.schedule_dense_cublas),
             name="dense_cublas.cuda",
-            plevel=20)
+            plevel=15)
     return strategy

 @batch_matmul_strategy.register(["cuda", "gpu"])
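The two dense changes here interact: with cuBLAS linked in, dense_cublas.cuda (plevel=15) now outranks dense_large_batch.cuda (plevel=5) by default, while keeping library plevels uniform at 15 across backends. Enabling the library path is again a matter of the -libs target option; a small sketch with the era-appropriate API (the exact target string is an assumption):

import tvm

target = tvm.target.create("cuda -libs=cublas,cudnn")
# These are exactly the conditions the strategy functions test above:
print(target.target_name == "cuda" and "cublas" in target.libs)  # True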
python/tvm/relay/op/strategy/mali.py

@@ -49,7 +49,7 @@ def conv2d_strategy_mali(attrs, inputs, out_type, target):
                 wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd),
                 wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd),
                 name="conv2d_nchw_winograd.mali",
-                plevel=15)
+                plevel=5)
         elif re.match(r"OIHW\d*o", kernel_layout):
             strategy.add_implementation(
                 wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack),
python/tvm/relay/op/strategy/rocm.py

@@ -77,13 +77,12 @@ def conv2d_strategy_rocm(attrs, inputs, out_type, target):
         else:
             raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout))
         # add miopen implementation
-        if "miopen" in target.libs:
-            if layout == "NCHW":
-                strategy.add_implementation(
-                    wrap_compute_conv2d(topi.rocm.conv2d_nchw_miopen, True),
-                    wrap_topi_schedule(topi.rocm.schedule_conv2d_nchw_miopen),
-                    name="conv2d_nchw_miopen.rocm",
-                    plevel=15)
+        if "miopen" in target.libs and layout == "NCHW":
+            strategy.add_implementation(
+                wrap_compute_conv2d(topi.rocm.conv2d_nchw_miopen, True),
+                wrap_topi_schedule(topi.rocm.schedule_conv2d_nchw_miopen),
+                name="conv2d_nchw_miopen.rocm",
+                plevel=15)
     elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
         if layout == "NCHW":
             assert kernel_layout == "OIHW"
@@ -120,9 +119,8 @@ def conv2d_strategy_rocm(attrs, inputs, out_type, target):
 @dense_strategy.register("rocm")
 def dense_strategy_rocm(attrs, inputs, out_type, target):
     """Dense strategy for ROCM"""
-    strategy = _op.OpStrategy()
-    assert len(inputs[0].shape) == 2 and len(inputs[1].shape) == 2, "Only support 2-dim dense"
+    assert len(inputs[0].shape) == 2 and len(inputs[1].shape) == 2, "Only support 2-dim dense"
+    strategy = _op.OpStrategy()
     strategy.add_implementation(
         wrap_compute_dense(topi.rocm.dense),
         wrap_topi_schedule(topi.rocm.schedule_dense),
@@ -133,5 +131,5 @@ def dense_strategy_rocm(attrs, inputs, out_type, target):
         wrap_compute_dense(topi.rocm.dense_rocblas),
         wrap_topi_schedule(topi.rocm.dense_rocblas),
         name="dense_rocblas.rocm",
-        plevel=5)
+        plevel=15)
     return strategy
python/tvm/relay/op/strategy/x86.py

@@ -232,13 +232,13 @@ def dense_strategy_cpu(attrs, inputs, out_type, target):
         strategy.add_implementation(wrap_compute_dense(topi.x86.dense_cblas),
                                     wrap_topi_schedule(topi.x86.schedule_dense_cblas),
                                     name="dense_cblas.x86",
-                                    plevel=5)
+                                    plevel=15)
     with SpecializedCondition(m >= 16):
         # this implementation may not be well-optimized, so use plevel=8 for now.
         strategy.add_implementation(wrap_compute_dense(topi.x86.dense_pack),
                                     wrap_topi_schedule(topi.x86.schedule_dense_pack),
                                     name="dense_pack.x86",
-                                    plevel=8)
+                                    plevel=5)
     return strategy

 @batch_matmul_strategy.register("cpu")
@@ -253,7 +253,7 @@ def batch_matmul_strategy_cpu(attrs, inputs, out_type, target):
         strategy.add_implementation(wrap_compute_batch_matmul(topi.x86.batch_matmul_cblas),
                                     wrap_topi_schedule(topi.x86.schedule_batch_matmul_cblas),
                                     name="batch_matmul_cblas.x86",
-                                    plevel=5)
+                                    plevel=15)
     return strategy

 @schedule_sparse_dense.register("cpu")
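On x86, dense_pack.x86 is only registered inside SpecializedCondition(m >= 16), i.e. when the first output dimension is at least 16, and after this commit it sits below dense_cblas.x86 in priority. A toy recreation of that candidate set under the simplified selection rule from the first sketch (names and plevels come from this diff; the helper itself is illustrative, not TVM code):

def x86_dense_default(m, cblas_in_libs):
    # Candidate set mirrors the guards in dense_strategy_cpu above.
    impls = []
    if cblas_in_libs:
        impls.append(("dense_cblas.x86", 15))
    if m >= 16:  # the SpecializedCondition in the diff
        impls.append(("dense_pack.x86", 5))
    return max(impls, key=lambda kv: kv[1])[0] if impls else None

print(x86_dense_default(m=32, cblas_in_libs=True))   # dense_cblas.x86
print(x86_dense_default(m=32, cblas_in_libs=False))  # dense_pack.x86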