Commit b528acc1
Authored Feb 08, 2020 by Tianqi Chen; committed by GitHub on Feb 08, 2020
[LINT][PY] Fixes for pylint==2.4.4 (#4849)

Parent: b46c2548
Showing 96 changed files with 130 additions and 171 deletions (+130 -171)
Makefile  +1 -4
python/tvm/_ffi/base.py  +2 -2
python/tvm/autotvm/database.py  +1 -0
python/tvm/autotvm/feature.py  +2 -1
python/tvm/autotvm/graph_tuner/base_graph_tuner.py  +0 -1
python/tvm/autotvm/graph_tuner/utils/traverse_graph.py  +1 -0
python/tvm/autotvm/measure/measure.py  +1 -0
python/tvm/autotvm/measure/measure_methods.py  +4 -2
python/tvm/autotvm/task/dispatcher.py  +2 -0
python/tvm/autotvm/task/relay_integration.py  +3 -2
python/tvm/autotvm/task/topi_integration.py  +2 -0
python/tvm/autotvm/tophub.py  +1 -0
python/tvm/autotvm/tuner/callback.py  +1 -0
python/tvm/autotvm/tuner/sa_model_optimizer.py  +1 -1
python/tvm/autotvm/tuner/xgboost_cost_model.py  +1 -0
python/tvm/build_module.py  +1 -1
python/tvm/contrib/cc.py  +0 -1
python/tvm/contrib/dlpack.py  +1 -0
python/tvm/contrib/download.py  +1 -7
python/tvm/contrib/mxnet.py  +1 -1
python/tvm/contrib/util.py  +0 -33
python/tvm/hybrid/__init__.py  +2 -1
python/tvm/hybrid/calls.py  +1 -0
python/tvm/hybrid/parser.py  +1 -1
python/tvm/hybrid/util.py  +1 -0
python/tvm/relay/_parser.py  +3 -3
python/tvm/relay/analysis.py  +1 -1
python/tvm/relay/backend/_backend.py  +2 -1
python/tvm/relay/backend/compile_engine.py  +1 -1
python/tvm/relay/build_module.py  +1 -2
python/tvm/relay/debug.py  +1 -1
python/tvm/relay/expr.py  +0 -2
python/tvm/relay/frontend/caffe2.py  +1 -0
python/tvm/relay/frontend/common.py  +3 -2
python/tvm/relay/frontend/coreml.py  +2 -5
python/tvm/relay/frontend/keras.py  +2 -3
python/tvm/relay/frontend/nnvm_common.py  +1 -3
python/tvm/relay/frontend/onnx.py  +1 -0
python/tvm/relay/frontend/tensorflow.py  +1 -0
python/tvm/relay/frontend/tensorflow_parser.py  +2 -2
python/tvm/relay/frontend/tflite.py  +2 -2
python/tvm/relay/op/__init__.py  +1 -0
python/tvm/relay/op/_transform.py  +4 -5
python/tvm/relay/op/nn/_nn.py  +3 -2
python/tvm/relay/op/transform.py  +2 -0
python/tvm/relay/parser.py  +1 -0
python/tvm/relay/qnn/op/op.py  +1 -1
python/tvm/relay/quantize/_partition.py  +4 -4
python/tvm/relay/quantize/quantize.py  +1 -1
python/tvm/relay/scope_builder.py  +0 -1
python/tvm/relay/testing/darknet.py  +1 -1
python/tvm/relay/testing/resnet.py  +2 -1
python/tvm/relay/testing/tf.py  +2 -2
python/tvm/relay/testing/yolo_detection.py  +2 -1
python/tvm/rpc/proxy.py  +1 -2
python/tvm/rpc/server.py  +3 -3
python/tvm/rpc/tornado_util.py  +1 -1
python/tvm/rpc/tracker.py  +0 -1
python/tvm/runtime/module.py  +1 -1
python/tvm/runtime/ndarray.py  +1 -1
python/tvm/tensor.py  +0 -1
python/tvm/tensor_intrin.py  +1 -1
topi/python/topi/arm_cpu/conv2d.py  +2 -3
topi/python/topi/bifrost/conv2d.py  +0 -1
topi/python/topi/cuda/conv2d_winograd.py  +1 -1
topi/python/topi/cuda/nms.py  +2 -1
topi/python/topi/cuda/rcnn/proposal.py  +1 -1
topi/python/topi/cuda/softmax.py  +2 -2
topi/python/topi/cuda/sort.py  +1 -0
topi/python/topi/cuda/vision.py  +1 -1
topi/python/topi/hls/nn.py  +1 -1
topi/python/topi/intel_graphics/conv2d.py  +2 -2
topi/python/topi/nn/bitserial_util.py  +1 -3
topi/python/topi/nn/conv2d.py  +3 -2
topi/python/topi/nn/fifo_buffer.py  +1 -1
topi/python/topi/opengl/softmax.py  +1 -1
topi/python/topi/testing/one_hot.py  +1 -1
topi/python/topi/transform.py  +3 -6
topi/python/topi/util.py  +1 -2
topi/python/topi/vision/rcnn/proposal.py  +1 -1
topi/python/topi/x86/conv2d.py  +2 -2
topi/python/topi/x86/conv2d_alter_op.py  +1 -1
topi/python/topi/x86/conv2d_int8.py  +2 -2
topi/python/topi/x86/nn.py  +1 -1
topi/python/topi/x86/util.py  +1 -1
vta/python/vta/bitstream.py  +0 -1
vta/python/vta/environment.py  +2 -2
vta/python/vta/exec/rpc_server.py  +1 -0
vta/python/vta/ir_pass.py  +1 -3
vta/python/vta/program_bitstream.py  +1 -0
vta/python/vta/top/graphpack.py  +5 -6
vta/python/vta/top/op.py  +0 -2
vta/scripts/tune_conv2d.py  +0 -1
vta/scripts/tune_conv2d_transpose.py  +0 -1
vta/scripts/tune_dense.py  +0 -1
vta/scripts/tune_group_conv2d.py  +0 -1
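Almost every file above changes in one of a handful of ways, driven by checks that pylint 2.4.4 newly enables or tightens: import-outside-toplevel for function-local imports, no-else-return/no-else-raise for redundant branches after a return or raise, unnecessary-comprehension for comprehensions that merely copy an iterable, consider-using-in for chained equality tests, singleton-comparison for `!= None`, and unnecessary-pass for `pass` after a docstring. The dominant pattern is suppressing import-outside-toplevel where a deferred import is intentional. A minimal sketch of that pattern (illustrative only, not code from the commit; `json` stands in for a heavy optional dependency):

```python
# Sketch: TVM often imports heavy or optional dependencies inside the
# function that needs them. pylint 2.4.4 flags this, so the commit
# silences the check at each call site instead of hoisting the import.
def describe(obj):
    """Serialize a short description of `obj` on demand."""
    # pylint: disable=import-outside-toplevel
    import json  # stands in for a heavy optional dependency such as torch
    return json.dumps({"repr": repr(obj)})

print(describe(range(3)))
```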
Makefile
@@ -94,10 +94,7 @@ javadoc:
 # Cython build
 cython:
-	cd python; python setup.py build_ext --inplace
-
-cython2:
-	cd python; python2 setup.py build_ext --inplace
+	cd python; python3 setup.py build_ext --inplace
 
 cython3:
 	cd python; python3 setup.py build_ext --inplace

python/tvm/_ffi/base.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # coding: utf-8
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name, import-outside-toplevel
 """Base library for TVM FFI."""
 import sys
 import os
@@ -204,7 +204,7 @@ def _find_error_type(line):
         if _valid_error_name(err_name):
             return err_name
         return None
     else:
         end_pos = line.find(":")
         if end_pos == -1:
             return None

python/tvm/autotvm/database.py
@@ -104,6 +104,7 @@ class RedisDatabase(Database):
     MAGIC_SPLIT = "$"

     def __init__(self, db_index=REDIS_PROD):
+        # pylint: disable=import-outside-toplevel
         import redis

         if db_index == RedisDatabase.REDIS_TEST:
python/tvm/autotvm/feature.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name,
 """Extract feature of iter vars

 There are two types of feature
@@ -148,6 +148,7 @@ def get_flatten_name(fea):
     }

     if isinstance(fea, str):
+        # pylint: disable=import-outside-toplevel
         from .record import decode
         # flatten line to feature
         line = fea

python/tvm/autotvm/graph_tuner/base_graph_tuner.py
@@ -539,4 +539,3 @@ class BaseGraphTuner(object):
     @abstractmethod
     def run(self, **kwargs):
         """Run graph tuning."""
-        pass

python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
@@ -65,6 +65,7 @@ def expr2graph(expr, target_ops, node_dict, node_list):
                              % op_name)
         topi_funcs += OP2COMPUTE[op_name]
     env.reset(topi_funcs)
+    # pylint: disable=not-context-manager
     with env:
         _expr2graph_impl(expr, target_ops, node_dict, node_list)
         task_pos = 0

python/tvm/autotvm/measure/measure.py
@@ -208,6 +208,7 @@ def measure_option(builder, runner):
         Using `min_repeat_ms` can dynamically adjusts `number`, so it is recommended.
         The typical value for NVIDIA GPU is 150 ms.
     """
+    # pylint: disable=import-outside-toplevel
     from .measure_methods import LocalBuilder, LocalRunner

     if isinstance(builder, str):
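The disable comment added in measure_option above keeps the local import where it is: autotvm's measure and measure_methods modules refer to each other, so hoisting the import to module scope would create an import cycle. A self-contained sketch of the underlying idea (hypothetical, not TVM code; `pickle` is a placeholder):

```python
import time

def load_blob(path):
    """Deferring the import keeps module import cheap and sidesteps cycles."""
    # pylint: disable=import-outside-toplevel
    import pickle  # placeholder for a heavy or mutually-importing module
    with open(path, "rb") as handle:
        return pickle.load(handle)

start = time.perf_counter()
# Importing this module never pays for pickle; only calling load_blob() does.
print("module import stayed cheap: %.6fs elapsed" % (time.perf_counter() - start))
```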
python/tvm/autotvm/measure/measure_methods.py
@@ -324,11 +324,11 @@ class LocalRunner(RPCRunner):
         self.server = None

     def set_task(self, task):
-        self.task = task
+        # pylint: disable=import-outside-toplevel
         from ...rpc.tracker import Tracker
         from ...rpc.server import Server
+        self.task = task
         tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
         device_key = '$local$device$%d' % tracker.port
         server = Server('0.0.0.0', port=9000, port_end=10000,
@@ -362,6 +362,7 @@ def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_opti
     # if target is vta, we need to use vta build
     if hasattr(measure_input.target, 'device_name') and \
         measure_input.target.device_name == 'vta':
+        # pylint: disable=import-outside-toplevel
         import vta
         func = vta.build(s, args, target_host=task.target_host)
     else:
@@ -460,6 +461,7 @@ def run_through_rpc(measure_input, build_result,
         # Program the FPGA every single time when targeting VTA
         if hasattr(measure_input.target, 'device_name') and \
             measure_input.target.device_name == 'vta':
+            # pylint: disable=import-outside-toplevel
             from vta import program_fpga, reconfig_runtime
             program_fpga(remote, None)
             reconfig_runtime(remote)

python/tvm/autotvm/task/dispatcher.py
@@ -282,6 +282,7 @@ class ApplyHistoryBest(DispatchContext):
             Each row of this file is an encoded record pair.
             Otherwise, it is an iterator.
         """
+        # pylint: disable=import-outside-toplevel
         from pathlib import Path
         from ..record import load_from_file
@@ -454,6 +455,7 @@ class ApplyGraphBest(DispatchContext):
             Each row of this file is an encoded record pair.
             Otherwise, it is an iterator.
         """
+        # pylint: disable=import-outside-toplevel
         from ..record import load_from_file
         super(ApplyGraphBest, self).__init__()
python/tvm/autotvm/task/relay_integration.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=unused-variable,invalid-name
+# pylint: disable=unused-variable,invalid-name, not-context-manager
 """
 Decorator and utilities for the integration with TOPI and Relay
 99.9% copy-paste of implementation by @MerryMercy
@@ -37,7 +37,7 @@ def _lower(mod,
            params):
     """ Helper to lower VTA properly.
     """
+    # pylint: disable=import-outside-toplevel
     from tvm import relay
     from tvm.relay.backend import graph_runtime_codegen
@@ -114,6 +114,7 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None,
     task: Array of autotvm.task.Task
         collected tasks
     """
+    # pylint: disable=import-outside-toplevel
     import tvm.relay.op
     from tvm import relay
     import topi

python/tvm/autotvm/task/topi_integration.py
@@ -76,6 +76,7 @@ class TaskExtractEnv:
     registered = None

     def __init__(self, allow_duplicate=False):
+        # pylint: disable=import-outside-toplevel
         import topi

         # topi compute -> autotvm task name
@@ -168,6 +169,7 @@ class TaskExtractEnv:
     def _register_topi_task(self):
         """register tuning wrapper for topi function"""
+        # pylint: disable=import-outside-toplevel
         import topi

         # Avoid double registration for certain targets

python/tvm/autotvm/tophub.py
@@ -147,6 +147,7 @@ def check_backend(tophub_location, backend):
     if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
         return True

+    # pylint: disable=import-outside-toplevel
     if sys.version_info >= (3,):
         import urllib.request as urllib2
     else:

python/tvm/autotvm/tuner/callback.py
@@ -53,6 +53,7 @@ def log_to_file(file_out, protocol='json'):
             for inp, result in zip(inputs, results):
                 file_out.write(record.encode(inp, result, protocol) + "\n")

+    # pylint: disable=import-outside-toplevel
     from pathlib import Path
     if isinstance(file_out, Path):
         file_out = str(file_out)
python/tvm/autotvm/tuner/sa_model_optimizer.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=consider-using-enumerate, invalid-name
+# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index
 """
 Cost model optimizer based on simulated annealing
 """

python/tvm/autotvm/tuner/xgboost_cost_model.py
@@ -420,6 +420,7 @@ def _extract_curve_feature_log(arg):
 def custom_callback(stopping_rounds, metric, fevals, evals=(), log_file=None,
                     maximize=False, verbose_eval=True):
     """callback function for xgboost to support multiple custom evaluation functions"""
+    # pylint: disable=import-outside-toplevel
     from xgboost.core import EarlyStopException
     from xgboost.callback import _fmt_metric
     from xgboost.training import aggcv

python/tvm/build_module.py
@@ -467,7 +467,7 @@ def _build_for_device(flist, target, target_host):
         func = ir_pass.InferFragment(func)
         warp_size = target.thread_warp_size
         func = ir_pass.LowerThreadAllreduce(func, warp_size)
-        fsplits = [s for s in ir_pass.SplitHostDevice(func)]
+        fsplits = list(ir_pass.SplitHostDevice(func))
         fhost.append(fsplits[0])
         for x in fsplits[1:]:
             fdevice.append(x)
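The `fsplits` rewrite in _build_for_device is pylint 2.4's unnecessary-comprehension check at work: a comprehension that only copies its iterable says nothing that list() does not. A tiny illustration (not from the commit):

```python
items = range(5)

copy_verbose = [x for x in items]  # pylint 2.4: unnecessary-comprehension
copy_direct = list(items)          # the form this commit switches to

assert copy_verbose == copy_direct == [0, 1, 2, 3, 4]
```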
python/tvm/contrib/cc.py
@@ -76,7 +76,6 @@ def get_target_by_dump_machine(compiler):
                 msg += py_str(out)
                 return None
             return py_str(out)
         else:
             return None

     return get_target_triple

python/tvm/contrib/dlpack.py
@@ -54,6 +54,7 @@ def to_pytorch_func(tvm_func):
     wrapped_func: Function
         Wrapped tvm function that operates on PyTorch tensors
     """
+    # pylint: disable=import-outside-toplevel
     import torch
     import torch.utils.dlpack
     return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)

python/tvm/contrib/download.py
@@ -15,9 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 """Helper utility for downloading"""
-from __future__ import print_function
-from __future__ import absolute_import as _abs
-
 import os
 import sys
 import time
@@ -48,10 +45,8 @@ def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=
     retries: int, optional
         Number of time to retry download, default at 3.
     """
     if sys.version_info >= (3,):
+        # pylint: disable=import-outside-toplevel
         import urllib.request as urllib2
     else:
         import urllib2
     if os.path.isfile(path) and not overwrite:
         if size_compare:
@@ -114,7 +109,6 @@ def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=
             if os.path.exists(tempfile):
                 os.remove(tempfile)
                 raise err
             else:
                 print("download failed due to {}, retrying, {} attempt{} left"
                       .format(repr(err), retries, 's' if retries > 1 else ''))
python/tvm/contrib/mxnet.py
@@ -49,7 +49,7 @@ def to_mxnet_func(func, const_loc=None):
         Run asynchrously in MXNet's async engine.
     """
     # only import mxnet when wrap get called.
-    # pylint: disable=import-self
+    # pylint: disable=import-self, import-outside-toplevel
     import mxnet
     if isinstance(func, Module):
         func = func.entry_func

python/tvm/contrib/util.py
@@ -15,7 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 """Common system utilities"""
-from __future__ import absolute_import as _abs
 import os
 import tempfile
 import shutil
@@ -167,35 +166,3 @@ def which(exec_name):
         if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
             return full_path
     return None
-
-
-def get_lower_ir(s):
-    """Get lower ir code of a schedule.
-    This is useful for debug, since you don't have to find all inputs/outputs
-    for a schedule in a fused subgraph.
-
-    Parameters
-    ----------
-    s: Schedule
-
-    Returns
-    -------
-    ir: str
-        The lower ir
-    """
-    from .. import tensor
-    from ..build_module import lower
-
-    outputs = s.outputs
-
-    inputs = []
-    def find_all(op):
-        if isinstance(op, tensor.PlaceholderOp):
-            inputs.append(op.output(0))
-        else:
-            for x in op.input_tensors:
-                find_all(x.op)
-
-    for out in outputs:
-        find_all(out)
-
-    return lower(s, inputs, simple_mode=True)
python/tvm/hybrid/__init__.py
@@ -50,7 +50,8 @@ def script(pyfunc):
     hybrid_func : function
         A decorated hybrid script function.
     """
-    def wrapped_func(func, *args, **kwargs): #pylint: disable=missing-docstring
+    # pylint: disable=import-outside-toplevel, missing-docstring
+    def wrapped_func(func, *args, **kwargs):
         from .util import _is_tvm_arg_types
         if _is_tvm_arg_types(args):
             src = _pruned_source(func)

python/tvm/hybrid/calls.py
@@ -69,6 +69,7 @@ def bind(func_id, args):
 def _math_intrin(func_id, args):
+    # pylint: disable=import-outside-toplevel
     from .. import intrin
     return getattr(intrin, func_id)(*args)

python/tvm/hybrid/parser.py
@@ -198,7 +198,7 @@ class HybridParser(ast.NodeVisitor):
             ty, entry = self.symbols[key] #pylint: disable=invalid-name
             if ty in [Symbol.Input, Symbol.OutputBuffer]:
                 continue
-            elif 'Buffer' in ty.name:
+            if 'Buffer' in ty.name:
                 _buf = entry
                 _scope = 'global' if ty is Symbol.BufferVar else ty.name[:-6].lower()
                 to_pop.append(key)

python/tvm/hybrid/util.py
@@ -70,6 +70,7 @@ def _pruned_source(func):
 def replace_io(body, rmap):
     """Replacing tensors usage according to the dict given"""
+    # pylint: disable=import-outside-toplevel
     from .. import ir_pass

     def replace(op):
python/tvm/relay/_parser.py
@@ -78,7 +78,7 @@ class ParseError(Exception):
 class OpWrapper:
     """Overload the __call__ for op."""
-    pass
+
 class ExprOp(OpWrapper):
     """Call an expr. The default, but does not handle attrs well."""
@@ -273,7 +273,7 @@ class ParseTreeToRelayIR(RelayVisitor):
     def _type_expr_name(self, e):
         if isinstance(e, adt.Constructor):
             return "`{0}` ADT constructor".format(e.belong_to.name_hint)
-        elif isinstance(e, ty.GlobalTypeVar):
+        if isinstance(e, ty.GlobalTypeVar):
             if e.kind == ty.Kind.AdtHandle:
                 return "ADT definition"
         return "function definition"
@@ -623,7 +623,7 @@ class ParseTreeToRelayIR(RelayVisitor):
     def call(self, func, args, attrs, type_args):
         if isinstance(func, OpWrapper):
             return func(args, attrs, type_args)
-        elif isinstance(func, adt.Constructor):
+        if isinstance(func, adt.Constructor):
             return func(*args)
         return expr.Call(func, args, attrs, type_args)

python/tvm/relay/analysis.py
@@ -384,7 +384,7 @@ def detect_feature(a, b=None):
     """
     if isinstance(a, Module):
         a, b = b, a
-    return set([Feature(int(x)) for x in _analysis.detect_feature(a, b)])
+    return {Feature(int(x)) for x in _analysis.detect_feature(a, b)}

 def structural_hash(value):
python/tvm/relay/backend/_backend.py
@@ -44,8 +44,9 @@ def lower(sch, inputs, func_name, source_func):
     lowered_funcs : List[tvm.LoweredFunc]
         The result of lowering.
     """
+    # pylint: disable=broad-except, import-outside-toplevel
     import traceback
-    # pylint: disable=broad-except
     try:
         f = _build.lower(sch, inputs, name=func_name)
         # logging.debug("lower function %s", func_name)

python/tvm/relay/backend/compile_engine.py
@@ -86,7 +86,7 @@ class CompileEngine(Object):
         cached_func: CachedFunc
             The result of lowering.
         """
-        # pylint: disable=broad-except
+        # pylint: disable=broad-except, import-outside-toplevel
         try:
             key = _get_cache_key(source_func, target)
             return _backend._CompileEngineLower(self, key)

python/tvm/relay/build_module.py
@@ -407,7 +407,6 @@ def create_executor(kind="debug",
         return _interpreter.Interpreter(mod, ctx, target)
     if kind == "graph":
         return GraphExecutor(mod, ctx, target)
-    elif kind == "vm":
+    if kind == "vm":
         return VMExecutor(mod, ctx, target)
     else:
         raise RuntimeError("unknown execution strategy: {0}".format(kind))
python/tvm/relay/debug.py
@@ -20,7 +20,7 @@ from __future__ import absolute_import
 from ..api import register_func

-# pylint: disable=unused-argument
+# pylint: disable=unused-argument, import-outside-toplevel
 def _debugger_init(expr, stack):
     import pdb
     pdb.set_trace()

python/tvm/relay/expr.py
@@ -125,7 +125,6 @@ class Expr(RelayNode):
     def __rsub__(self, other):
         if isinstance(other, _Number):
             raise TypeError('convert "%s" with `const` first' % str(other))
         else:
             raise TypeError("type %s not supported" % str(type(other)))

     def __mul__(self, other):
@@ -150,7 +149,6 @@ class Expr(RelayNode):
     def __rdiv__(self, other):
         if isinstance(other, _Number):
             raise TypeError('convert "%s" with `const` first' % str(other))
         else:
             raise TypeError("type %s not supported" % str(type(other)))

     def __truediv__(self, other):
python/tvm/relay/frontend/caffe2.py
@@ -401,6 +401,7 @@ class Caffe2NetDef(object):
     params : dict
         A dict of name: tvm.nd.array pairs, used as pretrained weights
     """
+    # pylint: disable=import-outside-toplevel
     from caffe2.python import workspace
     workspace.RunNetOnce(init_net)

python/tvm/relay/frontend/common.py
@@ -302,7 +302,7 @@ class ExprTable(object):
         self.exprs[name] = expr

     def has_expr(self, name):
-        return True if name in self.exprs else False
+        return name in self.exprs

     def set_padding(self, paddings):
         self.paddings = paddings
@@ -391,7 +391,7 @@ class AttrCvt(object):
             if k in self._excludes:
                 raise NotImplementedError('Attribute %s in operator %s is not' +
                                           ' supported.', k, op_name)
-            elif k in self._disables:
+            if k in self._disables:
                 logging.warning("Attribute %s is disabled in relay.sym.%s", k, op_name)
             elif k in self._ignores:
                 if k != 'tvm_custom':
@@ -485,6 +485,7 @@ def infer_value(input_val, params):
     portion of the relay graph. This is often needed for functions that
     whose output shape depends on the value of a tensor.
     """
+    # pylint: disable=import-outside-toplevel
     from tvm.contrib import graph_runtime
     # Check that all free variables have associated parameters.
     assert all(var.name_hint in params.keys() for var in analysis.free_vars(
python/tvm/relay/frontend/coreml.py
@@ -14,7 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
+# pylint: disable=invalid-name, import-self, unused-argument, unused-variable
+# pylint: disable=inconsistent-return-statements, import-outside-toplevel
 """CoreML frontend."""
 from __future__ import absolute_import as _abs
 import math
@@ -111,7 +112,6 @@ def _BatchnormLayerParams(op, inexpr, etab):
     if op.instanceNormalization:
         raise tvm.error.OpNotImplemented(
             'Operator "instance normalization" is not supported in frontend CoreML.')
     else:
         params = {'gamma':etab.new_const(list(op.gamma.floatValue)),
                   'beta':etab.new_const(list(op.beta.floatValue)),
                   'moving_mean':etab.new_const(list(op.mean.floatValue)),
@@ -197,7 +197,6 @@ def _PoolingLayerParams(op, inexpr, etab):
         raise tvm.error.OpNotImplemented(
             'Only Max and Average Pooling are supported in frontend CoreML.')
     else:
         params = {'pool_size':list(op.kernelSize),
                   'strides':list(op.stride)}
@@ -297,8 +296,6 @@ def _PaddingLayerParams(op, inexpr, etab):
                        (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
     else:
         raise tvm.error.OpNotImplemented(
             'Non-constant padding is not supported in frontend CoreML.')
python/tvm/relay/frontend/keras.py
@@ -14,9 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, import-self
+# pylint: disable=invalid-name, import-self, import-outside-toplevel
 """Keras frontend."""
 from __future__ import absolute_import as _abs
 import sys
 import numpy as np
 import tvm
@@ -133,7 +132,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab):
         # f(x) = max_value, for x >= max_value
         # f(x) = x, for threshold <= x < max_value
         return _op.clip(inexpr, a_min=0., a_max=float(keras_layer.max_value))
-    elif keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
+    if keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
         # f(x) = negative_slope * (inexpr - threshold)
         negative_slope = _expr.const(keras_layer.negative_slope, dtype='float32')
         return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))

python/tvm/relay/frontend/nnvm_common.py
@@ -16,15 +16,13 @@
 # under the License.
 # pylint: disable=invalid-name, import-self, len-as-condition
 """Utility functions common to NNVM and MxNet conversion."""
-from __future__ import absolute_import as _abs
+import warnings

 from .. import expr as _expr
 from .. import op as _op
 from .common import get_relay_op
 from .common import infer_type as _infer_type

 def _warn_not_used(attr, op='nnvm'):
-    import warnings
     err = "{} is ignored in {}.".format(attr, op)
     warnings.warn(err)
python/tvm/relay/frontend/onnx.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
+# pylint: disable=import-outside-toplevel
 """ONNX: Open Neural Network Exchange frontend for Relay."""
 from __future__ import absolute_import as _abs

python/tvm/relay/frontend/tensorflow.py
@@ -16,6 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
+# pylint: disable=import-outside-toplevel
 """TF: Tensorflow frontend."""
 from __future__ import absolute_import as _abs
 from __future__ import print_function

python/tvm/relay/frontend/tensorflow_parser.py
@@ -15,8 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """TF: Tensorflow parser"""
-from __future__ import absolute_import as _abs
-from __future__ import print_function
+# pylint: disable=import-outside-toplevel, assignment-from-no-return
 import os
 from tvm.contrib import util

python/tvm/relay/frontend/tflite.py
@@ -14,7 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-argument, too-many-lines
+# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
 """Tensorflow lite frontend."""
 from __future__ import absolute_import as _abs
 import math
@@ -1458,7 +1459,6 @@ class OperatorConverter(object):
             raise tvm.error.OpNotImplemented(
                 'Operator {} with fused activation is not supported yet.'
                 .format('qnn.op.pool2d'))
         else:
             out = self.convert_fused_activation_function(out, fused_activation_fn)
         return out
python/tvm/relay/op/__init__.py
@@ -46,6 +46,7 @@ from ..base import register_relay_node
 def _register_op_make():
+    # pylint: disable=import-outside-toplevel
     from . import _make
     from .. import expr
     expr._op_make = _make

python/tvm/relay/op/_transform.py
@@ -200,7 +200,6 @@ def take_shape_func(attrs, inputs, out_ndims):
     """
     if attrs.axis is None:
         return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
     else:
         axis = get_const_int(attrs.axis)
         data_ndim = int(inputs[0].shape[0])
         if axis < 0:
@@ -275,13 +274,13 @@ def argwhere_shape_func(attrs, inputs, out_ndims):
     """
     if len(inputs[0].shape) == 1:
         return [_argwhere_shape_func_1d(inputs[0])]
-    elif len(inputs[0].shape) == 2:
+    if len(inputs[0].shape) == 2:
         return [_argwhere_shape_func_2d(inputs[0])]
-    elif len(inputs[0].shape) == 3:
+    if len(inputs[0].shape) == 3:
         return [_argwhere_shape_func_3d(inputs[0])]
-    elif len(inputs[0].shape) == 4:
+    if len(inputs[0].shape) == 4:
         return [_argwhere_shape_func_4d(inputs[0])]
-    elif len(inputs[0].shape) == 5:
+    if len(inputs[0].shape) == 5:
         return [_argwhere_shape_func_5d(inputs[0])]
     return ValueError("Does not support rank higher than 5 in argwhere")
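The argwhere_shape_func chain above shows the commit's most common code (as opposed to comment) change: once a branch ends in return, the following elif can become a plain if with identical behavior, which is what pylint's no-else-return family pushes toward. A minimal sketch (not from the commit):

```python
def rank_label(ndim):
    """Early returns make each elif equivalent to a plain if."""
    if ndim == 1:
        return "vector"
    if ndim == 2:  # was `elif ndim == 2:` -- same behavior after a return
        return "matrix"
    return "tensor"

assert [rank_label(n) for n in (1, 2, 3)] == ["vector", "matrix", "tensor"]
```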
python/tvm/relay/op/nn/_nn.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments
+# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
 """Backend compiler related feature registration"""
 from __future__ import absolute_import
@@ -265,6 +265,7 @@ def schedule_conv2d(attrs, outs, target):
 @reg.register_alter_op_layout("nn.conv2d")
 def alter_op_layout_conv2d(attrs, inputs, tinfos):
     """Alternate the layout of conv2d"""
+    # pylint: disable=import-outside-toplevel
     from ... import op
     return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
@@ -309,7 +310,7 @@ def convert_conv2d(attrs, inputs, tinfos, desired_layout):
     result : tvm.relay.Expr
         The transformed expr
     """
+    # pylint: disable=import-outside-toplevel
     from tvm import relay
     data_layout = attrs['data_layout']
     kernel_layout = attrs['kernel_layout']

python/tvm/relay/op/transform.py
@@ -14,6 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
+# pylint: disable=import-outside-toplevel
 """Transform operators."""

 from . import _make

python/tvm/relay/parser.py
@@ -22,6 +22,7 @@ from .. import register_func
 @register_func("relay.fromtext")
 def fromtext(data, source_name=None):
     """Parse a Relay program."""
+    # pylint: disable=import-outside-toplevel
     from tvm.relay import _parser
     x = _parser.fromtext(data + "\n", source_name)
     if x is None:
python/tvm/relay/qnn/op/op.py
@@ -16,7 +16,7 @@
 # under the License.
 #pylint: disable=unused-argument
 """The register functions for the QNN dialect."""
-from tvm.relay.op.op import register as register
+from tvm.relay.op.op import register

 def register_qnn_legalize(op_name, legal_op=None, level=10):
     """Register legal transformation function for a QNN op

python/tvm/relay/quantize/_partition.py
@@ -88,7 +88,7 @@ def add_partition_generic(ref_call, new_args, ctx):
         lhs = new_args[0].realize()
         rhs = new_args[1].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif not lhs_cond and rhs_cond:
+    if not lhs_cond and rhs_cond:
         # - introduced by residual connection in ResNet
         #     ...
         #     %13 = nn.conv2d(%12, %meta[relay.Constant])
@@ -104,7 +104,7 @@ def add_partition_generic(ref_call, new_args, ctx):
         #     ...
         rhs = new_args[1].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif lhs_cond and not rhs_cond:
+    if lhs_cond and not rhs_cond:
         if _analysis.check_constant(rhs):
             # - introduced by batch_norm: add(out, bias)
             return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
@@ -121,10 +121,10 @@ def add_partition_generic(ref_call, new_args, ctx):
         #     ...
         lhs = new_args[0].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif not lhs_cond and not rhs_cond:
+    if not lhs_cond and not rhs_cond:
         # trivial case
         return None
     else:
         raise ValueError
...
python/tvm/relay/quantize/quantize.py
View file @
b528acc1
...
...
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=unused-argument
#pylint: disable=unused-argument
, not-context-manager
"""Automatic quantization toolkit."""
from
__future__
import
absolute_import
from
.
import
_quantize
...
...
python/tvm/relay/scope_builder.py
View file @
b528acc1
...
...
@@ -41,7 +41,6 @@ class WithScope(object):
def
__exit__
(
self
,
ptype
,
value
,
trace
):
if
value
:
raise
value
else
:
self
.
_exit_cb
()
def
_make_lets
(
bindings
,
ret_value
):
...
...
python/tvm/relay/testing/darknet.py
View file @
b528acc1
...
...
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
, unpacking-non-sequence
"""
Compile DarkNet Models
====================
...
...
python/tvm/relay/testing/resnet.py
@@ -85,7 +85,7 @@ def residual_unit(data,
             data=act1, channels=num_filter, kernel_size=(1, 1), strides=stride,
             name=name + '_sc')
         return relay.add(conv3, shortcut)
     else:
         bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + '_bn1')
         act1 = relay.nn.relu(data=bn1)
         conv1 = layers.conv2d(
@@ -96,6 +96,7 @@ def residual_unit(data,
         conv2 = layers.conv2d(
             data=act2, channels=num_filter, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), name=name + '_conv2')
         if dim_match:
             shortcut = data
         else:

python/tvm/relay/testing/tf.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, import-outside-toplevel
 """
 Tensorflow Model Helpers
 ========================
@@ -346,7 +346,7 @@ def get_workload_ptb():
     sample_data_file = 'simple-examples.tgz'
     sample_url = sample_repo + sample_data_file
     ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'
+    # pylint: disable=import-outside-toplevel
     import tarfile
     file_path = download_testdata(sample_url, sample_data_file, module=['data', 'ptb_data'])
     dir_path = os.path.dirname(file_path)

python/tvm/relay/testing/yolo_detection.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init,
 """
 Yolo detection boxes helper functions
 ====================
@@ -224,6 +224,7 @@ def _draw_label(im, r, c, label, rgb):
                 _set_pixel(im, i + c, j + r, k, val)
                 #rgb[k] * val)

 def _get_label(font_path, labelstr, rgb):
+    # pylint: disable=import-outside-toplevel
     from PIL import Image
     from PIL import ImageDraw
     from PIL import ImageFont
python/tvm/rpc/proxy.py
@@ -508,7 +508,6 @@ class Proxy(object):
             except socket.error as sock_err:
                 if sock_err.errno in [98, 48]:
                     continue
                 else:
                     raise sock_err
         if not self.port:
             raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
@@ -569,7 +568,7 @@ def websocket_proxy_server(url, key=""):
         magic = struct.unpack('<i', msg[:4])[0]
         if magic == base.RPC_CODE_DUPLICATE:
             raise RuntimeError("key: %s has already been used in proxy" % key)
-        elif magic == base.RPC_CODE_MISMATCH:
+        if magic == base.RPC_CODE_MISMATCH:
             logging.info("RPCProxy do not have matching client key %s", key)
         elif magic != base.RPC_CODE_SUCCESS:
             raise RuntimeError("%s is not RPC Proxy" % url)

python/tvm/rpc/server.py
@@ -161,7 +161,6 @@ def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
             conn.close()
             logger.warning("mismatch key from %s", addr)
             continue
         else:
             conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
             conn.sendall(struct.pack("<i", len(server_key)))
             conn.sendall(server_key.encode("utf-8"))
@@ -208,6 +207,7 @@ def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
         server_proc.join(opts.get("timeout", None))
         if server_proc.is_alive():
             logger.info("Timeout in RPC session, kill..")
+            # pylint: disable=import-outside-toplevel
             import psutil
             parent = psutil.Process(server_proc.pid)
             # terminate worker childs
@@ -233,7 +233,8 @@ def _connect_proxy_loop(addr, key, load_library):
         magic = struct.unpack("<i", base.recvall(sock, 4))[0]
         if magic == base.RPC_CODE_DUPLICATE:
             raise RuntimeError("key: %s has already been used in proxy" % key)
-        elif magic == base.RPC_CODE_MISMATCH:
+        if magic == base.RPC_CODE_MISMATCH:
             logger.warning("RPCProxy do not have matching client key %s", key)
         elif magic != base.RPC_CODE_SUCCESS:
             raise RuntimeError("%s is not RPC Proxy" % str(addr))
@@ -380,7 +381,6 @@ class Server(object):
                 except socket.error as sock_err:
                     if sock_err.errno in [98, 48]:
                         continue
                     else:
                         raise sock_err
             if not self.port:
                 raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
python/tvm/rpc/tornado_util.py
@@ -92,8 +92,8 @@ class TCPHandler(object):
             except socket.error as err:
                 if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                     break
                 else:
                     self.on_error(err)
         if self._pending_write:
             self._ioloop.update_handler(
                 self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE)

python/tvm/rpc/tracker.py
@@ -393,7 +393,6 @@ class Tracker(object):
             except socket.error as sock_err:
                 if sock_err.errno in [98, 48]:
                     continue
                 else:
                     raise sock_err
         if not self.port:
             raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
python/tvm/runtime/module.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-import
+# pylint: disable=invalid-name, unused-import, import-outside-toplevel
 """Runtime Module namespace."""
 import ctypes
 import struct

python/tvm/runtime/ndarray.py
@@ -184,7 +184,7 @@ class NDArray(NDArrayBase):
     """
     if isinstance(target, NDArrayBase):
         return self._copyto(target)
-    elif isinstance(target, TVMContext):
+    if isinstance(target, TVMContext):
         res = empty(self.shape, self.dtype, target)
         return self._copyto(res)
     raise ValueError("Unsupported target type %s" % str(type(target)))

python/tvm/tensor.py
@@ -179,7 +179,6 @@ class BaseComputeOp(Operation):
 @tvm._ffi.register_object
 class ComputeOp(BaseComputeOp):
     """Scalar operation."""
-    pass

 @tvm._ffi.register_object
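The ComputeOp change is pylint 2.4's unnecessary-pass: a docstring is already a complete class body, so a trailing `pass` is dead weight. A sketch (not from the commit):

```python
class DocumentedOp:
    """A docstring alone is a valid class body; `pass` would be redundant."""

class EmptyOp:
    pass  # still required here: there is no docstring to act as the body

print(DocumentedOp.__doc__ is not None, EmptyOp is not None)
```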
python/tvm/tensor_intrin.py
@@ -112,7 +112,7 @@ def decl_tensor_intrin(op,
         raise TypeError("expect Operation")
     inputs = op.input_tensors
     binds = binds if binds else {}
-    tensors = [x for x in inputs]
+    tensors = list(inputs)
     for i in range(op.num_outputs):
         tensors.append(op.output(i))

topi/python/topi/arm_cpu/conv2d.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument
+# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
 """Conv2D schedule for ARM CPU"""
 from __future__ import absolute_import as _abs
@@ -528,8 +528,7 @@ def _alter_conv2d_layout_arm(attrs, inputs, tinfos, F):
     Unlike other TOPI functions, this function operates on both graph level and operator level,
     so we have to pass 'F' to make it support our two versions of graph IR, Relay.
     """
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
     new_attrs = {k: attrs[k] for k in attrs.keys()}
     if F.__name__ == 'tvm.relay.op':

topi/python/topi/bifrost/conv2d.py
@@ -74,7 +74,6 @@ def conv2d_bifrost(cfg, data, kernel, strides, padding, dilation, layout, out_dt
     if layout == 'NCHW':
         return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding,
                                         dilation, out_dtype, num_tile=3)
     else:
         raise ValueError("Unsupported layout {}".format(layout))

topi/python/topi/cuda/conv2d_winograd.py
@@ -328,7 +328,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, F):
     if 'cudnn' in tvm.target.current_target().libs or 'miopen' in tvm.target.current_target().libs:
         return None
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
     new_attrs = {k: attrs[k] for k in attrs.keys()}

topi/python/topi/cuda/nms.py
@@ -14,7 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison, unused-argument
+# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison
+# pylint: disable=bad-continuation, unused-argument
 """Non-maximum suppression operator"""
 import math
 import tvm

topi/python/topi/cuda/rcnn/proposal.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, singleton-comparison
+# pylint: disable=invalid-name, singleton-comparison, bad-continuation
 """Proposal operator"""
 import math
 import tvm
topi/python/topi/cuda/softmax.py
@@ -54,7 +54,7 @@ def schedule_softmax(outs):
     if len(softmax.shape) > 2:
         ops = [max_elem.op, expsum.op, softmax.op]
-        if exp != None:
+        if exp is not None:
             ops.append(exp.op)

         for op in ops:
@@ -64,7 +64,7 @@ def schedule_softmax(outs):
         block_x = tvm.thread_axis("blockIdx.x")
         thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
-        if exp != None:
+        if exp is not None:
             s[exp].bind(exp.op.axis[0], block_x)
         s[max_elem].bind(max_elem.op.axis[0], block_x)
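`exp != None` relies on `__ne__`, which arbitrary objects may overload to return something other than a plain bool; `exp is not None` is an identity test and cannot be intercepted, which is why pylint's singleton-comparison check (and PEP 8) insists on it. A small demonstration of the failure mode (illustrative only):

```python
class Hijacked:
    """An object whose __ne__ returns a falsy non-bool."""
    def __ne__(self, other):
        return []  # falsy, so `obj != None` evaluates to False

obj = Hijacked()
print(bool(obj != None))  # False -- the equality operator was hijacked
print(obj is not None)    # True -- identity comparison is not overloadable
```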
topi/python/topi/cuda/sort.py
@@ -42,6 +42,7 @@ def _schedule_sort(outs):
     outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
     s = tvm.create_schedule([x.op for x in outs])
     scheduled_ops = []
+    # pylint: disable=import-outside-toplevel
     from .injective import schedule_injective_from_existing
     def traverse(op):
         if tag.is_injective(op.tag):

topi/python/topi/cuda/vision.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-member
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-member, import-outside-toplevel
 """Schedule for vision operators"""
 from __future__ import absolute_import as _abs
 import tvm

topi/python/topi/hls/nn.py
@@ -275,7 +275,7 @@ def schedule_softmax(outs):
         raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
                          Got {0}'.format(op_tag))

-    if exp != None:
+    if exp is not None:
         s[exp].compute_at(s[softmax], s[softmax].op.axis[1])

     s[expsum].compute_at(s[softmax], s[softmax].op.axis[1])
topi/python/topi/intel_graphics/conv2d.py
@@ -38,7 +38,7 @@ from ..util import simplify, get_const_tuple
 def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False):
     if is_depthwise:
         raise RuntimeError("Depthwise not supported for intel graphics.")
     else:
         batch_size, in_channel, height, width = get_const_tuple(data.shape)
         out_channel, _, hkernel, _ = get_const_tuple(kernel.shape)
         HSTR, _ = strides
@@ -189,7 +189,7 @@ def __topi_nn_conv2d_NCHWc(*args, **kwargs):
 @conv2d_alter_layout.register(["intel_graphics"])
 def _alter_conv2d_layout(attrs, inputs, tinfo, F):
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
     new_attrs = {k : attrs[k] for k in attrs.keys()}
     if F.__name__ == 'tvm.relay.op':

topi/python/topi/nn/bitserial_util.py
@@ -60,7 +60,7 @@ def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"):
     for i in range(n + 1):
         if i == bit_axis:
             continue
-        elif i == pack_axis:
+        if i == pack_axis:
             idx[j] = indices[i] * data_width + k
         else:
             idx[j] = indices[i]
@@ -88,4 +88,3 @@ def binary_op_multiplier(pack_dtype):
     pack_dtype: string
         pack type for the operator (must be a uint)"""
     return int(pack_dtype[4:])
\ No newline at end of file
topi/python/topi/nn/conv2d.py
@@ -66,9 +66,9 @@ def conv2d(input, filter, strides, padding, dilation, layout='NCHW', out_dtype=N
     # default declaration
     if layout == 'NCHW':
         return conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
-    elif layout == 'HWCN':
+    if layout == 'HWCN':
         return conv2d_hwcn(input, filter, strides, padding, dilation, out_dtype)
-    elif layout == 'NHWC':
+    if layout == 'NHWC':
         return conv2d_nhwc(input, filter, strides, padding, dilation, out_dtype)
     raise ValueError("not support this layout {} yet".format(layout))
@@ -764,6 +764,7 @@ def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_d
     output : tvm.Tensor
         4-D with shape [alpha, alpha, CO, CI]
     """
+    # pylint: disable=import-outside-toplevel
     from tvm.contrib import nnpack
     return nnpack.convolution_inference_weight_transform(
         kernel, algorithm=convolution_algorithm, dtype=out_dtype)

topi/python/topi/nn/fifo_buffer.py
@@ -76,7 +76,7 @@ def fifo_buffer(data, buffer, axis):
                                buffer[i + data_size],
                                data[i - buflen + data_size]),
                            name='new_buffer')
-    elif len(buffer.shape) == 2:
+    if len(buffer.shape) == 2:
         if axis == 0:
             return tvm.compute(buffer.shape,
                                lambda i, j:

topi/python/topi/opengl/softmax.py
@@ -51,7 +51,7 @@ def schedule_softmax(outs):
         raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
                          Got {0}'.format(op_tag))

-    if exp != None:
+    if exp is not None:
         s[exp].opengl()

     s[max_elem].opengl()

topi/python/topi/testing/one_hot.py
@@ -62,7 +62,7 @@ def one_hot(indices, on_value, off_value, depth, axis, dtype):
             indices_index += 1
     out = np.empty(oshape)
-    output_indices = [index for index in np.ndindex(out.shape)]
+    output_indices = list(np.ndindex(out.shape))
     for output_index in output_indices:
         indices_indices = []
         for i, out_idx in enumerate(output_index):
topi/python/topi/transform.py
@@ -238,13 +238,10 @@ def strided_set(a, v, begin, end, strides=None):
         from_val = []
         index_tuple = []
         for i in range(n):
-            from_val.append(
-                within_index(begin[i], end[i], strides[i], indices[i]))
+            from_val.append(within_index(begin[i], end[i], strides[i], indices[i]))
             index_tuple.append(make_idx(begin[i], end[i], strides[i], a.shape[i], indices[i]))
-        return tvm.if_then_else(tvm.all(*from_val),
-                                v(*index_tuple), a(*indices))
+        return tvm.if_then_else(tvm.all(*from_val), v(*index_tuple), a(*indices))

     return tvm.compute(a.shape, _select, name="strided_set")
@@ -568,7 +565,7 @@ def sequence_mask(data, valid_length, mask_value=0, axis=0):
     assert len(data.shape) >= 2,\
         "only support data.ndim >= 2, received data.shape = {}".format(data.shape)
-    assert axis == 0 or axis == 1, "only support axis = 0, 1, received axis = {}".format(axis)
+    assert axis in (0, 1), "only support axis = 0, 1, received axis = {}".format(axis)
     return cpp.sequence_mask(data, valid_length, mask_value, axis)
topi/python/topi/util.py
@@ -25,7 +25,6 @@ from . import tag, cpp
 class InvalidShapeError(ValueError):
     """Invalid shape for a topi function. i.e. call winograd template for non-3x3 kernel)"""
-    pass

 def nchw_pack_layout(layout_info):
     """Check whether the layout type is NCHWinic"""
@@ -350,7 +349,7 @@ def get_shape(src_shape, src_layout, dst_layout):
     layout_mapping = bijective_layout(src_layout, dst_layout)
     dst_indices = layout_mapping.forward_index(
-        tvm.convert([i for i in range(len(src_layout))]))
+        tvm.convert(list(range(len(src_layout)))))
     return get_const_tuple(tuple([src_shape[i.value] for i in dst_indices]))

topi/python/topi/vision/rcnn/proposal.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, singleton-comparison
+# pylint: disable=invalid-name, singleton-comparison, bad-continuation
 """Proposal operator"""
 import math
 import tvm
topi/python/topi/x86/conv2d.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member,import-outside-toplevel
 """Conv2D schedule on x86"""
 import logging
@@ -126,7 +126,7 @@ def _declaration_conv(cfg, data, kernel, strides, padding, dilation, layout, out
     # # specialize for INT8 1X1 conv on X86
     # return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides,
     #                                                   padding, dilation, out_dtype)
-    elif layout == 'NHWC':
+    if layout == 'NHWC':
         return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
     raise ValueError("not support this layout {} yet".format(layout))

topi/python/topi/x86/conv2d_alter_op.py
@@ -63,7 +63,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfo, F):
     is_depthwise = groups == kshape[0] and kshape[1] == 1

     # Save the input exprs.
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
     # Set the new attrs
     new_attrs = {k : attrs[k] for k in attrs.keys()}

topi/python/topi/x86/conv2d_int8.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member, import-outside-toplevel
 """Conv2D int8 schedule on x86"""
 import re
@@ -70,7 +70,7 @@ def _is_int8_hw_support(data_dtype, kernel_dtype):
     # 3) Check target
     mcpu = tvm.target.current_target().mcpu
     is_target_support = False
-    if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+    if mcpu in ('skylake-avx512', 'cascadelake'):
         is_target_support = True

     return is_dtype_support and is_llvm_support and is_target_support
topi/python/topi/x86/nn.py
@@ -63,7 +63,7 @@ def schedule_softmax(outs):
     s[max_elem].compute_at(s[softmax], fused_outer_axes)
     s[expsum].compute_at(s[softmax], fused_outer_axes)
-    if exp != None:
+    if exp is not None:
         s[exp].compute_at(s[softmax], fused_outer_axes)

     return s

topi/python/topi/x86/util.py
@@ -21,6 +21,6 @@ import tvm
 def get_fp32_len():
     mcpu = tvm.target.current_target().mcpu
     fp32_vec_len = 8
-    if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+    if mcpu in ('skylake-avx512', 'cascadelake'):
         fp32_vec_len = 16
     return fp32_vec_len
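get_fp32_len's rewrite is pylint 2.4's consider-using-in: `x == a or x == b` repeats the subject and reads noisier than a membership test over a tuple. A runnable paraphrase of the helper (simplified, not the exact TVM code):

```python
def get_fp32_len(mcpu):
    """AVX-512 targets get 16-wide fp32 vectors; everything else gets 8."""
    fp32_vec_len = 8
    if mcpu in ('skylake-avx512', 'cascadelake'):  # was: mcpu == ... or mcpu == ...
        fp32_vec_len = 16
    return fp32_vec_len

assert get_fp32_len('skylake-avx512') == 16
assert get_fp32_len('znver2') == 8
```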
vta/python/vta/bitstream.py
@@ -79,7 +79,6 @@ compilation guide to get Xilinx toolchains setup) and add it to your \
 $VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
 settings. You can see the list of available bitstreams under {}"
                 .format(url, BITSTREAM_URL))
         else:
             raise RuntimeError(
                 # This could happen when trying to access the URL behind a proxy
                 "Something went wrong when trying to access {}. Check your \

vta/python/vta/environment.py
@@ -231,9 +231,9 @@ class Environment(object):
         """The target host"""
         if self.TARGET in ["pynq", "de10nano"]:
             return "llvm -target=armv7-none-linux-gnueabihf"
-        elif self.TARGET == "ultra96":
+        if self.TARGET == "ultra96":
             return "llvm -target=aarch64-linux-gnu"
-        elif self.TARGET in ["sim", "tsim"]:
+        if self.TARGET in ["sim", "tsim"]:
             return "llvm"
         raise ValueError("Unknown target %s" % self.TARGET)

vta/python/vta/exec/rpc_server.py
@@ -66,6 +66,7 @@ def server_start():
     @tvm.register_func("tvm.contrib.vta.init", override=True)
     def program_fpga(file_name):
+        # pylint: disable=import-outside-toplevel
         from pynq import xlnk
         # Reset xilinx driver
         xlnk.Xlnk().xlnk_reset()

vta/python/vta/ir_pass.py
@@ -15,9 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Additional IR Pass for VTA"""
-# pylint: disable=len-as-condition
-from __future__ import absolute_import as _abs
-
+# pylint: disable=len-as-condition, no-else-return
 import tvm
 from topi import util

vta/python/vta/program_bitstream.py
@@ -43,6 +43,7 @@ def main():
     bitstream_program(args.target, args.bitstream)

 def pynq_bitstream_program(bitstream_path):
+    # pylint: disable=import-outside-toplevel
     from pynq import Bitstream
     bitstream = Bitstream(bitstream_path)
     bitstream.download()
vta/python/vta/top/graphpack.py
@@ -151,14 +151,12 @@ class ExprPack(ExprMutator):
                 assert not self.start_pack
                 self.start_pack = True
                 return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
-            elif call.op == self.bitpack_end:
+            if call.op == self.bitpack_end:
                 if self.start_pack:
                     self.start_pack = False
                     data = args[0]
                     data_shape = _get_shape(call.args[0])
                     return _unpack_batch_channel(data, data_shape)
-                else:
-                    pass
         if self.start_pack:
             # Operator cases
             if call.op == self.conv2d and odtype == 'int32':
@@ -188,7 +186,8 @@ class ExprPack(ExprMutator):
                     kernel_layout=kernel_layout,
                     out_dtype=call.attrs.out_dtype)
                 return conv2d
-            elif call.op == self.conv2d_transpose and odtype == 'int32':
+            if call.op == self.conv2d_transpose and odtype == 'int32':
                 self.number_of_conv2d += 1
                 assert 8 % self.weight_bits == 0
                 w_lanes = 8 // self.weight_bits
@@ -213,7 +212,7 @@ class ExprPack(ExprMutator):
                     output_padding=call.attrs.output_padding,
                     out_dtype=call.attrs.out_dtype)
                 return conv2d
-            elif call.op == self.add and \
+            if call.op == self.add and \
                     tuple(input_types[0].shape) == tuple(input_types[1].shape):
                 pass
             elif call.op == self.add and len(input_types[1].shape) == 3:
@@ -272,7 +271,7 @@ def get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, cou
                          _recursion(anf.body, start_found, stop_found, operator_current_idx),
                          anf.ret_type, anf.type_params, anf.attrs)
-    elif isinstance(anf, relay.expr.Let):
+    if isinstance(anf, relay.expr.Let):
         value = anf.value
         if isinstance(value, relay.expr.Call):
             if isinstance(value.op, relay.op.Op):
vta/python/vta/top/op.py
@@ -127,7 +127,6 @@ def compute_conv2d_transpose(attrs, inputs, output_type, target):
     if is_packed_layout(layout):
         return [topi.nn.conv2d_transpose_nchw(inputs[0], inputs[1],
                                               strides, padding, out_dtype)]
     else:
         # If it's not packed, run on ARM CPU
         with tvm.target.arm_cpu(tvm.target.current_target().model):
             return _nn.compute_conv2d_transpose(attrs, inputs, output_type, target)
@@ -145,7 +144,6 @@ def schedule_conv2d_transpose(attrs, outputs, target):
     if target.device_name == "vta":
         if is_packed_layout(layout):
             return topi.nn.schedule_conv2d_transpose_nchw(outputs)
         else:
             # If it's not packed, run on ARM CPU
             with tvm.target.arm_cpu(tvm.target.current_target().model):
                 return _nn.schedule_conv2d_transpose(attrs, outputs, tvm.target.current_target())

vta/scripts/tune_conv2d.py
@@ -23,7 +23,6 @@ import os
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing

vta/scripts/tune_conv2d_transpose.py
@@ -23,7 +23,6 @@ import os
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing

vta/scripts/tune_dense.py
@@ -23,7 +23,6 @@ import os
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing

vta/scripts/tune_group_conv2d.py
@@ -23,7 +23,6 @@ import os
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing