Commit b528acc1 by Tianqi Chen, committed by GitHub

[LINT][PY] Fixes for pylint==2.4.4 (#4849)

parent b46c2548
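Note: most hunks below are mechanical responses to checks that pylint 2.4.4 added or tightened. The most frequent is import-outside-toplevel (C0415): TVM deliberately defers imports of heavy or optional dependencies (redis, topi, xgboost, torch, ...) into the function bodies that need them, so the new warning is silenced locally instead of hoisting the imports. A minimal sketch of the pattern, with an illustrative function name not taken from the diff:

def train_cost_model(features, labels):
    # Deferred import: xgboost stays an optional dependency that is only
    # needed when this code path actually runs.
    # pylint: disable=import-outside-toplevel
    import xgboost as xgb
    return xgb.XGBRegressor().fit(features, labels)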
......@@ -94,10 +94,7 @@ javadoc:
# Cython build
cython:
-cd python; python setup.py build_ext --inplace
-cython2:
-cd python; python2 setup.py build_ext --inplace
+cd python; python3 setup.py build_ext --inplace
cython3:
cd python; python3 setup.py build_ext --inplace
......
......@@ -15,7 +15,7 @@
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name, import-outside-toplevel
"""Base library for TVM FFI."""
import sys
import os
......@@ -204,7 +204,7 @@ def _find_error_type(line):
if _valid_error_name(err_name):
return err_name
return None
-else:
end_pos = line.find(":")
if end_pos == -1:
return None
......
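The bare else: removals seen here and in dozens of later hunks come from pylint's no-else-return / no-else-raise refactoring checks (R1705/R1720): when every earlier branch ends in return or raise, the else: is redundant and its body is dedented. A hedged before/after sketch with made-up names:

# Before: R1705 flags the else as unnecessary.
def find_error_type_old(line):
    if line.startswith("error:"):
        return line[6:]
    else:
        return None

# After: identical behavior, one nesting level less.
def find_error_type_new(line):
    if line.startswith("error:"):
        return line[6:]
    return None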
......@@ -104,6 +104,7 @@ class RedisDatabase(Database):
MAGIC_SPLIT = "$"
def __init__(self, db_index=REDIS_PROD):
+# pylint: disable=import-outside-toplevel
import redis
if db_index == RedisDatabase.REDIS_TEST:
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name,
"""Extract feature of iter vars
There are two types of feature
......@@ -148,6 +148,7 @@ def get_flatten_name(fea):
}
if isinstance(fea, str):
+# pylint: disable=import-outside-toplevel
from .record import decode
# flatten line to feature
line = fea
......
......@@ -539,4 +539,3 @@ class BaseGraphTuner(object):
@abstractmethod
def run(self, **kwargs):
"""Run graph tuning."""
-pass
......@@ -65,6 +65,7 @@ def expr2graph(expr, target_ops, node_dict, node_list):
% op_name)
topi_funcs += OP2COMPUTE[op_name]
env.reset(topi_funcs)
+# pylint: disable=not-context-manager
with env:
_expr2graph_impl(expr, target_ops, node_dict, node_list)
task_pos = 0
......
......@@ -208,6 +208,7 @@ def measure_option(builder, runner):
Using `min_repeat_ms` dynamically adjusts `number`, so it is recommended.
The typical value for NVIDIA GPU is 150 ms.
"""
+# pylint: disable=import-outside-toplevel
from .measure_methods import LocalBuilder, LocalRunner
if isinstance(builder, str):
......
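For context, measure_option is typically consumed like this in the autotvm tuning flow of this era (argument values are illustrative only, not from the diff):

from tvm import autotvm

# The builder compiles candidate kernels locally; the runner times them.
# min_repeat_ms lets the runner grow `number` until a single measurement
# lasts at least 150 ms, which stabilizes GPU timings.
measure_option = autotvm.measure_option(
    builder=autotvm.LocalBuilder(),
    runner=autotvm.LocalRunner(number=5, repeat=1, min_repeat_ms=150))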
......@@ -324,11 +324,11 @@ class LocalRunner(RPCRunner):
self.server = None
def set_task(self, task):
-self.task = task
+# pylint: disable=import-outside-toplevel
from ...rpc.tracker import Tracker
from ...rpc.server import Server
+self.task = task
tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('0.0.0.0', port=9000, port_end=10000,
......@@ -362,6 +362,7 @@ def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_opti
# if target is vta, we need to use vta build
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
+# pylint: disable=import-outside-toplevel
import vta
func = vta.build(s, args, target_host=task.target_host)
else:
......@@ -460,6 +461,7 @@ def run_through_rpc(measure_input, build_result,
# Program the FPGA every single time when targeting VTA
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
+# pylint: disable=import-outside-toplevel
from vta import program_fpga, reconfig_runtime
program_fpga(remote, None)
reconfig_runtime(remote)
......
......@@ -282,6 +282,7 @@ class ApplyHistoryBest(DispatchContext):
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
"""
+# pylint: disable=import-outside-toplevel
from pathlib import Path
from ..record import load_from_file
......@@ -454,6 +455,7 @@ class ApplyGraphBest(DispatchContext):
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
"""
+# pylint: disable=import-outside-toplevel
from ..record import load_from_file
super(ApplyGraphBest, self).__init__()
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=unused-variable,invalid-name
+# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
......@@ -37,7 +37,7 @@ def _lower(mod,
params):
""" Helper to lower VTA properly.
"""
+# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
......@@ -114,6 +114,7 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None,
task: Array of autotvm.task.Task
collected tasks
"""
+# pylint: disable=import-outside-toplevel
import tvm.relay.op
from tvm import relay
import topi
......
......@@ -76,6 +76,7 @@ class TaskExtractEnv:
registered = None
def __init__(self, allow_duplicate=False):
+# pylint: disable=import-outside-toplevel
import topi
# topi compute -> autotvm task name
......@@ -168,6 +169,7 @@ class TaskExtractEnv:
def _register_topi_task(self):
"""register tuning wrapper for topi function"""
+# pylint: disable=import-outside-toplevel
import topi
# Avoid double registration for certain targets
......
......@@ -147,6 +147,7 @@ def check_backend(tophub_location, backend):
if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
return True
+# pylint: disable=import-outside-toplevel
if sys.version_info >= (3,):
import urllib.request as urllib2
else:
......
......@@ -53,6 +53,7 @@ def log_to_file(file_out, protocol='json'):
for inp, result in zip(inputs, results):
file_out.write(record.encode(inp, result, protocol) + "\n")
+# pylint: disable=import-outside-toplevel
from pathlib import Path
if isinstance(file_out, Path):
file_out = str(file_out)
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=consider-using-enumerate, invalid-name
+# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index
"""
Cost model optimizer based on simulated annealing
"""
......
......@@ -420,6 +420,7 @@ def _extract_curve_feature_log(arg):
def custom_callback(stopping_rounds, metric, fevals, evals=(), log_file=None,
maximize=False, verbose_eval=True):
"""callback function for xgboost to support multiple custom evaluation functions"""
+# pylint: disable=import-outside-toplevel
from xgboost.core import EarlyStopException
from xgboost.callback import _fmt_metric
from xgboost.training import aggcv
......
......@@ -467,7 +467,7 @@ def _build_for_device(flist, target, target_host):
func = ir_pass.InferFragment(func)
warp_size = target.thread_warp_size
func = ir_pass.LowerThreadAllreduce(func, warp_size)
-fsplits = [s for s in ir_pass.SplitHostDevice(func)]
+fsplits = list(ir_pass.SplitHostDevice(func))
fhost.append(fsplits[0])
for x in fsplits[1:]:
fdevice.append(x)
......
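This fsplits change, like the later copy_inputs/tensors/output_indices ones, satisfies the unnecessary-comprehension check (R1721) that is new in pylint 2.4: an identity comprehension is just a slower spelling of the constructor. A tiny illustration:

items = range(5)
copy_a = [x for x in items]   # flagged by R1721: comprehension adds nothing
copy_b = list(items)          # preferred: same list, clearer intent
assert copy_a == copy_b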
......@@ -76,7 +76,6 @@ def get_target_by_dump_machine(compiler):
msg += py_str(out)
return None
return py_str(out)
-else:
return None
return get_target_triple
......
......@@ -54,6 +54,7 @@ def to_pytorch_func(tvm_func):
wrapped_func: Function
Wrapped tvm function that operates on PyTorch tensors
"""
+# pylint: disable=import-outside-toplevel
import torch
import torch.utils.dlpack
return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)
......@@ -15,9 +15,6 @@
# specific language governing permissions and limitations
# under the License.
"""Helper utility for downloading"""
-from __future__ import print_function
-from __future__ import absolute_import as _abs
import os
import sys
import time
......@@ -48,10 +45,8 @@ def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=
retries: int, optional
Number of times to retry the download, 3 by default.
"""
-if sys.version_info >= (3,):
+# pylint: disable=import-outside-toplevel
import urllib.request as urllib2
-else:
-import urllib2
if os.path.isfile(path) and not overwrite:
if size_compare:
......@@ -114,7 +109,6 @@ def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=
if os.path.exists(tempfile):
os.remove(tempfile)
raise err
-else:
print("download failed due to {}, retrying, {} attempt{} left"
.format(repr(err), retries, 's' if retries > 1 else ''))
......
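With the from __future__ imports and the version check gone, this file is Python-3-only and the import becomes unconditional; the py2-style alias keeps existing call sites unchanged. A sketch of the resulting shape (fetch is an illustrative helper, not part of the diff):

import urllib.request as urllib2  # alias preserved so call sites stay as-is

def fetch(url):
    # Core of the download helper: a plain urlopen read with a timeout.
    return urllib2.urlopen(url, timeout=10).read()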
......@@ -49,7 +49,7 @@ def to_mxnet_func(func, const_loc=None):
Run asynchronously in MXNet's async engine.
"""
# only import mxnet when wrap get called.
-# pylint: disable=import-self
+# pylint: disable=import-self, import-outside-toplevel
import mxnet
if isinstance(func, Module):
func = func.entry_func
......
......@@ -15,7 +15,6 @@
# specific language governing permissions and limitations
# under the License.
"""Common system utilities"""
-from __future__ import absolute_import as _abs
import os
import tempfile
import shutil
......@@ -167,35 +166,3 @@ def which(exec_name):
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
-def get_lower_ir(s):
-"""Get lower ir code of a schedule.
-This is useful for debug, since you don't have to find all inputs/outputs
-for a schedule in a fused subgraph.
-Parameters
-----------
-s: Schedule
-Returns
--------
-ir: str
-The lower ir
-"""
-from .. import tensor
-from ..build_module import lower
-outputs = s.outputs
-inputs = []
-def find_all(op):
-if isinstance(op, tensor.PlaceholderOp):
-inputs.append(op.output(0))
-else:
-for x in op.input_tensors:
-find_all(x.op)
-for out in outputs:
-find_all(out)
-return lower(s, inputs, simple_mode=True)
......@@ -50,7 +50,8 @@ def script(pyfunc):
hybrid_func : function
A decorated hybrid script function.
"""
-def wrapped_func(func, *args, **kwargs): #pylint: disable=missing-docstring
+# pylint: disable=import-outside-toplevel, missing-docstring
+def wrapped_func(func, *args, **kwargs):
from .util import _is_tvm_arg_types
if _is_tvm_arg_types(args):
src = _pruned_source(func)
......
......@@ -69,6 +69,7 @@ def bind(func_id, args):
def _math_intrin(func_id, args):
+# pylint: disable=import-outside-toplevel
from .. import intrin
return getattr(intrin, func_id)(*args)
......
......@@ -198,7 +198,7 @@ class HybridParser(ast.NodeVisitor):
ty, entry = self.symbols[key] #pylint: disable=invalid-name
if ty in [Symbol.Input, Symbol.OutputBuffer]:
continue
-elif 'Buffer' in ty.name:
+if 'Buffer' in ty.name:
_buf = entry
_scope = 'global' if ty is Symbol.BufferVar else ty.name[:-6].lower()
to_pop.append(key)
......
......@@ -70,6 +70,7 @@ def _pruned_source(func):
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
+# pylint: disable=import-outside-toplevel
from .. import ir_pass
def replace(op):
......
......@@ -78,7 +78,7 @@ class ParseError(Exception):
class OpWrapper:
"""Overload the __call__ for op."""
-pass
class ExprOp(OpWrapper):
"""Call an expr. The default, but does not handle attrs well."""
......@@ -273,7 +273,7 @@ class ParseTreeToRelayIR(RelayVisitor):
def _type_expr_name(self, e):
if isinstance(e, adt.Constructor):
return "`{0}` ADT constructor".format(e.belong_to.name_hint)
-elif isinstance(e, ty.GlobalTypeVar):
+if isinstance(e, ty.GlobalTypeVar):
if e.kind == ty.Kind.AdtHandle:
return "ADT definition"
return "function definition"
......@@ -623,7 +623,7 @@ class ParseTreeToRelayIR(RelayVisitor):
def call(self, func, args, attrs, type_args):
if isinstance(func, OpWrapper):
return func(args, attrs, type_args)
-elif isinstance(func, adt.Constructor):
+if isinstance(func, adt.Constructor):
return func(*args)
return expr.Call(func, args, attrs, type_args)
......
......@@ -384,7 +384,7 @@ def detect_feature(a, b=None):
"""
if isinstance(a, Module):
a, b = b, a
-return set([Feature(int(x)) for x in _analysis.detect_feature(a, b)])
+return {Feature(int(x)) for x in _analysis.detect_feature(a, b)}
def structural_hash(value):
......
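The detect_feature rewrite is consider-using-set-comprehension (R1718): building a list only to hand it to set() materializes a throwaway intermediate. Illustration:

values = ["1", "2", "2", "3"]
s1 = set([int(v) for v in values])  # flagged: temporary list, then a copy
s2 = {int(v) for v in values}       # preferred: builds the set directly
assert s1 == s2 == {1, 2, 3}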
......@@ -44,8 +44,9 @@ def lower(sch, inputs, func_name, source_func):
lowered_funcs : List[tvm.LoweredFunc]
The result of lowering.
"""
+# pylint: disable=broad-except, import-outside-toplevel
import traceback
-# pylint: disable=broad-except
try:
f = _build.lower(sch, inputs, name=func_name)
# logging.debug("lower function %s", func_name)
......
......@@ -86,7 +86,7 @@ class CompileEngine(Object):
cached_func: CachedFunc
The result of lowering.
"""
-# pylint: disable=broad-except
+# pylint: disable=broad-except, import-outside-toplevel
try:
key = _get_cache_key(source_func, target)
return _backend._CompileEngineLower(self, key)
......
......@@ -407,7 +407,6 @@ def create_executor(kind="debug",
return _interpreter.Interpreter(mod, ctx, target)
if kind == "graph":
return GraphExecutor(mod, ctx, target)
-elif kind == "vm":
+if kind == "vm":
return VMExecutor(mod, ctx, target)
else:
raise RuntimeError("unknown execution strategy: {0}".format(kind))
......@@ -20,7 +20,7 @@ from __future__ import absolute_import
from ..api import register_func
-# pylint: disable=unused-argument
+# pylint: disable=unused-argument, import-outside-toplevel
def _debugger_init(expr, stack):
import pdb
pdb.set_trace()
......
......@@ -125,7 +125,6 @@ class Expr(RelayNode):
def __rsub__(self, other):
if isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
-else:
raise TypeError("type %s not supported" % str(type(other)))
def __mul__(self, other):
......@@ -150,7 +149,6 @@ class Expr(RelayNode):
def __rdiv__(self, other):
if isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
-else:
raise TypeError("type %s not supported" % str(type(other)))
def __truediv__(self, other):
......
......@@ -401,6 +401,7 @@ class Caffe2NetDef(object):
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
+# pylint: disable=import-outside-toplevel
from caffe2.python import workspace
workspace.RunNetOnce(init_net)
......
......@@ -302,7 +302,7 @@ class ExprTable(object):
self.exprs[name] = expr
def has_expr(self, name):
-return True if name in self.exprs else False
+return name in self.exprs
def set_padding(self, paddings):
self.paddings = paddings
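has_expr is a textbook simplifiable-if-expression (R1719) fix: a membership test already yields a bool, so routing it through True if ... else False is noise. Sketch, with a standalone dict in place of the class attribute:

exprs = {"x": 1, "y": 2}

def has_expr_old(name):
    return True if name in exprs else False  # flagged by R1719

def has_expr_new(name):
    return name in exprs  # the comparison is already a bool

assert has_expr_old("x") is has_expr_new("x")
assert has_expr_new("z") is False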
......@@ -391,7 +391,7 @@ class AttrCvt(object):
if k in self._excludes:
raise NotImplementedError('Attribute %s in operator %s is not' +
' supported.', k, op_name)
-elif k in self._disables:
+if k in self._disables:
logging.warning("Attribute %s is disabled in relay.sym.%s", k, op_name)
elif k in self._ignores:
if k != 'tvm_custom':
......@@ -485,6 +485,7 @@ def infer_value(input_val, params):
portion of the relay graph. This is often needed for functions
whose output shape depends on the value of a tensor.
"""
+# pylint: disable=import-outside-toplevel
from tvm.contrib import graph_runtime
# Check that all free variables have associated parameters.
assert all(var.name_hint in params.keys() for var in analysis.free_vars(
......
......@@ -14,7 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
+# pylint: disable=invalid-name, import-self, unused-argument, unused-variable
+# pylint: disable=inconsistent-return-statements, import-outside-toplevel
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import math
......@@ -111,7 +112,6 @@ def _BatchnormLayerParams(op, inexpr, etab):
if op.instanceNormalization:
raise tvm.error.OpNotImplemented(
'Operator "instance normalization" is not supported in frontend CoreML.')
-else:
params = {'gamma':etab.new_const(list(op.gamma.floatValue)),
'beta':etab.new_const(list(op.beta.floatValue)),
'moving_mean':etab.new_const(list(op.mean.floatValue)),
......@@ -197,7 +197,6 @@ def _PoolingLayerParams(op, inexpr, etab):
raise tvm.error.OpNotImplemented(
'Only Max and Average Pooling are supported in frontend CoreML.')
-else:
params = {'pool_size':list(op.kernelSize),
'strides':list(op.stride)}
......@@ -297,8 +296,6 @@ def _PaddingLayerParams(op, inexpr, etab):
(0, 0),
(pad_t, pad_b),
(pad_l, pad_r)))
-else:
raise tvm.error.OpNotImplemented(
'Non-constant padding is not supported in frontend CoreML.')
......
......@@ -14,9 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, import-self
+# pylint: disable=invalid-name, import-self, import-outside-toplevel
"""Keras frontend."""
-from __future__ import absolute_import as _abs
import sys
import numpy as np
import tvm
......@@ -133,7 +132,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab):
# f(x) = max_value, for x >= max_value
# f(x) = x, for threshold <= x < max_value
return _op.clip(inexpr, a_min=0., a_max=float(keras_layer.max_value))
-elif keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
+if keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
# f(x) = negative_slope * (inexpr - threshold)
negative_slope = _expr.const(keras_layer.negative_slope, dtype='float32')
return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))
......
......@@ -16,15 +16,13 @@
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition
"""Utility functions common to NNVM and MxNet conversion."""
-from __future__ import absolute_import as _abs
+import warnings
from .. import expr as _expr
from .. import op as _op
from .common import get_relay_op
from .common import infer_type as _infer_type
def _warn_not_used(attr, op='nnvm'):
-import warnings
err = "{} is ignored in {}.".format(attr, op)
warnings.warn(err)
......
......@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
+# pylint: disable=import-outside-toplevel
"""ONNX: Open Neural Network Exchange frontend for Relay."""
from __future__ import absolute_import as _abs
......
......@@ -16,6 +16,7 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
+# pylint: disable=import-outside-toplevel
"""TF: Tensorflow frontend."""
from __future__ import absolute_import as _abs
from __future__ import print_function
......
......@@ -15,8 +15,8 @@
# specific language governing permissions and limitations
# under the License.
"""TF: Tensorflow parser"""
-from __future__ import absolute_import as _abs
-from __future__ import print_function
+# pylint: disable=import-outside-toplevel, assignment-from-no-return
import os
from tvm.contrib import util
......
......@@ -14,7 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-argument, too-many-lines
+# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
"""Tensorflow lite frontend."""
from __future__ import absolute_import as _abs
import math
......@@ -1458,7 +1459,6 @@ class OperatorConverter(object):
raise tvm.error.OpNotImplemented(
'Operator {} with fused activation is not supported yet.'
.format('qnn.op.pool2d'))
-else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
......
......@@ -46,6 +46,7 @@ from ..base import register_relay_node
def _register_op_make():
+# pylint: disable=import-outside-toplevel
from . import _make
from .. import expr
expr._op_make = _make
......
......@@ -200,7 +200,6 @@ def take_shape_func(attrs, inputs, out_ndims):
"""
if attrs.axis is None:
return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
-else:
axis = get_const_int(attrs.axis)
data_ndim = int(inputs[0].shape[0])
if axis < 0:
......@@ -275,13 +274,13 @@ def argwhere_shape_func(attrs, inputs, out_ndims):
"""
if len(inputs[0].shape) == 1:
return [_argwhere_shape_func_1d(inputs[0])]
-elif len(inputs[0].shape) == 2:
+if len(inputs[0].shape) == 2:
return [_argwhere_shape_func_2d(inputs[0])]
-elif len(inputs[0].shape) == 3:
+if len(inputs[0].shape) == 3:
return [_argwhere_shape_func_3d(inputs[0])]
-elif len(inputs[0].shape) == 4:
+if len(inputs[0].shape) == 4:
return [_argwhere_shape_func_4d(inputs[0])]
-elif len(inputs[0].shape) == 5:
+if len(inputs[0].shape) == 5:
return [_argwhere_shape_func_5d(inputs[0])]
return [_argwhere_shape_func_5d(inputs[0])]
return ValueError("Does not support rank higher than 5 in argwhere")
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments
+# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
......@@ -265,6 +265,7 @@ def schedule_conv2d(attrs, outs, target):
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos):
"""Alternate the layout of conv2d"""
+# pylint: disable=import-outside-toplevel
from ... import op
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
......@@ -309,7 +310,7 @@ def convert_conv2d(attrs, inputs, tinfos, desired_layout):
result : tvm.relay.Expr
The transformed expr
"""
+# pylint: disable=import-outside-toplevel
from tvm import relay
data_layout = attrs['data_layout']
kernel_layout = attrs['kernel_layout']
......
......@@ -14,6 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+# pylint: disable=import-outside-toplevel
"""Transform operators."""
from . import _make
......
......@@ -22,6 +22,7 @@ from .. import register_func
@register_func("relay.fromtext")
def fromtext(data, source_name=None):
"""Parse a Relay program."""
+# pylint: disable=import-outside-toplevel
from tvm.relay import _parser
x = _parser.fromtext(data + "\n", source_name)
if x is None:
......
......@@ -16,7 +16,7 @@
# under the License.
#pylint: disable=unused-argument
"""The register functions for the QNN dialect."""
-from tvm.relay.op.op import register as register
+from tvm.relay.op.op import register
def register_qnn_legalize(op_name, legal_op=None, level=10):
"""Register legal transformation function for a QNN op
......
......@@ -88,7 +88,7 @@ def add_partition_generic(ref_call, new_args, ctx):
lhs = new_args[0].realize()
rhs = new_args[1].realize()
return _forward_op(ref_call, [lhs, rhs])
-elif not lhs_cond and rhs_cond:
+if not lhs_cond and rhs_cond:
# - introduced by residual connection in ResNet
# ...
# %13 = nn.conv2d(%12, %meta[relay.Constant])
......@@ -104,7 +104,7 @@ def add_partition_generic(ref_call, new_args, ctx):
# ...
rhs = new_args[1].realize()
return _forward_op(ref_call, [lhs, rhs])
-elif lhs_cond and not rhs_cond:
+if lhs_cond and not rhs_cond:
if _analysis.check_constant(rhs):
# - introduced by batch_norm: add(out, bias)
return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
......@@ -121,10 +121,10 @@ def add_partition_generic(ref_call, new_args, ctx):
# ...
lhs = new_args[0].realize()
return _forward_op(ref_call, [lhs, rhs])
-elif not lhs_cond and not rhs_cond:
+if not lhs_cond and not rhs_cond:
# trivial case
return None
-else:
raise ValueError
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-#pylint: disable=unused-argument
+#pylint: disable=unused-argument, not-context-manager
"""Automatic quantization toolkit."""
from __future__ import absolute_import
from . import _quantize
......
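The not-context-manager disables (here and earlier, around `with env:`) silence a false positive: pylint's inference often cannot prove that the object returned by a helper implements __enter__/__exit__, even though it does at runtime. A self-contained sketch of the situation, with illustrative names:

class BuildConfig:
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        return False

def current_build_config():
    return BuildConfig()

# pylint may not infer the return type here, so the check is disabled
# rather than restructuring working code.
# pylint: disable=not-context-manager
with current_build_config():
    pass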
......@@ -41,7 +41,6 @@ class WithScope(object):
def __exit__(self, ptype, value, trace):
if value:
raise value
-else:
self._exit_cb()
def _make_lets(bindings, ret_value):
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, unpacking-non-sequence
"""
Compile DarkNet Models
====================
......
......@@ -85,7 +85,7 @@ def residual_unit(data,
data=act1, channels=num_filter, kernel_size=(1, 1),
strides=stride, name=name+'_sc')
return relay.add(conv3, shortcut)
-else:
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + '_bn1')
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv2d(
......@@ -96,6 +96,7 @@ def residual_unit(data,
conv2 = layers.conv2d(
data=act2, channels=num_filter, kernel_size=(3, 3),
strides=(1, 1), padding=(1, 1), name=name + '_conv2')
if dim_match:
shortcut = data
else:
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, import-outside-toplevel
"""
Tensorflow Model Helpers
========================
......@@ -346,7 +346,7 @@ def get_workload_ptb():
sample_data_file = 'simple-examples.tgz'
sample_url = sample_repo+sample_data_file
ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'
+# pylint: disable=import-outside-toplevel
import tarfile
file_path = download_testdata(sample_url, sample_data_file, module=['data', 'ptb_data'])
dir_path = os.path.dirname(file_path)
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init,
"""
Yolo detection boxes helper functions
====================
......@@ -224,6 +224,7 @@ def _draw_label(im, r, c, label, rgb):
_set_pixel(im, i+c, j+r, k, val)#rgb[k] * val)
def _get_label(font_path, labelstr, rgb):
+# pylint: disable=import-outside-toplevel
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
......
......@@ -508,7 +508,6 @@ class Proxy(object):
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
-else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
......@@ -569,7 +568,7 @@ def websocket_proxy_server(url, key=""):
magic = struct.unpack('<i', msg[:4])[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
-elif magic == base.RPC_CODE_MISMATCH:
+if magic == base.RPC_CODE_MISMATCH:
logging.info("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % url)
......
......@@ -161,7 +161,6 @@ def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
conn.close()
logger.warning("mismatch key from %s", addr)
continue
-else:
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
......@@ -208,6 +207,7 @@ def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
+# pylint: disable=import-outside-toplevel
import psutil
parent = psutil.Process(server_proc.pid)
# terminate worker childs
......@@ -233,7 +233,8 @@ def _connect_proxy_loop(addr, key, load_library):
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
-elif magic == base.RPC_CODE_MISMATCH:
+if magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
......@@ -380,7 +381,6 @@ class Server(object):
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
-else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
......
......@@ -92,8 +92,8 @@ class TCPHandler(object):
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
break
-else:
self.on_error(err)
if self._pending_write:
self._ioloop.update_handler(
self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE)
......
......@@ -393,7 +393,6 @@ class Tracker(object):
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
-else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
......
......@@ -15,7 +15,7 @@
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-import
+# pylint: disable=invalid-name, unused-import, import-outside-toplevel
"""Runtime Module namespace."""
import ctypes
import struct
......
......@@ -184,7 +184,7 @@ class NDArray(NDArrayBase):
"""
if isinstance(target, NDArrayBase):
return self._copyto(target)
-elif isinstance(target, TVMContext):
+if isinstance(target, TVMContext):
res = empty(self.shape, self.dtype, target)
return self._copyto(res)
raise ValueError("Unsupported target type %s" % str(type(target)))
......
......@@ -179,7 +179,6 @@ class BaseComputeOp(Operation):
@tvm._ffi.register_object
class ComputeOp(BaseComputeOp):
"""Scalar operation."""
-pass
@tvm._ffi.register_object
......
......@@ -112,7 +112,7 @@ def decl_tensor_intrin(op,
raise TypeError("expect Operation")
inputs = op.input_tensors
binds = binds if binds else {}
-tensors = [x for x in inputs]
+tensors = list(inputs)
for i in range(op.num_outputs):
tensors.append(op.output(i))
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument
+# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Conv2D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
......@@ -528,8 +528,7 @@ def _alter_conv2d_layout_arm(attrs, inputs, tinfos, F):
Unlike other TOPI functions, this function operates on both graph level and operator level,
so we have to pass 'F' to make it support our two versions of graph IR, Relay.
"""
-copy_inputs = [s for s in inputs]
+copy_inputs = list(inputs)
new_attrs = {k: attrs[k] for k in attrs.keys()}
if F.__name__ == 'tvm.relay.op':
......
......@@ -74,7 +74,6 @@ def conv2d_bifrost(cfg, data, kernel, strides, padding, dilation, layout, out_dt
if layout == 'NCHW':
return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding,
dilation, out_dtype, num_tile=3)
-else:
raise ValueError("Unsupported layout {}".format(layout))
......
......@@ -328,7 +328,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, F):
if 'cudnn' in tvm.target.current_target().libs or 'miopen' in tvm.target.current_target().libs:
return None
-copy_inputs = [s for s in inputs]
+copy_inputs = list(inputs)
new_attrs = {k: attrs[k] for k in attrs.keys()}
......
......@@ -14,7 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison, unused-argument
+# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison
+# pylint: disable=bad-continuation, unused-argument
"""Non-maximum suppression operator"""
import math
import tvm
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, singleton-comparison
+# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
......
......@@ -54,7 +54,7 @@ def schedule_softmax(outs):
if len(softmax.shape) > 2:
ops = [max_elem.op, expsum.op, softmax.op]
-if exp != None:
+if exp is not None:
ops.append(exp.op)
for op in ops:
......@@ -64,7 +64,7 @@ def schedule_softmax(outs):
block_x = tvm.thread_axis("blockIdx.x")
thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
-if exp != None:
+if exp is not None:
s[exp].bind(exp.op.axis[0], block_x)
s[max_elem].bind(max_elem.op.axis[0], block_x)
......
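The exp != None rewrites in the CUDA, OpenGL, and x86 softmax schedules follow singleton-comparison (C0121): None is a singleton, so identity is the right test, and an overridden __eq__ can make equality against None lie outright:

class AlwaysEqual:
    def __eq__(self, other):  # pathological, but legal
        return True

exp = AlwaysEqual()
print(exp != None)      # False -- __ne__ is derived from __eq__, misleading
print(exp is not None)  # True  -- identity cannot be overridden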
......@@ -42,6 +42,7 @@ def _schedule_sort(outs):
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
+# pylint: disable=import-outside-toplevel
from .injective import schedule_injective_from_existing
def traverse(op):
if tag.is_injective(op.tag):
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-member
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-member, import-outside-toplevel
"""Schedule for vision operators"""
from __future__ import absolute_import as _abs
import tvm
......
......@@ -275,7 +275,7 @@ def schedule_softmax(outs):
raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
Got {0}'.format(op_tag))
-if exp != None:
+if exp is not None:
s[exp].compute_at(s[softmax], s[softmax].op.axis[1])
s[expsum].compute_at(s[softmax], s[softmax].op.axis[1])
......
......@@ -38,7 +38,7 @@ from ..util import simplify, get_const_tuple
def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False):
if is_depthwise:
raise RuntimeError("Depthwise not supported for intel graphics.")
-else:
batch_size, in_channel, height, width = get_const_tuple(data.shape)
out_channel, _, hkernel, _ = get_const_tuple(kernel.shape)
HSTR, _ = strides
......@@ -189,7 +189,7 @@ def __topi_nn_conv2d_NCHWc(*args, **kwargs):
@conv2d_alter_layout.register(["intel_graphics"])
def _alter_conv2d_layout(attrs, inputs, tinfo, F):
-copy_inputs = [s for s in inputs]
+copy_inputs = list(inputs)
new_attrs = {k : attrs[k] for k in attrs.keys()}
if F.__name__ == 'tvm.relay.op':
......
......@@ -60,7 +60,7 @@ def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"):
for i in range(n+1):
if i == bit_axis:
continue
-elif i == pack_axis:
+if i == pack_axis:
idx[j] = indices[i] * data_width + k
else:
idx[j] = indices[i]
......@@ -88,4 +88,3 @@ def binary_op_multiplier(pack_dtype):
pack_dtype: string
pack type for the operator (must be a uint)"""
return int(pack_dtype[4:])
\ No newline at end of file
......@@ -66,9 +66,9 @@ def conv2d(input, filter, strides, padding, dilation, layout='NCHW', out_dtype=N
# default declaration
if layout == 'NCHW':
return conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
-elif layout == 'HWCN':
+if layout == 'HWCN':
return conv2d_hwcn(input, filter, strides, padding, dilation, out_dtype)
-elif layout == 'NHWC':
+if layout == 'NHWC':
return conv2d_nhwc(input, filter, strides, padding, dilation, out_dtype)
raise ValueError("not support this layout {} yet".format(layout))
......@@ -764,6 +764,7 @@ def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_d
output : tvm.Tensor
4-D with shape [alpha, alpha, CO, CI]
"""
+# pylint: disable=import-outside-toplevel
from tvm.contrib import nnpack
return nnpack.convolution_inference_weight_transform(
kernel, algorithm=convolution_algorithm, dtype=out_dtype)
......
......@@ -76,7 +76,7 @@ def fifo_buffer(data, buffer, axis):
buffer[i + data_size],
data[i - buflen + data_size]),
name='new_buffer')
-elif len(buffer.shape) == 2:
+if len(buffer.shape) == 2:
if axis == 0:
return tvm.compute(buffer.shape,
lambda i, j:
......
......@@ -51,7 +51,7 @@ def schedule_softmax(outs):
raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
Got {0}'.format(op_tag))
-if exp != None:
+if exp is not None:
s[exp].opengl()
s[max_elem].opengl()
......
......@@ -62,7 +62,7 @@ def one_hot(indices, on_value, off_value, depth, axis, dtype):
indices_index += 1
out = np.empty(oshape)
-output_indices = [index for index in np.ndindex(out.shape)]
+output_indices = list(np.ndindex(out.shape))
for output_index in output_indices:
indices_indices = []
for i, out_idx in enumerate(output_index):
......
......@@ -238,13 +238,10 @@ def strided_set(a, v, begin, end, strides=None):
from_val = []
index_tuple = []
for i in range(n):
-from_val.append(
-within_index(begin[i], end[i], strides[i], indices[i]))
+from_val.append(within_index(begin[i], end[i], strides[i], indices[i]))
index_tuple.append(
make_idx(begin[i], end[i], strides[i], a.shape[i], indices[i]))
-return tvm.if_then_else(tvm.all(*from_val),
-v(*index_tuple),
-a(*indices))
+return tvm.if_then_else(tvm.all(*from_val), v(*index_tuple), a(*indices))
return tvm.compute(a.shape, _select, name="strided_set")
......@@ -568,7 +565,7 @@ def sequence_mask(data, valid_length, mask_value=0, axis=0):
assert len(data.shape) >= 2,\
"only support data.ndim >= 2, received data.shape = {}".format(data.shape)
-assert axis == 0 or axis == 1, "only support axis = 0, 1, received axis = {}".format(axis)
+assert axis in (0, 1), "only support axis = 0, 1, received axis = {}".format(axis)
return cpp.sequence_mask(data, valid_length, mask_value, axis)
......
......@@ -25,7 +25,6 @@ from . import tag, cpp
class InvalidShapeError(ValueError):
"""Invalid shape for a topi function (e.g. calling the winograd template for a non-3x3 kernel)."""
-pass
def nchw_pack_layout(layout_info):
"""Check whether the layout type is NCHWinic"""
......@@ -350,7 +349,7 @@ def get_shape(src_shape, src_layout, dst_layout):
layout_mapping = bijective_layout(src_layout, dst_layout)
dst_indices = layout_mapping.forward_index(
-tvm.convert([i for i in range(len(src_layout))]))
+tvm.convert(list(range(len(src_layout)))))
return get_const_tuple(tuple([src_shape[i.value] for i in dst_indices]))
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, singleton-comparison
+# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member,import-outside-toplevel
"""Conv2D schedule on x86"""
import logging
......@@ -126,7 +126,7 @@ def _declaration_conv(cfg, data, kernel, strides, padding, dilation, layout, out
# # specialize for INT8 1X1 conv on X86
# return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides,
# padding, dilation, out_dtype)
-elif layout == 'NHWC':
+if layout == 'NHWC':
return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
raise ValueError("not support this layout {} yet".format(layout))
......
......@@ -63,7 +63,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfo, F):
is_depthwise = groups == kshape[0] and kshape[1] == 1
# Save the input exprs.
-copy_inputs = [s for s in inputs]
+copy_inputs = list(inputs)
# Set the new attrs
new_attrs = {k : attrs[k] for k in attrs.keys()}
......
......@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member, import-outside-toplevel
"""Conv2D int8 schedule on x86"""
import re
......@@ -70,7 +70,7 @@ def _is_int8_hw_support(data_dtype, kernel_dtype):
# 3) Check target
mcpu = tvm.target.current_target().mcpu
is_target_support = False
-if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+if mcpu in ('skylake-avx512', 'cascadelake'):
is_target_support = True
return is_dtype_support and is_llvm_support and is_target_support
......
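The mcpu rewrites apply consider-using-in (R1714): chained equality tests against one variable collapse into a single membership test. Illustration:

mcpu = "cascadelake"
if mcpu == "skylake-avx512" or mcpu == "cascadelake":  # flagged by R1714
    print("AVX-512 int8 path")
if mcpu in ("skylake-avx512", "cascadelake"):          # preferred
    print("AVX-512 int8 path")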
......@@ -63,7 +63,7 @@ def schedule_softmax(outs):
s[max_elem].compute_at(s[softmax], fused_outer_axes)
s[expsum].compute_at(s[softmax], fused_outer_axes)
-if exp != None:
+if exp is not None:
s[exp].compute_at(s[softmax], fused_outer_axes)
return s
......@@ -21,6 +21,6 @@ import tvm
def get_fp32_len():
mcpu = tvm.target.current_target().mcpu
fp32_vec_len = 8
-if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+if mcpu in ('skylake-avx512', 'cascadelake'):
fp32_vec_len = 16
return fp32_vec_len
......@@ -79,7 +79,6 @@ compilation guide to get Xilinx toolchains setup) and add it to your \
$VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
settings. You can see the list of available bitstreams under {}"
.format(url, BITSTREAM_URL))
-else:
raise RuntimeError(
# This could happen when trying to access the URL behind a proxy
"Something went wrong when trying to access {}. Check your \
......
......@@ -231,9 +231,9 @@ class Environment(object):
"""The target host"""
if self.TARGET in ["pynq", "de10nano"]:
return "llvm -target=armv7-none-linux-gnueabihf"
-elif self.TARGET == "ultra96":
+if self.TARGET == "ultra96":
return "llvm -target=aarch64-linux-gnu"
-elif self.TARGET in ["sim", "tsim"]:
+if self.TARGET in ["sim", "tsim"]:
return "llvm"
raise ValueError("Unknown target %s" % self.TARGET)
......
......@@ -66,6 +66,7 @@ def server_start():
@tvm.register_func("tvm.contrib.vta.init", override=True)
def program_fpga(file_name):
+# pylint: disable=import-outside-toplevel
from pynq import xlnk
# Reset xilinx driver
xlnk.Xlnk().xlnk_reset()
......
......@@ -15,9 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Additional IR Pass for VTA"""
-# pylint: disable=len-as-condition
-from __future__ import absolute_import as _abs
+# pylint: disable=len-as-condition, no-else-return
import tvm
from topi import util
......
......@@ -43,6 +43,7 @@ def main():
bitstream_program(args.target, args.bitstream)
def pynq_bitstream_program(bitstream_path):
+# pylint: disable=import-outside-toplevel
from pynq import Bitstream
bitstream = Bitstream(bitstream_path)
bitstream.download()
......
......@@ -151,14 +151,12 @@ class ExprPack(ExprMutator):
assert not self.start_pack
self.start_pack = True
return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
-elif call.op == self.bitpack_end:
+if call.op == self.bitpack_end:
if self.start_pack:
self.start_pack = False
data = args[0]
data_shape = _get_shape(call.args[0])
return _unpack_batch_channel(data, data_shape)
-else:
-pass
if self.start_pack:
# Operator cases
if call.op == self.conv2d and odtype == 'int32':
......@@ -188,7 +186,8 @@ class ExprPack(ExprMutator):
kernel_layout=kernel_layout,
out_dtype=call.attrs.out_dtype)
return conv2d
-elif call.op == self.conv2d_transpose and odtype == 'int32':
+if call.op == self.conv2d_transpose and odtype == 'int32':
self.number_of_conv2d += 1
assert 8 % self.weight_bits == 0
w_lanes = 8 // self.weight_bits
......@@ -213,7 +212,7 @@ class ExprPack(ExprMutator):
output_padding=call.attrs.output_padding,
out_dtype=call.attrs.out_dtype)
return conv2d
-elif call.op == self.add and \
+if call.op == self.add and \
tuple(input_types[0].shape) == tuple(input_types[1].shape):
pass
elif call.op == self.add and len(input_types[1].shape) == 3:
......@@ -272,7 +271,7 @@ def get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, cou
_recursion(anf.body, start_found, stop_found,
operator_current_idx),
anf.ret_type, anf.type_params, anf.attrs)
-elif isinstance(anf, relay.expr.Let):
+if isinstance(anf, relay.expr.Let):
value = anf.value
if isinstance(value, relay.expr.Call):
if isinstance(value.op, relay.op.Op):
......
......@@ -127,7 +127,6 @@ def compute_conv2d_transpose(attrs, inputs, output_type, target):
if is_packed_layout(layout):
return [topi.nn.conv2d_transpose_nchw(
inputs[0], inputs[1], strides, padding, out_dtype)]
-else:
# If it's not packed, run on ARM CPU
with tvm.target.arm_cpu(tvm.target.current_target().model):
return _nn.compute_conv2d_transpose(attrs, inputs, output_type, target)
......@@ -145,7 +144,6 @@ def schedule_conv2d_transpose(attrs, outputs, target):
if target.device_name == "vta":
if is_packed_layout(layout):
return topi.nn.schedule_conv2d_transpose_nchw(outputs)
-else:
# If it's not packed, run on ARM CPU
with tvm.target.arm_cpu(tvm.target.current_target().model):
return _nn.schedule_conv2d_transpose(attrs, outputs, tvm.target.current_target())
......
......@@ -23,7 +23,6 @@ import os
import tvm
from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
import topi
import vta
import vta.testing
......
......@@ -23,7 +23,6 @@ import os
import tvm
from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
import topi
import vta
import vta.testing
......
......@@ -23,7 +23,6 @@ import os
import tvm
from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
import topi
import vta
import vta.testing
......
......@@ -23,7 +23,6 @@ import os
import tvm
from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
import topi
import vta
import vta.testing
......