Commit 996cf30e by Kim, committed by Tianqi Chen

remove PEP 498 f-string usage to support Python 3.5 (#4250)

parent 1b053ec0
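For context, here is a minimal, hypothetical sketch of the conversion pattern applied throughout this diff; the variable names below are illustrative only and do not appear in the commit. PEP 498 f-strings require Python 3.6 or newer, while the equivalent str.format() calls also run under Python 3.5:

# Hypothetical example, not part of the commit: both lines build the same string.
n, i = "split", 0
label = f"{n}:{i}"                # f-string, Python 3.6+ only (PEP 498)
label = "{0}:{1}".format(n, i)    # str.format(), accepted by Python 3.5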
@@ -537,7 +537,7 @@ def _test_split(in_shape, axis, num_or_size_splits, dtype):
 num_split = len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
 tf.split(in_data, num_or_size_splits, axis=axis)
-compare_tf_with_tvm([np_data], ['in_data:0'], [f'split:{n}' for n in range(num_split)])
+compare_tf_with_tvm([np_data], ['in_data:0'], ['split:{0}'.format(n) for n in range(num_split)])
 # and now test together with concat
 tf.reset_default_graph()
@@ -586,7 +586,7 @@ def _test_unstack(ip_shape, axis, dtype):
 in_data = tf.placeholder(dtype, ip_shape, name="in_data")
 tf.unstack(in_data, axis=axis)
-compare_tf_with_tvm([np_data], ['in_data:0'], [f'unstack:{n}' for n in range(ip_shape[axis])])
+compare_tf_with_tvm([np_data], ['in_data:0'], ['unstack:{0}'.format(n) for n in range(ip_shape[axis])])
 tf.reset_default_graph()
 in_data = tf.placeholder(dtype, ip_shape, name="in_data")
...
@@ -215,7 +215,7 @@ class ParseTreeToRelayIR(RelayVisitor):
 def mk_global_var(self, name: str) -> expr.GlobalVar:
     """Create a new GlobalVar and add it to the GlobalVar scope."""
     if name in self.global_vars:
-        raise ParseError(f"duplicate global var \"{name}\"")
+        raise ParseError("duplicate global var \"{0}\"".format(name))
     var = expr.GlobalVar(name)
     self.global_vars[name] = var
     return var
@@ -252,14 +252,15 @@ class ParseTreeToRelayIR(RelayVisitor):
         new_typ_name = self._type_expr_name(new_expr)
         existing_typ_name = self._type_expr_name(self.global_type_vars[name])
         raise ParseError(
-            f"{new_typ_name} `{name}` conflicts with existing {existing_typ_name}")
+            "{0} `{1}` conflicts with existing {2}".format(new_typ_name,\
+                name, existing_typ_name))

 def _type_expr_name(self, e):
     if isinstance(e, adt.Constructor):
-        return f"`{e.belong_to.var.name}` ADT constructor"
+        return "`{0}` ADT constructor".format(e.belong_to.var.name)
     elif isinstance(e, ty.GlobalTypeVar):
         if e.kind == ty.Kind.AdtHandle:
-            return f"ADT definition"
+            return "ADT definition"
     return "function definition"

 def visitProjection(self, ctx):
@@ -282,7 +283,7 @@ class ParseTreeToRelayIR(RelayVisitor):
         raise ParseError("unrecognized BOOL_LIT: `{}`".format(node_text))
     if node_type == RelayLexer.QUOTED_STRING:
         return literal_eval(node_text)
-    raise ParseError(f"unhandled terminal \"{node_text}\" of type `{node_type}`")
+    raise ParseError("unhandled terminal \"{0}\" of type `{1}`".format(node_text, node_type))

 def visitGeneralIdent(self, ctx):
     name = ctx.getText()
@@ -310,14 +311,14 @@ class ParseTreeToRelayIR(RelayVisitor):
     var_name = ctx.CNAME().getText()
     global_var = self.global_vars.get(var_name, None)
     if global_var is None:
-        raise ParseError(f"unbound global var `{var_name}`")
+        raise ParseError("unbound global var `{0}`".format(var_name))
     return global_var

 def visitLocalVar(self, ctx):
     var_name = ctx.CNAME().getText()
     local_var = lookup(self.var_scopes, var_name)
     if local_var is None:
-        raise ParseError(f"unbound local var `{var_name}`")
+        raise ParseError("unbound local var `{0}`".format(var_name))
     return local_var

 def visitGraphVar(self, ctx):
@@ -557,7 +558,7 @@ class ParseTreeToRelayIR(RelayVisitor):
 elif match_type == "match?":
     complete_match = False
 else:
-    raise RuntimeError(f"unknown match type {match_type}")
+    raise RuntimeError("unknown match type {0}".format(match_type))

 match_data = self.visit(ctx.expr())
 match_clauses = ctx.matchClauseList()
...
@@ -50,7 +50,7 @@ class LinearizeRetType:
         for field_ty in typ.fields:
             _unpack(field_ty, out)
     else:
-        raise Exception(f"unsupported Relay type: {typ}")
+        raise Exception("unsupported Relay type: {0}".format(typ))

 output = []
 _unpack(self.typ, output)
@@ -67,7 +67,7 @@ class LinearizeRetType:
         _pack(value[i], field_ty, tuple_out)
     out.append(expr.Tuple(tuple_out))
 else:
-    raise Exception(f"unsupported Relay type: {typ}")
+    raise Exception("unsupported Relay type: {0}".format(typ))

 if len(seq) == 1:
     return seq[0]
@@ -144,11 +144,11 @@ class ManifestAllocPass(ExprMutator):
     size = self.compute_storage(tensor_type)
     alignment = self.compute_alignment(tensor_type.dtype)
     dtype = tensor_type.dtype
-    sto = scope.let(f"storage_{i}", self.alloc_storage(
+    sto = scope.let("storage_{0}".format(i), self.alloc_storage(
         size, alignment, dtype))
     # TODO(@jroesch): There is a bug with typing based on the constant shape.
     tensor = self.alloc_tensor(sto, shape, dtype, tensor_type.shape)
-    return scope.let(f"tensor_{i}", tensor)
+    return scope.let("tensor_{0}".format(i), tensor)

 def visit_let(self, let):
     scope = ScopeBuilder()
@@ -192,13 +192,13 @@ class ManifestAllocPass(ExprMutator):
 if state == 2:
     sh_of = self.visit(self.shape_of(arg))
     shape_func_ins.append(
-        scope.let(f"in_shape_{i}", sh_of))
+        scope.let("in_shape_{0}".format(i), sh_of))
     is_inputs.append(0)
 # Pass Inputs
 elif state == 1:
     new_arg = self.visit(arg)
     shape_func_ins.append(
-        scope.let(f"in_shape_{i}", new_arg))
+        scope.let("in_shape_{0}".format(i), new_arg))
     is_inputs.append(1)
 # TODO(@jroesch): handle 3rd case
 else:
@@ -208,7 +208,7 @@ class ManifestAllocPass(ExprMutator):
 for i, out in enumerate(cfunc.outputs):
     tt = ty.TensorType(out.shape, out.dtype)
     alloc = self.make_static_allocation(scope, tt, i)
-    alloc = scope.let(f"shape_func_out_{i}", alloc)
+    alloc = scope.let("shape_func_out_{0}".format(i), alloc)
     out_shapes.append(alloc)

 shape_call = self.shape_func(
@@ -226,7 +226,7 @@ class ManifestAllocPass(ExprMutator):
 size = self.compute_storage_in_relay(
     out_shape, out_type.dtype)
 alignment = self.compute_alignment(out_type.dtype)
-sto = scope.let(f"storage_{i}", self.alloc_storage(
+sto = scope.let("storage_{i}".format(i=i), self.alloc_storage(
     size, alignment, out_type.dtype))
 storages.append(sto)
@@ -238,7 +238,7 @@ class ManifestAllocPass(ExprMutator):
     out_shape,
     out_type.dtype,
     out_type.shape)
-alloc = scope.let(f"out_{i}", alloc)
+alloc = scope.let("out_{i}".format(i=i), alloc)
 outs.append(alloc)

 invoke = self.invoke_tvm(call.op, ins, expr.Tuple(outs))
...
@@ -128,7 +128,7 @@ from tvm.contrib import cc
 def test_add(target_dir):
     if not tvm.module.enabled("cuda"):
-        print(f"skip {__file__} because cuda is not enabled...")
+        print("skip {__file__} because cuda is not enabled...".format(__file__=__file__))
         return
     n = tvm.var("n")
     A = tvm.placeholder((n,), name='A')
...
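A brief editorial note, not part of the commit: the replacements above use two equivalent str.format() styles, positional (e.g. "storage_{0}".format(i)) and keyword (e.g. "storage_{i}".format(i=i)). Both substitute the same value, as this hypothetical check illustrates:

# Hypothetical check, not from the commit: positional and keyword substitution agree.
i = 3
assert "storage_{0}".format(i) == "storage_{i}".format(i=i) == "storage_3"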