Commit 996cf30e authored by Kim, committed by Tianqi Chen

remove PEP 498 f-strings to support Python 3.5 (#4250)

parent 1b053ec0
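
For context, f-strings (PEP 498) were added in Python 3.6, so any file containing one fails with a SyntaxError at import time under Python 3.5. The commit applies the same mechanical rewrite everywhere: replace each f-string with an equivalent str.format() call. A minimal sketch of the pattern, using made-up values rather than anything taken from the diff:

# f-string form -- a SyntaxError before any code runs on Python 3.5:
#   message = f"unbound global var `{var_name}`"
# str.format() equivalent, accepted by Python 3.5:
var_name = "main"  # illustrative value, not from the diff
message = "unbound global var `{0}`".format(var_name)
assert message == "unbound global var `main`"

Most replacements below use explicit positional indices ({0}, {1}) or named fields ("{i}".format(i=i)) rather than bare {}, which keeps multi-argument format calls unambiguous.
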
@@ -537,7 +537,7 @@ def _test_split(in_shape, axis, num_or_size_splits, dtype):
num_split = len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
tf.split(in_data, num_or_size_splits, axis=axis)
-compare_tf_with_tvm([np_data], ['in_data:0'], [f'split:{n}' for n in range(num_split)])
+compare_tf_with_tvm([np_data], ['in_data:0'], ['split:{0}'.format(n) for n in range(num_split)])
# and now test together with concat
tf.reset_default_graph()
@@ -586,7 +586,7 @@ def _test_unstack(ip_shape, axis, dtype):
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.unstack(in_data, axis=axis)
-compare_tf_with_tvm([np_data], ['in_data:0'], [f'unstack:{n}' for n in range(ip_shape[axis])])
+compare_tf_with_tvm([np_data], ['in_data:0'], ['unstack:{0}'.format(n) for n in range(ip_shape[axis])])
tf.reset_default_graph()
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
@@ -215,7 +215,7 @@ class ParseTreeToRelayIR(RelayVisitor):
def mk_global_var(self, name: str) -> expr.GlobalVar:
"""Create a new GlobalVar and add it to the GlobalVar scope."""
if name in self.global_vars:
raise ParseError(f"duplicate global var \"{name}\"")
raise ParseError("duplicate global var \"{0}\"".format(name))
var = expr.GlobalVar(name)
self.global_vars[name] = var
return var
@@ -252,14 +252,15 @@ class ParseTreeToRelayIR(RelayVisitor):
new_typ_name = self._type_expr_name(new_expr)
existing_typ_name = self._type_expr_name(self.global_type_vars[name])
raise ParseError(
f"{new_typ_name} `{name}` conflicts with existing {existing_typ_name}")
"{0} `{1}` conflicts with existing {2}".format(new_typ_name,\
name, existing_typ_name))
def _type_expr_name(self, e):
if isinstance(e, adt.Constructor):
return f"`{e.belong_to.var.name}` ADT constructor"
return "`{0}` ADT constructor".format(e.belong_to.var.name)
elif isinstance(e, ty.GlobalTypeVar):
if e.kind == ty.Kind.AdtHandle:
return f"ADT definition"
return "ADT definition"
return "function definition"
def visitProjection(self, ctx):
@@ -282,7 +283,7 @@ class ParseTreeToRelayIR(RelayVisitor):
raise ParseError("unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
raise ParseError(f"unhandled terminal \"{node_text}\" of type `{node_type}`")
raise ParseError("unhandled terminal \"{0}\" of type `{1}`".format(node_text, node_type))
def visitGeneralIdent(self, ctx):
name = ctx.getText()
@@ -310,14 +311,14 @@ class ParseTreeToRelayIR(RelayVisitor):
var_name = ctx.CNAME().getText()
global_var = self.global_vars.get(var_name, None)
if global_var is None:
raise ParseError(f"unbound global var `{var_name}`")
raise ParseError("unbound global var `{0}`".format(var_name))
return global_var
def visitLocalVar(self, ctx):
var_name = ctx.CNAME().getText()
local_var = lookup(self.var_scopes, var_name)
if local_var is None:
raise ParseError(f"unbound local var `{var_name}`")
raise ParseError("unbound local var `{0}`".format(var_name))
return local_var
def visitGraphVar(self, ctx):
@@ -557,7 +558,7 @@ class ParseTreeToRelayIR(RelayVisitor):
elif match_type == "match?":
complete_match = False
else:
raise RuntimeError(f"unknown match type {match_type}")
raise RuntimeError("unknown match type {0}".format(match_type))
match_data = self.visit(ctx.expr())
match_clauses = ctx.matchClauseList()
@@ -50,7 +50,7 @@ class LinearizeRetType:
for field_ty in typ.fields:
_unpack(field_ty, out)
else:
raise Exception(f"unsupported Relay type: {typ}")
raise Exception("unsupported Relay type: {0}".format(typ))
output = []
_unpack(self.typ, output)
@@ -67,7 +67,7 @@ class LinearizeRetType:
_pack(value[i], field_ty, tuple_out)
out.append(expr.Tuple(tuple_out))
else:
raise Exception(f"unsupported Relay type: {typ}")
raise Exception("unsupported Relay type: {0}".format(typ))
if len(seq) == 1:
return seq[0]
@@ -144,11 +144,11 @@ class ManifestAllocPass(ExprMutator):
size = self.compute_storage(tensor_type)
alignment = self.compute_alignment(tensor_type.dtype)
dtype = tensor_type.dtype
sto = scope.let(f"storage_{i}", self.alloc_storage(
sto = scope.let("storage_{0}".format(i), self.alloc_storage(
size, alignment, dtype))
# TODO(@jroesch): There is a bug with typing based on the constant shape.
tensor = self.alloc_tensor(sto, shape, dtype, tensor_type.shape)
return scope.let(f"tensor_{i}", tensor)
return scope.let("tensor_{0}".format(i), tensor)
def visit_let(self, let):
scope = ScopeBuilder()
@@ -192,13 +192,13 @@ class ManifestAllocPass(ExprMutator):
if state == 2:
sh_of = self.visit(self.shape_of(arg))
shape_func_ins.append(
scope.let(f"in_shape_{i}", sh_of))
scope.let("in_shape_{0}".format(i), sh_of))
is_inputs.append(0)
# Pass Inputs
elif state == 1:
new_arg = self.visit(arg)
shape_func_ins.append(
scope.let(f"in_shape_{i}", new_arg))
scope.let("in_shape_{0}".format(i), new_arg))
is_inputs.append(1)
# TODO(@jroesch): handle 3rd case
else:
@@ -208,7 +208,7 @@ class ManifestAllocPass(ExprMutator):
for i, out in enumerate(cfunc.outputs):
tt = ty.TensorType(out.shape, out.dtype)
alloc = self.make_static_allocation(scope, tt, i)
alloc = scope.let(f"shape_func_out_{i}", alloc)
alloc = scope.let("shape_func_out_{0}".format(i), alloc)
out_shapes.append(alloc)
shape_call = self.shape_func(
@@ -226,7 +226,7 @@ class ManifestAllocPass(ExprMutator):
size = self.compute_storage_in_relay(
out_shape, out_type.dtype)
alignment = self.compute_alignment(out_type.dtype)
sto = scope.let(f"storage_{i}", self.alloc_storage(
sto = scope.let("storage_{i}".format(i=i), self.alloc_storage(
size, alignment, out_type.dtype))
storages.append(sto)
@@ -238,7 +238,7 @@ class ManifestAllocPass(ExprMutator):
out_shape,
out_type.dtype,
out_type.shape)
alloc = scope.let(f"out_{i}", alloc)
alloc = scope.let("out_{i}".format(i=i), alloc)
outs.append(alloc)
invoke = self.invoke_tvm(call.op, ins, expr.Tuple(outs))
@@ -128,7 +128,7 @@ from tvm.contrib import cc
def test_add(target_dir):
if not tvm.module.enabled("cuda"):
print(f"skip {__file__} because cuda is not enabled...")
print("skip {__file__} because cuda is not enabled...".format(__file__=__file__))
return
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')