Unverified Commit 8df97ff6 by Samuel Committed by GitHub

[PyTorch] LayerNorm bug fix and test case updated (#5257)

parent 608e9458
......@@ -166,7 +166,8 @@ def _ones():
elif isinstance(data, (torch.Tensor, np.ndarray)):
shape = data.shape
else:
assert "data type {} could not be parsed in ones op" % (type(data))
msg = "Data type %s could not be parsed in ones op" % (type(data))
raise AssertionError(msg)
dtype_map = {6: "float32", 3: "int32"}
dtype_id = inputs[1]
......@@ -186,7 +187,8 @@ def _zeros():
elif isinstance(data, (torch.Tensor, np.ndarray)):
shape = data.shape
else:
assert "data type {} could not be parsed in zeros op" % (type(data))
msg = "Data type %s could not be parsed in zeros op" % (type(data))
raise AssertionError(msg)
dtype_map = {6: "float32", 3: "int32"}
dtype_id = inputs[1]
......@@ -354,7 +356,8 @@ def _convolution():
for infer in inferred_shape:
weight_shape.append(infer)
else:
assert "data type {} could not be parsed in conv op" % (type(weight))
msg = "Data type %s could not be parsed in conv op" % (type(weight))
raise AssertionError(msg)
# Transposed convolutions have IOHW layout.
if use_transpose:
......@@ -523,12 +526,12 @@ def _layer_norm():
assert ndims == 1, "Support only normalization over last one dimension."
return _op.nn.layer_norm(data,
gamma=inputs[1],
beta=inputs[2],
gamma=inputs[2],
beta=inputs[3],
axis=-1,
epsilon=float(inputs[4]),
center=False,
scale=False)
center=True,
scale=True)
return _impl
def _transpose():
......@@ -543,7 +546,8 @@ def _transpose():
elif isinstance(data, (torch.Tensor, np.ndarray)):
ndims = data.shape
else:
assert "data type {} could not be parsed in transpose op" % (type(data))
msg = "Data type %s could not be parsed in transpose op" % (type(data))
raise AssertionError(msg)
if isinstance(data, tvm.runtime.NDArray):
ndims = len(data.shape)
......
......@@ -562,8 +562,16 @@ def test_forward_instancenorm():
verify_model(ins_norm.eval(), input_data=inp)
def test_forward_layernorm():
inp = torch.rand((20, 5, 10, 10))
verify_model(torch.nn.LayerNorm(10).eval(), input_data=inp)
def init_weight(m):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.normal_(m.bias, 0.02)
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for ln, inp in [(torch.nn.LayerNorm(10), inp_2d),
(torch.nn.LayerNorm(10), inp_3d)]:
init_weight(ln.eval())
verify_model(ln.eval(), input_data=inp)
def test_forward_transpose():
torch.set_grad_enabled(False)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment