Commit bcacb764 by masahi, committed by Tianqi Chen

[Relay] Register compute and schedule for upsampling, with miscellaneous fixes (#2171)

parent a3530f8f
@@ -26,11 +26,8 @@ class RelayNode(NodeBase):
     def astext(self, show_meta_data=True, annotate=None):
         """Get the text format of the expression.
-        Returns
-        -------
-        text : str
-            The text format of the expression.
         Parameters
         ----------
         show_meta_data : bool
             Whether to include meta data section in the text
             if there is meta data.
@@ -44,6 +41,11 @@ class RelayNode(NodeBase):
         meta data section is necessary to fully parse the text format.
         However, it can contain dumps that are big (constant weights),
         so it can be helpful to skip printing the meta data section.
+
+        Returns
+        -------
+        text : str
+            The text format of the expression.
         """
         return _expr.RelayPrint(self, show_meta_data, annotate)
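The two hunks above simply reorder the astext docstring so that Returns follows Parameters, per numpydoc convention. A minimal, hypothetical usage sketch of astext (the expression below is illustrative, not part of this commit):

from tvm import relay

x = relay.var("x", shape=(1, 16, 32, 32), dtype="float32")
y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="NEAREST_NEIGHBOR")
# Skip the meta data section, which can contain large constant dumps.
print(y.astext(show_meta_data=False))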
@@ -274,8 +274,8 @@ def create_executor(kind="debug",
     kind : str
         The type of executor
 
-    mod : relay.Mod
-        The mod
+    mod : tvm.relay.Module
+        The Relay module containing a collection of functions
 
     ctx : tvm.TVMContext
        The context to execute the code.
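For context, a hedged sketch of the clarified create_executor signature; the tiny doubling function is made up for illustration:

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(2, 2), dtype="float32")
func = relay.Function([x], relay.add(x, x))

# kind selects the executor; ctx and target follow the docstring above.
ex = relay.create_executor(kind="debug", ctx=tvm.cpu(0), target="llvm")
res = ex.evaluate(func)(np.ones((2, 2), dtype="float32"))
print(res.asnumpy())  # [[2. 2.], [2. 2.]]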
@@ -76,7 +76,7 @@ def compute_conv2d(attrs, inputs, out_type, target):
         out = topi.nn.depthwise_conv2d_nchw(
             inputs[0], inputs[1], strides, padding, dilation, out_dtype=out_dtype)
     elif layout == "NHWC" and \
-        kernel_layout == "HWOI" and \
+        weight_layout == "HWOI" and \
        get_const_int(inputs[1].shape[2]) == groups and \
        get_const_int(inputs[1].shape[3]) == 1:
         out = topi.nn.depthwise_conv2d_nhwc(
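The one-token rename above fixes the NHWC depthwise path: compute_conv2d evidently binds the attribute to a variable named weight_layout, so the old kernel_layout reference would have raised a NameError once this branch was taken. The surrounding checks are what detect depthwise convolution here: an HWOI kernel whose output-channel dimension equals groups and whose channel multiplier is 1.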
@@ -242,3 +242,12 @@ def schedule_l2_normalize(attrs, outs, target):
         return topi.generic.schedule_l2_normalize(outs)
 
 reg.register_pattern("nn.l2_normalize", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
+@reg.register_schedule("nn.upsampling")
+def schedule_upsampling(_, outs, target):
+    """Schedule definition of upsampling"""
+    with target:
+        return topi.generic.schedule_injective(outs)
+
+reg.register_pattern("nn.upsampling", OpPattern.INJECTIVE)
@@ -5,6 +5,9 @@
  */
 #include <tvm/relay/op.h>
 #include <tvm/relay/attrs/nn.h>
+#include <tvm/relay/op_attr_types.h>
+#include <topi/elemwise.h>
+#include <topi/nn/upsampling.h>
 #include "../layout.h"
 
 namespace tvm {
@@ -82,7 +85,27 @@ RELAY_REGISTER_OP("nn.upsampling")
 .set_num_inputs(1)
 .add_argument("data", "Tensor", "The input tensor.")
 .set_support_level(2)
-.add_type_rel("UpSampling", UpSamplingRel);
+.add_type_rel("UpSampling", UpSamplingRel)
+.set_attr<FTVMCompute>(
+  "FTVMCompute", [](const Attrs& attrs,
+                    const Array<Tensor>& inputs,
+                    const Type& out_type,
+                    const Target& target) {
+    const auto* param = attrs.as<UpSamplingAttrs>();
+    const auto* out_ttype = out_type.as<TensorTypeNode>();
+    CHECK(param != nullptr);
+    CHECK(param->layout == "NCHW" || param->layout == "NHWC");
+    CHECK(out_ttype != nullptr);
+    Array<IndexExpr> oshape;
+    if (param->layout == "NCHW") {
+      oshape.push_back(out_ttype->shape[2]);
+      oshape.push_back(out_ttype->shape[3]);
+    } else if (param->layout == "NHWC") {
+      oshape.push_back(out_ttype->shape[1]);
+      oshape.push_back(out_ttype->shape[2]);
+    }
+    return Array<Tensor>{ topi::nn::upsampling(inputs[0], oshape, param->layout, param->method) };
+  });
 
 }  // namespace relay
 }  // namespace tvm
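Note that the new FTVMCompute never recomputes the output size from the scale attribute: it trusts the type checker and reads the spatial dimensions straight out of out_type, then hands them to topi::nn::upsampling. A rough Python rendering of that shape extraction (the function name is mine, for illustration):

def upsampling_spatial_oshape(checked_out_shape, layout):
    # Mirror of the lambda above: pick (H, W) out of the inferred output shape.
    if layout == "NCHW":
        return checked_out_shape[2], checked_out_shape[3]
    if layout == "NHWC":
        return checked_out_shape[1], checked_out_shape[2]
    raise ValueError("upsampling expects NCHW or NHWC, got " + layout)

assert upsampling_spatial_oshape((1, 16, 64, 64), "NCHW") == (64, 64)
assert upsampling_spatial_oshape((1, 64, 64, 16), "NHWC") == (64, 64)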
@@ -1212,7 +1212,7 @@ bool SplitRel(const Array<Type>& types,
     auto indices = param->indices_or_sections.as<ArrayNode>()->data;
     auto begin = IndexExpr(make_zero(Int(32)));
     std::vector<Type> fields;
-    for (uint i = 0; i < indices.size(); ++i) {
+    for (unsigned int i = 0; i < indices.size(); ++i) {
       CHECK(reporter->Assert(IndexExpr(indices[i]) > begin))
         << "indices_or_sections need to be a sorted ascending list";
       std::vector<IndexExpr>&& oshape = AsVector(data->shape);
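The uint -> unsigned int change is a portability fix: uint is a POSIX typedef rather than a standard C++ type, so toolchains that do not provide it (MSVC, for one) reject the loop.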
@@ -412,6 +412,42 @@ def test_batch_flatten():
         np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
 
+
+def _test_upsampling(layout, method):
+    n, c, h, w = tvm.var("n"), 16, 32, 32
+    scale = 2
+    dtype = "float32"
+    def get_shape():
+        if layout == "NCHW":
+            return (c, h, w), (c, h*scale, w*scale)
+        else:
+            return (h, w, c), (h*scale, w*scale, c)
+    ishape, oshape = get_shape()
+    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
+    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
+    yy = relay.ir_pass.infer_type(y)
+    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
+    dshape = (1,) + ishape
+    x = relay.var("x", shape=dshape)
+    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
+    func = relay.Function([x], y)
+    data = np.random.uniform(size=dshape).astype(dtype)
+    if method == "NEAREST_NEIGHBOR":
+        ref = topi.testing.upsampling_python(data, scale, layout)
+    else:
+        ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
+    for target, ctx in ctx_list():
+        executor = relay.create_executor("graph", ctx=ctx, target=target)
+        out = executor.evaluate(func)(data)
+        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
+
+
+def test_upsampling():
+    _test_upsampling("NCHW", "NEAREST_NEIGHBOR")
+    _test_upsampling("NCHW", "BILINEAR")
+    _test_upsampling("NHWC", "NEAREST_NEIGHBOR")
+    _test_upsampling("NHWC", "BILINEAR")
+
+
 if __name__ == "__main__":
     test_pool2d()
     test_avg_pool2d_no_count_pad()
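The new test exercises both halves of the commit: _test_upsampling first type-checks nn.upsampling with a symbolic batch dimension n, then builds a concrete-shape function and runs it on every available target through the graph executor, comparing against the topi.testing reference (nearest-neighbor or bilinear).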
@@ -425,3 +461,4 @@ if __name__ == "__main__":
     test_conv2d_transpose_run()
     test_conv2d_run()
     test_batch_flatten()
+    test_upsampling()