Unverified commit 2b661231 by masahi, committed by GitHub

[TOPI, Relay refactor] Move Dilation2d from nn to image namespace (#5110)

parent 5088a034
......@@ -57,7 +57,6 @@ List of operators
topi.nn.relu
topi.nn.leaky_relu
topi.nn.dilate
topi.nn.dilation2d
topi.nn.pool
topi.nn.global_pool
topi.nn.adaptive_pool
......@@ -106,6 +105,7 @@ List of operators
topi.layout_transform
topi.image.resize
topi.image.crop_and_resize
topi.image.dilation2d
topi.argsort
topi.topk
topi.sequence_mask
......@@ -198,7 +198,6 @@ topi.nn
.. autofunction:: topi.nn.upsampling
.. autofunction:: topi.nn.softmax
.. autofunction:: topi.nn.dense
.. autofunction:: topi.nn.dilation2d
.. autofunction:: topi.nn.batch_matmul
.. autofunction:: topi.nn.log_softmax
.. autofunction:: topi.nn.conv2d_nchw
......
......@@ -70,7 +70,6 @@ This level enables typical convnet models.
tvm.relay.nn.conv2d
tvm.relay.nn.conv2d_transpose
tvm.relay.nn.dense
tvm.relay.nn.dilation2d
tvm.relay.nn.max_pool2d
tvm.relay.nn.max_pool3d
tvm.relay.nn.avg_pool2d
......@@ -171,6 +170,7 @@ This level enables additional math and transform operators.
tvm.relay.image.resize
tvm.relay.image.crop_and_resize
tvm.relay.image.dilation2d
tvm.relay.vision.multibox_prior
tvm.relay.vision.multibox_transform_loc
tvm.relay.vision.nms
......@@ -250,7 +250,6 @@ Level 2 Definitions
.. autofunction:: tvm.relay.nn.conv2d
.. autofunction:: tvm.relay.nn.conv2d_transpose
.. autofunction:: tvm.relay.nn.dense
.. autofunction:: tvm.relay.nn.dilation2d
.. autofunction:: tvm.relay.nn.max_pool2d
.. autofunction:: tvm.relay.nn.max_pool3d
.. autofunction:: tvm.relay.nn.avg_pool2d
......@@ -339,6 +338,7 @@ Level 5 Definitions
-------------------
.. autofunction:: tvm.relay.image.resize
.. autofunction:: tvm.relay.image.crop_and_resize
.. autofunction:: tvm.relay.image.dilation2d
.. autofunction:: tvm.relay.vision.multibox_prior
.. autofunction:: tvm.relay.vision.multibox_transform_loc
.. autofunction:: tvm.relay.vision.nms
......
......@@ -91,6 +91,41 @@ struct CropAndResizeAttrs : public tvm::AttrsNode<CropAndResizeAttrs> {
}
};
/*! \brief Attributes used in dilation operators */
struct Dilation2DAttrs : public tvm::AttrsNode<Dilation2DAttrs> {
Array<IndexExpr> strides;
Array<IndexExpr> padding;
Array<IndexExpr> dilations;
std::string data_layout;
std::string kernel_layout;
DataType out_dtype;
TVM_DECLARE_ATTRS(Dilation2DAttrs, "relay.attrs.Dilation2DAttrs") {
TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1, 1}))
.describe("Specifies the strides of the sliding window. [stride_height, stride_width].");
TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0, 0}))
.describe("If padding is non-zero, then the input is implicitly zero-padded. "
"Padding supports both symmetric and asymmetric modes: "
"one int : the same padding is used on all sides; "
"two ints : bottom and right use the same padding as top and left; "
"four ints : padding widths in the order (top, left, bottom, right).");
TVM_ATTR_FIELD(dilations).set_default(Array<IndexExpr>({1, 1}))
.describe("Specifies the dilation rate to use. [dilation_height, dilation_width]");
TVM_ATTR_FIELD(data_layout).set_default("NCHW")
.describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
"'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
"dimensions respectively. Dilation is applied on the 'H' and "
"'W' dimensions.");
TVM_ATTR_FIELD(kernel_layout).set_default("IHW")
.describe("Dimension ordering of the weight. Can be 'IHW', 'HWI', etc. "
"'I', 'H', 'W' stand for input_channel, height, and width "
"dimensions respectively.");
TVM_ATTR_FIELD(out_dtype)
.set_default(NullValue<DataType>())
.describe("Output data type, set to explicit type under mixed precision setting");
}
};
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_IMAGE_H_
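The 1/2/4-integer padding convention spelled out in `Dilation2DAttrs` above is easy to misread, so here is a small, hedged illustration in plain Python (not TVM code; the helper name is made up) of how such a value normalizes to (top, left, bottom, right):

def normalize_padding(padding):
    # 1 int : the same padding is used on all four sides
    if len(padding) == 1:
        top = left = bottom = right = padding[0]
    # 2 ints : bottom/right reuse the top/left values
    elif len(padding) == 2:
        top, left = padding
        bottom, right = padding
    # 4 ints : explicit (top, left, bottom, right)
    elif len(padding) == 4:
        top, left, bottom, right = padding
    else:
        raise ValueError("padding must have 1, 2, or 4 elements")
    return top, left, bottom, right

assert normalize_padding([2]) == (2, 2, 2, 2)
assert normalize_padding([1, 2]) == (1, 2, 1, 2)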
......@@ -155,43 +155,6 @@ struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
}
};
/*! \brief Attributes used in dilation operators */
struct Dilation2DAttrs : public tvm::AttrsNode<Dilation2DAttrs> {
Array<IndexExpr> strides;
Array<IndexExpr> padding;
Array<IndexExpr> dilations;
std::string data_layout;
std::string kernel_layout;
DataType out_dtype;
TVM_DECLARE_ATTRS(Dilation2DAttrs, "relay.attrs.Dilation2DAttrs") {
TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1, 1}))
.describe("Specifies the strides of the sliding window. [stride_height, stride_width].");
TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0, 0}))
.describe("If padding is non-zero, then the input is implicitly zero-padded"
"Padding support both symmetric and asymmetric as"
"one int : same padding used on all sides"
"two int : bottom, right will use same padding as top, left"
"four int : padding width in the order of (top, left, bottom, right)");
TVM_ATTR_FIELD(dilations).set_default(Array<IndexExpr>({1, 1}))
.describe("Specifies the dilation rate to use. [dilation_height, dilation_width]");
TVM_ATTR_FIELD(data_layout).set_default("NCHW")
.describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
"'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
"dimensions respectively. Convolution is applied on the 'H' and"
"'W' dimensions.");
TVM_ATTR_FIELD(kernel_layout).set_default("IHW")
.describe("Dimension ordering of weight. Can be 'IHW', 'HWI', etc."
"'I', 'H', 'W' stands for input_channel, height, and width"
"dimensions respectively.");
TVM_ATTR_FIELD(out_dtype)
.set_default(NullValue<DataType>())
.describe("Output data type, set to explicit type under mixed precision setting");
}
};
/*! \brief Attributes used in winograd weight transformation operators */
struct Conv2DWinogradWeightTransformAttrs :
public tvm::AttrsNode<Conv2DWinogradWeightTransformAttrs> {
......
......@@ -20,6 +20,9 @@ from __future__ import absolute_import
import topi
from .. import op as reg
from .. import strategy
from ..op import OpPattern
# resize
@reg.register_compute("image.resize")
......@@ -47,3 +50,8 @@ def compute_crop_and_resize(attrs, inputs, out_type):
extrapolation_value, out_dtype)]
reg.register_injective_schedule("image.crop_and_resize")
# dilation2d
reg.register_strategy("image.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("image.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)
......@@ -113,3 +113,60 @@ def crop_and_resize(data,
"""
return _make.crop_and_resize(data, boxes, box_indices, crop_size,
layout, method, extrapolation_value, out_dtype)
def dilation2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilations=(1, 1),
data_layout="NCHW",
kernel_layout="IHW",
out_dtype=""):
r"""Morphological Dilation 2D.
This operator takes the weight as the dilation kernel and dilates it with
data to produce an output. In the default case, where the data_layout is `NCHW`
and kernel_layout is `IHW`, dilation2d takes in a data Tensor with shape
`(batch_size, in_channels, height, width)`, and a weight Tensor with shape
`(channels, kernel_height, kernel_width)` to produce an output Tensor
with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \max_{dy, dx}
\mbox{data}[b, c, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] +
\mbox{weight}[c, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts a data layout specification. Semantically, the operator
converts the layout to the canonical layout
(`NCHW` for data and `IHW` for weight) and performs the computation.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expression.
strides : Optional[Tuple[int]]
The strides of the sliding window.
padding : Optional[Tuple[int]]
The padding applied to both sides of the input before the computation.
dilations : Optional[Tuple[int]]
Specifies the dilation rate to apply to the kernel.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_dtype : Optional[str]
Specifies the output data type.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dilation2d(data, weight, strides, padding, dilations, data_layout,
kernel_layout, out_dtype)
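A minimal usage sketch of the relocated Python API (the shapes, and the use of `IRModule`/`InferType` to check the result, are illustrative assumptions; they mirror the unit tests below rather than anything this patch mandates):

import tvm
from tvm import relay

data = relay.var("data", shape=(1, 3, 32, 32), dtype="float32")  # NCHW
kern = relay.var("kern", shape=(3, 5, 5), dtype="float32")       # IHW: (in_channels, kh, kw)
out = relay.image.dilation2d(data, kern, strides=(1, 1), padding=(0, 0),
                             dilations=(1, 1), data_layout="NCHW",
                             kernel_layout="IHW")
mod = tvm.IRModule.from_expr(relay.Function([data, kern], out))
mod = relay.transform.InferType()(mod)
print(mod["main"].ret_type)  # expected: Tensor[(1, 3, 28, 28), float32]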
......@@ -178,9 +178,6 @@ def legalize_conv2d_transpose(attrs, inputs, types):
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# dilation2d
reg.register_strategy("nn.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("nn.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
......
......@@ -2463,60 +2463,3 @@ def adaptive_avg_pool3d(data,
"""
output_size = [] or output_size
return _make.adaptive_avg_pool3d(data, output_size, layout)
def dilation2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilations=(1, 1),
data_layout="NCHW",
kernel_layout="IHW",
out_dtype=""):
r"""Dilation 2D.
This operator takes the weight as the dilation kernel and dilates it with
data to produce an output. In the default case, where the data_layout is `NCHW`
and kernel_layout is `OIHW`, dilation2d takes in a data Tensor with shape
`(batch_size, in_channels, height, width)`, and a weight Tensor with shape
`(channels, kernel_height, kernel_width)` to produce an output Tensor
with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \max_{dy, dx}
\mbox{data}[b, c, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] +
\mbox{weight}[c, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification. Semantically, the operator
will convert the layout to the canonical layout
(`NCHW` for data and `IHW` for weight) and perform the computation.
weight : tvm.relay.Expr
The weight expressions.
strides : Optional[Tuple[int]]
The strides of convolution.
padding : Optional[Tuple[int]]
The padding of convolution on both sides of inputs before convolution.
dilations : Optional[Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_dtype : Optional[str]
Specifies the output data type.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dilation2d(data, weight, strides, padding, dilations, data_layout,
kernel_layout, out_dtype)
......@@ -44,11 +44,6 @@ class Conv2DWinogradNNPACKWeightTransformAttrs(Attrs):
"""Attributes for nn.contrib_conv2d_winograd_nnpack_weight_transform"""
@tvm._ffi.register_object("relay.attrs.Dilation2DAttrs")
class Dilation2DAttrs(Attrs):
"""Attributes for nn.dilation2d"""
@tvm._ffi.register_object("relay.attrs.GlobalPool2DAttrs")
class GlobalPool2DAttrs(Attrs):
"""Attributes for nn.global_pool"""
......@@ -124,10 +119,17 @@ class DeformableConv2DAttrs(Attrs):
class ResizeAttrs(Attrs):
"""Attributes for image.resize"""
@tvm._ffi.register_object("relay.attrs.CropAndResizeAttrs")
class CropAndResizeAttrs(Attrs):
"""Attributes for image.crop_and_resize"""
@tvm._ffi.register_object("relay.attrs.Dilation2DAttrs")
class Dilation2DAttrs(Attrs):
"""Attributes for image.dilation2d"""
@tvm._ffi.register_object("relay.attrs.ArgsortAttrs")
class ArgsortAttrs(Attrs):
"""Attributes for algorithm.argsort"""
......
......@@ -479,13 +479,13 @@ def dilation2d_strategy(attrs, inputs, out_type, target):
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.nn.dilation2d_nchw),
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic")
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.nn.dilation2d_nhwc),
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic")
else:
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file dilation2d.cc
* \brief Morphological dilation operator
*/
#include <tvm/tir/data_layout.h>
#include <tvm/relay/op.h>
#include <tvm/relay/attrs/image.h>
#include "../op_common.h"
namespace tvm {
namespace relay {
// relay.image.dilation2d
TVM_REGISTER_NODE_TYPE(Dilation2DAttrs);
template<typename T>
Array<Array<Layout> > Dilation2DInferCorrectLayout(
const Attrs& attrs,
const Array<Layout>& new_in_layouts,
const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type> &old_in_types) {
const T* params = attrs.as<T>();
return Array<Array<Layout> >{{params->data_layout, params->kernel_layout},
{params->data_layout}};
}
// Positional relay function to create dilation2d operator
// used by frontend FFI.
Expr MakeDilation2D(Expr data,
Expr weight,
Array<IndexExpr> strides,
Array<IndexExpr> padding,
Array<IndexExpr> dilations,
std::string data_layout,
std::string kernel_layout,
DataType out_dtype) {
auto attrs = make_object<Dilation2DAttrs>();
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilations = std::move(dilations);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_dtype = std::move(out_dtype);
static const Op& op = Op::Get("image.dilation2d");
return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}
template <typename AttrType>
bool Dilation2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
const auto* data = types[0].as<TensorTypeNode>();
const auto* weight = types[1].as<TensorTypeNode>();
if (data == nullptr) return false;
static const Layout kNCHW("NCHW");
static const Layout kIHW("IHW");
const AttrType* param = attrs.as<AttrType>();
CHECK(param != nullptr);
const Layout in_layout(param->data_layout);
const Layout kernel_layout(param->kernel_layout);
const auto trans_in_layout = BijectiveLayoutNode::make(in_layout, kNCHW);
CHECK(trans_in_layout.defined())
<< "Dilation2D only supports input layouts that are convertible from NCHW."
<< " But got " << in_layout;
const auto trans_kernel_layout = BijectiveLayoutNode::make(kernel_layout, kIHW);
CHECK(trans_kernel_layout.defined())
<< "Dilation2D only supports kernel layouts that are convertible from IHW."
<< " But got " << kernel_layout;
Layout out_layout(param->data_layout);
const auto trans_out_layout = BijectiveLayoutNode::make(out_layout, kNCHW);
CHECK(trans_out_layout.defined())
<< "Dilation2D only supports output layouts that are convertible from NCHW."
<< " But got " << out_layout;
Array<IndexExpr> dshape_nchw = trans_in_layout.ForwardShape(data->shape);
IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
// use the weight shape to infer the output channels and kernel size.
if (weight == nullptr) return false;
auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
channels = wshape[0];
dilated_ksize_y = 1 + (wshape[1] - 1) * param->dilations[0];
dilated_ksize_x = 1 + (wshape[2] - 1) * param->dilations[1];
// dilation
Array<IndexExpr> oshape({dshape_nchw[0], channels, 0, 0});
IndexExpr pad_h, pad_w;
GetPaddingHeightWidth(param->padding, &pad_h, &pad_w);
if (!dshape_nchw[2].as<tir::AnyNode>()) {
oshape.Set(2, indexdiv(dshape_nchw[2] + pad_h - dilated_ksize_y,
param->strides[0]) + 1);
} else {
oshape.Set(2, dshape_nchw[2]);
}
if (!dshape_nchw[3].as<tir::AnyNode>()) {
oshape.Set(3, indexdiv(dshape_nchw[3] + pad_w - dilated_ksize_x,
param->strides[1]) + 1);
} else {
oshape.Set(3, dshape_nchw[3]);
}
DataType out_dtype = param->out_dtype;
if (out_dtype.bits() == 0) {
out_dtype = data->dtype;
}
oshape = trans_out_layout.BackwardShape(oshape);
// assign output type
reporter->Assign(types[2], TensorType(oshape, out_dtype));
return true;
}
TVM_REGISTER_GLOBAL("relay.op.image._make.dilation2d")
.set_body_typed(MakeDilation2D);
RELAY_REGISTER_OP("image.dilation2d")
.describe(R"code(Computes grayscale dilation of 4D input and 3D filter.
- **data**: This depends on the `layout` parameter. Input is 4D array of shape
(batch_size, in_channels, height, width) if `layout` is `NCHW`.
- **weight**: (in_channels, height, width)
- **out**: This depends on the `layout` parameter. Output is 4D array of shape
(batch_size, channels, out_height, out_width) if `layout` is `NCHW`.
)code" TVM_ADD_FILELINE)
.set_attrs_type<Dilation2DAttrs>()
.set_num_inputs(2)
.add_argument("data", "Tensor", "The input tensor.")
.add_argument("weight", "Tensor", "The weight tensor.")
.set_support_level(2)
.add_type_rel("Dilation2D", Dilation2DRel<Dilation2DAttrs>)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
Dilation2DInferCorrectLayout<Dilation2DAttrs>);
} // namespace relay
} // namespace tvm
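For reference, a hedged back-of-the-envelope check of the output-shape rule that `Dilation2DRel` implements above, using the numbers from the infer-type unit test (224x224 input, 8x8 kernel, no padding, unit strides and dilations):

def out_dim(in_dim, ksize, pad, stride, dilation):
    dilated_ksize = 1 + (ksize - 1) * dilation   # mirrors dilated_ksize_y / dilated_ksize_x
    return (in_dim + pad - dilated_ksize) // stride + 1

assert out_dim(224, 8, pad=0, stride=1, dilation=1) == 217   # matches the (n, 10, 217, 217) assertion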
......@@ -19,7 +19,7 @@
/*!
* \file resize.cc
* \brief Image operators
* \brief Image resize operators
*/
#include <tvm/tir/data_layout.h>
#include <tvm/relay/op.h>
......
......@@ -1023,66 +1023,5 @@ Expr MakeDeformableConv2D(Expr data,
TVM_REGISTER_GLOBAL("relay.op.nn._make.deformable_conv2d")
.set_body_typed(MakeDeformableConv2D);
// relay.nn.dilation2d
TVM_REGISTER_NODE_TYPE(Dilation2DAttrs);
template<typename T>
Array<Array<Layout> > Dilation2DInferCorrectLayout(
const Attrs& attrs,
const Array<Layout>& new_in_layouts,
const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type> &old_in_types) {
const T* params = attrs.as<T>();
// We always make other operators to fit the layouts of convolution layers
// So this inference ignores all inputs
return Array<Array<Layout> >{{params->data_layout, params->kernel_layout},
{params->data_layout}};
}
// Positional relay function to create dilation2d operator
// used by frontend FFI.
Expr MakeDilation2D(Expr data,
Expr weight,
Array<IndexExpr> strides,
Array<IndexExpr> padding,
Array<IndexExpr> dilations,
std::string data_layout,
std::string kernel_layout,
DataType out_dtype) {
auto attrs = make_object<Dilation2DAttrs>();
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilations = std::move(dilations);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_dtype = std::move(out_dtype);
static const Op& op = Op::Get("nn.dilation2d");
return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}
TVM_REGISTER_GLOBAL("relay.op.nn._make.dilation2d")
.set_body_typed(MakeDilation2D);
RELAY_REGISTER_OP("nn.dilation2d")
.describe(R"code(Computes grayscale dilation of 4D input and 3D filter.
- **data**: This depends on the `layout` parameter. Input is 4D array of shape
(batch_size, in_channels, height, width) if `layout` is `NCHW`.
- **weight**: (in_channels, height, width)
- **out**: This depends on the `layout` parameter. Output is 4D array of shape
(batch_size, channels, out_height, out_width) if `layout` is `NCHW`.
)code" TVM_ADD_FILELINE)
.set_attrs_type<Dilation2DAttrs>()
.set_num_inputs(2)
.add_argument("data", "Tensor", "The input tensor.")
.add_argument("weight", "Tensor", "The weight tensor.")
.set_support_level(2)
.add_type_rel("Dilation2D", Dilation2DRel<Dilation2DAttrs>)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
Dilation2DInferCorrectLayout<Dilation2DAttrs>);
} // namespace relay
} // namespace tvm
......@@ -360,77 +360,6 @@ bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
return true;
}
template <typename AttrType>
bool Dilation2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
const auto* data = types[0].as<TensorTypeNode>();
const auto* weight = types[1].as<TensorTypeNode>();
if (data == nullptr) return false;
static const Layout kNCHW("NCHW");
static const Layout kOIHW("IHW");
const AttrType* param = attrs.as<AttrType>();
CHECK(param != nullptr);
const Layout in_layout(param->data_layout);
const Layout kernel_layout(param->kernel_layout);
const auto trans_in_layout = BijectiveLayoutNode::make(in_layout, kNCHW);
CHECK(trans_in_layout.defined())
<< "Dilation2D only support input layouts that are convertible from NCHW."
<< " But got " << in_layout;
const auto trans_kernel_layout = BijectiveLayoutNode::make(kernel_layout, kOIHW);
CHECK(trans_kernel_layout.defined())
<< "Dilation2D only support kernel layouts that are convertible from OIHW."
<< " But got " << kernel_layout;
Layout out_layout(param->data_layout);
const auto trans_out_layout = BijectiveLayoutNode::make(out_layout, kNCHW);
CHECK(trans_out_layout.defined())
<< "Dilation2D only support output layouts that are convertible from NCHW."
<< " But got " << out_layout;
Array<IndexExpr> dshape_nchw = trans_in_layout.ForwardShape(data->shape);
IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
// use weight to infer the conv shape.
if (weight == nullptr) return false;
auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
channels = wshape[0];
dilated_ksize_y = 1 + (wshape[1] - 1) * param->dilations[0];
dilated_ksize_x = 1 + (wshape[2] - 1) * param->dilations[1];
// dilation
Array<IndexExpr> oshape({dshape_nchw[0], channels, 0, 0});
IndexExpr pad_h, pad_w;
GetPaddingHeightWidth(param->padding, &pad_h, &pad_w);
if (!dshape_nchw[2].as<tir::AnyNode>()) {
oshape.Set(2, indexdiv(dshape_nchw[2] + pad_h - dilated_ksize_y,
param->strides[0]) + 1);
} else {
oshape.Set(2, dshape_nchw[2]);
}
if (!dshape_nchw[3].as<tir::AnyNode>()) {
oshape.Set(3, indexdiv(dshape_nchw[3] + pad_w - dilated_ksize_x,
param->strides[1]) + 1);
} else {
oshape.Set(3, dshape_nchw[3]);
}
DataType out_dtype = param->out_dtype;
if (out_dtype.bits() == 0) {
out_dtype = data->dtype;
}
oshape = trans_out_layout.BackwardShape(oshape);
// assign output type
reporter->Assign(types[2], TensorType(oshape, out_dtype));
return true;
}
template<typename T>
Array<Array<Layout> > ConvInferCorrectLayout(
const Attrs& attrs,
......
......@@ -1219,113 +1219,6 @@ def test_depthwise_conv2d_int8():
graph, lib, params = relay.build(func, target, params=parameters)
def test_dilation2d_infer_type():
# symbolic in batch dimension
n, h, w, c = te.var("n"), 224, 224, 10
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
kc, kh, kw = 10, 8, 8
w = relay.var("w", relay.ty.TensorType((kc, kw, kh), "float32"))
y = relay.nn.dilation2d(x, w,
# kernel_size=(3, 3),
strides=[1, 1, 1, 1],
dilations=[1, 1, 1, 1],
padding=[0, 0, 0, 0])
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 10, 217, 217), "float32")
def test_dilation2d_run():
def run_test_dilation2d(indata, kernel, out,
dtype='float32',
strides=[1, 1],
padding=[0, 0],
dilations=[1, 1],
except_targets=['cuda'],
**attrs):
dshape = indata.shape
kshape = kernel.shape
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.dilation2d(x, w,
strides=strides,
dilations=dilations,
padding=padding,
**attrs)
func = relay.Function([x, w], y)
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(indata, kernel)
tvm.testing.assert_allclose(op_res.asnumpy(), out, rtol=1e-5, atol=1e-5)
def _convert_data(indata, kernel, out, layout=None):
indata = np.asarray(indata)
kernel = np.asarray(kernel)
out = np.asarray(out)
if layout == 'NCHW':
indata = indata.transpose([0, 3, 1, 2])
kernel = kernel.transpose([2, 0, 1])
out = out.transpose([0, 3, 1, 2])
return indata, kernel, out
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'))
run_test_dilation2d(*_convert_data(image, kernel, out), data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5], [.6]], [[.7], [.8]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]]]
out = [[[[.5]], [[.7]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'))
run_test_dilation2d(*_convert_data(image, kernel, out),
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
kernel = [[[.4], [.3]], [[.1], [.2]]]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[1, 1], dilations=[2, 2])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[1, 1], dilations=[2, 2],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
kernel = [[[.4], [.3]], [[.1], [.2]]]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), strides=[1, 2])
run_test_dilation2d(*_convert_data(image, kernel, out), strides=[1, 2],
data_layout='NHWC', kernel_layout='HWI')
def test_bitserial_conv2d_infer_type():
# Basic shape test with ambiguous batch.
n, c, h, w = te.size_var("n"), 32, 224, 224
......@@ -1381,5 +1274,3 @@ if __name__ == "__main__":
test_upsampling3d()
test_conv2d_int8_intrinsics()
test_depthwise_conv2d_int8()
test_dilation2d_infer_type()
test_dilation2d_run()
......@@ -671,6 +671,113 @@ def test_space_to_depth():
verify_space_to_depth((1, 4, 4, 4), 2, layout)
def test_dilation2d_infer_type():
# symbolic in batch dimension
n, h, w, c = te.var("n"), 224, 224, 10
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
kc, kh, kw = 10, 8, 8
w = relay.var("w", relay.ty.TensorType((kc, kw, kh), "float32"))
y = relay.image.dilation2d(x, w,
# kernel_size=(3, 3),
strides=[1, 1, 1, 1],
dilations=[1, 1, 1, 1],
padding=[0, 0, 0, 0])
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 10, 217, 217), "float32")
def test_dilation2d_run():
def run_test_dilation2d(indata, kernel, out,
dtype='float32',
strides=[1, 1],
padding=[0, 0],
dilations=[1, 1],
except_targets=['cuda'],
**attrs):
dshape = indata.shape
kshape = kernel.shape
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.image.dilation2d(x, w,
strides=strides,
dilations=dilations,
padding=padding,
**attrs)
func = relay.Function([x, w], y)
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(indata, kernel)
tvm.testing.assert_allclose(op_res.asnumpy(), out, rtol=1e-5, atol=1e-5)
def _convert_data(indata, kernel, out, layout=None):
indata = np.asarray(indata)
kernel = np.asarray(kernel)
out = np.asarray(out)
if layout == 'NCHW':
indata = indata.transpose([0, 3, 1, 2])
kernel = kernel.transpose([2, 0, 1])
out = out.transpose([0, 3, 1, 2])
return indata, kernel, out
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'))
run_test_dilation2d(*_convert_data(image, kernel, out), data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5], [.6]], [[.7], [.8]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
kernel = [[[.4], [.3]], [[.1], [.0]]]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[0, 0, 1, 1])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[0, 0, 1, 1],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2]], [[.3], [.4]]]]
kernel = [[[.4], [.3]]]
out = [[[[.5]], [[.7]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'))
run_test_dilation2d(*_convert_data(image, kernel, out),
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
kernel = [[[.4], [.3]], [[.1], [.2]]]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), padding=[1, 1], dilations=[2, 2])
run_test_dilation2d(*_convert_data(image, kernel, out), padding=[1, 1], dilations=[2, 2],
data_layout='NHWC', kernel_layout='HWI')
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
kernel = [[[.4], [.3]], [[.1], [.2]]]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
run_test_dilation2d(*_convert_data(image, kernel, out, layout='NCHW'), strides=[1, 2])
run_test_dilation2d(*_convert_data(image, kernel, out), strides=[1, 2],
data_layout='NHWC', kernel_layout='HWI')
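# A hedged NumPy reference for the max-plus rule exercised above (assumptions:
# NHWC data, HWI kernel, VALID padding; used only for illustration here).
def _dilation2d_ref(data, kernel, stride=(1, 1), dilation=(1, 1)):
    b, h, w, c = data.shape
    kh, kw, _ = kernel.shape
    dkh = 1 + (kh - 1) * dilation[0]
    dkw = 1 + (kw - 1) * dilation[1]
    oh = (h - dkh) // stride[0] + 1
    ow = (w - dkw) // stride[1] + 1
    out = np.full((b, oh, ow, c), -np.inf, dtype=data.dtype)
    for dy in range(kh):
        for dx in range(kw):
            window = data[:, dy * dilation[0]:dy * dilation[0] + oh * stride[0]:stride[0],
                          dx * dilation[1]:dx * dilation[1] + ow * stride[1]:stride[1], :]
            out = np.maximum(out, window + kernel[dy, dx, :])
    return out

# Cross-check against the first case above: max(.1+.4, .2+.3, .3+.1, .4+.0) == .5
_image = np.array([[[[.1], [.2]], [[.3], [.4]]]], dtype="float32")
_kern = np.array([[[.4], [.3]], [[.1], [.0]]], dtype="float32")
assert np.allclose(_dilation2d_ref(_image, _kern), [[[[.5]]]])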
if __name__ == "__main__":
test_resize_infer_type()
test_resize()
......@@ -687,3 +794,5 @@ if __name__ == "__main__":
test_deformable_conv2d()
test_depth_to_space()
test_space_to_depth()
test_dilation2d_infer_type()
test_dilation2d_run()
......@@ -38,3 +38,4 @@ from .extern import *
from .vision import *
from .sort import *
from .search import *
from .image import *
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""The default schedule used by various operators"""
import tvm
from tvm import te
def default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.Target.current(allow_none=False)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
if target.target_name != "llvm":
raise RuntimeError("schedule not registered for '%s'" % target)
s = te.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
te.schedule.AutoInlineInjective(s)
s[x].fuse(s[x].op.axis)
return s
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic image operators"""
from .default import default_schedule as _default_schedule
def schedule_dilation2d_nchw(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_dilation2d_nhwc(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
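A hedged sketch (not part of this patch) of how these generic schedules are typically paired with the relocated TOPI compute on an llvm target; the placeholder shapes are illustrative:

import tvm
from tvm import te
import topi

data = te.placeholder((1, 3, 32, 32), name="data")   # NCHW
kern = te.placeholder((3, 5, 5), name="kern")        # IHW
with tvm.target.create("llvm"):
    out = topi.image.dilation2d_nchw(data, kern, stride=(1, 1), padding=(0, 0), dilations=(1, 1))
    s = topi.generic.schedule_dilation2d_nchw([out])
    func = tvm.build(s, [data, kern, out], target="llvm")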
......@@ -16,21 +16,8 @@
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
import tvm
from tvm import te
def _default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.Target.current(allow_none=False)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
if target.target_name not in ("llvm", "c"):
raise RuntimeError("schedule not registered for '%s'" % target)
s = te.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
te.schedule.AutoInlineInjective(s)
s[x].fuse(s[x].op.axis)
return s
from .default import default_schedule as _default_schedule
def schedule_conv1d_ncw(outs):
......@@ -648,33 +635,3 @@ def schedule_batch_matmul(outs):
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_dilation2d_nchw(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_dilation2d_nhwc(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
......@@ -17,7 +17,8 @@
# pylint: disable=invalid-name, no-member
"""Generic search operators"""
from __future__ import absolute_import as _abs
from .vision import _default_schedule
from .default import default_schedule as _default_schedule
def schedule_argwhere(outs):
"""Schedule for argwhere operator.
......
......@@ -15,9 +15,10 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic vision operators"""
"""Generic sort operators"""
from __future__ import absolute_import as _abs
from .vision import _default_schedule
from .default import default_schedule as _default_schedule
def schedule_argsort(outs):
"""Schedule for argsort operator.
......
......@@ -18,21 +18,9 @@
"""Generic vision operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import cpp
from .default import default_schedule as _default_schedule
def _default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.Target.current(allow_none=False)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
if target.target_name != "llvm":
raise RuntimeError("schedule not registered for '%s'" % target)
s = te.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
te.schedule.AutoInlineInjective(s)
s[x].fuse(s[x].op.axis)
return s
def schedule_reorg(outs):
"""Schedule for reorg
......
......@@ -20,3 +20,4 @@
from __future__ import absolute_import as _abs
from .resize import *
from .dilation2d import *
......@@ -20,12 +20,12 @@
from __future__ import absolute_import as _abs
from tvm import te
from topi.util import simplify
from .pad import pad
from .util import get_pad_tuple
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
def dilation2d_nchw(input, filter, stride, padding, dilations, out_dtype=None):
"""Dilation2D operator in NCHW layout.
"""Morphological dilation operator in NCHW layout.
Parameters
----------
......@@ -96,7 +96,7 @@ def dilation2d_nchw(input, filter, stride, padding, dilations, out_dtype=None):
def dilation2d_nhwc(input, filter, stride, padding, dilations, out_dtype=None):
"""Dilation2D operator in NHWC layout.
"""Morphological 2d dilation NHWC layout.
Parameters
----------
......
......@@ -24,7 +24,6 @@ from .conv2d import *
from .conv3d import *
from .deformable_conv2d import *
from .depthwise_conv2d import *
from .dilation2d import *
from .elemwise import *
from .dilate import *
from .flatten import *
......