Unverified Commit 81ff0613 by Cody Yu Committed by GitHub

Move Ops in relay.op.contrib.* (#4942)

* move contrib

* lint

* address comment

* address comment
parent 6b1136dd
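In effect, the adaptive pooling and ndarray_size constructors move out of the contrib namespace into the regular op namespaces. A rough before/after sketch of the Python call sites (illustrative only, not part of the diff, assuming a build that includes this commit):

    from tvm import relay

    x = relay.var("x", shape=(1, 8, 32, 32))

    # before this commit
    # y = relay.contrib.adaptive_avg_pool2d(x, output_size=(1, 1))
    # n = relay.op.contrib.ndarray_size(x)

    # after this commit
    y = relay.nn.adaptive_avg_pool2d(x, output_size=(1, 1))
    n = relay.op.ndarray_size(x)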
...
@@ -313,7 +313,7 @@ def _mx_pooling(inputs, attrs):
 def _mx_adaptive_avg_pooling(inputs, attrs):
     output_size = attrs.get_int_tuple("output_size", [])
-    return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
+    return _op.nn.adaptive_avg_pool2d(inputs[0], output_size)

 def _mx_dropout(inputs, attrs):
...
@@ -151,7 +151,7 @@ def _adaptive_avg_2d():
         data = inputs[0]
         output_size = _infer_shape(inputs[1])
-        return _op.contrib.contrib.adaptive_avg_pool2d(
+        return _op.nn.adaptive_avg_pool2d(
             data,
             output_size=output_size)
     return _impl
...
@@ -161,7 +161,7 @@ def _adaptive_max_2d():
         data = inputs[0]
         output_size = _infer_shape(inputs[1])
-        return _op.contrib.contrib.adaptive_max_pool2d(
+        return _op.nn.adaptive_max_pool2d(
             data,
             output_size=output_size)
     return _impl
...
@@ -32,7 +32,6 @@ from . import annotation
 from . import memory
 from . import image
 from . import vision
-from . import contrib
 from . import op_attrs
...
@@ -70,6 +70,8 @@ register_injective_schedule("minimum")
 register_injective_schedule("right_shift")
 register_injective_schedule("left_shift")
 register_injective_schedule("shape_of")
+register_injective_schedule("ndarray_size")

 # zeros
 @register_compute("zeros")
...
@@ -17,5 +17,3 @@
 # pylint: disable=wildcard-import
 """Neural network related operators."""
 from __future__ import absolute_import as _abs
-from .contrib import *
-from . import _contrib
...
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-argument
-"""Backend compiler related feature registration"""
-from __future__ import absolute_import
-
-from .. import op as reg
-from .. import strategy
-from ..op import OpPattern
-
-# adaptive_max_pool2d
-reg.register_schedule("contrib.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
-
-# adaptive_avg_pool2d
-reg.register_schedule("contrib.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
-
-# relay.contrib.ndarray_size
-reg.register_injective_schedule("contrib.ndarray_size")
...
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Constructor APIs"""
-import tvm._ffi
-
-tvm._ffi._init_api("relay.op.contrib._make", __name__)
...
@@ -17,115 +17,3 @@
 #pylint: disable=invalid-name, too-many-lines
 """Contrib operations."""
 from __future__ import absolute_import as _abs
-from . import _make
-
-
-def adaptive_max_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive max pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D max value calculation
-    across each window represented by WxH.
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_max_pool2d(data, output_size, layout)
-
-
-def adaptive_avg_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive average pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D average value calculation
-    across each window represented by WxH.
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_avg_pool2d(data, output_size, layout)
-
-
-def ndarray_size(data, dtype="int32"):
-    """Get number of elements of input tensor.
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input tensor.
-
-    dtype : str, optional
-        The target data type.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The number of elements of input tensor.
-    """
-    return _make.ndarray_size(data, dtype)
...
@@ -247,6 +247,16 @@ reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
 reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)

+# adaptive_max_pool2d
+reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+# adaptive_avg_pool2d
+reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
 # leaky_relu
 reg.register_broadcast_schedule("nn.leaky_relu")
 reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
...
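The OUT_ELEMWISE_FUSABLE pattern lets a later elementwise op fuse into the pooling output. A minimal sketch of how that could be observed (hypothetical example, assuming a standard TVM build that includes this change):

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 3, 32, 32))
    pooled = relay.nn.adaptive_avg_pool2d(x, output_size=(1, 1))
    body = relay.add(pooled, relay.const(1.0))  # elementwise op following the pool
    mod = tvm.IRModule.from_expr(relay.Function([x], body))
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.FuseOps(fuse_opt_level=2)(mod)  # the add can fuse into the pool's output
    print(mod)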
...
@@ -2277,3 +2277,97 @@ def space_to_depth(data, block_size, layout='NCHW'):
         in_height / block_size, in_width / block_size]
     """
     return _make.space_to_depth(data, block_size, layout)
+
+
+def adaptive_max_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive max pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 2D max value calculation
+    across each window represented by WxH.
+
+    In the default case, where the data_layout is `NCHW`, a data Tensor with
+    shape `(batch_size, in_channels, height, width)` produces an output Tensor
+    with shape `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for the
+    desired output sizes.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = [] or output_size
+    return _make.adaptive_max_pool2d(data, output_size, layout)
+
+
+def adaptive_avg_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive average pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 2D average value calculation
+    across each window represented by WxH.
+
+    In the default case, where the data_layout is `NCHW`, a data Tensor with
+    shape `(batch_size, in_channels, height, width)` produces an output Tensor
+    with shape `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for the
+    desired output sizes.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = [] or output_size
+    return _make.adaptive_avg_pool2d(data, output_size, layout)
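A small usage sketch of the relocated operators and the output_size behavior described above (illustrative only, assuming a build that includes this change):

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 16, 14, 14))
    # Omitting output_size keeps the input spatial size (14x14 here).
    same = relay.nn.adaptive_avg_pool2d(x)
    # An explicit (height, width) tuple picks the pooling windows automatically.
    pooled = relay.nn.adaptive_max_pool2d(x, output_size=(7, 7))
    mod = tvm.IRModule.from_expr(relay.Function([x], pooled))
    print(relay.transform.InferType()(mod))  # result type: Tensor[(1, 16, 7, 7), float32]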
...
@@ -974,3 +974,22 @@ def shape_of(data, dtype="int32"):
         The shape tensor.
     """
     return _make.shape_of(data, dtype)
+
+
+def ndarray_size(data, dtype="int32"):
+    """Get the number of elements of the input tensor.
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input tensor.
+
+    dtype : str, optional
+        The target data type.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The number of elements of the input tensor.
+    """
+    return _make.ndarray_size(data, dtype)
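A quick sketch of the relocated op in use (illustrative only, assuming a build that includes this change):

    import numpy as np
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(2, 3, 4))
    func = relay.Function([x], relay.op.ndarray_size(x, dtype="int64"))
    mod = tvm.IRModule.from_expr(func)
    data = np.zeros((2, 3, 4), dtype="float32")
    out = relay.create_executor(mod=mod).evaluate()(data)
    print(out)  # total element count: 2 * 3 * 4 = 24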
...
@@ -499,21 +499,21 @@ Array<te::Tensor> AdaptivePool2DCompute(const Attrs& attrs,
                                           mode, layout.name()) };
 }

-// relay.contrib.adaptive_avg_pool2d
+// relay.nn.adaptive_avg_pool2d
 Expr MakeAdaptiveAvgPool2D(Expr data,
                            Array<IndexExpr> output_size,
                            std::string layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
-  static const Op& op = Op::Get("contrib.adaptive_avg_pool2d");
+  static const Op& op = Op::Get("nn.adaptive_avg_pool2d");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }

-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_avg_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_avg_pool2d")
 .set_body_typed(MakeAdaptiveAvgPool2D);

-RELAY_REGISTER_OP("contrib.adaptive_avg_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_avg_pool2d")
 .describe(R"code(Adaptive average pooling operation for 2D data.

 - **data**: This depends on the `layout` parameter. Input is 4D array of shape
...
@@ -538,21 +538,21 @@ RELAY_REGISTER_OP("contrib.adaptive_avg_pool2d")
 .set_attr<FTVMCompute>("FTVMCompute", AdaptivePool2DCompute<topi::nn::kAvgPool>);

-// relay.contrib.adaptive_max_pool2d
+// relay.nn.adaptive_max_pool2d
 Expr MakeAdaptiveMaxPool2D(Expr data,
                            Array<IndexExpr> output_size,
                            std::string layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
-  static const Op& op = Op::Get("contrib.adaptive_max_pool2d");
+  static const Op& op = Op::Get("nn.adaptive_max_pool2d");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }

-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_max_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_max_pool2d")
 .set_body_typed(MakeAdaptiveMaxPool2D);

-RELAY_REGISTER_OP("contrib.adaptive_max_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_max_pool2d")
 .describe(R"code(Adaptive max pooling operation for 2D data.

 - **data**: This depends on the `layout` parameter. Input is 4D array of shape
...
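After the rename, the operators and their constructors are registered under the nn namespace. A quick way to check from Python (hypothetical snippet, assuming a standard TVM build that includes this change):

    import tvm
    from tvm import relay

    # The op registry entry now lives under "nn."
    op = relay.op.get("nn.adaptive_avg_pool2d")
    print(op.name)  # nn.adaptive_avg_pool2d
    # and the C++ constructor is exposed through the relay.op.nn._make FFI prefix.
    make_fn = tvm.get_global_func("relay.op.nn._make.adaptive_avg_pool2d")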
...
@@ -359,15 +359,15 @@ Array<te::Tensor> NdarraySizeCompute(const Attrs& attrs,
   return Array<te::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
 }

-TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size")
+TVM_REGISTER_GLOBAL("relay.op._make.ndarray_size")
 .set_body_typed([](Expr data, DataType dtype) {
   auto attrs = make_object<NdarraySizeAttrs>();
   attrs->dtype = dtype;
-  static const Op& op = Op::Get("contrib.ndarray_size");
+  static const Op& op = Op::Get("ndarray_size");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 });

-RELAY_REGISTER_OP("contrib.ndarray_size")
+RELAY_REGISTER_OP("ndarray_size")
 .describe(R"code(Returns a tensor representing the number of elements of input tensor.

 )code" TVM_ADD_FILELINE)
...
...
@@ -335,7 +335,7 @@ def test_shape_of():
 def test_ndarray_size():
     def verify_ndarray_size(shape):
         x = relay.var("x", shape=shape)
-        func = relay.Function([x], relay.op.contrib.ndarray_size(x))
+        func = relay.Function([x], relay.op.ndarray_size(x))
         func = run_infer_type(func)

         x_data = np.random.uniform(size=shape).astype("float32")
...
@@ -374,7 +374,7 @@ def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
                     l_sl = slice(l_start, l_end)
                     np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])

-    opfunc = relay.contrib.adaptive_avg_pool2d if pool_type == "avg" else relay.contrib.adaptive_max_pool2d
+    opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
     x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
     y = opfunc(x, out_size, layout)
     func = relay.Function([x], y)
...