Commit 69d2f9bd by masahi, committed by Wuwei Lin

[TOPI] Remove cpp upsampling and resize op (#4769)

* remove cpp upsampling

* remove cpp resize
parent 1ae44cf0
...@@ -25,6 +25,8 @@
#define TOPI_DETAIL_TENSOR_UTILS_H_
#include <tvm/te/operation.h>
namespace topi {
namespace detail {
using namespace tvm;
...@@ -50,7 +52,45 @@ inline bool is_empty_shape(const Array<PrimExpr>& x) {
  return is_empty;
}
/*!
* \brief Sample a point in a tensor using bilinear interpolation.
*
* \param input The input tensor.
* \param indices The index of the target point, which can be fractional
* \param max_y The maximum of y dimension
* \param max_x The maximum of x dimension
*
* \return The interpolated value in the given index.
*/
inline PrimExpr bilinear_sample_nchw(const Tensor& input, const Array<PrimExpr>& indices,
                                     const PrimExpr max_y, const PrimExpr max_x) {
  auto in_y = indices[2];
  auto yf = tvm::floor(in_y);
  auto yc = tvm::cast(DataType::Int(32), tvm::ceil(in_y));
  auto y0 = tvm::cast(DataType::Int(32), tvm::floor(in_y));
  auto y1 = tvm::if_then_else((yc > max_y), max_y, yc);
  auto y_lerp = in_y - yf;

  auto in_x = indices[3];
  auto xf = tvm::floor(in_x);
  auto xc = tvm::cast(DataType::Int(32), tvm::ceil(in_x));
  auto x0 = tvm::cast(DataType::Int(32), tvm::floor(in_x));
  auto x1 = tvm::if_then_else((xc > max_x), max_x, xc);
  auto x_lerp = in_x - xf;

  auto A = input(indices[0], indices[1], y0, x0);
  auto B = input(indices[0], indices[1], y0, x1);
  auto C = input(indices[0], indices[1], y1, x0);
  auto D = input(indices[0], indices[1], y1, x1);

  return A * (1 - x_lerp) * (1 - y_lerp) +
         B * x_lerp * (1 - y_lerp) +
         C * (1 - x_lerp) * y_lerp +
         D * x_lerp * y_lerp;
}
} // namespace detail
} // namespace topi
#endif // TOPI_DETAIL_TENSOR_UTILS_H_
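For reference, a minimal usage sketch of the relocated helper (illustrative only, not part of this commit): data, out_shape, y_ratio, x_ratio, other_y, and other_x are assumed to be set up by the caller, mirroring how resize_bilinear_nchw in topi/image/resize.h below calls it.

te::Tensor out = te::compute(
    out_shape, [&](const Array<Var>& i) {
      // Fractional source coordinates; clamping against other_y/other_x is
      // handled inside detail::bilinear_sample_nchw.
      return topi::detail::bilinear_sample_nchw(
          data, {i[0], i[1], i[2] * y_ratio, i[3] * x_ratio}, other_y, other_x);
    }, "resize_bilinear_sketch", topi::kInjective);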
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file topi/image/resize.h
* \brief image resize constructors
*/
#ifndef TOPI_IMAGE_RESIZE_H_
#define TOPI_IMAGE_RESIZE_H_
#include <tvm/te/operation.h>
#include <topi/tags.h>
#include <topi/elemwise.h>
#include <topi/detail/ravel_unravel.h>
#include <topi/detail/constant_utils.h>
#include <string>
#include <vector>
#include <iterator>
#include <algorithm>
namespace topi {
namespace image {
using namespace tvm;
using namespace tvm::te;
/*!
* \brief Sample a point in a tensor using bilinear interpolation.
*
* \param input The input tensor.
* \param indices The index of the target point, which can be fractional
* \param max_y The maximum of y dimension
* \param max_x The maximum of x dimension
*
* \return The interpolated value in the given index.
*/
inline PrimExpr bilinear_sample_nchw(const Tensor& input, const Array<PrimExpr>& indices,
                                     const PrimExpr max_y, const PrimExpr max_x) {
  auto in_y = indices[2];
  auto yf = tvm::floor(in_y);
  auto yc = tvm::cast(DataType::Int(32), tvm::ceil(in_y));
  auto y0 = tvm::cast(DataType::Int(32), tvm::floor(in_y));
  auto y1 = tvm::if_then_else((yc > max_y), max_y, yc);
  auto y_lerp = in_y - yf;

  auto in_x = indices[3];
  auto xf = tvm::floor(in_x);
  auto xc = tvm::cast(DataType::Int(32), tvm::ceil(in_x));
  auto x0 = tvm::cast(DataType::Int(32), tvm::floor(in_x));
  auto x1 = tvm::if_then_else((xc > max_x), max_x, xc);
  auto x_lerp = in_x - xf;

  auto A = input(indices[0], indices[1], y0, x0);
  auto B = input(indices[0], indices[1], y0, x1);
  auto C = input(indices[0], indices[1], y1, x0);
  auto D = input(indices[0], indices[1], y1, x1);

  return A * (1 - x_lerp) * (1 - y_lerp) +
         B * x_lerp * (1 - y_lerp) +
         C * (1 - x_lerp) * y_lerp +
         D * x_lerp * y_lerp;
}
/*!
* \brief Resize given tensor to given shape using nearest neighbour for NHWC
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_nearest_neighbor_nhwc(const Tensor& input,
                                           const Array<PrimExpr>& shape,
                                           bool align_corners = false,
                                           std::string name = "tensor",
                                           std::string tag = kInjective) {
  Array<PrimExpr> out_shape;
  out_shape.push_back(input->shape[0]);
  out_shape.push_back(cast(DataType::Int(32), shape[0]));
  out_shape.push_back(cast(DataType::Int(32), shape[1]));
  out_shape.push_back(input->shape[3]);

  return compute(
    out_shape, [&](const Array<Var>& indices) {
      Array<PrimExpr> idx;
      idx.push_back(indices[0]);
      idx.push_back(indices[1] * input->shape[1] / shape[0]);
      idx.push_back(indices[2] * input->shape[2] / shape[1]);
      idx.push_back(indices[3]);
      return input(idx);
    }, name, tag);
}
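// Note on the index arithmetic used by these nearest-neighbor kernels:
// out_idx * in_dim / out_dim is integer division, so each output coordinate
// maps to the floor of its scaled position. For example, with in_height = 4
// and out_height = 8, output rows 0..7 read input rows 0, 0, 1, 1, 2, 2, 3, 3
// (row 5 -> 5 * 4 / 8 = 2).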
/*!
* \brief Resize given tensor to given shape using nearest neighbour for NCHW
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_nearest_neighbor_nchw(const Tensor& input,
                                           const Array<PrimExpr>& shape,
                                           bool align_corners = false,
                                           std::string name = "tensor",
                                           std::string tag = kInjective) {
  Array<PrimExpr> out_shape;
  out_shape.push_back(input->shape[0]);
  out_shape.push_back(input->shape[1]);
  out_shape.push_back(cast(DataType::Int(32), shape[0]));
  out_shape.push_back(cast(DataType::Int(32), shape[1]));

  return compute(
    out_shape, [&](const Array<Var>& indices) {
      Array<PrimExpr> idx;
      idx.push_back(indices[0]);
      idx.push_back(indices[1]);
      idx.push_back(indices[2] * input->shape[2] / shape[0]);
      idx.push_back(indices[3] * input->shape[3] / shape[1]);
      return input(idx);
    }, name, tag);
}
/*!
* \brief Resize given tensor to given shape using nearest neighbour for NCHWc
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_nearest_neighbor_nchwc(const Tensor& input,
                                            const Array<PrimExpr>& shape,
                                            bool align_corners = false,
                                            std::string name = "tensor",
                                            std::string tag = kInjective) {
  Array<PrimExpr> out_shape;
  out_shape.push_back(input->shape[0]);
  out_shape.push_back(input->shape[1]);
  out_shape.push_back(cast(DataType::Int(32), shape[0]));
  out_shape.push_back(cast(DataType::Int(32), shape[1]));
  out_shape.push_back(input->shape[4]);

  return compute(
    out_shape, [&](const Array<Var>& indices) {
      Array<PrimExpr> idx;
      idx.push_back(indices[0]);
      idx.push_back(indices[1]);
      idx.push_back(indices[2] * input->shape[2] / shape[0]);
      idx.push_back(indices[3] * input->shape[3] / shape[1]);
      idx.push_back(indices[4]);
      return input(idx);
    }, name, tag);
}
/*!
* \brief Resize given tensor to given shape using nearest neighbour
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param layout input layout
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_nearest_neighbor(const Tensor& input,
                                      const Array<PrimExpr>& shape,
                                      std::string layout = "NCHW",
                                      bool align_corners = false,
                                      std::string name = "tensor",
                                      std::string tag = kInjective) {
  auto base_layout = layout.substr(0, 4);
  if (layout == "NHWC") {
    return resize_nearest_neighbor_nhwc(input, shape, align_corners);
  } else if (layout == "NCHW") {
    return resize_nearest_neighbor_nchw(input, shape, align_corners);
  } else if (base_layout == "NCHW") {
    // NCHWc
    return resize_nearest_neighbor_nchwc(input, shape, align_corners);
  } else {
    LOG(FATAL) << "Unknown layout: " << layout;
    return Tensor();
  }
}
/*!
* \brief Resize given tensor to given shape using bilinear interpolation for NHWC
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_bilinear_nhwc(const Tensor& input,
                                   const Array<PrimExpr>& shape,
                                   bool align_corners = false,
                                   std::string name = "tensor",
                                   std::string tag = kInjective) {
  Array<PrimExpr> out_shape;
  out_shape.push_back(input->shape[0]);
  out_shape.push_back(cast(DataType::Int(32), shape[0]));
  out_shape.push_back(cast(DataType::Int(32), shape[1]));
  out_shape.push_back(input->shape[3]);

  PrimExpr cone = make_const(DataType::Int(32), 1);

  auto in_height = as_const_int(input->shape[1]);
  auto in_width = as_const_int(input->shape[2]);
  auto out_height = as_const_int(shape[0]);
  auto out_width = as_const_int(shape[1]);

  PrimExpr y_ratio;
  PrimExpr x_ratio;

  if (!align_corners) {
    y_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_height) /
                                               static_cast<float>(*out_height)));
    x_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_width) /
                                               static_cast<float>(*out_width)));
  } else {
    y_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_height - 1) /
                                               static_cast<float>(*out_height - 1)));
    x_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_width - 1) /
                                               static_cast<float>(*out_width - 1)));
  }
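  // Example of the two conventions: resizing height 4 -> 8 gives
  // y_ratio = 4 / 8 = 0.5 without align_corners, and (4 - 1) / (8 - 1) ~= 0.4286
  // with align_corners, so the last output row samples exactly the last input row.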
  PrimExpr other_y = tvm::tir::Simplify(input->shape[1] - cone);
  PrimExpr other_x = tvm::tir::Simplify(input->shape[2] - cone);

  return compute(
    out_shape, [&](const Array<Var>& indices) {
      auto in_y = indices[1] * y_ratio;
      auto yf = tvm::floor(in_y);
      auto yc = tvm::cast(DataType::Int(32), tvm::ceil(in_y));
      auto y0 = tvm::cast(DataType::Int(32), tvm::floor(in_y));
      auto y1 = tvm::if_then_else((yc > other_y), other_y, yc);
      auto y_lerp = in_y - yf;

      auto in_x = indices[2] * x_ratio;
      auto xf = tvm::floor(in_x);
      auto xc = tvm::cast(DataType::Int(32), tvm::ceil(in_x));
      auto x0 = tvm::cast(DataType::Int(32), tvm::floor(in_x));
      auto x1 = tvm::if_then_else((xc > other_x), other_x, xc);
      auto x_lerp = in_x - xf;

      auto A = input(indices[0], y0, x0, indices[3]);
      auto B = input(indices[0], y0, x1, indices[3]);
      auto C = input(indices[0], y1, x0, indices[3]);
      auto D = input(indices[0], y1, x1, indices[3]);

      return A * (1 - x_lerp) * (1 - y_lerp) +
             B * x_lerp * (1 - y_lerp) +
             C * (1 - x_lerp) * y_lerp +
             D * x_lerp * y_lerp;
    }, name, tag);
}
/*!
* \brief Resize given tensor to given shape using bilinear interpolation for NCHW
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_bilinear_nchw(const Tensor& input,
                                   const Array<PrimExpr>& shape,
                                   bool align_corners = false,
                                   std::string name = "tensor",
                                   std::string tag = kInjective) {
  Array<PrimExpr> out_shape;
  out_shape.push_back(input->shape[0]);
  out_shape.push_back(input->shape[1]);
  out_shape.push_back(cast(DataType::Int(32), shape[0]));
  out_shape.push_back(cast(DataType::Int(32), shape[1]));

  PrimExpr cone = make_const(DataType::Int(32), 1);

  auto in_height = as_const_int(input->shape[2]);
  auto in_width = as_const_int(input->shape[3]);
  auto out_height = as_const_int(shape[0]);
  auto out_width = as_const_int(shape[1]);

  PrimExpr y_ratio;
  PrimExpr x_ratio;

  if (!align_corners) {
    y_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_height) /
                                               static_cast<float>(*out_height)));
    x_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_width) /
                                               static_cast<float>(*out_width)));
  } else {
    y_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_height - 1) /
                                               static_cast<float>(*out_height - 1)));
    x_ratio = make_const(DataType::Float(32), (static_cast<float>(*in_width - 1) /
                                               static_cast<float>(*out_width - 1)));
  }

  PrimExpr other_y = tvm::tir::Simplify(input->shape[2] - cone);
  PrimExpr other_x = tvm::tir::Simplify(input->shape[3] - cone);

  return compute(
    out_shape, [&](const Array<Var>& indices) {
      auto in_y = indices[2] * y_ratio;
      auto in_x = indices[3] * x_ratio;
      return bilinear_sample_nchw(input, {indices[0], indices[1], in_y, in_x}, other_y, other_x);
    }, name, tag);
}
/*!
* \brief Resize given tensor to given shape using bilinear interpolation
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param layout input layout
* \param align_corners To preserve centers of 4 corner pixels
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize_bilinear(const Tensor& input,
                              const Array<tvm::PrimExpr>& shape,
                              std::string layout = "NCHW",
                              bool align_corners = false,
                              std::string name = "tensor",
                              std::string tag = kInjective) {
  Tensor ret;

  if (layout == "NHWC") {
    ret = resize_bilinear_nhwc(input, shape, align_corners);
  } else {
    ret = resize_bilinear_nchw(input, shape, align_corners);
  }

  return cast(ret, input->dtype);
}
/*!
* \brief Resize given tensor to given shape
*
* \param input The input tensor.
* \param shape Output shape to resize to.
* \param layout input layout
* \param align_corners To preserve centers of 4 corner pixels
* \param mode Algorithm to use (NEAREST_NEIGHBOR / BILINEAR)
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor resized to given shape
*/
inline Tensor resize(const Tensor& input,
                     const Array<PrimExpr>& shape,
                     std::string layout = "NCHW",
                     bool align_corners = false,
                     std::string mode = "BILINEAR",
                     std::string name = "tensor",
                     std::string tag = kInjective) {
  if (mode == "NEAREST_NEIGHBOR") {
    return resize_nearest_neighbor(input, shape, layout, align_corners);
  } else {
    return resize_bilinear(input, shape, layout, align_corners);
  }
}
} // namespace image
} // namespace topi
#endif // TOPI_IMAGE_RESIZE_H_
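A short illustrative sketch of driving the resize entry point above; the tensor name, input shape, and target size are assumptions for the example, not taken from this commit.

te::Tensor data = te::placeholder({1, 3, 32, 32}, DataType::Float(32), "data");
// Bilinear NCHW resize to 64x64 without align_corners, dispatched through resize().
te::Tensor out = topi::image::resize(data, {64, 64}, "NCHW", false, "BILINEAR");
te::Schedule s = te::create_schedule({out->op});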
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file topi/nn/upsampling.h
* \brief upsampling op constructors
*/
#ifndef TOPI_NN_UPSAMPLING_H_
#define TOPI_NN_UPSAMPLING_H_
#include <string>
#include <vector>
#include <iterator>
#include <algorithm>
#include "topi/image/resize.h"
namespace topi {
namespace nn {
using namespace tvm;
using namespace tvm::te;
using namespace topi::image;
/*!
* \brief Upsample given tensor to given shape
*
* \param input The input tensor.
* \param shape Output shape to upsample.
* \param layout input layout
* \param mode Algorithm to use (NEAREST_NEIGHBOR / BILINEAR)
* \param name Name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor upsampled to given shape
*/
inline Tensor upsampling(const Tensor& input,
                         const Array<PrimExpr> shape,
                         std::string layout = "NCHW",
                         std::string mode = "NEAREST_NEIGHBOR",
                         std::string name = "tensor",
                         std::string tag = kInjective) {
  return resize(input, shape, layout, false, mode);
}
} // namespace nn
} // namespace topi
#endif // TOPI_NN_UPSAMPLING_H_
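The wrapper above simply forwards to resize with align_corners = false. An illustrative call, assuming data is a 4-D NCHW tensor and h and w are its spatial extents (names are placeholders):

te::Tensor up = topi::nn::upsampling(data, {2 * h, 2 * w}, "NCHW", "NEAREST_NEIGHBOR");
// Per the definition above, this is equivalent to
// topi::image::resize(data, {2 * h, 2 * w}, "NCHW", false, "NEAREST_NEIGHBOR").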
...@@ -23,5 +23,4 @@ from . import vision
from . import x86
from . import generic
from . import rocm
from . import image
from . import util
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for image TOPI ops and schedules"""
from tvm._ffi.function import _init_api_prefix
_init_api_prefix("topi.cpp.image", "topi.image")
...@@ -20,7 +20,7 @@ import tvm
from .util import get_pad_tuple
from ..util import get_const_tuple
from ..cpp.image import bilinear_sample_nchw
from ..cpp.util import bilinear_sample_nchw
@tvm.target.generic_func
def deformable_conv2d_nchw(data, offset, kernel, strides, padding, dilation, deformable_groups,
...
...@@ -18,7 +18,7 @@
"""Roi align operator"""
import tvm
from ...util import get_const_tuple
from ...cpp.image import bilinear_sample_nchw
from ...cpp.util import bilinear_sample_nchw
@tvm.target.generic_func
...
...@@ -43,13 +43,11 @@
#include <topi/nn/mapping.h>
#include <topi/nn/pooling.h>
#include <topi/nn/softmax.h>
#include <topi/nn/upsampling.h>
#include <topi/nn/l2_normalize.h>
#include <topi/nn/local_response_norm.h>
#include <topi/nn/batch_matmul.h>
#include <topi/vision/reorg.h>
#include <topi/image/resize.h>
#include <topi/generic/default.h>
#include <topi/generic/extern.h>
#include <topi/generic/injective.h>
...@@ -451,12 +449,6 @@ TVM_REGISTER_GLOBAL("topi.one_hot")
*rv = one_hot(args[0], args[1], args[2], depth, axis, dtype);
});
/* Ops from nn/upsampling.h */
TVM_REGISTER_GLOBAL("topi.nn.upsampling")
.set_body([](TVMArgs args, TVMRetValue *rv) {
*rv = nn::upsampling(args[0], args[1], args[2], args[3]);
});
/* Ops from nn/bnn.h */
TVM_REGISTER_GLOBAL("topi.nn.binarize_pack")
.set_body([](TVMArgs args, TVMRetValue *rv) {
...@@ -581,17 +573,6 @@ TVM_REGISTER_GLOBAL("topi.vision.reorg")
*rv = vision::reorg(args[0], args[1]);
});
/* Ops from image/resize.h */
TVM_REGISTER_GLOBAL("topi.image.bilinear_sample_nchw")
.set_body([](TVMArgs args, TVMRetValue *rv) {
*rv = image::bilinear_sample_nchw(args[0], args[1], args[2], args[3]);
});
TVM_REGISTER_GLOBAL("topi.image.resize")
.set_body([](TVMArgs args, TVMRetValue *rv) {
*rv = image::resize(args[0], args[1], args[2], args[3], args[4]);
});
/* Generic schedules */
TVM_REGISTER_GLOBAL("topi.generic.default_schedule")
.set_body([](TVMArgs args, TVMRetValue *rv) {
...@@ -755,6 +736,11 @@ TVM_REGISTER_GLOBAL("topi.util.is_empty_shape")
*rv = topi::detail::is_empty_shape(args[0]);
});
TVM_REGISTER_GLOBAL("topi.util.bilinear_sample_nchw")
.set_body([](TVMArgs args, TVMRetValue *rv) {
*rv = detail::bilinear_sample_nchw(args[0], args[1], args[2], args[3]);
});
/*! \brief Builder function for instantiating schedules. */
using FTVMScheduleBuilder = std::function<
  tvm::te::Schedule(const tvm::Target& target, const tvm::Array<tvm::te::Tensor>& outs)>;
...
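With the topi.image globals gone, the bilinear helper is reachable through the topi.util registration added above (and as topi.cpp.util.bilinear_sample_nchw on the Python side, matching the import changes earlier in this commit). A hedged sketch of looking the packed function up from C++; data, idx, max_y, and max_x are placeholders supplied by the caller:

const tvm::runtime::PackedFunc* f =
    tvm::runtime::Registry::Get("topi.util.bilinear_sample_nchw");
CHECK(f != nullptr) << "topi.util.bilinear_sample_nchw is not registered";
// data: te::Tensor in NCHW layout; idx: Array<PrimExpr> of {n, c, y, x};
// max_y / max_x: PrimExpr bounds, as in detail::bilinear_sample_nchw.
PrimExpr sampled = (*f)(data, idx, max_y, max_x);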