Commit cbdd14f1 by Nicolas Vasilache, committed by Tianqi Chen

[TOPI] C++ doc (#320)

parent b0c42f3b
Subproject commit 30a85d860567aa30d013a5e75fbd1b0ee2ebe93c -> Subproject commit 326e2fa18734f0592d257da6b8cfaae90a499c5c
/*!
 * Copyright (c) 2017 by Contributors
 * \brief Broadcast op constructions
 * \file topi/broadcast.h
 */
#ifndef TOPI_BROADCAST_H_
#define TOPI_BROADCAST_H_

#include <string>

#include "topi/detail/broadcast.h"
#include "topi/tags.h"

namespace topi {

/*!
 * \brief Creates an operation that broadcasts a tensor into a compatible
 * shape according to numpy's rules
 *
 * \param t The input tensor
 * \param output_shape The target output shape, must be compatible
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a broadcast operation
 */
inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
                                const tvm::Array<tvm::Expr>& output_shape,
                                std::string name = "tensor",
                                std::string tag = kBroadcast) {
  CHECK_GE(output_shape.size(), t->shape.size())
      << "Not a broadcast, output dimensionality smaller than input.\noutput: "
      << output_shape << "\nvs\ninput: " << t;
  auto bh = detail::BroadcastShape(output_shape, t->shape);
  CHECK_EQ(output_shape.size(), bh.common_shape.size());
  for (int i = 0; i < output_shape.size(); ++i) {
    CHECK(tvm::ir::Equal(output_shape[i], bh.common_shape[i]));
  }
  auto l = [&](tvm::Array<tvm::Var> ovars) {
    return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
  };
  return tvm::compute(
      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
      l,
      name,
      tag);
}
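// Usage sketch (illustrative, not part of this commit): broadcasting a
// placeholder of shape (1, 3) to (4, 3). The shapes and names here are
// hypothetical.
//
//   tvm::Tensor x = tvm::placeholder({1, 3}, tvm::Float(32), "x");
//   tvm::Tensor y = topi::broadcast_to(x, {4, 3});
//   // y(i, j) == x(0, j); y->op is named "tensor" and tagged kBroadcast.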
/*!
 * \brief Creates an operation that performs pointwise addition of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor to add
 * \param B The second tensor to add
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise addition with broadcast
 */
inline tvm::Tensor broadcast_add(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a + b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}

/*!
 * \brief Creates an operation that performs pointwise subtraction of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor
 * \param B The second tensor, subtracted from the first
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise subtraction with broadcast
 */
inline tvm::Tensor broadcast_sub(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a - b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}

/*!
 * \brief Creates an operation that performs pointwise multiplication of 2
 * tensors and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor to multiply
 * \param B The second tensor to multiply
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise multiplication with broadcast
 */
inline tvm::Tensor broadcast_mul(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a * b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}

/*!
 * \brief Creates an operation that performs pointwise division of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor (the dividend)
 * \param B The second tensor (the divisor)
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise division with broadcast
 */
inline tvm::Tensor broadcast_div(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a / b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}

/*!
 * \brief Creates an operation that performs pointwise modulo remainder of 2
 * tensors and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor
 * \param B The second tensor, computing A % B
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise modulo remainder with
 * broadcast
 */
inline tvm::Tensor broadcast_mod(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a % b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
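// Usage sketch (illustrative): the binary ops broadcast their operands to a
// common shape, numpy-style. Shapes below are hypothetical.
//
//   tvm::Tensor A = tvm::placeholder({4, 1}, tvm::Float(32), "A");
//   tvm::Tensor B = tvm::placeholder({1, 3}, tvm::Float(32), "B");
//   tvm::Tensor C = topi::broadcast_add(A, B);  // shape (4, 3)
//   // C(i, j) == A(i, 0) + B(0, j); the op is tagged kBroadcast so a
//   // schedule can recognize it.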
}  // namespace topi
/*!
 * Copyright (c) 2017 by Contributors
 * \brief Detail broadcast.
 * \file topi/detail/broadcast.h
 */
#ifndef TOPI_DETAIL_BROADCAST_H_
#define TOPI_DETAIL_BROADCAST_H_

#include <algorithm>
#include <deque>
#include <string>

#include "tvm/ir_pass.h"
#include "tvm/tvm.h"

@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast(

template <typename FBinaryExpr>
inline tvm::Tensor WithBroadcast(FBinaryExpr op,
                                 const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = "") {
  auto bh = BroadcastShape(A->shape, B->shape);
  auto l = [&](tvm::Array<tvm::Var> ovars) {
    return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
              B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
  };
  return tvm::compute(
      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
      l,
      name,
      tag);
}
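// Sketch of how a new broadcast op can be built on WithBroadcast
// (illustrative; broadcast_max is hypothetical and not part of this commit):
//
//   inline tvm::Tensor broadcast_max(const tvm::Tensor& A,
//                                    const tvm::Tensor& B,
//                                    std::string name = "tensor",
//                                    std::string tag = kBroadcast) {
//     auto l = [&](tvm::Expr a, tvm::Expr b) { return tvm::max(a, b); };
//     return WithBroadcast(l, A, B, name, tag);
//   }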
}  // namespace detail
@@ -6,17 +6,22 @@
#ifndef TOPI_EWISE_H_
#define TOPI_EWISE_H_

#include <string>

#include "topi/tags.h"
#include "tvm/tvm.h"

namespace topi {
using namespace tvm;

// Unary intrinsic operators
#define TOPI_DECLARE_UNARY_OP(OpName)                     \
  inline Tensor OpName(const Tensor& x,                   \
                       std::string name = "tensor",       \
                       std::string tag = kElementWise) {  \
    return compute(x->shape, [&](const Array<Var>& i) {   \
        return ::tvm::OpName(x(i));                       \
      }, name, tag);                                      \
  }

TOPI_DECLARE_UNARY_OP(exp);
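// Usage sketch (illustrative): each declared op now takes an optional name
// and tag, so both calls below work. The placeholder shape is hypothetical.
//
//   Tensor x = placeholder({16}, Float(32), "x");
//   Tensor y = exp(x);            // default name "tensor", tag kElementWise
//   Tensor z = exp(x, "z_exp");   // custom operation name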
/*!
 * Copyright (c) 2017 by Contributors
 * \brief NN op constructions
 * \file nn.h
@@ -7,7 +7,9 @@
#define TOPI_NN_H_

#include <algorithm>
#include <string>

#include "topi/tags.h"
#include "tvm/ir.h"
#include "tvm/ir_pass.h"
#include "tvm/tvm.h"

@@ -27,17 +29,65 @@ tvm::Expr Map(const tvm::Array<tvm::Expr>& exprs, T op) {
}  // namespace detail
/*!
 * \brief Creates an operation that performs a rectified linear unit
 *
 * \param t The input tensor
 * \param threshold The relu threshold (default 0)
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the relu operation
 */
template <typename T>
inline tvm::Tensor relu(const tvm::Tensor& t,
                        T threshold = static_cast<T>(0),
                        std::string name = "tensor",
                        std::string tag = kElementWise) {
  return tvm::compute(
      t->shape,
      [&](const tvm::Array<tvm::Var>& i) { return tvm::max(t(i), threshold); },
      name,
      tag);
}
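// Usage sketch (illustrative): the threshold is an explicit argument, which
// also pins down the template parameter T. Shapes are hypothetical.
//
//   tvm::Tensor x = tvm::placeholder({64}, tvm::Float(32), "x");
//   tvm::Tensor y = relu(x, 0.f);    // max(x(i), 0.f), tagged kElementWise
//   tvm::Tensor z = relu(x, 0.5f);   // clamps from below at 0.5 instead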
/*!
 * \brief Creates an operation that performs padding
 *
 * \param t The input tensor
 * \param pad_before An Array of Expr describing the padding before the
 * respective iterator
 * \param pad_after An Array of Expr describing the padding after the
 * respective iterator
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the padding operation
 *
 * \note
 * The pad_after Array must either be empty or have the same length as
 * pad_before. When pad_after is empty, it takes the same values as
 * pad_before (symmetric padding). The pad Array applies from the leading
 * dimensions and skips missing trailing dimensions:
 *
 *   pad(t(i, j, k), {1}, {0}) returns the equivalent operation for
 *   the following pseudocode:
 *     for i in [0, t.shape[0] + 1):
 *       for j in [0, t.shape[1]):
 *         for k in [0, t.shape[2]):
 *           name(i, j, k) = (1 <= i) ? t(i - 1, j, k) : 0;
 */
inline tvm::Tensor pad(const tvm::Tensor& t,
                       const tvm::Array<tvm::Expr>& pad_before,
                       tvm::Array<tvm::Expr> pad_after = tvm::Array<tvm::Expr>(),
                       std::string name = "tensor",
                       std::string tag = kElementWise) {
  if (pad_after.size() < pad_before.size()) {
    for (int i = pad_after.size(); i < pad_before.size(); ++i) {
      pad_after.push_back(pad_before[i]);
@@ -74,14 +124,30 @@ inline tvm::Tensor pad(
    }
    return tvm::select(detail::Map(sel, tvm::ir::And::make), t(indices), 0);
  };
  return tvm::compute(output_shape, l, name, tag);
}
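// Usage sketch (illustrative): symmetric 1-pixel spatial padding of an NCHW
// tensor, the same pattern the convolutions below use. Shapes are
// hypothetical.
//
//   tvm::Tensor x = tvm::placeholder({1, 3, 32, 32}, tvm::Float(32), "x");
//   // The pad Array applies from the leading dimensions, so the two leading
//   // (batch, channel) dims get 0 and only the spatial dims are padded.
//   tvm::Tensor p = pad(x, {0, 0, 1, 1});  // shape (1, 3, 34, 34)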
/*!
 * \brief Creates an operation that calculates a matrix multiplication
 * (row-major notation):
 * A(i, k) * B(k, j), if trans_a == trans_b == false;
 * the usual transposed combinations, otherwise
 *
 * \param A The matrix A
 * \param B The matrix B
 * \param trans_a Is A's layout transposed?
 * \param trans_b Is B's layout transposed?
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the matmult operation
 */
inline tvm::Tensor matmult(const tvm::Tensor& A,
                           const tvm::Tensor& B,
                           bool trans_a = false,
                           bool trans_b = false,
                           std::string name = "tensor",
                           std::string tag = kMatMult) {
  tvm::Array<tvm::Expr> output_shape{A->shape[trans_a ? 1 : 0],
                                     B->shape[trans_b ? 0 : 1]};
  auto k = tvm::reduce_axis(tvm::Range{0, A->shape[trans_a ? 0 : 1]}, "k");
@@ -89,12 +155,37 @@ inline tvm::Tensor matmult(const tvm::Tensor& A, const tvm::Tensor& B,
    return tvm::sum((trans_a ? A[k][i] : A[i][k]) * (trans_b ? B[j][k] : B[k][j]),
                    {k});
  };
  return tvm::compute(output_shape, l, name, tag);
}
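// Usage sketch (illustrative): C = A * B^T with hypothetical shapes.
//
//   tvm::Tensor A = tvm::placeholder({128, 64}, tvm::Float(32), "A");
//   tvm::Tensor B = tvm::placeholder({128, 64}, tvm::Float(32), "B");
//   // trans_b = true reads B as (64, 128): C(i, j) = sum_k A(i, k) * B(j, k)
//   tvm::Tensor C = matmult(A, B, false, true);  // shape (128, 128)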
/*!
 * \brief Creates an operation that performs a 2-D convolution with an
 * NCHW-layout
 *
 * \param I The 4-D input tensor
 * \param W The 4-D weight tensor
 * \param pad_h A static constant padding amount applied to the height of the
 * image, before and after (symmetric padding)
 * \param pad_w A static constant padding amount applied to the width of the
 * image, before and after (symmetric padding)
 * \param stride_h A static constant striding amount applied to the height of
 * the image
 * \param stride_w A static constant striding amount applied to the width of
 * the image
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the 2-D convolution operation (NCHW
 * layout)
 */
inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
                               const tvm::Tensor& W,
                               int pad_h = 0,
                               int pad_w = 0,
                               int stride_h = 1,
                               int stride_w = 1,
                               std::string name = "tensor",
                               std::string tag = kConv2dNCHW) {
  CHECK_EQ(4, I->shape.size());
  CHECK_EQ(4, W->shape.size());
  auto pH = I->shape[2];
@@ -116,12 +207,36 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, const tvm::Tensor& W,
        T(b, i, stride_h * h + kh, stride_w * w + kw) * W(i, o, kh, kw),
        {i, kh, kw});
  };
  return tvm::compute(output_shape, l, name, tag);
}
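// Usage sketch (illustrative): a 3x3 stride-1 convolution with one pixel of
// symmetric padding, so spatial dims are preserved. Shapes are hypothetical;
// per the W(i, o, kh, kw) indexing above, the weight layout here is
// (in_channels, out_channels, kH, kW).
//
//   tvm::Tensor I = tvm::placeholder({1, 3, 32, 32}, tvm::Float(32), "I");
//   tvm::Tensor W = tvm::placeholder({3, 32, 3, 3}, tvm::Float(32), "W");
//   tvm::Tensor O = conv2d_nchw(I, W, 1, 1);  // shape (1, 32, 32, 32)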
/*!
 * \brief Creates an operation for a 2-D convolution layer with an HWCN-layout
 *
 * \param I The 4-D input tensor
 * \param W The 4-D weight tensor
 * \param pad_h A static constant padding amount applied to the height of the
 * image, before and after (symmetric padding)
 * \param pad_w A static constant padding amount applied to the width of the
 * image, before and after (symmetric padding)
 * \param stride_h A static constant striding amount applied to the height of
 * the image
 * \param stride_w A static constant striding amount applied to the width of
 * the image
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the 2-D convolution operation
 * (HWCN layout)
 */
inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
                               const tvm::Tensor& W,
                               int pad_h = 0,
                               int pad_w = 0,
                               int stride_h = 1,
                               int stride_w = 1,
                               std::string name = "tensor",
                               std::string tag = kConv2dHWCN) {
  CHECK_EQ(4, I->shape.size());
  CHECK_EQ(4, W->shape.size());
  auto pH = I->shape[2];
@@ -141,13 +256,38 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, const tvm::Tensor& W,
        T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o),
        {i, kh, kw});
  };
  return tvm::compute(output_shape, l, name, tag);
}
/*!
 * \brief Creates an operation that performs a 2-D depthwise convolution with
 * an NCHW-layout
 *
 * \param I The 4-D input tensor
 * \param W The 4-D weight tensor
 * \param pad_h A static constant padding amount applied to the height of the
 * image, before and after (symmetric padding)
 * \param pad_w A static constant padding amount applied to the width of the
 * image, before and after (symmetric padding)
 * \param stride_h A static constant striding amount applied to the height of
 * the image
 * \param stride_w A static constant striding amount applied to the width of
 * the image
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the 2-D depthwise convolution operation
 * (NCHW layout)
 */
inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
                                         const tvm::Tensor& W,
                                         int pad_h = 0,
                                         int pad_w = 0,
                                         int stride_h = 1,
                                         int stride_w = 1,
                                         std::string name = "tensor",
                                         std::string tag = kDepthwiseConv2d) {
  CHECK_EQ(4, I->shape.size());
  CHECK_EQ(4, W->shape.size());
  auto pH = I->shape[2];
@@ -170,13 +310,37 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
            W(i / pCM, o % pCM, kh, kw),
        {i, kh, kw});
  };
  return tvm::compute(output_shape, l, name, tag);
}
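// Usage sketch (illustrative): per the W(i / pCM, o % pCM, kh, kw) indexing
// above, the weight is laid out as (channels, channel_multiplier, kH, kW).
// Shapes are hypothetical.
//
//   tvm::Tensor I = tvm::placeholder({1, 16, 32, 32}, tvm::Float(32), "I");
//   tvm::Tensor W = tvm::placeholder({16, 2, 3, 3}, tvm::Float(32), "W");
//   // Each input channel produces 2 outputs: 16 * 2 = 32 output channels.
//   tvm::Tensor O = depthwise_conv2d_nchw(I, W, 1, 1);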
/*!
 * \brief Creates an operation that performs a 2-D group convolution with
 * an NGCHW-layout
 *
 * \param I The 5-D input tensor
 * \param W The 5-D weight tensor
 * \param pad_h A static constant padding amount applied to the height of the
 * image, before and after (symmetric padding)
 * \param pad_w A static constant padding amount applied to the width of the
 * image, before and after (symmetric padding)
 * \param stride_h A static constant striding amount applied to the height of
 * the image
 * \param stride_w A static constant striding amount applied to the width of
 * the image
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the 2-D group convolution operation
 * (NGCHW layout)
 */
inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
                                      const tvm::Tensor& W,
                                      int pad_h = 0,
                                      int pad_w = 0,
                                      int stride_h = 1,
                                      int stride_w = 1,
                                      std::string name = "tensor",
                                      std::string tag = kGroupConv2d) {
  CHECK_EQ(5, I->shape.size());
  CHECK_EQ(5, W->shape.size());
  auto pH = I->shape[2];
@@ -195,12 +359,17 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
  auto T = (pad_h == 0 && pad_w == 0)
               ? I
               : pad(I, {tvm::Expr(0), tvm::Expr(0), tvm::Expr(0), pad_h, pad_w});
  auto l = [&](tvm::Array<tvm::Var> args) {
    tvm::Var b = args[0];
    tvm::Var g = args[1];
    tvm::Var o = args[2];
    tvm::Var h = args[3];
    tvm::Var w = args[4];
    // Read from the padded tensor T, not the raw input I, so the strided
    // indices stay in bounds when pad_h or pad_w is nonzero.
    return tvm::sum(
        T(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw),
        {i, kh, kw});
  };
  return tvm::compute(output_shape, l, name, tag);
}
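// Usage sketch (illustrative): the NGCHW layout makes the group dimension
// explicit, so with 4 groups a 64-channel input is viewed as (1, 4, 16, H, W).
// Per the W(g, i, o, kh, kw) indexing above, the weight is laid out as
// (groups, in_per_group, out_per_group, kH, kW). Shapes are hypothetical.
//
//   tvm::Tensor I = tvm::placeholder({1, 4, 16, 32, 32}, tvm::Float(32), "I");
//   tvm::Tensor W = tvm::placeholder({4, 16, 8, 3, 3}, tvm::Float(32), "W");
//   tvm::Tensor O = group_conv2d_ngchw(I, W, 1, 1);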
}  // namespace topi
/*!
* Copyright (c) 2017 by Contributors
* \brief Tag definitions
* \file tags.h
*/
#ifndef TOPI_TAGS_H_
#define TOPI_TAGS_H_
namespace topi {
constexpr auto kElementWise = "ewise";
constexpr auto kBroadcast = "bcast";
constexpr auto kMatMult = "matmult";
constexpr auto kConv2dNCHW = "conv2d_nchw";
constexpr auto kConv2dHWCN = "conv2d_hwcn";
constexpr auto kDepthwiseConv2d = "depthwise_conv2d";
constexpr auto kGroupConv2d = "group_conv2d";
} // namespace topi
#endif // TOPI_TAGS_H_
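// Usage sketch (illustrative): tags let a schedule recognize what an op
// computes without inspecting its body. A hypothetical dispatcher, assuming
// the compute op's tag field is accessible as shown:
//
//   if (op->tag == topi::kElementWise) {
//     // safe to inline into consumers
//   } else if (op->tag == topi::kConv2dNCHW) {
//     // apply a convolution-specific schedule
//   }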