Commit cbdd14f1 by Nicolas Vasilache Committed by Tianqi Chen

[TOPI] C++ doc (#320)

parent b0c42f3b
Subproject commit 30a85d860567aa30d013a5e75fbd1b0ee2ebe93c Subproject commit 326e2fa18734f0592d257da6b8cfaae90a499c5c
/* /*!
* Copyright (c) 2017 by Contributors * Copyright (c) 2017 by Contributors
* \brief Broadcast op constructions * \brief Broadcast op constructions
* \file broadcast.h * \file topi/broadcast.h
*/ */
#ifndef TOPI_BROADCAST_H_ #ifndef TOPI_BROADCAST_H_
#define TOPI_BROADCAST_H_ #define TOPI_BROADCAST_H_
#include <topi/detail/broadcast.h> #include <string>
#include "topi/detail/broadcast.h"
#include "topi/tags.h"
namespace topi { namespace topi {
/*!
 * \brief Creates an operation that broadcasts a tensor into a compatible
 * shape according to numpy's rules
 *
 * \param t The input tensor
 * \param output_shape The target output shape, must be compatible
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a broadcast operation
 */
inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
                                const tvm::Array<tvm::Expr>& output_shape,
                                std::string name = "tensor",
                                std::string tag = kBroadcast) {
  CHECK_GE(output_shape.size(), t->shape.size())
      << "Not a broadcast, output dimensionality smaller than input.\noutput: "
      << output_shape << "\nvs\ninput: " << t;
  auto bh = detail::BroadcastShape(output_shape, t->shape);
  CHECK_EQ(output_shape.size(), bh.common_shape.size());
  // Broadcasting may only replicate the input; every requested output
  // dimension must match the computed common shape exactly.
  for (size_t i = 0; i < output_shape.size(); ++i) {
    CHECK(tvm::ir::Equal(output_shape[i], bh.common_shape[i]));
  }
  auto l = [&](tvm::Array<tvm::Var> ovars) {
    // Map each output index back to the (possibly collapsed) input index.
    return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
  };
  return tvm::compute(
      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
      l,
      name,
      tag);
}
/*!
 * \brief Creates an operation that performs pointwise addition of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor to add
 * \param B The second tensor to add
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise addition with broadcast
 */
inline tvm::Tensor broadcast_add(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a + b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
/*!
 * \brief Creates an operation that performs pointwise subtraction of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor
 * \param B The second tensor to subtract from the first
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise subtraction with broadcast
 */
inline tvm::Tensor broadcast_sub(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a - b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
/*!
 * \brief Creates an operation that performs pointwise multiplication of 2
 * tensors and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor to multiply
 * \param B The second tensor to multiply
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise multiplication with broadcast
 */
inline tvm::Tensor broadcast_mul(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a * b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
/*!
 * \brief Creates an operation that performs pointwise division of 2 tensors
 * and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor
 * \param B The second tensor to divide the first tensor with
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise division with broadcast
 */
inline tvm::Tensor broadcast_div(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a / b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
/*!
 * \brief Creates an operation that performs pointwise modulo remainder of 2
 * tensors and broadcasts them into a common compatible shape where necessary,
 * according to numpy's rules
 *
 * \param A The first tensor
 * \param B The second tensor to compute A % B
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is a pointwise modulo remainder with
 * broadcast
 */
inline tvm::Tensor broadcast_mod(const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = kBroadcast) {
  auto l = [&](tvm::Expr a, tvm::Expr b) { return a % b; };
  return detail::WithBroadcast(l, A, B, name, tag);
}
} // namespace topi } // namespace topi
......
/* /*!
* Copyright (c) 2017 by Contributors * Copyright (c) 2017 by Contributors
* \brief Detail broadcast. * \brief Detail broadcast.
* \file broadcast.h * \file topi/detail/broadcast.h
*/ */
#ifndef TOPI_DETAIL_BROADCAST_H_ #ifndef TOPI_DETAIL_BROADCAST_H_
#define TOPI_DETAIL_BROADCAST_H_ #define TOPI_DETAIL_BROADCAST_H_
#include <algorithm> #include <algorithm>
#include <deque> #include <deque>
#include <string>
#include "tvm/ir_pass.h" #include "tvm/ir_pass.h"
#include "tvm/tvm.h" #include "tvm/tvm.h"
...@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast( ...@@ -90,15 +91,21 @@ inline tvm::Array<tvm::Expr> InputIndexFromBroadcast(
/*!
 * \brief Applies a pointwise binary expression to two tensors, broadcasting
 * them into a common compatible shape where necessary.
 *
 * \param op The binary expression to apply to each pair of elements
 * \param A The first tensor
 * \param B The second tensor
 * \param name The name of the resulting operation
 * \param tag The tag to mark the resulting operation
 *
 * \return A Tensor whose op member applies \p op with broadcast
 */
template <typename FBinaryExpr>
inline tvm::Tensor WithBroadcast(FBinaryExpr op,
                                 const tvm::Tensor& A,
                                 const tvm::Tensor& B,
                                 std::string name = "tensor",
                                 std::string tag = "") {
  auto bh = BroadcastShape(A->shape, B->shape);
  auto l = [&](tvm::Array<tvm::Var> ovars) {
    // Map each output index back to the matching index of each input,
    // collapsing broadcast (size-1 or missing) dimensions.
    return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
              B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
  };
  return tvm::compute(
      tvm::Array<tvm::Expr>(bh.common_shape.begin(), bh.common_shape.end()),
      l,
      name,
      tag);
}
} // namespace detail } // namespace detail
......
...@@ -6,17 +6,22 @@ ...@@ -6,17 +6,22 @@
#ifndef TOPI_EWISE_H_ #ifndef TOPI_EWISE_H_
#define TOPI_EWISE_H_ #define TOPI_EWISE_H_
#include <tvm/tvm.h> #include <string>
#include "topi/tags.h"
#include "tvm/tvm.h"
namespace topi { namespace topi {
using namespace tvm; using namespace tvm;
// Unary intrinsic operators // Unary intrinsic operators
#define TOPI_DECLARE_UNARY_OP(OpName) \ #define TOPI_DECLARE_UNARY_OP(OpName) \
inline Tensor OpName(const Tensor& x) { \ inline Tensor OpName(const Tensor& x, \
std::string name = "tensor", \
std::string tag = kElementWise) { \
return compute(x->shape, [&](const Array<Var>& i) { \ return compute(x->shape, [&](const Array<Var>& i) { \
return ::tvm::OpName(x(i)); \ return ::tvm::OpName(x(i)); \
}, "tensor", "ewise"); \ }, name, tag); \
} }
TOPI_DECLARE_UNARY_OP(exp); TOPI_DECLARE_UNARY_OP(exp);
......
/*!
* Copyright (c) 2017 by Contributors
* \brief Tag definitions
* \file tags.h
*/
#ifndef TOPI_TAGS_H_
#define TOPI_TAGS_H_
namespace topi {

// Canonical tag strings attached to generated compute ops; schedules
// elsewhere in TOPI match on these tags to pick a scheduling strategy.

/*! \brief Tag for element-wise operations */
constexpr auto kElementWise = "ewise";
/*! \brief Tag for broadcast operations */
constexpr auto kBroadcast = "bcast";
/*! \brief Tag for matrix multiplication operations */
constexpr auto kMatMult = "matmult";
/*! \brief Tag for conv2d with NCHW data layout */
constexpr auto kConv2dNCHW = "conv2d_nchw";
/*! \brief Tag for conv2d with HWCN data layout */
constexpr auto kConv2dHWCN = "conv2d_hwcn";
/*! \brief Tag for depthwise conv2d operations */
constexpr auto kDepthwiseConv2d = "depthwise_conv2d";
/*! \brief Tag for grouped conv2d operations */
constexpr auto kGroupConv2d = "group_conv2d";
} // namespace topi
#endif // TOPI_TAGS_H_
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment