Commit 76356efd by alex-weaver Committed by Tianqi Chen

TOPI C++ bugfixes (#864)

* TOPI C++ bugfixes

* Fix lint
parent cb465e7e
......@@ -66,7 +66,7 @@ inline tvm::Tensor dense_cuda(const Target& target,
*
* \return A schedule for the given ops.
*/
Schedule schedule_dense(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_dense(const Target &target, const Array<Tensor>& outs) {
if (target.target_name == "cuda" &&
target.libs.count("cublas") > 0) {
return topi::generic::schedule_extern(target, outs);
......
......@@ -25,7 +25,7 @@ namespace cuda {
*
* \return The schedule given by sch
*/
Schedule ScheduleOutputForExtern(Target target, Operation op, Schedule sch) {
inline Schedule ScheduleOutputForExtern(Target target, Operation op, Schedule sch) {
auto x = op.output(0);
auto fused = Fuse(sch[x], sch[x]->op.as<ComputeOpNode>()->axis);
auto num_thread = target.max_num_threads;
......@@ -45,7 +45,7 @@ Schedule ScheduleOutputForExtern(Target target, Operation op, Schedule sch) {
*
* \return A schedule for the op.
*/
Schedule schedule_extern(const Target& target, Array<Tensor> outs) {
inline Schedule schedule_extern(const Target& target, Array<Tensor> outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -22,7 +22,7 @@ namespace cuda {
* \param op The operation representing the injective operation.
* \param s The schedule to apply this scheduling to
*/
void ScheduleInjectiveOp(const Target &target, Operation op, Schedule s) {
inline void ScheduleInjectiveOp(const Target &target, Operation op, Schedule s) {
auto x = op.output(0);
auto fused = Fuse(s[x], s[x]->op.as<ComputeOpNode>()->axis);
auto num_thread = target.max_num_threads;
......@@ -40,7 +40,7 @@ void ScheduleInjectiveOp(const Target &target, Operation op, Schedule s) {
*
* \return A schedule for the given ops.
*/
Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -25,7 +25,7 @@ namespace cuda {
*
* \return A schedule for the given ops.
*/
Schedule schedule_pool(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_pool(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......@@ -90,7 +90,7 @@ Schedule schedule_pool(const Target &target, const Array<Tensor>& outs) {
*
* \return A schedule for the given ops.
*/
Schedule schedule_global_pool(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_global_pool(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -24,7 +24,7 @@ namespace cuda {
*
* \return A schedule for the given ops.
*/
Schedule schedule_softmax(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_softmax(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -21,7 +21,7 @@ using namespace tvm;
* \return True iff the given array contains the given item.
*/
template<typename T>
bool contains(Array<T> array, T item) {
inline bool contains(Array<T> array, T item) {
for (auto& i : array) {
if (i == item) {
return true;
......
......@@ -22,7 +22,7 @@ using namespace tvm;
*
* \return true if the given expr is a constant int or uint, false otherwise.
*/
bool IsConstInt(Expr expr) {
inline bool IsConstInt(Expr expr) {
return
expr->derived_from<tvm::ir::IntImm>() ||
expr->derived_from<tvm::ir::UIntImm>();
......@@ -36,7 +36,7 @@ bool IsConstInt(Expr expr) {
*
* \return The integer value.
*/
int64_t GetConstInt(Expr expr) {
inline int64_t GetConstInt(Expr expr) {
if (expr->derived_from<tvm::ir::IntImm>()) {
return expr.as<tvm::ir::IntImm>()->value;
}
......@@ -56,7 +56,7 @@ int64_t GetConstInt(Expr expr) {
*
* \return A vector of the integer values
*/
std::vector<int> GetConstIntValues(Array<Expr> exprs, const std::string& var_name) {
inline std::vector<int> GetConstIntValues(Array<Expr> exprs, const std::string& var_name) {
std::vector<int> result;
for (auto expr : exprs) {
CHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers";
......
......@@ -24,7 +24,7 @@ using namespace tvm;
*
* \return The Buffer object
*/
Buffer DeclExternBuffer(Array<Expr> shape,
inline Buffer DeclExternBuffer(Array<Expr> shape,
Type dtype,
std::string name) {
auto data = var(name, Handle());
......@@ -56,7 +56,7 @@ using FExtern = std::function<Expr(Array<Buffer>, Array<Buffer>)>;
* be one output Tensor for each element of out_shapes, with dtype equal to the corresponding
* element of out_types.
*/
Array<Tensor> make_extern(const Array< Array<Expr> >& out_shapes,
inline Array<Tensor> make_extern(const Array< Array<Expr> >& out_shapes,
const std::vector<Type>& out_types,
const Array<Tensor>& inputs,
FExtern fextern,
......@@ -95,7 +95,7 @@ Array<Tensor> make_extern(const Array< Array<Expr> >& out_shapes,
*
* \return An expression representing the pack operation
*/
Expr pack_buffer(Buffer buf) {
inline Expr pack_buffer(Buffer buf) {
CHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element";
auto shape = tvm::ir::Call::make(Handle(), tvm::ir::intrinsic::tvm_stack_make_shape,
buf->shape, tvm::ir::Call::CallType::Intrinsic);
......@@ -127,7 +127,7 @@ Expr pack_buffer(Buffer buf) {
*
* \return An expression representing the invocation
*/
Expr call_packed(Array<Expr> args) {
inline Expr call_packed(Array<Expr> args) {
return tvm::ir::Call::make(Int(32), tvm::ir::intrinsic::tvm_call_packed,
args, tvm::ir::Call::CallType::Intrinsic);
}
......
......@@ -20,7 +20,7 @@ using namespace tvm;
*
* \return The fused iteration variable
*/
IterVar Fuse(Stage stage, const Array<IterVar>& args) {
inline IterVar Fuse(Stage stage, const Array<IterVar>& args) {
CHECK_GE(args.size(), 1) << "Fuse requires at least 1 arg";
auto fused = args[0];
......
......@@ -23,7 +23,7 @@ using namespace tvm;
* \return An array of 4 elements, representing padding sizes for
* each individual side. The array is in the order { top, left, bottom, right }
*/
Array<Expr> GetPadTuple(Expr pad_h, Expr pad_w) {
inline Array<Expr> GetPadTuple(Expr pad_h, Expr pad_w) {
pad_h *= 2;
pad_w *= 2;
......
......@@ -24,7 +24,7 @@ namespace generic {
*
* \return A schedule for the given ops.
*/
Schedule default_schedule(const Target& target, Array<Tensor> outs, bool auto_inline) {
inline Schedule default_schedule(const Target& target, Array<Tensor> outs, bool auto_inline) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -23,7 +23,7 @@ namespace generic {
*
* \return A schedule for the op.
*/
Schedule schedule_extern(const Target& target, Array<Tensor> outs) {
inline Schedule schedule_extern(const Target& target, Array<Tensor> outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -24,7 +24,7 @@ namespace generic {
*
* \return A schedule for the given ops.
*/
Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -33,7 +33,7 @@ inline Tensor flatten(const Tensor& x,
auto ishape = x->shape;
int dim = 1;
for (size_t i = 1; i < ishape.size(); ++i) {
dim = dim * static_cast<int>(GetConstInt(ishape[i]));
dim = dim * static_cast<int>(topi::detail::GetConstInt(ishape[i]));
}
Array<Expr> oshape({ ishape[0], dim });
......
......@@ -67,7 +67,7 @@ inline tvm::Tensor dense_rocm(const Target& target,
*
* \return A schedule for the given ops.
*/
Schedule schedule_dense(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_dense(const Target &target, const Array<Tensor>& outs) {
if (target.target_name == "rocm" &&
target.libs.count("rocblas") > 0) {
return topi::generic::schedule_extern(target, outs);
......
......@@ -23,7 +23,7 @@ namespace x86 {
*
* \return A schedule for the given ops.
*/
Schedule schedule_binarize_pack(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_binarize_pack(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......@@ -55,7 +55,7 @@ Schedule schedule_binarize_pack(const Target &target, const Array<Tensor>& outs)
*
* \return A schedule for the given ops.
*/
Schedule schedule_binary_dense(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_binary_dense(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -24,7 +24,9 @@ namespace x86 {
*
* \return A schedule for the given ops.
*/
Schedule default_schedule(const Target &target, const Array<Tensor>& outs, bool auto_inline) {
inline Schedule default_schedule(const Target &target,
const Array<Tensor>& outs,
bool auto_inline) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
......@@ -23,7 +23,7 @@ namespace x86 {
*
* \return A schedule for the given ops.
*/
Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
inline Schedule schedule_injective(const Target &target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment