Commit 3cad2478 by Tianqi Chen, committed by GitHub

[OP] Initial Structure of Op Library (#198)

* [OP] Initial start of op library

* add gtest
parent 69aefaa3
@@ -53,7 +53,7 @@ RUNTIME_DEP = $(RUNTIME_OBJ)
 # The flags
 LDFLAGS = -pthread -lm -ldl
 CFLAGS = -std=c++11 -Wall -O2\
-	-Iinclude -I$(DLPACK_PATH)/include -I$(DMLC_CORE_PATH)/include -IHalideIR/src -fPIC
+	-Iinclude -I$(DLPACK_PATH)/include -I$(DMLC_CORE_PATH)/include -IHalideIR/src -Itopi/include -fPIC
 LLVM_CFLAGS= -fno-rtti -DDMLC_ENABLE_RTTI=0
 FRAMEWORKS =
 OBJCFLAGS = -fno-objc-arc
@@ -155,11 +155,13 @@ LIBHALIDEIR:
 	+ cd HalideIR; make lib/libHalideIR.a ; cd $(ROOTDIR)

 cpplint:
+	python dmlc-core/scripts/lint.py topi cpp topi/include;
 	python dmlc-core/scripts/lint.py tvm cpp include src verilog\
 	 examples/extension/src examples/graph_executor/src

 pylint:
 	pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
+	pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc

 lint: cpplint pylint
......
-Subproject commit a6c5701219e635fea808d264aefc5b03c3aec314
+Subproject commit 2e1fcedfed1253289ade441a56427e44fb9c98aa
 TVM Examples
 ============
-This folder contains various example projects on how to use TVM.
+This folder contains various example projects using TVM.

 - [extension](extension) how to extend TVM C++ API along with the Python API.
-- [operator](operator) implementation of operators.
+- [graph_executor](graph_executor) how to build a graph executor.
+- See also [topi](../topi) for operators and operator recipes.
@@ -7,8 +7,8 @@
 #define TVM_EXPR_H_

 #include <ir/Expr.h>
-#include <ir/IRPrinter.h>
 #include <ir/IROperator.h>
+#include <ir/IRPrinter.h>
 #include <string>
 #include <algorithm>
 #include "./base.h"
@@ -40,8 +40,6 @@ using Halide::Internal::as_const_uint;
 using Halide::Internal::const_true;
 using Halide::Internal::const_false;
 using Halide::Internal::is_no_op;
-using Halide::likely;
-using Halide::likely_if_innermost;

 inline Type TVMShapeIndexType() {
   if (std::is_signed<tvm_index_t>::value) {

@@ -217,37 +215,8 @@ IterVar reduce_axis(Range dom, std::string name = "rv");
 using Domain = Array<Range>;
-// functions
-using Halide::cast;
-using Halide::min;
-using Halide::max;
-using Halide::abs;
-using Halide::select;
-
-/*!
- * \brief sum of source expression over axis
- * \param source The source expression.
- * \param axis List of iteration variables that will be used for reduction.
- */
-Expr sum(Expr source, Array<IterVar> axis);
-
-/*!
- * \brief max of source expression over axis
- * \param source The source expression.
- * \param axis List of iteration variables that will be used for reduction.
- */
-Expr max(Expr source, Array<IterVar> axis);
-
-/*!
- * \brief min of source expression over axis
- * \param source The source expression.
- * \param axis List of iteration variables that will be used for reduction.
- */
-Expr min(Expr source, Array<IterVar> axis);
 // print functions for expr
 std::ostream& operator<<(std::ostream& os, const NodeRef& n);  // NOLINT(*)
 // definition of Node.
 /*!
  * \brief An iteration variable representing an iteration
......
/*!
 *  Copyright (c) 2017 by Contributors
 * \file ir_operator.h
 * \brief Common operators of Expr
 */
#ifndef TVM_IR_OPERATOR_H_
#define TVM_IR_OPERATOR_H_

#include <algorithm>
#include "./expr.h"
#include "./ir.h"

namespace tvm {

using Halide::likely;
using Halide::likely_if_innermost;
// functions
using Halide::cast;
using Halide::min;
using Halide::max;
using Halide::abs;
using Halide::select;

/*!
 * \brief sum of source expression over axis
 * \param source The source expression.
 * \param axis List of iteration variables that will be used for reduction.
 */
Expr sum(Expr source, Array<IterVar> axis);

/*!
 * \brief max of source expression over axis
 * \param source The source expression.
 * \param axis List of iteration variables that will be used for reduction.
 */
Expr max(Expr source, Array<IterVar> axis);

/*!
 * \brief min of source expression over axis
 * \param source The source expression.
 * \param axis List of iteration variables that will be used for reduction.
 */
Expr min(Expr source, Array<IterVar> axis);

// Unary intrinsic operators
#define TVM_DECLARE_INTRIN_UNARY(OpName)                                  \
  inline Expr OpName(Expr x) {                                            \
    return ir::Call::make(x.type(), #OpName, {x}, ir::Call::PureExtern);  \
  }

TVM_DECLARE_INTRIN_UNARY(exp);
TVM_DECLARE_INTRIN_UNARY(tanh);
TVM_DECLARE_INTRIN_UNARY(sigmoid);

}  // namespace tvm
#endif  // TVM_IR_OPERATOR_H_
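Both the reducers and the unary intrinsics declared here are mirrored in the TVM Python frontend. A minimal sketch of the intended usage (not part of this commit; it assumes the `tvm` Python package from this tree is importable):

```python
import tvm

# Reduction: B[i] = sum over k of A[i, k], built from sum() + reduce_axis.
m = tvm.var('m')
n = tvm.var('n')
A = tvm.placeholder((m, n), name='A')
k = tvm.reduce_axis((0, n), name='k')
B = tvm.compute((m,), lambda i: tvm.sum(A[i, k], axis=k), name='B')

# Unary intrinsic: tvm.exp builds the analogous scalar intrinsic call.
x = tvm.var('x', dtype='float32')
y = tvm.exp(x)
```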
@@ -10,6 +10,7 @@
 #include <vector>
 #include <unordered_map>
 #include "./expr.h"
+#include "./ir_operator.h"
 #include "./tensor.h"
 #include "./schedule.h"
 #include "./arithmetic.h"
......
@@ -64,6 +64,12 @@ class Tensor : public NodeRef {
    */
   Expr operator()(Array<Expr> indices) const;
   /*!
+   * \brief Take elements from the tensor
+   * \param indices the indices.
+   * \return the result expression representing tensor read.
+   */
+  Expr operator()(Array<Var> indices) const;
+  /*!
    * \brief data structure to represent a slice that fixes first k coordinates.
    * This is used to enable syntax sugar of Tensor[x][y][z] to get the element.
    */
......
@@ -8,6 +8,7 @@
 #include "./base.h"
 #include "./expr.h"
+#include "./ir_operator.h"
 #include "./tensor.h"
 #include "./operation.h"
 #include "./packed_func_ext.h"
......
@@ -46,33 +46,6 @@ IterVar reduce_axis(Range dom, std::string name) {
       dom, Var(name), kCommReduce);
 }
-
-Expr sum(Expr source, Array<IterVar> rdom) {
-  Var x("x"), y("y");
-  Expr result = ir::Add::make(x, y);
-  Expr identity_element = make_zero(source.type());
-  ir::CommReducer combiner =
-      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
-  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
-}
-
-Expr max(Expr source, Array<IterVar> rdom) {
-  Var x("x"), y("y");
-  Expr result = ir::Max::make(x, y);
-  Expr identity_element = source.type().min();
-  ir::CommReducer combiner =
-      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
-  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
-}
-
-Expr min(Expr source, Array<IterVar> rdom) {
-  Var x("x"), y("y");
-  Expr result = ir::Min::make(x, y);
-  Expr identity_element = source.type().max();
-  ir::CommReducer combiner =
-      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
-  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
-}
 std::ostream& operator<<(std::ostream& os, const NodeRef& n) {  // NOLINT(*)
   IRPrinter(os).print(n);
   return os;
......
/*!
 *  Copyright (c) 2017 by Contributors
 * \file ir_operator.cc
 */
#include <tvm/base.h>
#include <tvm/ir.h>

namespace tvm {

Expr sum(Expr source, Array<IterVar> rdom) {
  Var x("x"), y("y");
  Expr result = ir::Add::make(x, y);
  Expr identity_element = make_zero(source.type());
  ir::CommReducer combiner =
      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
}

Expr max(Expr source, Array<IterVar> rdom) {
  Var x("x"), y("y");
  Expr result = ir::Max::make(x, y);
  Expr identity_element = source.type().min();
  ir::CommReducer combiner =
      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
}

Expr min(Expr source, Array<IterVar> rdom) {
  Var x("x"), y("y");
  Expr result = ir::Min::make(x, y);
  Expr identity_element = source.type().max();
  ir::CommReducer combiner =
      ir::CommReducerNode::make({x}, {y}, {result}, {identity_element});
  return ir::Reduce::make(combiner, {source}, rdom, make_const(Bool(1), true), 0);
}

}  // namespace tvm
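Each definition pairs a combining expression with an identity element: zero for sum, the type's minimum for max, the type's maximum for min. The resulting Reduce node can be inspected from the Python frontend; a small sketch (standard tvm API, not part of this commit):

```python
import tvm

m = tvm.var('m')
n = tvm.var('n')
A = tvm.placeholder((m, n), name='A')
k = tvm.reduce_axis((0, n), name='k')

# Row-wise max: the combiner is Max(x, y) and the identity element is the
# minimum value of the source type, exactly as constructed in max() above.
B = tvm.compute((m,), lambda i: tvm.max(A[i, k], axis=k), name='B')
print(B.op.body)  # shows the underlying Reduce expression
```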
@@ -9,6 +9,11 @@
 namespace tvm {

+Expr Tensor::operator()(Array<Var> indices) const {
+  Array<Expr> arr(indices.begin(), indices.end());
+  return operator()(arr);
+}
+
 Expr Tensor::operator()(Array<Expr> indices) const {
   using Halide::Internal::Call;
   CHECK_EQ(ndim(), indices.size())
......
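The new Array<Var> overload is what lets C++ compute lambdas call a tensor directly with their index variables, as topi's ewise.h below does with x(i). The Python frontend has the matching sugar, since a Tensor is callable with indices; a minimal sketch (not part of this commit):

```python
import tvm

m = tvm.var('m')
A = tvm.placeholder((m, m), name='A')
# Calling the tensor with the unpacked index tuple mirrors the new
# C++ Tensor::operator()(Array<Var>) overload.
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0)
```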
@@ -14,7 +14,6 @@ TEST(Tensor, Basic) {
     }, "C");
   Tensor::Slice x = A[n];
-  LOG(INFO) << C->op.as<ComputeOpNode>()->body;
 }

 TEST(Tensor, Reduce) {
......
#include <tvm/tvm.h>
#include <topi/ewise.h>
#include <gtest/gtest.h>

namespace topi {

TEST(Tensor, Basic) {
  using namespace tvm;
  Var m("m"), n("n"), l("l");
  Tensor A = placeholder({m, l}, Float(32), "A");
  auto C = topi::exp(A);
}

}  // namespace topi

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  testing::FLAGS_gtest_death_test_style = "threadsafe";
  return RUN_ALL_TESTS();
}
import tvm
import topi

def test_ewise():
    m = tvm.var('m')
    l = tvm.var('l')
    A = tvm.placeholder((m, l), name='A')

    def test_apply(func, name):
        B = func(A)
        assert tuple(B.shape) == tuple(A.shape)
        assert B.op.body[0].name == name

    test_apply(topi.exp, "exp")
    test_apply(topi.tanh, "tanh")
    test_apply(topi.sigmoid, "sigmoid")

if __name__ == "__main__":
    test_ewise()
 #!/bin/bash
-export PYTHONPATH=python
+export PYTHONPATH=python:topi/python
 TVM_FFI=ctypes python -m nose -v tests/python/unittest || exit -1
 TVM_FFI=ctypes python3 -m nose -v tests/python/unittest || exit -1
......
# TVM Operator Inventory
-# Operator Collections
+# TVM Operator Inventory

-This folder contains collections of operators for perf tuning operators with TVM.
-The collection is contributed and maintained by the community.
-
-## Perf Workflow
+topi is the operator collection library for TVM, intended to share the effort of
+crafting and optimizing TVM-generated kernels. The goals (a short usage sketch
+follows the list):
+- Provide sugar for operator declaration.
+- Give common primitives for fused op creation.
+- Provide commonly used schedules under each architecture.
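In practice the declaration sugar means an element-wise operator is a single call; a minimal sketch, assuming `topi/python` is on PYTHONPATH as set up by the test script above:

```python
import tvm
import topi

m = tvm.var('m')
l = tvm.var('l')
A = tvm.placeholder((m, l), name='A')
B = topi.exp(A)  # one call instead of spelling out the tvm.compute body
assert tuple(B.shape) == tuple(A.shape)
```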
+## Organization
+- [include](include) C++ library, header only.
+- [python](python) Python library.
+- [recipe](recipe) Recipe collections containing useful operator examples.
+
+## Guidelines
+- Use numpy-style naming conventions for known ops.
+- Separate operator declaration from schedule when possible (see the sketch
+  after this list).
+  - This can be inconvenient but enables more general scheduling across ops.
+  - We can always recover the tensors from their outputs by traversing the tree.
+- Deliberately assert the requirements.
+  - Some kernels have requirements on shape and data layout; assert them.
+- Be data-layout aware: if the layout is not specified in an argument or in the
+  function name, assume NCHW by default.
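Separating declaration from schedule looks like this in the Python frontend; a sketch under the same assumptions as above, with the split factor chosen arbitrarily:

```python
import tvm
import topi

n = tvm.var('n')
A = tvm.placeholder((n,), name='A')

# Declaration: what to compute, with no target-specific decisions.
B = topi.exp(A)

# Schedule: how to compute it, chosen separately per architecture.
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
```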
+
+## Performance Tuning Workflow
 Since TVM is work in progress, some optimization might not be perfect.
 One quick way I find useful is to do codegen plus manual modification.
 The workflow is:
......
/*!
 *  Copyright (c) 2017 by Contributors
 * \file ewise.h
 * \brief Elementwise op constructions
 */
#ifndef TOPI_EWISE_H_
#define TOPI_EWISE_H_

#include <tvm/tvm.h>

namespace topi {
using namespace tvm;

// Unary intrinsic operators
#define TOPI_DECLARE_UNARY_OP(OpName)                    \
  inline Tensor OpName(const Tensor& x) {                \
    return compute(x->shape, [&](const Array<Var>& i) {  \
        return ::tvm::OpName(x(i));                      \
      });                                                \
  }

TOPI_DECLARE_UNARY_OP(exp);
TOPI_DECLARE_UNARY_OP(tanh);
TOPI_DECLARE_UNARY_OP(sigmoid);

}  // namespace topi
#endif  // TOPI_EWISE_H_
# pylint: disable=redefined-builtin, wildcard-import
"""TVM Operator Inventory."""
from __future__ import absolute_import as _abs
from .ewise import *
"""CUDA specific declaration and schedule."""
"""Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
def exp(x):
"""Take exponential of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.exp(x(*i)))
def tanh(x):
"""Take hyperbolic tanh of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.tanh(x(*i)))
def sigmoid(x):
"""Take sigmoid tanh of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.sigmoid(x(*i)))
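A quick end-to-end check of these wrappers; a sketch assuming an LLVM-enabled TVM build (only standard tvm APIs are used, and none of this is part of the commit):

```python
import numpy as np
import tvm
import topi

n = 1024
A = tvm.placeholder((n,), name='A')
B = topi.exp(A)
s = tvm.create_schedule(B.op)
fexp = tvm.build(s, [A, B], target='llvm', name='fexp')

# Run the compiled kernel and compare against numpy.
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)
fexp(a, b)
np.testing.assert_allclose(b.asnumpy(), np.exp(a.asnumpy()), rtol=1e-5)
```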
"""Example code to do square matrix multiplication."""
import tvm import tvm
import os import os
from tvm.contrib import nvcc_compiler from tvm.contrib import nvcc_compiler
......