Commit 2fafa935 by tqchen

Add Operation class

parent adf4bfef
......@@ -8,6 +8,7 @@
#include <tvm/ir_node.h>
#include <unordered_map>
#include <vector>
#include "./expr.h"
namespace tvm {
......
/*!
* Copyright (c) 2016 by Contributors
* \file operation.h
* \brief Operation node can generate one or multiple Tensors
*/
#ifndef TVM_OPERATION_H_
#define TVM_OPERATION_H_
#include <string>
#include "./expr.h"
#include "./domain.h"
namespace tvm {
// internal node container for Operation
class OperationNode;
/*! \brief Operation that can produce one or multiple output tensors */
class Operation : public NodeRef {
public:
/*! \brief default constructor */
Operation() {}
/*!
 * \brief constructor from a shared node container
 * \param n the internal node container
 */
explicit Operation(std::shared_ptr<Node> n) : NodeRef(n) {}
/*!
 * \brief access the internal node container
 * \return the pointer to the internal node container
 */
inline const OperationNode* operator->() const;
};
/*!
* \brief base class of operation node.
*/
class OperationNode : public Node {
public:
/*! \brief The domain of iteration of this op. */
Domain domain;
/*! \brief optional name of the operation */
std::string name;
/*! \brief index iteration variables on the domain of operation. */
// NOTE(review): presumably one Var per dimension of `domain` — confirm at call sites.
Array<Var> iter_var;
};
/*!
* \brief A Compute op that compute a tensor over certain range.
*/
class ComputeOpNode : public OperationNode {
public:
/*! \brief the compute expression */
Expr body;
/*! \brief constructor */
ComputeOpNode() {}
// runtime type key used by the node reflection system
const char* type_key() const final {
return "ComputeOp";
}
// expose all fields (including the inherited domain/name/iter_var) to visitors
void VisitAttrs(AttrVisitor* v) final {
v->Visit("domain", &domain);
v->Visit("name", &name);
v->Visit("iter_var", &iter_var);
v->Visit("body", &body);
}
/*!
 * \brief construct a new ComputeOp
 * \param domain the iteration domain of the computation
 * \param name optional name of the operation
 * \param iter_var the iteration variables indexing the domain
 * \param body the compute expression
 * \return the created Operation handle
 */
static Operation make(Domain domain,
std::string name,
Array<Var> iter_var,
Expr body);
};
// Implementations of inline functions
inline const OperationNode* Operation::operator->() const {
// node_ holds a generic Node; downcast to the operation container type
return static_cast<const OperationNode*>(node_.get());
}
} // namespace tvm
#endif // TVM_OPERATION_H_
......@@ -14,29 +14,13 @@
#include "./base.h"
#include "./expr.h"
#include "./operation.h"
namespace tvm {
// Internal node container of Tensor
class TensorNode;
/*! \brief The compute function to specify the input source of a Tensor */
using FCompute = std::function<Expr (const Array<Var>& i)>;
// converters from other functions into fcompute
// Each adapter fixes the arity: an N-argument function is applied
// to the first N index variables of the array form.
inline FCompute GetFCompute(std::function<Expr(Var x)> f) {
return [f] (const Array<Var>& i) { return f(i[0]); };
}
// 2-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1]); };
}
// 3-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1], i[2]); };
}
// 4-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var, Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1], i[2], i[3]); };
}
using Halide::IR::FunctionRef;
/*!
......@@ -58,22 +42,6 @@ class Tensor : public FunctionRef {
std::string name = "tensor",
Type dtype = Float(32));
/*!
* \brief constructor of intermediate result.
* \param shape Shape of the tensor.
* \param fcompute The compute function to create the tensor.
* \param name The optional name of the tensor.
*/
Tensor(Array<Expr> shape, FCompute fcompute, std::string name = "tensor");
// same constructor, specialized for different fcompute function
Tensor(Array<Expr> shape, std::function<Expr(Var)> f, std::string name = "tensor")
:Tensor(shape, GetFCompute(f), name) {}
Tensor(Array<Expr> shape, std::function<Expr(Var, Var)> f, std::string name = "tensor")
:Tensor(shape, GetFCompute(f), name) {}
Tensor(Array<Expr> shape, std::function<Expr(Var, Var, Var)> f, std::string name = "tensor")
:Tensor(shape, GetFCompute(f), name) {}
Tensor(Array<Expr> shape, std::function<Expr(Var, Var, Var, Var)> f, std::string name = "tensor")
:Tensor(shape, GetFCompute(f), name) {}
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
......@@ -100,6 +68,58 @@ class Tensor : public FunctionRef {
friend std::ostream& operator<<(std::ostream &os, const Tensor& t);
};
/*! \brief The compute function to specify the input source of a Tensor */
using FCompute = std::function<Expr (const Array<Var>& i)>;
// converters from other functions into fcompute
// Each adapter fixes the arity: an N-argument function is applied
// to the first N index variables of the array form.
inline FCompute GetFCompute(std::function<Expr(Var x)> f) {
return [f] (const Array<Var>& i) { return f(i[0]); };
}
// 2-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1]); };
}
// 3-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1], i[2]); };
}
// 4-argument form
inline FCompute GetFCompute(std::function<Expr(Var, Var, Var, Var)> f) {
return [f] (const Array<Var>& i) { return f(i[0], i[1], i[2], i[3]); };
}
/*!
* \brief Construct a new tensor by computing over shape,
* using the computation rule: result_tensor[axis] = fcompute(axis)
* \param shape Shape of the tensor.
* \param fcompute The compute function to create the tensor.
* \param name The optional name of the tensor.
*/
Tensor Compute(Array<Expr> shape, FCompute fcompute, std::string name = "tensor");
// same as compute, specialized for different fcompute function
// 1-argument specialization; delegates to the GetFCompute adapter
// instead of duplicating the index-unpacking lambda inline.
inline Tensor Compute(Array<Expr> shape,
                      std::function<Expr(Var)> f,
                      std::string name = "tensor") {
  return Compute(shape, GetFCompute(f), name);
}
// 2-argument specialization; delegates to the GetFCompute adapter
// instead of duplicating the index-unpacking lambda inline.
inline Tensor Compute(Array<Expr> shape,
                      std::function<Expr(Var, Var)> f,
                      std::string name = "tensor") {
  return Compute(shape, GetFCompute(f), name);
}
// 3-argument specialization; delegates to the GetFCompute adapter
// instead of duplicating the index-unpacking lambda inline.
inline Tensor Compute(Array<Expr> shape,
                      std::function<Expr(Var, Var, Var)> f,
                      std::string name = "tensor") {
  return Compute(shape, GetFCompute(f), name);
}
// 4-argument specialization; delegates to the GetFCompute adapter
// instead of duplicating the index-unpacking lambda inline.
inline Tensor Compute(Array<Expr> shape,
                      std::function<Expr(Var, Var, Var, Var)> f,
                      std::string name = "tensor") {
  return Compute(shape, GetFCompute(f), name);
}
/*! \brief Node to represent a tensor */
class TensorNode : public FunctionBaseNode {
public:
......@@ -109,10 +129,10 @@ class TensorNode : public FunctionBaseNode {
std::string name;
/*! \brief data type in the content of the tensor */
Type dtype;
/*! \brief The index representing each dimension, used by source expression. */
Array<Var> dim_var;
/*! \brief source expression */
Expr source;
/*! \brief the source operation, can be None */
Operation source_op;
/*! \brief the output index from source operation */
int source_index{0};
/*! \brief constructor */
TensorNode() {}
const char* type_key() const final {
......@@ -122,8 +142,8 @@ class TensorNode : public FunctionBaseNode {
v->Visit("shape", &shape);
v->Visit("name", &name);
v->Visit("dtype", &dtype);
v->Visit("dim_var", &dim_var);
v->Visit("source", &source);
v->Visit("source_op", &source_op);
v->Visit("source_index", &source_index);
}
const std::string& func_name() const final {
return name;
......@@ -134,8 +154,8 @@ class TensorNode : public FunctionBaseNode {
static Tensor make(Array<Expr> shape,
std::string name,
Type dtype,
Array<Var> dim_var,
Expr source);
Operation source_op,
int source_index);
};
// implementations
......@@ -150,7 +170,6 @@ inline size_t Tensor::ndim() const {
inline std::ostream& operator<<(std::ostream &os, const Tensor& t) { // NOLINT(*)
os << "Tensor(shape=" << t->shape
<< ", source=" << t->source
<< ", name=" << t->name << ')';
return os;
}
......
......@@ -44,20 +44,44 @@ def convert(value):
return value
def Tensor(shape, fcompute=None, dtype=None, name="TensorObj"):
"""Construct a tensor object in dataflow.
def placeholder(shape, dtype=None, name="TensorObj"):
    """Construct an empty tensor object.

    Parameters
    ----------
    shape: Tuple of Expr
        The shape of the tensor

    dtype: str, optional
        The data type of the tensor

    name: str, optional
        The name hint of the tensor

    Returns
    -------
    tensor: tensor.Tensor
        The created tensor
    """
    # Default to float32 when no dtype is given.
    if dtype is None:
        dtype = float32
    return _function_internal._Tensor(
        shape, name, dtype, None, 0)
def compute(shape, fcompute, name="TensorCompute"):
"""Construct a new tensor by computing over the shape domain.
The compute rule is result[axis] = fcompute(axis)
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
fcompute: lambda function of *indices-> value
Specifies the input source expression
dtype: str, optional
The data type of the tensor, must specify when fcompute is not specified.
name: str, optional
The name hint of the tensor
......@@ -68,14 +92,12 @@ def Tensor(shape, fcompute=None, dtype=None, name="TensorObj"):
"""
ndim = len(shape)
dim_var = [Var("dim_var%d" % i) for i in range(ndim)]
if fcompute:
source = fcompute(*dim_var)
return _function_internal._Tensor(
shape, name, source.dtype, dim_var, source)
else:
dtype = float32 if dtype is None else dtype
return _function_internal._Tensor(
shape, name, dtype, None, None)
body = fcompute(*dim_var)
dom = [Range(0, x) for x in shape]
op_node = _function_internal._ComputeOp(
dom, name, dim_var, body)
return _function_internal._Tensor(
shape, name, body.dtype, op_node, 0)
def RDomain(dom):
......
......@@ -58,7 +58,6 @@ TVM_REGISTER_API(_make_Allocate)
TVM_REGISTER_API(_make_LetStmt)
.set_body([](const ArgStack& args, RetValue *ret) {
if (args.size() == 3) {
*ret = LetStmt::make(args.at(0),
args.at(1),
......
......@@ -88,6 +88,14 @@ TVM_REGISTER_API(_Tensor)
args.at(4));
});
// Register the ComputeOp constructor with the C API.
// Argument order: (domain, name, iter_var, body) -> Operation.
TVM_REGISTER_API(_ComputeOp)
.set_body([](const ArgStack& args, RetValue *ret) {
*ret = ComputeOpNode::make(args.at(0),
args.at(1),
args.at(2),
args.at(3));
});
TVM_REGISTER_API(_RDomain)
.set_body([](const ArgStack& args, RetValue *ret) {
*ret = RDomain(args.at(0).operator Domain());
......
/*!
* Copyright (c) 2016 by Contributors
* \file operation.cc
*/
#include <tvm/operation.h>
#include <tvm/tensor.h>
#include <memory>
namespace tvm {
Tensor Compute(Array<Expr> shape, FCompute fcompute, std::string name) {
  // Build the operation that defines the tensor's value: one index
  // variable ("dim_var<i>") and one range [0, shape[i]) per dimension.
  auto op = std::make_shared<ComputeOpNode>();
  const size_t ndim = shape.size();
  std::vector<Var> axis;
  std::vector<Range> bounds;
  for (size_t i = 0; i < ndim; ++i) {
    std::ostringstream var_name;
    var_name << "dim_var" << i;
    axis.push_back(Var(var_name.str()));
    bounds.push_back(Range(0, shape[i]));
  }
  op->iter_var = Array<Var>(axis);
  op->domain = Domain(bounds);
  op->body = fcompute(op->iter_var);
  op->name = name;
  // The resulting tensor is output 0 of this operation.
  auto tensor = std::make_shared<TensorNode>();
  tensor->name = name;
  tensor->shape = shape;
  tensor->dtype = op->body.type();
  tensor->source_op = Operation(op);
  tensor->source_index = 0;
  return Tensor(tensor);
}
Operation ComputeOpNode::make(Domain domain,
                              std::string name,
                              Array<Var> iter_var,
                              Expr body) {
  // Populate a fresh node and wrap it in an Operation handle.
  std::shared_ptr<ComputeOpNode> node = std::make_shared<ComputeOpNode>();
  node->body = body;
  node->iter_var = iter_var;
  node->name = name;
  node->domain = domain;
  return Operation(node);
}
TVM_REGISTER_NODE_TYPE(ComputeOpNode);
} // namespace tvm
......@@ -16,23 +16,6 @@ Tensor::Tensor(Array<Expr> shape, std::string name, Type dtype) {
node_ = std::move(node);
}
Tensor::Tensor(Array<Expr> shape, FCompute fcompute, std::string name) {
  // Allocate the node and record the basic metadata first.
  auto node = std::make_shared<TensorNode>();
  node->shape = std::move(shape);
  node->name = std::move(name);
  // One index variable per dimension: dim_index0, dim_index1, ...
  const size_t ndim = node->shape.size();
  std::vector<Var> axis;
  for (size_t i = 0; i < ndim; ++i) {
    std::ostringstream var_name;
    var_name << "dim_index" << i;
    axis.push_back(Var(var_name.str()));
  }
  node->dim_var = Array<Var>(axis);
  // The source expression is the user compute rule applied to the indices;
  // the tensor's dtype follows from it.
  node->source = fcompute(node->dim_var);
  node->dtype = node->source.type();
  node_ = std::move(node);
}
Expr Tensor::operator()(Array<Expr> indices) const {
using Halide::Internal::Call;
CHECK_EQ(ndim(), indices.size())
......@@ -42,21 +25,18 @@ Expr Tensor::operator()(Array<Expr> indices) const {
(*this)->dtype, (*this)->name, indices, Call::Halide, *this);
}
Tensor TensorNode::make(Array<Expr> shape,
std::string name,
Type dtype,
Array<Var> dim_var,
Expr source) {
Operation source_op,
int source_index) {
auto n = std::make_shared<TensorNode>();
if (source.defined()) {
CHECK_EQ(source.type(), dtype);
CHECK_EQ(dim_var.size(), shape.size());
}
n->shape = shape;
n->name = name;
n->dtype = dtype;
n->dim_var = dim_var;
n->source = source;
n->source_op = source_op;
n->source_index = source_index;
return Tensor(n);
}
......
......@@ -121,7 +121,6 @@ class IRConvertSSA : public IRMutator {
} else {
Expr e = IRMutator::Mutate(expr);
return e;
}
}
......
......@@ -9,7 +9,7 @@ TEST(Tensor, Basic) {
Tensor A({m, l}, "A");
Tensor B({n, l}, "B");
auto C = Tensor({m, n}, [&](Var i, Var j) {
auto C = Compute({m, n}, [&](Var i, Var j) {
return A(i, j) * B(j, i);
}, "C");
}
......
......@@ -4,9 +4,9 @@ def test_schedule_create():
m = tvm.Var('m')
n = tvm.Var('n')
l = tvm.Var('l')
A = tvm.Tensor((m, l), name='A')
B = tvm.Tensor((n, l), name='B')
T = tvm.Tensor((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
A = tvm.placeholder((m, l), name='A')
B = tvm.placeholder((n, l), name='B')
T = tvm.compute((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
sch = tvm.Schedule(T, scope="shared")
tk1 = tvm.Split(0, 10)
......
......@@ -4,9 +4,9 @@ def test_tensor():
m = tvm.Var('m')
n = tvm.Var('n')
l = tvm.Var('l')
A = tvm.Tensor((m, l), name='A')
B = tvm.Tensor((n, l), name='B')
T = tvm.Tensor((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
A = tvm.placeholder((m, l), name='A')
B = tvm.placeholder((n, l), name='B')
T = tvm.compute((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
print(T.source)
assert(tuple(T.shape) == (m, n, l))
......@@ -16,11 +16,11 @@ def test_tensor_reduce():
m = tvm.Var('m')
n = tvm.Var('n')
l = tvm.Var('l')
A = tvm.Tensor((m, l), name='A')
B = tvm.Tensor((n, l), name='B')
T = tvm.Tensor((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
A = tvm.placeholder((m, l), name='A')
B = tvm.placeholder((n, l), name='B')
T = tvm.compute((m, n, l), lambda i, j, k: A(i, k) * B(j, k))
rd = tvm.RDomain(tvm.Range(A.shape[1]))
C = tvm.Tensor((m, n), lambda i, j: tvm.sum(T(i, j, rd.index[0]), rdom=rd))
C = tvm.compute((m, n), lambda i, j: tvm.sum(T(i, j, rd.index[0]), rdom=rd))
print(C.source)
if __name__ == "__main__":
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment