Commit d5d19449 by Siju, committed by Tianqi Chen

[RELAY] Ops Dense, leaky_relu (#1828)

parent 8b01540d
@@ -51,6 +51,7 @@ This level enables typical convnet models.
tvm.relay.nn.conv2d
tvm.relay.nn.conv2d_transpose
tvm.relay.nn.dense
tvm.relay.nn.max_pool2d
tvm.relay.nn.avg_pool2d
tvm.relay.nn.global_max_pool2d
@@ -70,6 +71,7 @@ This level enables additional math and transform operators.
:nosignatures:
tvm.relay.zeros
tvm.relay.nn.leaky_relu
tvm.relay.zeros_like
tvm.relay.ones
tvm.relay.ones_like
@@ -137,6 +139,7 @@ Level 2 Definitions
-------------------
.. autofunction:: tvm.relay.nn.conv2d
.. autofunction:: tvm.relay.nn.conv2d_transpose
.. autofunction:: tvm.relay.nn.dense
.. autofunction:: tvm.relay.nn.max_pool2d
.. autofunction:: tvm.relay.nn.avg_pool2d
.. autofunction:: tvm.relay.nn.global_max_pool2d
@@ -149,6 +152,7 @@ Level 2 Definitions
Level 3 Definitions
-------------------
.. autofunction:: tvm.relay.nn.leaky_relu
.. autofunction:: tvm.relay.floor
.. autofunction:: tvm.relay.ceil
.. autofunction:: tvm.relay.trunc
@@ -202,6 +202,18 @@ struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
}
};
/*! \brief Attributes for dense operator */
struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
IndexExpr units;
TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
TVM_ATTR_FIELD(units)
.describe("Number of hidden units of the dense transformation.");
}
};
/*! \brief Attributes for upsampling operator */
struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
int scale;
@@ -237,6 +249,18 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
}
};
/*! \brief Attributes for leaky relu operator */
struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
double alpha;
TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
.describe("Slope coefficient for the negative half axis.");
}
};
/*! \brief Attributes used in dropout operator */
struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
double rate;
@@ -272,6 +296,7 @@ struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
}
}; // struct BatchNormAttrs
/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
IndexExpr size;
@@ -430,6 +430,34 @@ def batch_flatten(data):
"""
return _make.batch_flatten(data)
def dense(data, weight, units=None):
"""Dense operator.
Applies a linear transformation
.. math::

    Y = XW
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
units : int, optional
Number of hidden units of the dense transformation.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.dense(data, weight, units)
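# A minimal usage sketch, mirroring the dense unit tests added later in
# this commit (the ir_builder API and shapes are taken from those tests):
#
#   ib = relay.ir_builder.IRBuilder()
#   n = tvm.var("n")
#   x = ib.param("x", relay.ty.TensorType((n, 8), "float32"))
#   w = ib.param("w", relay.ty.TensorType((8, 4), "float32"))
#   with ib.function(x, w) as func:
#       ib.ret(relay.nn.dense(x, w, units=4))
#   ib.ret(func)
#   func = relay.ir_pass.infer_type(ib.env, func.to_func())
#   # checked return type: TensorType((n, 4), "float32") -- the trailing
#   # input_dim axis (8) is replaced by units (4).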
def relu(data):
"""Rectified linear unit.
@@ -449,6 +477,30 @@ def relu(data):
return _make.relu(data)
def leaky_relu(data, alpha):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : relay.Expr
The input data to the operator.
alpha : float
Slope coefficient for the negative half axis.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
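# A minimal usage sketch, mirroring the leaky_relu unit test added later
# in this commit; leaky_relu preserves the input's shape and dtype:
#
#   ib = relay.ir_builder.IRBuilder()
#   n, c = tvm.var("n"), tvm.var("c")
#   x = ib.param("x", relay.ty.TensorType((n, c), "float32"))
#   with ib.function(x) as func:
#       ib.ret(relay.nn.leaky_relu(x, alpha=0.1))
#   ib.ret(func)
#   func = relay.ir_pass.infer_type(ib.env, func.to_func())
#   # checked return type: TensorType((n, c), "float32")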
def pad(data,
pad_width,
pad_value=0.0):
@@ -15,6 +15,104 @@
namespace tvm {
namespace relay {
TVM_REGISTER_NODE_TYPE(DenseAttrs);
bool DenseRel(const Array<Type>& types,
int num_inputs,
const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
const auto* data = types[0].as<TensorTypeNode>();
const auto* weight = types[1].as<TensorTypeNode>();
if (data == nullptr) return false;
const DenseAttrs* param = attrs.as<DenseAttrs>();
CHECK(param != nullptr);
CHECK(static_cast<int>(data->shape.size()) != 0);
Array<tvm::Expr> oshape = data->shape;
if (param->units.defined()) {
Array<tvm::Expr> dshape = data->shape;
// units is specified: assign the weight its expected shape
// (input_dim, units) so the checker can unify it with any given weight type
Array<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
oshape.Set((oshape.size() - 1), param->units);
} else {
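// units is not specified: infer it from the weight's trailing dimension,
// which requires the weight type to be known already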
if (weight == nullptr) return false;
Array<tvm::Expr> wshape = weight->shape;
oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
}
// assign output type
reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
return true;
}
// Positional relay function to create dense operator used by frontend FFI.
Expr MakeDense(Expr data,
Expr weight,
IndexExpr units) {
auto attrs = make_node<DenseAttrs>();
attrs->units = units;
static const Op& op = Op::Get("nn.dense");
return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}
TVM_REGISTER_API("relay.op.nn._make.dense")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
});
RELAY_REGISTER_OP("nn.dense")
.describe(R"code(Applies a linear transformation: :math:`Y = XW^T`.
- **data**: `(x1, x2, ..., xn, input_dim)`
- **weight**: `(units, input_dim)`
- **out**: `(x1, x2, ..., xn, units)`.
)code" TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("data", "nD Tensor", "Input data.")
.add_argument("weight", "2D Tensor", "Weight matrix.")
.set_support_level(2)
.add_type_rel("Dense", DenseRel);
// Positional relay function to create leaky relu operator used by frontend FFI.
Expr MakeLeakyRelu(Expr data,
double alpha) {
auto attrs = make_node<LeakyReluAttrs>();
attrs->alpha = alpha;
static const Op& op = Op::Get("nn.leaky_relu");
return CallNode::make(op, {data}, Attrs(attrs), {});
}
TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
});
RELAY_REGISTER_OP("nn.leaky_relu")
.describe(R"code(Leaky version of a Rectified Linear Unit.
`y = x > 0 ? x : alpha * x`
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "Input data.")
.set_support_level(3)
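// leaky_relu preserves the input's shape and dtype, so the generic
// Identity type relation suffices instead of a dedicated one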
.add_type_rel("Identity", IdentityRel);
TVM_REGISTER_API("relay.op.nn._make.softmax")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
auto make_func = [](Expr data, int axis) {
@@ -219,6 +219,47 @@ def test_pad_infer_type():
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def test_dense_infer_type():
ib = relay.ir_builder.IRBuilder()
n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = ib.param("w", relay.ty.TensorType((w, 2), "float32"))
with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w, units=2))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
ib = relay.ir_builder.IRBuilder()
n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
wh, ww = tvm.var("wh"), tvm.var("ww")
w = ib.param("w", relay.ty.TensorType((wh, ww), "float32"))
with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, ww), "float32")
ib = relay.ir_builder.IRBuilder()
n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = ib.param("w", relay.ty.IncompleteType())
with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w, units=2))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
if __name__ == "__main__":
test_conv2d_infer_type()
@@ -227,3 +268,4 @@ if __name__ == "__main__":
test_flatten_infer_type()
test_pad_infer_type()
test_conv2d_transpose_infer_type()
test_dense_infer_type()
@@ -208,6 +208,17 @@ def test_full_like():
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((n, c, h, w), "float32")
def test_infer_type_leaky_relu():
ib = relay.ir_builder.IRBuilder()
n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
with ib.function(x) as func:
ib.ret(relay.nn.leaky_relu(x, alpha=0.1))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")
if __name__ == "__main__":
test_single_op()
@@ -220,5 +231,6 @@ if __name__ == "__main__":
test_take_infer_type()
test_full()
test_full_like()
test_infer_type_leaky_relu()
test_squeeze_axes_infer_type()
test_squeeze_default_axes_infer_type()