Commit 6f420e0f by Siju, committed by Tianqi Chen

[RELAY][OPS] LRN and L2_Normalize (#1860)

parent d7b88f4c
@@ -39,6 +39,7 @@ This level enables fully connected multi-layer perceptron.
tvm.relay.sigmoid
tvm.relay.nn.relu
**Level 2: Convolutions**
This level enables typical convnet models.
@@ -53,6 +54,8 @@ This level enables typical convnet models.
tvm.relay.nn.global_avg_pool2d
tvm.relay.nn.upsampling
tvm.relay.nn.batch_flatten
tvm.relay.nn.lrn
tvm.relay.nn.l2_normalize
**Level 3: Additional Math And Transform Operators**
@@ -131,6 +134,8 @@ Level 2 Definitions
.. autofunction:: tvm.relay.nn.global_avg_pool2d
.. autofunction:: tvm.relay.nn.upsampling
.. autofunction:: tvm.relay.nn.batch_flatten
.. autofunction:: tvm.relay.nn.lrn
.. autofunction:: tvm.relay.nn.l2_normalize
Level 3 Definitions
......
@@ -173,6 +173,44 @@ struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
};
/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
IndexExpr size;
IndexExpr axis;
double bias;
double alpha;
double beta;
TVM_DECLARE_ATTRS(LRNAttrs, "relay.attrs.LRNAttrs") {
TVM_ATTR_FIELD(size).set_default(5)
.describe("The size of the local region to be considered for normalization.");
TVM_ATTR_FIELD(axis).set_default(1)
.describe("Axis of input data layout channel.");
TVM_ATTR_FIELD(bias).set_default(2)
.describe("The offset parameter to avoid division by 0.");
TVM_ATTR_FIELD(alpha).set_default(0.0001)
.describe("The scaling parameter.");
TVM_ATTR_FIELD(beta).set_default(0.75)
.describe("The exponent parameter.");
}
};
/*! \brief Attributes for L2Normalize operator */
struct L2NormalizeAttrs : public tvm::AttrsNode<L2NormalizeAttrs> {
double eps;
Array<IndexExpr> axis;
TVM_DECLARE_ATTRS(L2NormalizeAttrs, "relay.attrs.L2NormalizeAttrs") {
TVM_ATTR_FIELD(eps)
.describe("A lower bound value for the norm, to avoid division by 0.");
TVM_ATTR_FIELD(axis)
.describe("Axis over the normalization applied.");
}
};
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
@@ -383,3 +383,66 @@ def relu(data):
        The computed result.
    """
    return _make.relu(data)
def lrn(data, size=5, axis=1, bias=2, alpha=0.00001, beta=0.75):
    """This operator takes data as input and does local response normalization.

    Normalize the input in a local region across or within feature maps.
    Each input value is divided by (bias + (alpha * sum_sqr / size))^beta,
    where sum_sqr is the sum of squared values in a local region of `size`
    elements centered at that value (zero padding is added where necessary).

    .. math::

        data / (bias + (alpha * sum_sqr / size))^beta

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    size : int, optional
        The size of the local region to be considered for normalization.

    axis : int, optional
        Input data layout channel axis. Default value is 1 for NCHW format.

    bias : float, optional
        The offset parameter to avoid dividing by 0.

    alpha : float, optional
        The scaling parameter.

    beta : float, optional
        The exponent parameter.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    # Argument order follows MakeLRN(data, size, axis, alpha, beta, bias).
    return _make.lrn(data, size, axis, alpha, beta, bias)
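# --- Editorial sketch (not part of this patch) -------------------------------
# A minimal NumPy reference for the LRN formula documented above, assuming an
# NCHW-style tensor normalized along `axis`. The helper name `_lrn_reference`
# and the explicit padding scheme are illustrative only; the actual kernel is
# supplied by the compute/schedule registered for nn.lrn elsewhere.
import numpy as np

def _lrn_reference(data, size=5, axis=1, bias=2.0, alpha=0.00001, beta=0.75):
    half = size // 2
    pad = [(0, 0)] * data.ndim
    pad[axis] = (half, half)                      # zero padding along `axis`
    sqr = np.pad(np.square(data), pad, mode="constant")
    sum_sqr = np.zeros_like(data)
    for i in range(size):                         # sliding window of `size` values
        sum_sqr += np.take(sqr, np.arange(i, i + data.shape[axis]), axis=axis)
    return data / np.power(bias + alpha * sum_sqr / size, beta)

# e.g. _lrn_reference(np.random.randn(1, 8, 4, 4).astype("float32"))
# ------------------------------------------------------------------------------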
def l2_normalize(data, eps, axis=None):
    """Perform L2 normalization on the input data.

    .. math::

        y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    eps : float
        A lower bound value for the norm, to avoid division by 0.

    axis : list of int, optional
        Axes over which the normalization is applied.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.l2_normalize(data, eps, axis)
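# --- Editorial sketch (not part of this patch) -------------------------------
# A NumPy reference for the l2_normalize formula above, assuming `axis` is a
# list of axes (or None for all axes). `_l2_normalize_reference` is an
# illustrative name, not an API added by this change.
import numpy as np

def _l2_normalize_reference(data, eps, axis=None):
    reduce_axes = None if axis is None else tuple(axis)
    # Sum of squares over the chosen axes, kept for broadcasting back onto data.
    sqr_sum = np.sum(np.square(data), axis=reduce_axes, keepdims=True)
    return data / np.sqrt(np.maximum(sqr_sum, eps))

# e.g. _l2_normalize_reference(np.random.randn(1, 8, 4, 4).astype("float32"),
#                              eps=0.001, axis=[1])
# ------------------------------------------------------------------------------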
@@ -143,5 +143,79 @@ RELAY_REGISTER_UNARY_OP("relay.op.nn._make.", "relu")
.set_support_level(1)
.add_type_rel("Identity", IdentityRel);
// Positional relay function to create LRN operator used by frontend FFI.
Expr MakeLRN(Expr data,
IndexExpr size,
IndexExpr axis,
double alpha,
double beta,
double bias) {
auto attrs = make_node<LRNAttrs>();
attrs->size = size;
attrs->axis = axis;
attrs->alpha = alpha;
attrs->beta = beta;
attrs->bias = bias;
static const Op& op = Op::Get("nn.lrn");
return CallNode::make(op, {data}, Attrs(attrs), {});
}
TVM_REGISTER_API("relay.op.nn._make.lrn")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 6>(MakeLRN, args, rv);
});
RELAY_REGISTER_OP("nn.lrn")
.describe(R"code(LRN layer.
Normalize the input in a local region across or within feature maps.
Each input value is divided by (bias + (alpha * sum_sqr / size))^beta,
where sum_sqr is the sum of squared values in a local region of `size` elements
centered at that value (zero padding is added where necessary).

.. math::

    data / (bias + (alpha * sum_sqr / size))^beta
- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(2)
.add_type_rel("Identity", IdentityRel);
// Positional relay function to create L2Normalize operator used by frontend FFI.
Expr MakeL2Normalize(Expr data,
double eps,
Array<IndexExpr> axis) {
auto attrs = make_node<L2NormalizeAttrs>();
attrs->eps = eps;
attrs->axis = std::move(axis);
static const Op& op = Op::Get("nn.l2_normalize");
return CallNode::make(op, {data}, Attrs(attrs), {});
}
TVM_REGISTER_API("relay.op.nn._make.l2_normalize")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 3>(MakeL2Normalize, args, rv);
});
RELAY_REGISTER_OP("nn.l2_normalize")
.describe(R"code(L2 Normalization layer.
Normalizes along the given axes using the L2 norm.

.. math::

    output = x / sqrt(max(sum(x^2), eps))
- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(2)
.add_type_rel("Identity", IdentityRel);
} // namespace relay
} // namespace tvm
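As a quick sanity check of the registrations above, both operators should be discoverable from the Python frontend once the shared library is rebuilt. A minimal sketch, assuming the `relay.op.get` helper and that `num_inputs` and `support_level` are exposed as Op node fields (version-dependent details, not part of this patch):

from tvm import relay

# Ops registered via RELAY_REGISTER_OP("nn.lrn") / RELAY_REGISTER_OP("nn.l2_normalize").
lrn_op = relay.op.get("nn.lrn")
l2n_op = relay.op.get("nn.l2_normalize")

# Both take a single tensor argument and are documented at support level 2.
assert lrn_op.num_inputs == 1 and lrn_op.support_level == 2
assert l2n_op.num_inputs == 1 and l2n_op.support_level == 2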
@@ -168,6 +168,30 @@ def test_concatenate_infer_type():
    assert ftype.ret_type == relay.ty.TensorType(
        (n, t + t, 100), "float32")
def test_lrn():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=0.00001, beta=0.75))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")

def test_l2_normalize():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.l2_normalize(x, eps=0.001, axis=[1]))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")
if __name__ == "__main__":
    test_unary_op()
@@ -178,3 +202,5 @@ if __name__ == "__main__":
    test_log_softmax()
    test_binary_op()
    test_binary_broadcast_op()
    test_lrn()
    test_l2_normalize()