Commit a226973b by Yao Wang Committed by Siva

[Frontend]Add TensorFlow FloorMod (#4308)

* Add tf FloorMod

* Add floor_div/mod into topi and relay

* Add to rst

* Fix test
parent 2baf310e
......@@ -163,7 +163,9 @@ topi
.. autofunction:: topi.subtract
.. autofunction:: topi.multiply
.. autofunction:: topi.divide
.. autofunction:: topi.floor_divide
.. autofunction:: topi.mod
.. autofunction:: topi.floor_mod
.. autofunction:: topi.maximum
.. autofunction:: topi.minimum
.. autofunction:: topi.power
......
......@@ -149,6 +149,7 @@ Supported Ops
- Fill
- Floor
- FloorDiv
- FloorMod
- FusedBatchNorm
- FusedBatchNormV2
- Gather
......
......@@ -226,7 +226,9 @@ Level 1 Definitions
.. autofunction:: tvm.relay.subtract
.. autofunction:: tvm.relay.multiply
.. autofunction:: tvm.relay.divide
.. autofunction:: tvm.relay.floor_divide
.. autofunction:: tvm.relay.mod
.. autofunction:: tvm.relay.floor_mod
.. autofunction:: tvm.relay.tanh
.. autofunction:: tvm.relay.concatenate
.. autofunction:: tvm.relay.expand_dims
......
......@@ -1243,8 +1243,13 @@ def _topk():
def _floordiv():
    """Convert TensorFlow FloorDiv to the relay floor_divide op.

    Returns a converter closure following the frontend's op-converter
    convention (inputs, attr, params) -> relay expression.
    """
    def _impl(inputs, attr, params):
        # FloorDiv always takes exactly two operands.
        assert len(inputs) == 2
        # Map directly onto relay's floor_divide, which handles both the
        # integer (floordiv) and float (floor of true division) cases;
        # the old divide-then-floor lowering is gone.
        return AttrCvt('floor_divide')(inputs, attr)
    return _impl
def _floormod():
    """Convert TensorFlow FloorMod to the relay floor_mod op."""
    def _impl(inputs, attr, params):
        assert len(inputs) == 2
        # Attribute conversion is a straight rename; floor_mod broadcasts.
        convert = AttrCvt('floor_mod')
        return convert(inputs, attr)
    return _impl
def _logical(name):
......@@ -1437,6 +1442,7 @@ _convert_map = {
'Fill' : _fill(),
'Floor' : AttrCvt('floor'),
'FloorDiv' : _floordiv(),
'FloorMod' : _floormod(),
'FusedBatchNorm' : _fused_batch_norm(),
'FusedBatchNormV2' : _fused_batch_norm(),
'Gather' : _gather(),
......
......@@ -50,8 +50,10 @@ register_schedule("add", schedule_broadcast)
# Attach compute schedules to the elementwise binary ops; all of these
# broadcast their two inputs numpy-style, so they share the broadcast
# schedule (power uses the plain injective schedule).
register_schedule("subtract", schedule_broadcast)
register_schedule("multiply", schedule_broadcast)
register_schedule("divide", schedule_broadcast)
register_schedule("floor_divide", schedule_broadcast)
register_schedule("power", schedule_injective)
register_schedule("mod", schedule_broadcast)
register_schedule("floor_mod", schedule_broadcast)
register_schedule("logical_and", schedule_broadcast)
register_schedule("logical_or", schedule_broadcast)
register_schedule("equal", schedule_broadcast)
......@@ -166,7 +168,9 @@ register_shape_func("add", False, broadcast_shape_func)
# Register the (static) broadcast shape function for each binary op so
# output shapes can be derived for dynamically-shaped inputs.
register_shape_func("subtract", False, broadcast_shape_func)
register_shape_func("multiply", False, broadcast_shape_func)
register_shape_func("divide", False, broadcast_shape_func)
register_shape_func("floor_divide", False, broadcast_shape_func)
register_shape_func("mod", False, broadcast_shape_func)
register_shape_func("floor_mod", False, broadcast_shape_func)
register_shape_func("logical_and", False, broadcast_shape_func)
register_shape_func("logical_or", False, broadcast_shape_func)
register_shape_func("equal", False, broadcast_shape_func)
......
......@@ -397,6 +397,24 @@ def divide(lhs, rhs):
return _make.divide(lhs, rhs)
def floor_divide(lhs, rhs):
    """Compute floor(lhs / rhs) elementwise, broadcasting numpy-style.

    Parameters
    ----------
    lhs : relay.Expr
        Numerator expression.

    rhs : relay.Expr
        Denominator expression.

    Returns
    -------
    result : relay.Expr
        The broadcasted floor-division result.
    """
    return _make.floor_divide(lhs, rhs)
def power(lhs, rhs):
"""Power with numpy-style broadcasting.
......@@ -433,6 +451,24 @@ def mod(lhs, rhs):
return _make.mod(lhs, rhs)
def floor_mod(lhs, rhs):
    """Compute the floored remainder of lhs / rhs, broadcasting numpy-style.

    Parameters
    ----------
    lhs : relay.Expr
        Dividend expression.

    rhs : relay.Expr
        Divisor expression.

    Returns
    -------
    result : relay.Expr
        The broadcasted floor-mod result.
    """
    return _make.floor_mod(lhs, rhs)
def logical_and(lhs, rhs):
"""logical AND with numpy-style broadcasting.
......
......@@ -83,6 +83,12 @@ RELAY_REGISTER_BINARY_OP("divide")
.set_attr<FTVMCompute>("FTVMCompute", RELAY_BINARY_COMPUTE(topi::divide));
// Register relay's floor_divide as a level-1 broadcasting binary op,
// lowered via topi::floor_divide.
RELAY_REGISTER_BINARY_OP("floor_divide")
.describe("Elementwise floor divide with broadcasting")
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_BINARY_COMPUTE(topi::floor_divide));
RELAY_REGISTER_BINARY_OP("multiply")
.describe("Elementwise multiply with broadcasting")
.set_support_level(1)
......@@ -101,6 +107,12 @@ RELAY_REGISTER_BINARY_OP("mod")
.set_attr<FTVMCompute>("FTVMCompute", RELAY_BINARY_COMPUTE(topi::mod));
// Register relay's floor_mod as a level-1 broadcasting binary op,
// lowered via topi::floor_mod.
RELAY_REGISTER_BINARY_OP("floor_mod")
.describe("Elementwise floor mod with broadcasting")
.set_support_level(1)
.set_attr<FTVMCompute>("FTVMCompute", RELAY_BINARY_COMPUTE(topi::floor_mod));
RELAY_REGISTER_BINARY_OP("logical_and")
.describe("Elementwise logical AND with broadcasting")
.set_support_level(4)
......
......@@ -1069,8 +1069,6 @@ def test_forward_stridedslice():
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
......@@ -1083,7 +1081,7 @@ def _test_forward_divide(ip_shape, dtype):
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name='FloorDiv')
......@@ -1095,6 +1093,26 @@ def test_forward_divide():
_test_forward_divide((4,), 'int32')
_test_forward_divide((4, 3, 7), 'float32')
_test_forward_floordiv((4, 3, 7), 'float32')
_test_forward_floordiv((4, 3, 7), 'int32')
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
    """Build a single-op FloorMod TF graph and compare TF vs TVM outputs.

    Inputs are drawn from [1, 100) so both frameworks see positive operands.
    """
    numer_data = np.random.uniform(1, 100, size=in_shape).astype(dtype)
    factor_data = np.random.uniform(1, 100, size=if_shape).astype(dtype)
    tf.reset_default_graph()
    numerator = tf.placeholder(dtype, in_shape, name="numer")
    factor = tf.placeholder(dtype, if_shape, name="factor")
    tf.floormod(numerator, factor, name='FloorMod')
    compare_tf_with_tvm([numer_data, factor_data],
                        ['numer:0', 'factor:0'], 'FloorMod:0')
def test_forward_floormod():
    """test FloorMod over same-shape, broadcast, float and int cases."""
    cases = [
        ((10,), (10,), 'float32'),
        ((8, 2), (1,), 'float32'),
        ((4, 3, 7), (4, 3, 7), 'float32'),
        ((4, 3, 7), (4, 3, 7), 'int32'),
    ]
    for in_shape, if_shape, dtype in cases:
        _test_forward_floormod(in_shape, if_shape, dtype)
#######################################################################
......@@ -2793,6 +2811,7 @@ if __name__ == '__main__':
test_forward_sin()
test_forward_negative()
test_forward_divide()
test_forward_floordiv()
test_forward_abs()
test_forward_softplus()
test_forward_sqrt()
......@@ -2806,6 +2825,7 @@ if __name__ == '__main__':
test_forward_erf()
test_forward_squared_difference()
test_forward_add_n()
test_forward_floormod()
# Reductions
test_forward_argminmax()
......
......@@ -126,7 +126,9 @@ def test_binary_op():
for opfunc, ref in [(relay.add, np.add),
(relay.subtract, np.subtract),
(relay.multiply, np.multiply),
(relay.divide, np.divide)]:
(relay.divide, np.divide),
(relay.floor_divide, np.floor_divide),
(relay.floor_mod, np.fmod)]:
for dtype in ['float16', 'float32']:
check_binary_op(opfunc, ref, dtype)
......
......@@ -196,6 +196,25 @@ TOPI_DEFINE_OP_OVERLOAD(operator*, multiply);
TOPI_DEFINE_BCAST_OP(divide, { return div(a, b); });
/*!
 * \fn floor_divide
 * \brief Compute floor(A / B) with auto-broadcasting.
 *
 * \param A The first tensor, or Expr
 * \param B The second tensor, or Expr
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return The result.
 */
TOPI_DEFINE_BCAST_OP(floor_divide, {
  // Integer types use the floordiv intrinsic directly; floats divide
  // first and then take the floor of the quotient.
  if (a.type().is_int() || a.type().is_uint()) {
    return floordiv(a, b);
  } else {
    return floor(div(a, b));
  }
});
/*!
* \fn mod
* \brief Compute A % B with auto-broadcasting.
*
......@@ -209,6 +228,25 @@ TOPI_DEFINE_BCAST_OP(divide, { return div(a, b); });
TOPI_DEFINE_BCAST_OP(mod, { return truncmod(a, b); });
/*!
 * \fn floor_mod
 * \brief Compute A - floor_div(A, B) * B with auto-broadcasting.
 *
 * \param A The first tensor, or Expr
 * \param B The second tensor, or Expr
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return The result.
 */
TOPI_DEFINE_BCAST_OP(floor_mod, {
  // Integer types use the floormod intrinsic; floats fall back to the
  // identity a - floor(a/b) * b via floor_divide above.
  if (a.type().is_int() || a.type().is_uint()) {
    return floormod(a, b);
  } else {
    return a - floor_divide(a, b) * b;
  }
});
/*!
* \fn maximum
* \brief Compute maximum(A, B) with auto-broadcasting.
*
......
......@@ -116,6 +116,25 @@ def divide(lhs, rhs):
return _cpp.divide(lhs, rhs)
def floor_divide(lhs, rhs):
    """Elementwise floor division of two operands with auto-broadcasting.

    Parameters
    ----------
    lhs : tvm.Tensor or Expr
        The left operand

    rhs : tvm.Tensor or Expr
        The right operand

    Returns
    -------
    ret : tvm.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    # Delegate to the C++ topi implementation.
    return _cpp.floor_divide(lhs, rhs)
def mod(lhs, rhs):
"""Modulus with auto-broadcasting
......@@ -135,6 +154,25 @@ def mod(lhs, rhs):
return _cpp.mod(lhs, rhs)
def floor_mod(lhs, rhs):
    """Elementwise floored remainder of two operands with auto-broadcasting.

    Parameters
    ----------
    lhs : tvm.Tensor or Expr
        The left operand

    rhs : tvm.Tensor or Expr
        The right operand

    Returns
    -------
    ret : tvm.Tensor or Expr
        Returns Expr if both operands are Expr.
        Otherwise returns Tensor.
    """
    # Delegate to the C++ topi implementation.
    return _cpp.floor_mod(lhs, rhs)
def maximum(lhs, rhs):
"""Take element-wise maximum of two tensors with auto-broadcasting
......
......@@ -122,7 +122,9 @@ TOPI_REGISTER_BCAST_OP("topi.add", topi::add);
// Expose the broadcasting binary topi ops through the FFI registry so
// the Python topi module can reach them by name.
TOPI_REGISTER_BCAST_OP("topi.subtract", topi::subtract);
TOPI_REGISTER_BCAST_OP("topi.multiply", topi::multiply);
TOPI_REGISTER_BCAST_OP("topi.divide", topi::divide);
TOPI_REGISTER_BCAST_OP("topi.floor_divide", topi::floor_divide);
TOPI_REGISTER_BCAST_OP("topi.mod", topi::mod);
TOPI_REGISTER_BCAST_OP("topi.floor_mod", topi::floor_mod);
TOPI_REGISTER_BCAST_OP("topi.maximum", topi::maximum);
TOPI_REGISTER_BCAST_OP("topi.minimum", topi::minimum);
TOPI_REGISTER_BCAST_OP("topi.power", topi::power);
......
......@@ -139,6 +139,13 @@ def test_divide():
verify_broadcast_binary_ele(
(2, 3, 1, 32), (64, 32), topi.divide, np.divide, rhs_min=0.0001)
def test_floor_divide():
    """Check topi.floor_divide against np.floor_divide on several shape pairs,
    including scalar (None) and rank-0 operands."""
    shape_pairs = [
        (None, (10,)),
        ((), None),
        ((2, 3, 1, 32), (64, 32)),
    ]
    for lhs_shape, rhs_shape in shape_pairs:
        verify_broadcast_binary_ele(
            lhs_shape, rhs_shape, topi.floor_divide, np.floor_divide,
            rhs_min=0.0001)
def test_maximum_minmum():
verify_broadcast_binary_ele(
......@@ -156,6 +163,11 @@ def test_mod():
verify_broadcast_binary_ele(
(1, 2, 2), (2,), topi.mod, np.mod, lhs_min=0.001, rhs_min=1, dtype="int32")
def test_floor_mod():
    """Check topi.floor_mod against np.fmod for int and float dtypes.

    Operands are kept strictly positive (lhs_min/rhs_min), where fmod and
    floor-mod agree.
    """
    cases = [
        ((1, 2, 2), (2,), "int32"),
        ((3, 4, 5), (3, 4, 5), "float32"),
    ]
    for lhs_shape, rhs_shape, dtype in cases:
        verify_broadcast_binary_ele(
            lhs_shape, rhs_shape, topi.floor_mod, np.fmod,
            lhs_min=0.001, rhs_min=1, dtype=dtype)
def test_cmp():
# explicit specify the output type
......@@ -298,9 +310,11 @@ if __name__ == "__main__":
test_shift()
test_cmp()
test_mod()
test_floor_mod()
test_subtract()
test_multiply()
test_divide()
test_floor_divide()
test_maximum_minmum()
test_power()
test_broadcast_to()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment