import math
import tvm
import numpy as np
from tvm import relay
from tvm.relay.testing import ctx_list
import topi.testing


def sigmoid(x):
    one = np.ones_like(x)
    return one / (one + np.exp(-x))


def relu(x):
    x_copy = np.copy(x)
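    # the third argument to np.maximum is the output array, so the
    # clipping happens in place on x_copy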
    np.maximum(x_copy, 0, x_copy)
    return x_copy


def test_unary_op():
    def check_single_op(opfunc, ref):
        shape = (10, 4)
        dtype = 'float32'
        tp = relay.TensorType(shape, dtype)
        x = relay.var("x", tp)
        y = opfunc(x)
        # test printer
        assert ("%0 = {}(%x)".format(y.op.name)) in y.astext()
        # test type inference
        assert relay.ir_pass.infer_type(y).checked_type == tp

        if ref is not None:
            data = np.random.rand(*shape).astype(dtype)
            ref_res = ref(data)
            func = relay.Function([x], y)
            for target, ctx in ctx_list():
                # use the graph executor by default for testing, as we need to
                # create the function explicitly to avoid constant folding.
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(data)
                np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)


    for opfunc, ref in [(tvm.relay.log, np.log),
                        (tvm.relay.exp, np.exp),
                        (tvm.relay.sqrt, np.sqrt),
                        (tvm.relay.sigmoid, sigmoid),
                        (tvm.relay.tanh, np.tanh),
                        (relay.nn.relu, relu)]:
        check_single_op(opfunc, ref)


def test_binary_op():
    def inst(vars, sh):
        return [vars.get(s, s) for s in sh]

    def check_binary_op(opfunc, ref):
        # TODO(@jroesch): this piece of code improperly uses type variables.
        n = tvm.var("n")
        s1 = (5, n, 5)
        s2 = (n, 1)
        t1 = relay.TensorType(s1)
        t2 = relay.TensorType(s2)
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        # test printer
        assert ("%0 = {}(%x, %y)".format(z.op.name)) in z.astext()
        assert relay.ir_pass.infer_type(z).checked_type == t1

        if ref is not None:
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)

            for target, ctx in ctx_list():
                # use the graph executor by default for testing, as we need to
                # create the function explicitly to avoid constant folding.
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)

    for opfunc, ref in [(relay.add, np.add),
                        (relay.subtract, np.subtract),
                        (relay.multiply, np.multiply),
                        (relay.divide, np.divide)]:
        check_binary_op(opfunc, ref)


def test_expand_dims():
    # based on topi test
    def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
        x = relay.Var("x", relay.TensorType(dshape, dtype))
        func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
        for target, ctx in ctx_list():
            data = np.random.uniform(size=dshape).astype(dtype)
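            # expand_dims only inserts length-1 axes, so a plain reshape to the
            # expected output shape serves as the NumPy reference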
            ref_res = data.reshape(oshape)
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)

    verify_expand_dims((3, 10), 'float32', (3, 10, 1, 1), 2, 2)
    verify_expand_dims((3, 10), 'float32', (1, 3, 10), -3, 1)


def test_bias_add():
    xshape = (10, 2, 3, 4)
    bshape = (2,)
    dtype = "float32"
    x = relay.var("x", shape=xshape)
    bias = relay.var("bias")
    z = relay.nn.bias_add(x, bias)
    zz = relay.ir_pass.infer_type(z)
    assert "axis=" not in zz.astext()
    assert zz.args[1].checked_type == relay.TensorType(bshape)

    func = relay.Function([x, bias], z)
    x_data = np.random.uniform(size=xshape).astype(dtype)
    y_data = np.random.uniform(size=bshape).astype(dtype)
    ref_res = x_data + y_data.reshape((2, 1, 1))
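    # the (2,) bias reshaped to (2, 1, 1) broadcasts along channel axis 1 of the
    # (10, 2, 3, 4) input; an explicit-broadcast form gives the same reference
    np.testing.assert_allclose(ref_res, x_data + y_data[np.newaxis, :, np.newaxis, np.newaxis])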
    for target, ctx in ctx_list():
        intrp = relay.create_executor("graph", ctx=ctx, target=target)
        op_res = intrp.evaluate(func)(x_data, y_data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)


def test_expand_dims_infer_type():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", shape=(n, t, d))
    y = relay.expand_dims(x, axis=2)
    assert "axis=2" in y.astext()
    checked = relay.ir_pass.infer_type(y)
    assert checked.checked_type == relay.TensorType((n, t, 1, 100))


def test_softmax():
    shape = (10, 4)
    x = relay.var("x", shape=shape)
    y = relay.nn.softmax(x, axis=1)
    assert "nn.softmax" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(shape)
    func = relay.Function([x], y)
    x_data = np.random.uniform(size=shape).astype("float32")
    ref_res = topi.testing.softmax_python(x_data)
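    # sanity-check the NumPy reference first: every softmax row sums to one
    np.testing.assert_allclose(ref_res.sum(axis=-1), np.ones(shape[0]), rtol=1e-5)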
    for target, ctx in ctx_list():
        intrp = relay.create_executor("graph", ctx=ctx, target=target)
        op_res = intrp.evaluate(func)(x_data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)


def test_log_softmax():
    shape = (10, 4)
    x = relay.var("x", shape=shape)
    y = relay.nn.log_softmax(x, axis=1)
    assert "nn.log_softmax" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(shape)
    func = relay.Function([x], y)
    x_data = np.random.uniform(size=shape).astype("float32")
    ref_res = topi.testing.log_softmax_python(x_data)
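    # exponentiating log-softmax recovers softmax, so each row sums to one
    np.testing.assert_allclose(np.exp(ref_res).sum(axis=-1), np.ones(shape[0]), rtol=1e-5)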
    for target, ctx in ctx_list():
        intrp = relay.create_executor("graph", ctx=ctx, target=target)
        op_res = intrp.evaluate(func)(x_data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)


def test_concatenate():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", shape=(n, t, d))
    y = relay.var("y", shape=(n, t, d))
    z = relay.concatenate((x, y), axis=-1)
    assert "axis=" in z.astext()
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    x = relay.exp(x)
    z = relay.concatenate((x, y), axis=2)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    z = relay.concatenate((x, y), axis=1)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t + t, 100))

    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(10, 5))
    t = relay.var("z", shape=())
    z = relay.concatenate((x, y), axis=1)
    z = relay.add(z, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    t_data = np.random.uniform(size=()).astype('float32')
    ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
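    # two (10, 5) inputs concatenated along axis=1 yield a (10, 10) result
    assert ref_res.shape == (10, 10)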

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
        op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)


def test_dropout():
    n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d")
    input_ty = relay.TensorType((n, t, d), "float32")
    x = relay.var("x", input_ty)
    y = relay.nn.dropout(x, rate=0.75)
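    # only type inference is exercised here; dropout does not change the type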
    assert "rate=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == input_ty


def test_batch_norm():
    # beta and gamma ignored
    data = relay.var("data", relay.TensorType((3, 2, 1)))
    beta = relay.var("beta", relay.TensorType((2,)))
    gamma = relay.var("gamma", relay.TensorType((2,)))
    moving_mean = relay.var("moving_mean", relay.TensorType((2,)))
    moving_var = relay.var("moving_var", relay.TensorType((2,)))
    y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                            center=False, scale=False)
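    # batch_norm returns a tuple (output, moving_mean, moving_var), so the
    # call is unwrapped with astuple() before running type inference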
    yy = relay.ir_pass.infer_type(y.astuple())
    assert "center=" in yy.astext()
    assert yy.checked_type == relay.ty.TupleType(tvm.convert([
        relay.TensorType((3, 2, 1), "float32"),
        relay.TensorType((2,), "float32"),
        relay.TensorType((2,), "float32")
    ]))

    beta = relay.var("beta", relay.TensorType((3,)))
    gamma = relay.var("gamma", relay.TensorType((3,)))
    moving_mean = relay.var("moving_mean", relay.TensorType((3,)))
    moving_var = relay.var("moving_var", relay.TensorType((3,)))

    y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                            axis=0, center=False, scale=False)
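    # with axis=0 the normalized axis of the (3, 2, 1) data has length 3, so
    # the inferred mean/var types become (3,)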
    yy = relay.ir_pass.infer_type(y.astuple())
    assert yy.checked_type == relay.ty.TupleType(tvm.convert([
        relay.ty.TensorType((3, 2, 1), "float32"),
        relay.ty.TensorType((3,), "float32"),
        relay.ty.TensorType((3,), "float32")
    ]))

    # axis=-1
    data = relay.var("data", relay.TensorType((1, 2, 3)))
    beta = relay.var("beta", relay.TensorType((3,)))
    gamma = relay.var("gamma", relay.TensorType((3,)))
    moving_mean = relay.var("moving_mean", relay.TensorType((3,)))
    moving_var = relay.var("moving_var", relay.TensorType((3,)))
    y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                            axis=-1, center=False, scale=False)
    yy = relay.ir_pass.infer_type(y.astuple())
    assert yy.checked_type == relay.ty.TupleType(tvm.convert([
        relay.ty.TensorType((1, 2, 3), "float32"),
        relay.ty.TensorType((3,), "float32"),
        relay.ty.TensorType((3,), "float32")
    ]))


def test_dense():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.TensorType((2, w), "float32"))
    y = relay.nn.dense(x, w, units=2)
    "units=2" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, 2), "float32")

    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    wh, ww = tvm.var("wh"), tvm.var("ww")
    w = relay.var("w", relay.TensorType((ww, wh), "float32"))
    y = relay.nn.dense(x, w)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, ww), "float32")

    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.dense(x, w, units=2)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, 2), "float32")

    x = relay.var("x", shape=(10, 5))
    w = relay.var("w", shape=(2, 5))
    z = relay.nn.dense(x, w)

    # Check result.
    func = relay.Function([x, w], z)
    x_data = np.random.rand(10, 5).astype('float32')
    w_data = np.random.rand(2, 5).astype('float32')
    ref_res = np.dot(x_data, w_data.T)
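    # nn.dense stores the weight as (units, input_dim), so the NumPy reference
    # multiplies by the transpose: (10, 5) x (5, 2) -> (10, 2)
    assert ref_res.shape == (10, 2)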

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, w_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data, w_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)


if __name__ == "__main__":
    test_concatenate()
    test_bias_add()
    test_unary_op()
    test_binary_op()
    test_expand_dims_infer_type()
    test_expand_dims()
    test_softmax()
    test_log_softmax()
    test_dropout()
    test_batch_norm()
    test_dense()