# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
import topi.testing

def run_infer_type(expr):
    """Run type inference on expr inside a fresh module and return the typed expression."""
    mod = relay.Module.from_expr(expr)
    mod = transform.InferType()(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body

def test_conv2d_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv2d(x, w,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 224, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (2, 10, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 222, 222), "int32")

    # infer shape in case of different dtypes for input and weight.
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 222, 222), "int32")

    # Infer with a different layout
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n//4, c//4, h, w, 4, 4), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x, wt,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=16,
                        data_layout="NCHW4n4c",
                        kernel_layout="OIHW4o4i",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (1, 4, 224, 224, 4, 4), "int32")
    assert yy.args[1].checked_type == relay.TensorType(
        (4, 8, 3, 3, 4, 4), "int8")

    # Infer with NHWC
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x, wt,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=16,
                        data_layout="NHWC",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, h, w, 16), "int32")


def test_conv2d_run():
    def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
                        padding=(1, 1),
                        fref=None,
                        groups=1,
                        dilation=(1, 1),
                        except_targets=None,
                        **attrs):
        if except_targets is None:
            except_targets = []

        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv2d(x, w,
                            padding=padding,
                            dilation=dilation,
                            groups=groups,
                            **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        # Dilate the kernel so the NumPy reference convolution matches a dilated conv2d.
        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
        if fref is None:
            ref_res = topi.testing.conv2d_nchw_python(
                data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                groups=groups)
        else:
            ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))

        for target, ctx in ctx_list():
            if target in except_targets:
                continue
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data, kernel)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

    # depthwise conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=32, kernel_size=(3, 3),
                    fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
                        x, w, (1, 1), "SAME"))

    # CUDA is disabled for 'direct' schedule:
    # https://github.com/dmlc/tvm/pull/3070#issuecomment-486597553
    # group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 4, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3),
                    except_targets=['cuda'])
    # also group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (64, 1, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3),
                    except_targets=['cuda'])

    # normal conv2d
    dshape = (1, 3, 224, 224)
    kshape = (10, 3, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
    # mixed precision
    run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
    # mixed precision, asymmetric kernel
    kshape = (10, 3, 1, 3)
    run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(0, 1), channels=10, kernel_size=(1, 3))
    # dilated conv2d
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3), dilation=(3, 3))


def test_conv2d_transpose_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.conv2d_transpose(x, w,
                                  kernel_size=(3, 3),
                                  padding=(1, 1),
                                  channels=15)
    assert "channels=15" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 10, 12), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (10, 15, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
    y = relay.nn.conv2d_transpose(x, w,
                                  output_padding=(1, 1),
                                  channels=11,
                                  data_layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 15, 11), "float32")


def test_conv2d_transpose_run():
    dshape = (1, 3, 18, 18)
    kshape = (3, 10, 3, 3)
    oshape = (1, 10, 37, 37)
    x = relay.var("x", shape=dshape)
    w = relay.var("w")
    y = relay.nn.conv2d_transpose(x, w,
                                  channels=10, kernel_size=(3,3), strides=(2,2),
                                  padding=(1,1), output_padding=(2, 2))
    func = relay.Function([x, w], y)
    dtype = "float32"
    data = np.random.uniform(size=dshape).astype(dtype)
    kernel = np.random.uniform(size=kshape).astype(dtype)
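    # Reference: compute the plain transposed convolution (stride 2, padding 1) with the
    # topi reference implementation, then embed it into a zero tensor of the final output
    # shape so the extra rows/columns introduced by output_padding=(2, 2) stay zero.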
    c_np = topi.testing.conv2d_transpose_nchw_python(
        data, kernel, 2, 1)
    d_np = np.zeros(shape=oshape)
    d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np
    ref_res = d_np

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)



def test_upsampling_infer_type():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="bilinear")
    assert "method=\"bilinear\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h*2, w*2), "float32")

    n, c = tvm.var("n"), tvm.var("c")
    x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
    y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="bilinear")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")


def _test_pool2d(opfunc, reffunc):
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    # Reference: a 2x2 window with stride 2 is equivalent to reshaping into
    # 2x2 blocks and reducing over the block axes.
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_pool2d_int(opfunc, reffunc, dtype):
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
    # test execution
    dtype = "int32"
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape, dtype=dtype)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.randint(low=-128, high=128, size=dshape)
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)).astype(dtype)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_global_pool2d(opfunc, reffunc):
    n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
    y = opfunc(x, layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")

    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 1024, 7, 7)
    x = relay.var("x", shape=dshape)
    y = opfunc(x)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref_res = reffunc(data, axis=(2, 3), keepdims=True)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)


def test_pool2d():
    _test_pool2d(relay.nn.max_pool2d, np.max)
    _test_pool2d(relay.nn.avg_pool2d, np.mean)
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')
    _test_global_pool2d(relay.nn.global_max_pool2d, np.max)
    _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)


def test_avg_pool2d_no_count_pad():
    kh, kw = (4, 4)
    sh, sw = (2, 2)
    ph, pw = (2, 2)
    n = 1
    (ic, ih, iw) = (3, 28, 28)
    (oc, oh, ow) = (3, 15, 15)
    dshape = (n, ic, ih, iw)
    x = relay.var("x", shape=dshape)
    y = relay.nn.avg_pool2d(x,
                            pool_size=(kh, kw),
                            strides=(sh, sw),
                            padding=(ph, pw),
                            count_include_pad=False)
    func = relay.Function([x], y)
    dtype = "float32"
    a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
    pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
    no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
    pad_np[np.ix_(*no_zero)] = a_np
    b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
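    # Reference: pad_np is the zero-padded input (a_np is strictly positive, so "> 0"
    # marks real input elements); each output is the window sum divided by the number of
    # real elements only, which is what count_include_pad=False computes.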
    for i in range(oh):
        for j in range(ow):
            pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
            b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],
                                   axis=(2,3)) / np.maximum(pad_count, 1)
    ref_res = np.maximum(b_np, 0.0)
    data = a_np

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def test_flatten_infer_type():
    d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")

    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")

    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")

    shape = (1, 5, 10, 10)
    o_shape = (1, 500)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.batch_flatten(x)
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(o_shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = x_data.flatten().reshape(o_shape)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)

def test_pad_infer_type():
    # entirely concrete case
    n, c, h, w = 1, 2, 3, 4
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    assert "pad_width=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")

    # some symbolic values
    n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w")
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")

def test_pad_run():
    def _test_run(dtype):
        dshape = (4, 10, 7, 7)
        x = relay.var("x", shape=dshape)
        y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
        func = relay.Function([x], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

    _test_run('float32')
    _test_run('int32')

def test_lrn():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    assert "alpha=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = .00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)

def test_l2_normalize():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    assert "axis=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    eps = 0.001
    axis = 1
    z = relay.nn.l2_normalize(x, eps=eps, axis=[axis])
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)


def batch_flatten(data):
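    # NumPy reference for nn.batch_flatten: keep the batch dimension and collapse all
    # remaining dimensions into one.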
    shape = data.shape
    target_dim = 1
    for i in range(len(shape) - 1):
        target_dim = target_dim * shape[i + 1]
    return np.reshape(data, (shape[0], target_dim))


def test_batch_flatten():
    t1 = relay.TensorType((5, 10, 5))
    x = relay.Var("x", t1)
    func = relay.Function([x], relay.nn.batch_flatten(x))

    data = np.random.rand(5, 10, 5).astype(t1.dtype)
    ref_res = batch_flatten(data)
    for target, ctx in ctx_list():
        intrp = relay.create_executor("graph", ctx=ctx, target=target)
        op_res = intrp.evaluate(func)(data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)


def _test_upsampling(layout, method, align_corners=False):
    n, c, h, w = tvm.var("n"), 16, 32, 32
    scale = 2
    dtype = "float32"
    def get_shape():
        if layout == "NCHW":
            return (c, h, w), (c, h*scale, w*scale)
        else:
            return (h, w, c), (h*scale, w*scale, c)
    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(x, scale=scale, layout=layout,
                            method=method, align_corners=align_corners)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(x, scale=scale, layout=layout,
                            method=method, align_corners=align_corners)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    if method == "nearest_neighbor":
        ref = topi.testing.upsampling_python(data, (scale, scale), layout)
    else:
        ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
    for target, ctx in ctx_list():
        executor = relay.create_executor("graph", ctx=ctx, target=target)
        out = executor.evaluate(func)(data)
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)


def test_upsampling():
    _test_upsampling("NCHW", "nearest_neighbor")
    _test_upsampling("NCHW", "bilinear", True)
    _test_upsampling("NHWC", "nearest_neighbor")
    _test_upsampling("NHWC", "bilinear", True)


def test_conv2d_int8_intrinsics():
    def _compile(input_dtype, weight_dtype, output_dtype, target):
        n, ic, h, w, oc, ch, cw = 1, 16, 224, 224, 32, 3, 3
        x = relay.var("x", relay.TensorType((n, ic, h, w), input_dtype))
        w = relay.var("w", relay.TensorType((oc, ic, ch, cw), weight_dtype))
        y = relay.nn.conv2d(x, w,
                            kernel_size=(ch, cw),
                            channels=oc,
                            padding=(1, 1),
                            dilation=(1, 1),
                            out_dtype=output_dtype)
        func = relay.Function([x, w], y)
        wdata = np.random.rand(oc, ic, ch, cw) * 10
        parameters = {"w": tvm.nd.array(wdata.astype(weight_dtype))}
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(func, target, params=parameters)
        assembly = lib.get_source("asm")
        return assembly

    # compile conv2d for x86 (skylake) and test assembly contains *pmadd* instructions
    target = "llvm -mcpu=skylake-avx512"
    name = "llvm.x86.avx512.pmaddubs.w.512"
    llvm_id = tvm.codegen.llvm_lookup_intrinsic_id(name)
    if llvm_id != 0:
        # Intel int8 instructions need uint8 data and an int8 kernel
        asm = _compile(input_dtype="uint8",
                       weight_dtype="int8",
                       output_dtype="int32",
                       target=target)
        # Check that the intrinsic is present in the assembly.
        assert "pmaddubs" in asm

        # Ensure that code is generated when datatypes are not HW supported.
        asm = _compile(input_dtype="int8",
                       weight_dtype="int8",
                       output_dtype="int32",
                       target=target)
        # Check that the intrinsic is not present in the assembly.
        assert "pmaddubs" not in asm

        # Ensure that code is generated when datatypes are not HW supported.
        asm = _compile(input_dtype="uint8",
                       weight_dtype="uint8",
                       output_dtype="int32",
                       target=target)
        # Check that the intrinsic is not present in the assembly.
        assert "pmaddubs" not in asm

    # Check that a vectorized instruction is generated for older Intel
    # generations, because we default to NCHWc layout.
    target = "llvm -mcpu=core-avx2"
    asm = _compile(input_dtype="int8",
                  weight_dtype="int8",
                  output_dtype="int32",
                  target=target)
    # Check that vector int mult and add instructions are generated.
    assert "vpmulld" in asm and "vpadd" in asm


def test_bitserial_conv2d_infer_type():
    # Basic shape test with ambiguous batch.
    n, c, h, w = tvm.var("n"), 32, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16"))
    w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16"))
    y = relay.nn.bitserial_conv2d(
        x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)
    yy = run_infer_type(y)
    assert yy.checked_type ==  relay.TensorType(
        (n, 32, 222, 222), "int16")


def test_bitpack_infer_type():
    # Test axis packing shape inference.
    o, i, h, w = 32, 32, 128, 128
    x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16"))
    y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)
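    # Packing 32 int16 values along pack_axis=1 into uint16 words gives 32 / 16 = 2,
    # and bit_axis=4 appends a new axis of length bits=1, hence (32, 2, 128, 128, 1).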
    yy = run_infer_type(y)
    assert yy.checked_type ==  relay.TensorType(
        (32, 2, 128, 128, 1), "uint16")


if __name__ == "__main__":
625 626
    test_pool2d()
    test_avg_pool2d_no_count_pad()
627 628
    test_lrn()
    test_l2_normalize()
629
    test_conv2d_infer_type()
630
    test_bitpack_infer_type()
631 632
    test_upsampling_infer_type()
    test_flatten_infer_type()
633
    test_pad_infer_type()
634
    test_pad_run()
635
    test_conv2d_transpose_infer_type()
636 637
    test_conv2d_transpose_run()
    test_conv2d_run()
638
    test_bitserial_conv2d_infer_type()
639
    test_batch_flatten()
640
    test_upsampling()
641
    test_conv2d_int8_intrinsics()