import numpy as np

import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.ir_pass import infer_type
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.op import add
from tvm.relay.module import Module


# @tq, @jr should we put this in testing ns?
def check_rts(expr, args, expected_result, mod=None):
    """
    Check that evaluating `expr` applied to the arguments produces
    `expected_result` on both the debug evaluator and the TVM graph
    runtime.

    Parameters
    ----------
    expr:
        The expression to evaluate.

    args: list of Expr
        The arguments to supply the expr.

    expected_result:
        The expected result of running the expression.

    mod: relay.Module, optional
        The module containing any definitions `expr` depends on.
    """
    intrp = relay.create_executor('debug', mod=mod)
    graph = relay.create_executor('graph', mod=mod)
    # Run the same program through both executors.
    eval_result = intrp.evaluate(expr)(*args)
    rts_result = graph.evaluate(expr)(*args)
    # The two backends must agree with each other ...
    tvm.testing.assert_allclose(eval_result.asnumpy(), rts_result.asnumpy())
    # ... and with the caller-supplied reference result.
    tvm.testing.assert_allclose(eval_result.asnumpy(), expected_result)
def test_add_op_scalar():
    """Scalar (rank-0) addition agrees between evaluator and runtime.

    Program:
        fn (x, y) {
            return x + y;
        }
    """
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    func = relay.Function([x, y], add(x, y))
    x_data = np.array(10.0, dtype='float32')
    y_data = np.array(1.0, dtype='float32')
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_tensor():
    """Same-shape tensor addition agrees between evaluator and runtime.

    Program:
        fn (x, y) {
            return x + y;
        }
    """
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(10, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_broadcast():
    """Broadcasting addition ((10, 5) + (1, 5)) agrees between backends.

    Program:
        fn (x, y) {
            return x + y;
        }
    """
    x = relay.var('x', shape=(10, 5))
    # (1, 5) broadcasts against (10, 5) along the first axis.
    y = relay.var('y', shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    check_rts(func, [x_data, y_data], x_data + y_data)


def test_with_params():
    """Build exp(x + y) with `y` bound as a build-time parameter.

    Checks that `relay.build(..., params=...)` plus
    `graph_runtime.set_input` produces the same result as NumPy.
    """
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    z = relay.add(x, y)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    # Bind y's value at build time; relay.build returns the (possibly
    # transformed) parameter dict alongside the graph and library.
    params = {"y": y_data}
    graph, lib, params = relay.build(func, "llvm", params=params)
    mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
    mod.set_input(**params)
    mod.set_input(x=x_data)
    mod.run()
    res = mod.get_output(0).asnumpy()
    # Reference computed directly with NumPy broadcasting.
    ref_res = np.exp(y_data + x_data)
    tvm.testing.assert_allclose(res, ref_res)


97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
def test_plan_memory():
    """Check the graph memory planner reuses temporary storage.

    A chain of elementwise ops needs only two alternating temporaries
    (no in-place updates), so together with one unique storage id per
    input var the plan uses exactly 4 storage ids.
    """
    # It is sufficient to cycle through two memories.
    x = relay.var("x", shape=(10,))
    # NOTE(review): the original gave this var the name hint "x" as
    # well — almost certainly a typo; the planner does not key on name
    # hints, so "y" is used here for clarity.
    y = relay.var("y", shape=(1,))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    func = relay.ir_pass.infer_type(func)
    func = relay.ir_pass.fuse_ops(func, opt_level=0)
    func = relay.ir_pass.infer_type(func)
    smap = relay.backend._backend.GraphPlanMemory(func)
    # Collect the distinct storage ids assigned across all expressions
    # (keys of smap are unused; iterate values only).
    storage_ids = set()
    for tokens in smap.values():
        for token in tokens:
            storage_ids.add(token.value)
    # Current rule requires vars have unique storage id
    # because we don't do inplace, we will need another
    # two alternating temporary space.
    assert len(storage_ids) == 4


125
if __name__ == "__main__":
    # Run all tests in this file when executed as a script.
    test_plan_memory()
    test_with_params()
    test_add_op_scalar()
    test_add_op_tensor()
    test_add_op_broadcast()