# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import ctypes

import numpy as np

import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay import testing
from tvm.contrib import graph_runtime

def test_resnet18():
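    """Build ResNet-18 for CUDA, export the module (including the compiled
    device code blob) as a shared library, reload it from disk, and compare
    the GPU output against a CPU (LLVM) reference run."""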
    for device in ["llvm", "cuda"]:
        if not tvm.runtime.enabled(device):
            print("skip because %s is not enabled..." % device)
            return

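    # Reference result: build and run the same workload on the CPU via the
    # LLVM backend.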
    def verify(data):
        mod, params = relay.testing.resnet.get_workload(num_layers=18)
        with relay.build_config(opt_level=3):
            graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
        ctx = tvm.cpu()
        module = graph_runtime.create(graph, lib, ctx)
        module.set_input("data", data)
        module.set_input(**graph_params)
        module.run()
        out = module.get_output(0).asnumpy()
        return out

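    # Build the same workload for CUDA; the exported library must carry the
    # compiled device code blob alongside the host code.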
    resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18)
    with relay.build_config(opt_level=3):
        graph, resnet18_gpu_lib, graph_params = relay.build_module.build(
            resnet18_mod, "cuda", params=resnet18_params)

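    # Export the compiled artifacts (shared library, graph JSON, params) to
    # a temporary directory.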
    from tvm.contrib import util
    temp = util.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    resnet18_gpu_lib.export_library(path_lib)
    with open(temp.relpath("deploy_graph.json"), "w") as fo:
        fo.write(graph)
    with open(temp.relpath("deploy_param.params"), "wb") as fo:
        fo.write(relay.save_param_dict(graph_params))

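    # Reload everything from disk and run inference on the GPU.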
    loaded_lib = tvm.runtime.load_module(path_lib)
    loaded_json = open(temp.relpath("deploy_graph.json")).read()
    loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
    data = np.random.uniform(-1, 1, size=(1, 3, 224, 224)).astype("float32")
    ctx = tvm.gpu()
    module = graph_runtime.create(loaded_json, loaded_lib, ctx)
    module.load_params(loaded_params)
    module.set_input("data", data)
    module.run()
    out = module.get_output(0).asnumpy()

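    # The reloaded GPU module must match the CPU reference.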
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)


def test_system_lib():
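    """Build an elementwise-add kernel as a system library, load the shared
    object via ctypes, and run it through tvm.runtime.system_lib()."""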
    ctx = tvm.gpu(0)
    for device in ["llvm", "cuda"]:
        if not tvm.runtime.enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name='A')
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
    s = te.create_schedule(B.op)
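    # Map the computation onto CUDA blocks and threads.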
    bx, tx = s[B].split(B.op.axis[0], factor=4)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))

    from tvm.contrib import util
    temp = util.tempdir()
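    # "-system-lib" makes the generated module register itself in a global
    # registry when the shared library is loaded, instead of requiring an
    # explicit tvm.runtime.load_module call.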
    fn_add = tvm.build(s, [A, B], target="cuda", target_host="llvm -system-lib", name="add")
    path_obj = temp.relpath("add.o")
    path_lib = temp.relpath("deploy_lib.so")
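    # Save the host object file and also export a loadable shared library.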
    fn_add.save(path_obj)
    fn_add.export_library(path_lib)
    # Loading the DLL triggers the system library registration.
    dll = ctypes.CDLL(path_lib)
    # Load the system-wide library.
    m = tvm.runtime.system_lib()
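    # Allocate GPU buffers and run the kernel fetched from the system library.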
    a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx)
    m['add'](a, b)
    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)


if __name__ == "__main__":
    test_resnet18()
    # test_system_lib()