# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TopHub: Tensor Operator Hub
To get the best performance, we typically need auto-tuning for the specific devices.
TVM releases pre-tuned parameters in TopHub for some common networks and hardware targets.
21
TVM will download these parameters for you when you call nnvm.compiler.build_module .
22
"""
23
# pylint: disable=invalid-name

import logging
import os
import sys

from .task import ApplyHistoryBest
from .. import target as _target
from ..contrib.download import download
from .record import load_from_file

# root path to store TopHub files
AUTOTVM_TOPHUB_ROOT_PATH = os.path.join(os.path.expanduser('~'), ".tvm", "tophub")

# the version of each package
PACKAGE_VERSION = {
    'arm_cpu': "v0.04",
    'llvm':    "v0.03",

    'cuda':    "v0.04",
    'rocm':    "v0.02",
    'opencl':  "v0.02",
    'mali':    "v0.05",

    'vta':     "v0.06",
}
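
# Each package is stored as "<backend>_<version>.log" under AUTOTVM_TOPHUB_ROOT_PATH;
# for example, the 'mali' entry above corresponds to ~/.tvm/tophub/mali_v0.05.log
# (path shown for illustration; the root defaults to ~/.tvm/tophub).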

logger = logging.getLogger('autotvm')

def _alias(name):
    """Convert alias for some packages."""
    table = {
        'vtacpu': 'vta',
        'metal': 'opencl',
        'vulkan': 'opencl',
        'nvptx': 'cuda',
    }
    return table.get(name, name)


def context(target, extra_files=None):
    """Return the dispatch context with pre-tuned parameters.

    This function will load the corresponding *.log files in AUTOTVM_TOPHUB_ROOT_PATH.
    If they cannot be found, it will download them from the TopHub github repo.
    Users can also add their own files via the argument `extra_files`.

    Parameters
    ----------
    target: Target or List of Target
        The compilation target
    extra_files: list of str, optional
        Extra log files to load
    """
    best_context = ApplyHistoryBest([])

    targets = target if isinstance(target, (list, tuple)) else [target]

    for tgt in targets:
        if isinstance(tgt, str):
            tgt = _target.create(tgt)

        # candidate package names: the value of the "-device" option (if any),
        # followed by the target name itself
        possible_names = []
        for opt in tgt.options:
            if opt.startswith("-device"):
                device = _alias(opt[8:])  # strip the "-device=" prefix
                possible_names.append(device)
        possible_names.append(tgt.target_name)

        all_packages = list(PACKAGE_VERSION.keys())
        for name in possible_names:
            name = _alias(name)
            if name in all_packages:
                if not check_backend(name):
                    continue

                filename = "%s_%s.log" % (name, PACKAGE_VERSION[name])
                best_context.load(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, filename))
                break  # only load one file to avoid fallback template mismatch problems

    if extra_files:
        for filename in extra_files:
            best_context.load(filename)

    return best_context
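
# Illustrative usage sketch (not executed here): `nnvm.compiler.build_module`
# calls `context()` internally, but the dispatch context can also be entered
# explicitly. The target string below is only an example:
#
#     with autotvm.tophub.context("llvm -device=arm_cpu"):
#         # build/compile the network here; pre-tuned configs will be applied
#         ...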


def check_backend(backend):
    """Check whether pre-tuned parameters exist for a certain target.
    If not, this function will download them.

    Parameters
    ----------
    backend: str
        The name of the backend.

    Returns
    -------
    success: bool
        Whether the check is successful.
    """
    backend = _alias(backend)
    assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend

    version = PACKAGE_VERSION[backend]
    package_name = "%s_%s.log" % (backend, version)
    if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
        return True

    if sys.version_info >= (3,):
        import urllib.request as urllib2
    else:
        import urllib2
    try:
        download_package(package_name)
        return True
    except urllib2.URLError as e:
        logger.warning("Failed to download tophub package for %s: %s", backend, e)
        return False
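
# For instance (illustrative): check_backend("metal") resolves the alias to
# 'opencl' and, if ~/.tvm/tophub/opencl_v0.02.log is not present locally,
# attempts to download it, returning False if the download fails.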


def download_package(package_name):
    """Download pre-tuned parameters of operators for a backend.

    Parameters
    ----------
    package_name: str
        The name of the package
    """
    rootpath = AUTOTVM_TOPHUB_ROOT_PATH

    if not os.path.isdir(rootpath):
        # make directory
        splits = os.path.split(rootpath)
        for j in range(1, len(splits)+1):
            path = os.path.join(*splits[:j])
            if not os.path.isdir(path):
                os.mkdir(path)

    logger.info("Download pre-tuned parameters package %s", package_name)
    download("https://raw.githubusercontent.com/uwsampl/tvm-distro/master/tophub/%s"
             % package_name, os.path.join(rootpath, package_name), True, verbose=0)
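
# For example (illustrative): download_package("cuda_v0.04.log") fetches
# https://raw.githubusercontent.com/uwsampl/tvm-distro/master/tophub/cuda_v0.04.log
# and stores it as ~/.tvm/tophub/cuda_v0.04.log.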


# global cache for load_reference_log
REFERENCE_LOG_CACHE = {}

def load_reference_log(backend, model, workload_name, template_key):
    """ Load reference log from TopHub to support fallback in template.
    Template will use these reference logs to choose fallback config.

    Parameters
    ----------
    backend: str
        The backend name
    model: str
        The name of the model
    workload_name: str
        The name of the workload. (The first item in the workload tuple)
    template_key: str
        The template key
    """

    backend = _alias(backend)
    version = PACKAGE_VERSION[backend]
    package_name = "%s_%s.log" % (backend, version)
    filename = os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)

    global REFERENCE_LOG_CACHE
    key = (backend, model, workload_name, template_key)

    if key not in REFERENCE_LOG_CACHE:
        tmp = []
        if os.path.isfile(filename):
            find = False
            inp = None
            # check whether the log contains any record for the requested model;
            # if not, fall back to the model of the last record read (if any)
            for inp, res in load_from_file(filename):
                if model == inp.target.model:
                    find = True
                    break
            if not find and inp:
                model = inp.target.model

            for inp, res in load_from_file(filename):
                if (model == inp.target.model and inp.task.workload[0] == workload_name and
                        inp.config.template_key == template_key):
                    tmp.append((inp, res))
        REFERENCE_LOG_CACHE[key] = tmp

    return REFERENCE_LOG_CACHE[key]
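
# Example (illustrative; the argument values are hypothetical): a fallback
# lookup for a conv2d workload on a CUDA target might look like
#
#     records = load_reference_log('cuda', '1080ti', 'conv2d', 'direct')
#
# If the package has no records for model '1080ti', the model of the last
# record in the log file is used instead.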