# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TopHub: Tensor Operator Hub
To get the best performance, we typically need auto-tuning for the specific devices.
TVM releases pre-tuned parameters in TopHub for some common networks and hardware targets.
21 22
TVM will download these parameters for you when you call
nnvm.compiler.build_module or relay.build.
23
"""
# pylint: disable=invalid-name

import logging
import os
import sys

from .task import ApplyHistoryBest
from .. import target as _target
from ..contrib.download import download
from .record import load_from_file
from .util import EmptyContext

# environment variable to read TopHub location
AUTOTVM_TOPHUB_LOC_VAR = "TOPHUB_LOCATION"

# default location of TopHub
AUTOTVM_TOPHUB_DEFAULT_LOC = "https://raw.githubusercontent.com/uwsampl/tvm-distro/master/tophub"

# value of AUTOTVM_TOPHUB_LOC_VAR to specify to not read from TopHub
AUTOTVM_TOPHUB_NONE_LOC = "NONE"
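
# A hedged usage sketch: TopHub lookups can be redirected or disabled via the
# environment variable named by AUTOTVM_TOPHUB_LOC_VAR, for example:
#
#   import os
#   os.environ["TOPHUB_LOCATION"] = "NONE"  # disable TopHub lookups entirely
#   # or point it at a mirror (hypothetical URL):
#   # os.environ["TOPHUB_LOCATION"] = "https://example.com/my-tophub-mirror"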

# root path to store TopHub files
AUTOTVM_TOPHUB_ROOT_PATH = os.path.join(os.path.expanduser('~'), ".tvm", "tophub")

# the version of each package
PACKAGE_VERSION = {
    'arm_cpu':          "v0.04",
    'llvm':             "v0.03",
    'cuda':             "v0.05",
    'rocm':             "v0.03",
    'opencl':           "v0.03",
    'mali':             "v0.05",
    'intel_graphics':   "v0.01",
    'vta':              "v0.06",
}
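
# Package files are named "<backend>_<version>.log". For example, with the
# table above:
#
#   >>> "%s_%s.log" % ('cuda', PACKAGE_VERSION['cuda'])
#   'cuda_v0.05.log'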

logger = logging.getLogger('autotvm')

def _alias(name):
    """convert alias for some packages"""
    table = {
        'vtacpu': 'vta',
        'metal': 'opencl',
        'vulkan': 'opencl',
        'nvptx': 'cuda',
    }
    return table.get(name, name)

def _get_tophub_location():
    location = os.getenv(AUTOTVM_TOPHUB_LOC_VAR, None)
    return AUTOTVM_TOPHUB_DEFAULT_LOC if location is None else location

def context(target, extra_files=None):
    """Return the dispatch context with pre-tuned parameters.
81 82
    This function will load the corresponding *.log files in AUTOTVM_TOPHUB_ROOT_PATH.
    If cannot find them, it will download them from TopHub github repo.
83 84 85 86
    Users can also add their own files in argument `extra_files`.

    Parameters
    ----------
87
    target: Target or List of Target
88 89 90 91
        The compilation target
    extra_files: list of str, optional
        Extra log files to load
    """
    tophub_location = _get_tophub_location()
    if tophub_location == AUTOTVM_TOPHUB_NONE_LOC:
        return EmptyContext()

    best_context = ApplyHistoryBest([])

    targets = target if isinstance(target, (list, tuple)) else [target]

    for tgt in targets:
        if isinstance(tgt, str):
            tgt = _target.create(tgt)

        possible_names = []
        for opt in tgt.options:
            if opt.startswith("-device"):
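                # options look like "-device=<name>"; opt[8:] strips the
                # "-device=" prefix to leave just the device name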
                device = _alias(opt[8:])
                possible_names.append(device)
        possible_names.append(tgt.target_name)

        all_packages = list(PACKAGE_VERSION.keys())
        for name in possible_names:
            name = _alias(name)
            if name in all_packages:
                if not check_backend(tophub_location, name):
                    continue

                filename = "%s_%s.log" % (name, PACKAGE_VERSION[name])
                best_context.load(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, filename))
                break   # only load one file to avoid fallback template mismatch problems

    if extra_files:
        for filename in extra_files:
            best_context.load(filename)

    return best_context
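
# A minimal usage sketch, assuming this module is importable as
# ``tvm.autotvm.tophub`` and ``func`` is an existing Relay function:
#
#   from tvm import autotvm, relay
#
#   target = 'llvm -device=arm_cpu'
#   with autotvm.tophub.context(target):
#       graph, lib, params = relay.build(func, target=target)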


def check_backend(tophub_location, backend):
130 131 132 133 134 135
    """Check whether have pre-tuned parameters of the certain target.
    If not, will download it.

    Parameters
    ----------
    backend: str
136
        The name of backend.
137 138 139 140 141

    Returns
    ----------
    success: bool
        Whether the check is successful.
142 143
    """
    backend = _alias(backend)
    assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend

    version = PACKAGE_VERSION[backend]
    package_name = "%s_%s.log" % (backend, version)
    if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
        return True

    if sys.version_info >= (3,):
        import urllib.request as urllib2
    else:
        import urllib2
    try:
        download_package(tophub_location, package_name)
        return True
    except urllib2.URLError as e:
        logger.warning("Failed to download tophub package for %s: %s", backend, e)
        return False
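
# A hedged sketch of a manual pre-fetch using the functions defined in this
# module (``_get_tophub_location`` and ``check_backend``):
#
#   if check_backend(_get_tophub_location(), 'cuda'):
#       print("cuda parameters are available locally")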


def download_package(tophub_location, package_name):
    """Download pre-tuned parameters of operators for a backend
165

166 167
    Parameters
    ----------
168 169 170
    tophub_location: str
        The location to download TopHub parameters from

171 172
    package_name: str
        The name of package
173
    """
    rootpath = AUTOTVM_TOPHUB_ROOT_PATH

    if not os.path.isdir(rootpath):
        # create the directory, including any missing parent directories
        os.makedirs(rootpath)

    download_url = "{0}/{1}".format(tophub_location, package_name)
    logger.info("Download pre-tuned parameters package from %s", download_url)
    download(download_url, os.path.join(rootpath, package_name), True, verbose=0)
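
# For example, a hedged sketch of fetching the arm_cpu package directly
# (the file name must match an entry in PACKAGE_VERSION):
#
#   download_package(_get_tophub_location(), 'arm_cpu_v0.04.log')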


# global cache for load_reference_log
REFERENCE_LOG_CACHE = {}

def load_reference_log(backend, model, workload_name, template_key):
    """ Load reference log from TopHub to support fallback in template.
    Template will use these reference logs to choose fallback config.

    Parameters
    ----------
    backend: str
        The backend name
    model: str
        The name of the model
    workload_name: str
        The name of the workload. (The first item in the workload tuple)
    template_key: str
        The template key
    """

    backend = _alias(backend)
    version = PACKAGE_VERSION[backend]
    package_name = "%s_%s.log" % (backend, version)
    filename = os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)

    global REFERENCE_LOG_CACHE
    key = (backend, model, workload_name, template_key)

    if key not in REFERENCE_LOG_CACHE:
        tmp = []
        if os.path.isfile(filename):
            # If the queried model does not appear in the log, fall back to
            # the first model recorded for this backend.
            found = False
            inp = None
            for inp, _ in load_from_file(filename):
                if model == inp.target.model:
                    found = True
                    break
            if not found and inp:
                model = inp.target.model

            # Collect every record that matches the workload and template key
            for inp, res in load_from_file(filename):
                if (model == inp.target.model and inp.task.workload[0] == workload_name and
                        inp.config.template_key == template_key):
                    tmp.append((inp, res))
        REFERENCE_LOG_CACHE[key] = tmp

    return REFERENCE_LOG_CACHE[key]
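
# A hedged sketch of querying the reference log from a fallback template
# (the backend, model, workload name, and template key are illustrative):
#
#   ref_log = load_reference_log('cuda', 'unknown', 'conv2d', 'direct')
#   for inp, res in ref_log:
#       print(inp.task.workload, res.costs)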