Unverified Commit 6e1cd825 by Tianqi Chen Committed by GitHub

[REFACTOR] tvm.hybrid -> te.hybrid (#5223)

Rationale: The current hybrid module is more closely aligned with the te part.
We might consider adding a new variant of hybrid script that supports the unified IR later.
This refactor paves the way for those potential changes.
parent 62b3195b
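In practice the refactor is an import-path rename for downstream code. A minimal before/after sketch based on the changes in this diff (the ``add_one`` function is only an illustration, not part of the patch):

.. code-block:: python

    # before this commit
    from tvm.hybrid import script

    # after this commit
    from tvm.te.hybrid import script

    @script
    def add_one(a):
        # hybrid intrinsics such as output_tensor are unchanged by the rename
        b = output_tensor(a.shape, a.dtype)
        for i in range(a.shape[0]):
            b[i] = a[i] + 1.0
        return b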
.. Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
tvm.hybrid
----------
.. automodule:: tvm.hybrid
:members:
:imported-members:
:autosummary:
......@@ -33,7 +33,6 @@ Python API
rpc
contrib
graph_runtime
hybrid
relay/index
vta/index
topi
......@@ -23,3 +23,11 @@ tvm.te
:members:
:imported-members:
:autosummary:
tvm.te.hybrid
-------------
.. automodule:: tvm.te.hybrid
:members:
:imported-members:
:autosummary:
......@@ -33,11 +33,11 @@ Software Emulation
~~~~~~~~~~~~~~~~~~
Both software emulation and compilation are supported. To define a function,
you need to use ``tvm.hybrid.script`` decorator to indicate this is a hybrid function:
you need to use ``tvm.te.hybrid.script`` decorator to indicate this is a hybrid function:
.. code-block:: python
@tvm.hybrid.script
@tvm.te.hybrid.script
def outer_product(a, b, c):
c = output_tensor((100, 99), 'float32')
for i in range(a.shape[0]):
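The example body is cut off by the hunk shown here; a plausible completion, written as a sketch rather than quoted from the file, looks like:

.. code-block:: python

    @tvm.te.hybrid.script
    def outer_product(a, b, c):
        c = output_tensor((100, 99), 'float32')
        for i in range(a.shape[0]):
            for j in range(b.shape[0]):
                # assumed body: each element of c is the product of the two inputs
                c[i, j] = a[i] * b[j]
        return c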
......@@ -85,7 +85,7 @@ to LLVM module.
Tuning
~~~~~~
Following the example above, you can use some TVM-like interfaces to tune the code:
.. code-block:: python
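The tuning snippet itself falls outside this hunk; as a rough sketch only, assuming ``c`` is the tensor returned by calling the hybrid function on ``te.placeholder`` inputs and using the standard ``te`` schedule primitives (not the file's elided content):

.. code-block:: python

    sch = te.create_schedule(c.op)
    # split the outermost generated loop, then reorder the two resulting loops
    xo, xi = sch[c].split(c.op.axis[0], factor=4)
    sch[c].reorder(xi, xo)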
......@@ -101,7 +101,7 @@ loop manipulation (``split`` and ``fuse``), and ``reorder``.
This is a preliminary function, so users are responsible for the correctness
of the functionality after tuning. Specifically, users should be careful when
fusing and reordering imperfect loops.
Loops
~~~~~
......@@ -119,7 +119,7 @@ Users can access containers by either constants or constants loops annotated.
.. code-block:: python
@tvm.hybrid.script
@tvm.te.hybrid.script
def foo(a, b): # b is a tvm.container.Array
c = output_tensor(a.shape, a.dtype)
for i in const_range(len(a)): # because you have b access, i should be explicitly annotated as const_range
......
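The loop body in the snippet above is also cut off by the hunk; a plausible completion, sketched here under the assumption that the captured array ``b`` is simply read element-wise:

.. code-block:: python

    @tvm.te.hybrid.script
    def foo(a, b):  # b is a tvm.container.Array
        c = output_tensor(a.shape, a.dtype)
        for i in const_range(len(a)):
            # indexing b requires a constant loop variable, hence const_range
            c[i] = a[i] + b[i]
        return c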
......@@ -57,9 +57,6 @@ from . import testing
# tvm.driver
from .driver import build, lower
# tvm.hybrid
from . import hybrid
# others
from . import arith
......
......@@ -18,9 +18,9 @@
from __future__ import absolute_import
from tvm.runtime import convert
from tvm.te.hybrid import script
from topi.util import get_const_int, get_const_tuple
from . import op as _reg
from ...hybrid import script
_reg.register_reduce_schedule("argmax")
_reg.register_reduce_schedule("argmin")
......
......@@ -19,11 +19,11 @@
import topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from topi.util import get_const_tuple
from .op import register_compute, register_shape_func
from .op import register_broadcast_schedule, register_injective_schedule
from .op import register_pattern, OpPattern
from ...hybrid import script
register_broadcast_schedule("log")
......
......@@ -19,13 +19,13 @@
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
import topi
from topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
from ...hybrid import script
_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
......
......@@ -22,11 +22,11 @@ import topi
from topi.util import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ....hybrid import script
# relu
reg.register_broadcast_schedule("nn.relu")
......
......@@ -34,3 +34,4 @@ from .operation import thread_axis, reduce_axis
from .tensor import PlaceholderOp, ComputeOp, TensorComputeOp, ScanOp, ExternOp, HybridOp
from .autodiff import gradient
from . import hybrid
......@@ -31,8 +31,7 @@ HalideIR.
import inspect
import tvm._ffi
from tvm.driver.build_module import form_body
from .._ffi.base import decorate
from tvm._ffi.base import decorate
from .module import HybridModule
from .parser import source_to_op
......@@ -95,4 +94,4 @@ def build(sch, inputs, outputs, name="hybrid_func"):
return HybridModule(src, name)
tvm._ffi._init_api("tvm.hybrid")
tvm._ffi._init_api("tvm.hybrid", __name__)
......@@ -23,7 +23,7 @@ To enable this feature, you need to build with -DUSE_HYBRID_DUMP=ON.
import ast
from ..contrib import util
from tvm.contrib import util
from .util import _internal_assert
from .util import _is_tvm_arg_types
from .parser import source_to_op
......@@ -52,7 +52,7 @@ class HybridModule(object):
temp = util.tempdir()
dst = temp.relpath("script.py")
with open(dst, 'w') as f:
f.write("import tvm\n@tvm.hybrid.script\n%s" % src)
f.write("import tvm\n@tvm.te.hybrid.script\n%s" % src)
if name is not None:
self.name = name
......
......@@ -17,7 +17,7 @@
"""Intrinsics of TVM-Python Hybrid Script for Python emulation runtime"""
import numpy
from .. import target
from tvm import target
class bind(object): #pylint: disable=invalid-name
......
......@@ -18,8 +18,8 @@ import tvm, inspect, sys, traceback, numpy, pytest, types, os
from tvm import te
from tvm.contrib import util
from tvm.hybrid import script
from tvm.hybrid.runtime import HYBRID_GLOBALS
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS
@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
......@@ -80,7 +80,7 @@ def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
h_module = tvm.hybrid.build(sch, module_args, module_outs)
h_module = te.hybrid.build(sch, module_args, module_outs)
return h_module, module_args, module_outs
......@@ -146,7 +146,7 @@ def test_outer_product():
temp = util.tempdir()
path = temp.relpath('%s.py' % func.name)
func.save(path)
func_ = tvm.hybrid.HybridModule()
func_ = te.hybrid.HybridModule()
func_.load(path)
run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
......@@ -348,7 +348,7 @@ def test_bind():
run_and_check(func, ins, outs=outs, target='cuda')
@tvm.hybrid.script
@te.hybrid.script
def foo(a):
c = output_tensor((a.shape[0],), a.dtype)
total = allocate((1,), a.dtype, 'local')
......@@ -370,7 +370,7 @@ def test_bind():
func, ins, outs = run_and_check(foo, [a], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
@tvm.hybrid.script
@te.hybrid.script
def max_threads(a):
b = output_tensor(a.shape, a.dtype)
n = a.shape[0]
......@@ -433,7 +433,7 @@ def test_math_intrin():
# test non canonical loops
def test_non_zero():
@tvm.hybrid.script
@te.hybrid.script
def blur(a):
b = output_tensor((30, 30), 'float32')
for i in range(2, 32):
......@@ -449,7 +449,7 @@ def test_non_zero():
func, ins, outs = run_and_check(blur, [a])
run_and_check(func, ins, outs=outs)
@tvm.hybrid.script
@te.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype='float32')
for i in range(10):
......@@ -464,7 +464,7 @@ def test_non_zero():
run_and_check(func, ins, outs=outs)
def test_allocate():
@tvm.hybrid.script
@te.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), 'float32')
for i in range(30):
......@@ -483,7 +483,7 @@ def test_allocate():
run_and_check(func, ins, outs=outs)
if tvm.gpu().exist:
@tvm.hybrid.script
@te.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256, ), 'float32')
shared = allocate((256, ), 'float32', 'shared')
......@@ -505,7 +505,7 @@ def test_allocate():
print('[Warning] No GPU found! Skip shared mem test!')
def test_upstream():
@tvm.hybrid.script
@te.hybrid.script
def upstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
......@@ -535,7 +535,7 @@ def test_upstream():
tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)
def test_downstream():
@tvm.hybrid.script
@te.hybrid.script
def downstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
......@@ -562,7 +562,7 @@ def test_downstream():
tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)
def test_const_param():
@tvm.hybrid.script
@te.hybrid.script
def add_something(a, b):
c = output_tensor((11, ), 'int32')
for i in range(11):
......@@ -588,7 +588,7 @@ def test_const_param():
tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)
def test_value_index():
@tvm.hybrid.script
@te.hybrid.script
def kernel_a(a):
b = output_tensor((16, ), 'int32')
c = output_tensor((4, 4), 'int32')
......@@ -597,7 +597,7 @@ def test_value_index():
c[i // 4, i % 4] = a[i] + 1
return b, c
@tvm.hybrid.script
@te.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), 'int32')
for i in range(4):
......@@ -621,7 +621,7 @@ def test_value_index():
tvm.testing.assert_allclose(res.asnumpy(), ref)
def test_func_call():
@tvm.hybrid.script
@te.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
......@@ -640,7 +640,7 @@ def test_func_call():
run_and_check(func, ins, outs=outs)
def test_bool():
@tvm.hybrid.script
@te.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
......@@ -655,7 +655,7 @@ def test_bool():
run_and_check(func, ins, outs=outs)
def test_const_range():
@tvm.hybrid.script
@te.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, 'int32')
......@@ -675,7 +675,7 @@ def test_const_range():
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.hybrid.script
@te.hybrid.script
def goo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
......@@ -692,7 +692,7 @@ def test_const_range():
func, ins, outs = run_and_check(goo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.hybrid.script
@te.hybrid.script
def hoo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
......@@ -779,7 +779,7 @@ def test_capture():
constant_list = [[1, 2], [3, n]]
const_value = 1
@tvm.hybrid.script
@te.hybrid.script
def add_something(a):
c = output_tensor((constant_tuple[1],), 'int32')
for i in range(constant_tuple[1]):
......
......@@ -495,7 +495,7 @@ def test_replace_dataflow():
def test_large_input():
@tvm.hybrid.script
@te.hybrid.script
def compute(a, b):
n = 16384
c = output_tensor((n, n), 'int32')
......
......@@ -16,7 +16,7 @@
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Argwhere operator"""
from tvm import hybrid
from tvm.te import hybrid
@hybrid.script
def hybrid_argwhere_1d(output_shape, condition):
......
......@@ -19,7 +19,7 @@
import tvm
from tvm import te
from tvm import hybrid
from tvm.te import hybrid
from ..sort import argsort
@hybrid.script
......
......@@ -18,7 +18,7 @@
"""SSD multibox operators"""
import tvm
from tvm import hybrid
from tvm.te import hybrid
from tvm.tir import exp, sqrt
import topi
......
......@@ -19,7 +19,7 @@
import math
import tvm
from tvm import hybrid
from tvm.te import hybrid
from ..tensor import full
from ..util import get_const_tuple
......