Commit b8234498 by Tianqi Chen Committed by GitHub

[DOC] API doc organization. (#90)

* [DOC] API doc organization.

* remove breathe for now
parent b5702eca
.rst-content .hidden-section {
display: none;
}
.rst-toc .hidden-section {
display: none;
}
nav .hidden-section {
display: inherit;
}
tvm.build
---------
.. autofunction:: tvm.lower
.. autofunction:: tvm.build
tvm.collections
---------------
The collections module contains data structures used in the TVM DSL.
.. automodule:: tvm.collections
:members:
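
As a quick illustration (a minimal sketch, assuming python lists convert as described
by `tvm.convert`), the containers in this module show up when passing python values
into TVM functions:

.. code-block:: python

   import tvm

   arr = tvm.convert([1, 2, 3])
   # python lists are converted to tvm.collections.Array
   assert isinstance(arr, tvm.collections.Array)
   # elements come back as TVM constant expression nodes
   assert arr[1].value == 2
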
Developer API
-------------
This page contains modules that are used by developers of TVM.
tvm.node
~~~~~~~~
Node is the base class of all TVM AST nodes. Normally users do not
need to touch this API.
.. autoclass:: tvm.node.NodeBase
:members:
.. autoclass:: tvm.node.Node
:members:
.. autofunction:: tvm.register_node
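
A minimal sketch of registering a python proxy class for a node type
(the class and type key here are purely illustrative):

.. code-block:: python

   import tvm
   from tvm import node

   @tvm.register_node
   class MyNode(node.NodeBase):
       """Hypothetical proxy class, registered under the type key "MyNode"."""
       pass
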
tvm.expr
~~~~~~~~
.. automodule:: tvm.expr
:members:
:undoc-members:
tvm.codegen
~~~~~~~~~~~
.. automodule:: tvm.codegen
:members:
:undoc-members:
tvm.stmt
~~~~~~~~
.. automodule:: tvm.stmt
:members:
:undoc-members:
tvm.ir_pass
~~~~~~~~~~~
.. automodule:: tvm.ir_pass
:members:
.. autosummary::
tvm.ir_pass.Inline
tvm.ir_pass.Simplify
tvm.ir_pass.ConvertSSA
tvm.ir_pass.VerifySSA
tvm.ir_pass.CanonicalSimplify
tvm.ir_pass.StorageFlatten
tvm.ir_pass.VectorizeLoop
tvm.ir_pass.UnrollLoop
tvm.ir_pass.StorageSync
tvm.ir_pass.MakeAPI
tvm.ir_pass.SplitHostDevice
tvm.ir_pass.InjectVirtualThread
tvm.ir_pass.LoopPartition
tvm.ir_pass.RemoveNoOp
tvm.ir_pass.SplitPipeline
tvm.ir_pass.LowerThreadAllreduce
tvm.ir_pass.NarrowChannelAccess
tvm.make
~~~~~~~~
.. automodule:: tvm.make
:members:
tvm.Function
------------
.. autoclass:: tvm.Function
.. autofunction:: tvm.register_func
.. autofunction:: tvm.get_global_func
Python API
==========
.. toctree::
:maxdepth: 2
tvm
tensor
schedule
build
module
ndarray
collection
node
function
dev
tvm.module
----------
.. autoclass:: tvm.module.Module
:members:
:inherited-members:
.. autofunction:: tvm.module.load
.. autofunction:: tvm.module.enabled
tvm.ndarray
-----------
tvm.ndarray provides a minimal runtime array API for testing
the correctness of a program.
.. autoclass:: tvm.ndarray.TVMContext
:members:
.. autoclass:: tvm.ndarray.NDArray
:members:
:inherited-members:
.. autofunction:: tvm.cpu
.. autofunction:: tvm.gpu
.. autofunction:: tvm.opencl
.. autofunction:: tvm.ndarray.array
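
A minimal round-trip sketch, assuming numpy is available on the host side:

.. code-block:: python

   import numpy as np
   import tvm

   ctx = tvm.cpu(0)
   x = np.random.uniform(size=(10,)).astype("float32")
   # copy host data into a TVM NDArray and read it back
   a = tvm.ndarray.array(x, ctx)
   np.testing.assert_allclose(a.asnumpy(), x)
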
tvm.schedule
------------
The `tvm.schedule` module contains the classes that describe the
scheduling structure of TVM.
.. autoclass:: tvm.schedule.IterVar
:members:
.. autofunction:: tvm.create_schedule
.. autoclass:: tvm.schedule.Schedule
:members:
:inherited-members:
.. autoclass:: tvm.schedule.Stage
:members:
:inherited-members:
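
A brief sketch of creating a schedule and splitting a stage axis
(the split factor is arbitrary):

.. code-block:: python

   import tvm

   n = tvm.var("n")
   A = tvm.placeholder((n,), name="A")
   B = tvm.compute((n,), lambda i: A[i] * 2, name="B")
   s = tvm.create_schedule(B.op)
   # split the first axis of stage B into an outer and inner loop
   xo, xi = s[B].split(B.op.axis[0], factor=32)
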
tvm.tensor
----------
The `tvm.tensor` module contains the Tensor and Operation classes
used for computation declaration.
.. autoclass:: tvm.tensor.Tensor
:members:
:inherited-members:
.. autoclass:: tvm.tensor.Operation
:members:
:inherited-members:
.. autoclass:: tvm.tensor.ComputeOp
:members:
:show-inheritance:
.. autoclass:: tvm.tensor.PlaceholderOp
:members:
:show-inheritance:
.. autoclass:: tvm.tensor.ScanOp
:members:
:show-inheritance:
.. autoclass:: tvm.tensor.ExternOp
:members:
:show-inheritance:
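
For reference, a small sketch showing how these classes arise from the
declaration API:

.. code-block:: python

   import tvm

   n = tvm.var("n")
   A = tvm.placeholder((n,), name="A")         # Tensor backed by PlaceholderOp
   B = tvm.compute((n,), lambda i: A[i] + 1)   # Tensor backed by ComputeOp
   assert isinstance(B, tvm.tensor.Tensor)
   assert isinstance(B.op, tvm.tensor.ComputeOp)
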
tvm
---
tvm is the library root namespace; it contains functions for
declaring computation.
.. autosummary::
tvm.load_json
tvm.save_json
tvm.var
tvm.const
tvm.convert
tvm.placeholder
tvm.compute
tvm.scan
tvm.extern
tvm.call_packed
tvm.decl_buffer
tvm.reduce_axis
tvm.thread_axis
tvm.sum
tvm.min
tvm.max
.. autofunction:: tvm.load_json
.. autofunction:: tvm.save_json
.. autofunction:: tvm.var
.. autofunction:: tvm.const
.. autofunction:: tvm.convert
.. autofunction:: tvm.placeholder
.. autofunction:: tvm.compute
.. autofunction:: tvm.scan
.. autofunction:: tvm.extern
.. autofunction:: tvm.call_packed
.. autofunction:: tvm.decl_buffer
.. autofunction:: tvm.reduce_axis
.. autofunction:: tvm.thread_axis
.. autofunction:: tvm.sum
.. autofunction:: tvm.min
.. autofunction:: tvm.max
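
As a worked sketch that ties several of these functions together
(a row-sum reduction; all names are illustrative):

.. code-block:: python

   import tvm

   n = tvm.var("n")
   m = tvm.var("m")
   A = tvm.placeholder((n, m), name="A")
   k = tvm.reduce_axis((0, m), name="k")
   # B[i] = sum over k of A[i, k]
   B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B")
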
@@ -26,7 +26,6 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../python/')
sys.path.insert(0, libpath)

# -- General configuration ------------------------------------------------

# General information about the project.
@@ -51,11 +50,15 @@ release = tvm.__version__
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx_gallery.gen_gallery',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -132,7 +135,7 @@ if not on_rtd and html_theme == 'rtd':
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
@@ -153,12 +156,19 @@ latex_documents = [
def run_doxygen(folder):
    """Run the doxygen make command in the designated folder."""
    try:
        retcode = subprocess.call("cd %s; make doc" % folder, shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)

intersphinx_mapping = {
    'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('http://matplotlib.org/', None),
}

examples_dirs = ['../tutorials/python']
gallery_dirs = ['tutorials']
@@ -171,18 +181,24 @@ def generate_doxygen_xml(app):
def setup(app):
    # Add hook for building doxygen xml when needed
    # no c++ API for now
    app.connect("builder-inited", generate_doxygen_xml)
    app.add_stylesheet('css/tvm_theme.css')
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: github_doc_root + url,
    }, True)
    app.add_transform(AutoStructify)

sphinx_gallery_conf = {
    'backreferences_dir': 'gen_modules/backreferences',
    'doc_module': ('tvm', 'numpy'),
    'reference_url': {
        'tvm': None,
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
    'examples_dirs': examples_dirs,
    'gallery_dirs': gallery_dirs,
    'find_mayavi_figures': False,
    'filename_pattern': '.py',
    'expected_failing_examples': []
}
@@ -9,7 +9,7 @@ Contents
.. toctree::
   :maxdepth: 1

   how_to/contribute
   how_to/install
   tutorials/index
   api/python/index
Python API
==========
tvm
---
tvm is the library root namespace; it contains functions for
declaring computation.
.. autofunction:: tvm.load_json
.. autofunction:: tvm.save_json
.. autofunction:: tvm.var
.. autofunction:: tvm.convert
.. autofunction:: tvm.placeholder
.. autofunction:: tvm.compute
.. autofunction:: tvm.scan
.. autofunction:: tvm.extern
.. autofunction:: tvm.reduce_axis
.. autofunction:: tvm.sum
tvm.tensor
----------
The `tvm.tensor` module contains the Tensor and Operation classes
used for computation declaration.
.. autoclass:: tvm.tensor.Tensor
:members:
:inherited-members:
.. autoclass:: tvm.tensor.Operation
:members:
:inherited-members:
tvm.schedule
------------
.. autofunction:: tvm.create_schedule
.. autoclass:: tvm.schedule.Schedule
:members:
.. autoclass:: tvm.schedule.Stage
:members:
tvm.build
---------
.. autofunction:: tvm.lower
.. autofunction:: tvm.build
tvm.ndarray
-----------
tvm.ndarray provides a minimal runtime array API for testing
the correctness of a program.
.. autofunction:: tvm.cpu
.. autofunction:: tvm.gpu
.. autofunction:: tvm.vpi
.. autofunction:: tvm.opencl
.. autofunction:: tvm.ndarray.array
.. autoclass:: tvm.ndarray.TVMContext
:members:
.. autoclass:: tvm.ndarray.NDArray
:members:
:inherited-members:
tvm.Function
------------
.. autofunction:: tvm.register_func
.. autoclass:: tvm.Function
tvm.module
----------
.. autofunction:: tvm.module.load
.. autoclass:: tvm.module.Module
:members:
:inherited-members:
tvm.node
--------
tvm.node provides the base node class and node registration utilities.
.. autofunction:: tvm.register_node
.. autoclass:: tvm.node.NodeBase
:members:
.. autoclass:: tvm.node.Node
:members:
tvm.expr
--------
.. automodule:: tvm.expr
:members:
@@ -136,7 +136,27 @@ def _make_tvm_args(args, temp_args):
class Function(object):
    """The PackedFunc object used in TVM.

    Function plays a key role in bridging the frontend and backend of TVM.
    Function provides a type-erased interface; you can call a function with positional arguments.

    The compiled module returns Function objects.
    The TVM backend also registers and exposes its API as Functions.
    For example, the developer functions exposed in tvm.ir_pass are actually
    C++ functions that are registered as PackedFunc.

    The following are common usage scenarios of tvm.Function:

    - Automatic exposure of C++ API into python
    - To call PackedFunc from python side
    - To call python callbacks to inspect results in generated code
    - Bring python hook into C++ backend

    See Also
    --------
    tvm.register_func: How to register global function.
    tvm.get_global_func: How to get global function.
    """
    __slots__ = ["handle", "is_global"]
    # pylint: disable=no-member
@@ -299,6 +319,26 @@ def register_func(func_name, f=None):
    -------
    fregister : function
        Register function if f is not specified.

    Examples
    --------
    The following code registers my_packed_func as a global function.
    Note that we simply get it back from the global function table to invoke
    it from the python side. However, we can also invoke the same function
    from the C++ backend, or in the compiled TVM code.

    .. code-block:: python

      targs = (10, 10.0, "hello")
      @tvm.register_func
      def my_packed_func(*args):
          assert(tuple(args) == targs)
          return 10
      # Get it out from global function table
      f = tvm.get_global_func("my_packed_func")
      assert isinstance(f, tvm.nd.Function)
      y = f(*targs)
      assert y == 10
    """
    if callable(func_name):
        f = func_name
@@ -328,7 +368,7 @@ def get_global_func(name):
    Returns
    -------
    func : tvm.Function
        The function to be returned.
    """
    handle = FunctionHandle()
@@ -355,27 +395,60 @@ def list_global_func_names():
    return fnames


def _get_api(f):
    flocal = f
    def my_api_func(*args):
        """
        This is a type erased API that calls into a global PackedFunc.
        These APIs correspond to functions registered from the C++ backend
        and can be used as developer functions.

        args : list
            The positional arguments to the function call.

        Returns
        -------
        value : int, float, None, Node or Function
            The result of the API function call.
        """
        return flocal(*args)
    return my_api_func


def _init_api(mod):
    """Initialize api for a given module name

    mod : str
        The name of the module.
    """
    module = sys.modules[mod]
    namespace_match = {
        "_make_": "tvm.make",
        "_arith_": "tvm.arith",
        "_pass_": "tvm.ir_pass",
        "_codegen_": "tvm.codegen",
        "_module_": "tvm.module",
        "_schedule_": "tvm.schedule"
    }
    for name in list_global_func_names():
        fname = name
        target = "tvm.api"
        for k, v in namespace_match.items():
            if name.startswith(k):
                fname = name[len(k):]
                target = v
        if target != mod:
            continue
        if mod == "tvm.api" and name.startswith("_"):
            target_module = sys.modules["tvm._api_internal"]
        else:
            target_module = module
        f = get_global_func(name)
        ff = _get_api(f)
        ff.__name__ = fname
        ff.__doc__ = ("TVM PackedFunc %s. " % fname)
        setattr(target_module, ff.__name__, ff)


def _init_module_module(module_class):
...
@@ -186,13 +186,14 @@ def register_node(type_key=None):
    type_key : str or cls
        The type key of the node
    """
    node_name = type_key if isinstance(type_key, str) else type_key.__name__
    def register(cls):
        """internal register function"""
        NODE_TYPE[node_name] = cls
        return cls
    if isinstance(type_key, str):
        return register
    else:
        return register(type_key)
@@ -8,13 +8,14 @@ from ._ctypes._types import TVMType
from ._ctypes._node import register_node, NodeBase
from ._ctypes._node import convert_to_node as _convert_to_node
from ._ctypes._function import Function
from ._ctypes._function import _init_api, register_func, get_global_func
from ._ctypes._function import convert_to_tvm_func as _convert_tvm_func
from . import _api_internal
from . import _base
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import collections as _collections

int32 = "int32"
@@ -31,6 +32,27 @@ def const(value, dtype=None):
    return _api_internal._const(value, dtype)


def convert(value):
    """Convert value to TVM node or function.

    Parameters
    ----------
    value : python value

    Returns
    -------
    tvm_val : Node or Function
        Converted value in TVM
    """
    if isinstance(value, (Function, NodeBase)):
        return value

    if callable(value):
        return _convert_tvm_func(value)
    else:
        return _convert_to_node(value)


def load_json(json_str):
    """Load tvm object from json_str.
@@ -179,8 +201,8 @@ def scan(init, update, state_placeholder, inputs=None, name="scan"):
    .. code-block:: python

      # The following code is equivalent to numpy.cumsum
      m = tvm.var("m")
      n = tvm.var("n")
      X = tvm.placeholder((m, n), name="X")
      s_state = tvm.placeholder((m, n))
      s_init = tvm.compute((1, n), lambda _, i: X[0, i])
@@ -281,7 +303,10 @@ def decl_buffer(shape, dtype=None,
                strides=None,
                byte_offset=None,
                offset_alignment=0):
    """Declare a new symbolic buffer.

    Normally buffers are created automatically during lower and build.
    This is only needed if users want to specify their own buffer layout.

    Parameters
    ----------
@@ -370,6 +395,11 @@ def thread_axis(dom=None, tag='', name=''):
    name : str, optional
        The name of the var.

    Returns
    -------
    axis : IterVar
        The thread itervar.
    """
    if isinstance(dom, _base.string_types):
        tag, dom = dom, None
@@ -446,7 +476,7 @@ def min(lhs, rhs=None, axis=None, where=None):
    """
    if rhs and axis:
        raise ValueError("Can only take one argument, rhs or axis")
    if isinstance(rhs, (_schedule.IterVar, list)):
        axis, rhs = rhs, axis
    if rhs:
        return _make.Min(lhs, rhs)
@@ -479,7 +509,7 @@ def max(lhs, rhs=None, axis=None, where=None):
    """
    if rhs and axis:
        raise ValueError("Can only take one argument, rhs or axis")
    if isinstance(rhs, (_schedule.IterVar, list)):
        axis, rhs = rhs, axis
    if rhs:
        return _make.Max(lhs, rhs)
@@ -487,26 +517,4 @@ def max(lhs, rhs=None, axis=None, where=None):
    x = _make.Reduce("Max", expr, axis, where)
    return x

_init_api("tvm.api")
@@ -2,6 +2,7 @@
from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, register_node
from ._ctypes._function import _init_api
from . import _api_internal

class IntSet(NodeBase):
@@ -36,3 +37,5 @@ class StrideSet(IntSet):
class ModularSet(IntSet):
    """Represent range of (coeff * x + base) for x in Z """
    pass

_init_api("tvm.arith")
@@ -134,10 +134,10 @@ def build(sch,
    fapi = ir_pass.LowerThreadAllreduce(fapi, warp_size)
    fsplits = [s for s in ir_pass.SplitHostDevice(fapi)]
    if len(fsplits) > 1:
        mhost = codegen.build_module(fsplits[0], target_host)
        if target:
            mdev = codegen.build_module(fsplits[1:], target)
            mhost.import_module(mdev)
        return mhost
    else:
        return codegen.build_module(fsplits[0], target)
"""Code generation related functions""" """Code generation related functions."""
from ._ctypes._function import _init_api
def build_module(lowered_func, target):
"""Build lowered_func into Module.
Parameters
----------
lowered_func : LoweredFunc
The lowered function
target : str
The target module type.
Returns
-------
module : Module
The corressponding module.
"""
return _Build(lowered_func, target)
def enabled(target):
"""Whether target is enabled for codegen.
Parameters
----------
target : str
The target module type.
Returns
-------
enabled : boolean
Whether the target module is enabled.
"""
return _Enabled(target)
_init_api("tvm.codegen")
@@ -2,11 +2,16 @@
from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, register_node
from . import _api_internal

@register_node
class Array(NodeBase):
    """Array container of TVM.

    You do not need to create Array explicitly.
    Normally python list and tuple will be converted automatically
    to Array during tvm function call.
    You may get Array in return values of TVM function call.
    """
    def __getitem__(self, i):
        if isinstance(i, slice):
            start = i.start if i.start is not None else 0
@@ -26,7 +31,13 @@ class Array(NodeBase):
@register_node
class Map(NodeBase):
    """Map container of TVM.

    You do not need to create Map explicitly.
    Normally python dict will be converted automatically
    to Map during tvm function call.
    You may get Map in return values of TVM function call.
    """
    def __getitem__(self, k):
        return _api_internal._MapGetItem(self, k)
@@ -47,22 +58,12 @@ class Map(NodeBase):
@register_node
class Range(NodeBase):
    """Represent range in TVM.

    You do not need to create Range explicitly.
    Python list and tuple will be converted automatically to Range in api functions.
    """
    pass

@register_node
class LoweredFunc(NodeBase):
...
"""Module to declare Expression class""" """Expression AST Node in TVM.
User do not need to deal with expression AST node directly.
But they can be helpful for developer to do quick proptyping.
While not displayed in the document and python file.
Each expression node have subfields that can be visited from python side.
For example, you can use addexp.a to get the left operand of an Add node.
.. code-block:: python
x = tvm.var("n")
y = x + 2
assert(isinstance(y, tvm.expr.Add))
assert(y.a == x)
"""
# pylint: disable=missing-docstring # pylint: disable=missing-docstring
from __future__ import absolute_import as _abs from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, register_node from ._ctypes._node import NodeBase, register_node
...@@ -75,7 +90,7 @@ class LogicalExpr(Expr): ...@@ -75,7 +90,7 @@ class LogicalExpr(Expr):
@register_node("Variable") @register_node("Variable")
class Var(Expr): class Var(Expr):
"""Symbolic variable expression.""" """Symbolic variable."""
pass pass
@register_node @register_node
......
"""Namespace of IR pass functions""" """Namespace of IR pass functions.
This namespace is used for developers. While you do not see any declarations.
The functions are automatically exported from C++ side via PackedFunc.
Each api is a PackedFunc that can be called in a positional argument manner.
You can read "include/tvm/pass.h" for the function signature of these functions.
"""
from ._ctypes._function import _init_api
_init_api("tvm.ir_pass")
"""namespace of IR node builder make function""" """namespace of IR node builder make function
This namespace is used for developers. While you do not see any declarations.
The functions are automatically exported from C++ side via PackedFunc.
Each api is a PackedFunc that can be called in a positional argument manner.
You can use make function to build the IR node.
"""
from ._ctypes._function import _init_api
_init_api("tvm.make")
"""Runtime module related stuffs""" """Runtime module related stuffs"""
from __future__ import absolute_import as _abs from __future__ import absolute_import as _abs
from ._ctypes._function import ModuleBase, _init_module_module from ._ctypes._function import ModuleBase, _init_module_module
from ._ctypes._function import _init_api
class Module(ModuleBase): class Module(ModuleBase):
"""Module container of all TVM generated functions""" """Module container of all TVM generated functions"""
...@@ -28,7 +30,7 @@ class Module(ModuleBase): ...@@ -28,7 +30,7 @@ class Module(ModuleBase):
Returns Returns
---------- ----------
modules : list of Modules modules : list of Module
The module The module
""" """
nmod = _ImportsSize(self) nmod = _ImportsSize(self)
...@@ -58,7 +60,36 @@ def load(path, fmt=""): ...@@ -58,7 +60,36 @@ def load(path, fmt=""):
fmt : str, optional fmt : str, optional
The format of the file, if not specified The format of the file, if not specified
it will be inferred from suffix of the file. it will be inferred from suffix of the file.
Returns
-------
module : Module
The loaded module
""" """
return _LoadFromFile(path, fmt) return _LoadFromFile(path, fmt)
def enabled(target):
"""Whether module runtime is enabled for target
Parameters
----------
target : str
The target device type.
Returns
-------
enabled : bool
Whether runtime is enabled.
Examples
--------
The following code checks if gpu is enabled.
>>> tvm.module.enabled("gpu")
"""
return _Enabled(target)
_init_api("tvm.module")
_init_module_module(Module) _init_module_module(Module)
@@ -3,11 +3,14 @@ from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, register_node
from . import _api_internal
from . import tensor as _tensor
from . import expr as _expr
from . import collections as _collections
from ._ctypes._function import _init_api

@register_node
class Buffer(NodeBase):
    """Represent a symbolic buffer in TVM."""
    pass

@register_node
@@ -20,6 +23,29 @@ class Fuse(NodeBase):
    """Fuse operation on axis."""
    pass

@register_node
class IterVar(NodeBase, _expr.ExprOp):
    """Represent iteration variable.

    IterVar is normally created by Operation, to represent
    axis iterations in the computation.
    It can also be created by schedule primitives like :any:`tvm.schedule.Stage.split`.

    See Also
    --------
    tvm.thread_axis: Create thread axis IterVar.
    tvm.reduce_axis: Create reduce axis IterVar.
    """
    DataPar = 0
    ThreadIndex = 1
    CommReduce = 2
    Ordered = 3
    DimInfo = 4
    Unrolled = 5
    Vectorized = 6
    Parallelized = 7

_tensor.iter_var_cls = IterVar

def create_schedule(ops):
    """Create a schedule for list of ops
@@ -343,3 +369,5 @@ class Stage(NodeBase):
            The iteration to be parallelized.
        """
        _api_internal._StageParallel(self, var)

_init_api("tvm.schedule")
"""Statement classes""" """Statement AST Node in TVM.
User do not need to deal with AST node directly.
But they can be helpful for developer to do quick proptyping.
While not displayed in the document and python file.
Each statement node have subfields that can be visited from python side.
.. code-block:: python
x = tvm.var("n")
a = tvm.var("array", tvm.handle)
st = tvm.make.Store(a, x + 1, 1)
assert isinstance(st, tvm.stmt.Store)
assert(st.buffer_var == a)
"""
from __future__ import absolute_import as _abs from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, register_node from ._ctypes._node import NodeBase, register_node
......
"""Tensor related abstractions""" """Tensor and Computation abstraction objects"""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs from __future__ import absolute_import as _abs
from ._ctypes._node import NodeBase, SliceBase, register_node, convert_to_node from ._ctypes._node import NodeBase, SliceBase, register_node, convert_to_node
from . import collections as _collections
from . import _api_internal from . import _api_internal
from . import make as _make from . import make as _make
from . import expr as _expr from . import expr as _expr
...@@ -19,6 +19,7 @@ class TensorSlice(SliceBase, _expr.ExprOp): ...@@ -19,6 +19,7 @@ class TensorSlice(SliceBase, _expr.ExprOp):
indices = (indices,) indices = (indices,)
return TensorSlice(self.tensor, self.indices + indices) return TensorSlice(self.tensor, self.indices + indices)
itervar_cls = None
@register_node @register_node
class Tensor(NodeBase): class Tensor(NodeBase):
...@@ -30,10 +31,10 @@ class Tensor(NodeBase): ...@@ -30,10 +31,10 @@ class Tensor(NodeBase):
indices = convert_to_node(indices) indices = convert_to_node(indices)
args = [] args = []
for x in indices: for x in indices:
if isinstance(x, _collections.IterVar): if isinstance(x, _expr.Expr):
args.append(x.var)
elif isinstance(x, _expr.Expr):
args.append(x) args.append(x)
elif isinstance(x, iter_var_cls):
args.append(x.var)
else: else:
raise ValueError("The indices must be expression") raise ValueError("The indices must be expression")
...@@ -95,20 +96,35 @@ class Operation(NodeBase): ...@@ -95,20 +96,35 @@ class Operation(NodeBase):
""" """
return _api_internal._OpGetOutput(self, index) return _api_internal._OpGetOutput(self, index)
@register_node @register_node
class PlaceholderOp(Operation): class PlaceholderOp(Operation):
"""Placeholder operation.""" """Placeholder operation."""
pass pass
@register_node @register_node
class ComputeOp(Operation): class ComputeOp(Operation):
"""Compute operation.""" """Compute operation."""
pass @property
def axis(self):
"""Represent axis of IterVar, only defined when it is a ComputeOp"""
return self.__getattr__("axis")
@property
def reduce_axis(self):
"""Represent axis of reductions, only defined when it is a ComputeOp"""
return self.__getattr__("reduce_axis")
@register_node @register_node
class ScanOp(Operation): class ScanOp(Operation):
"""Scan operation.""" """Scan operation."""
pass @property
def scan_axis(self):
"""Represent axis of scan, only defined when it is a ScanOp"""
return self.__getattr__("scan_axis")
@register_node @register_node
class ExternOp(Operation): class ExternOp(Operation):
......
@@ -12,7 +12,7 @@
namespace tvm {
namespace codegen {

TVM_REGISTER_API(_codegen__Build)
.set_body([](TVMArgs args, TVMRetValue *ret) {
    if (args[0].IsNodeType<LoweredFunc>()) {
      *ret = Build({args[0]}, args[1]);
@@ -21,7 +21,7 @@ TVM_REGISTER_API(_codegen_build)
    }
  });

TVM_REGISTER_API(_codegen__Enabled)
.set_body([](TVMArgs args, TVMRetValue *ret) {
    *ret = TargetEnabled(args[0]);
  });
...
@@ -97,7 +97,7 @@ bool RuntimeEnabled(const std::string& target) {
  return runtime::Registry::Get(load_f_name) != nullptr;
}

TVM_REGISTER_GLOBAL(_module__Enabled)
.set_body([](TVMArgs args, TVMRetValue *ret) {
    *ret = RuntimeEnabled(args[0]);
  });
...
@@ -38,7 +38,7 @@ def test_dot():
    if not tvm.codegen.enabled(target):
        print("Target %s is not enabled" % target)
        return
    f = tvm.codegen.build_module(fapi, target)
    # verify
    ctx = tvm.cpu(0)
    a = tvm.nd.array(np.random.uniform(size=(nn,)).astype(A.dtype), ctx)
...
@@ -32,8 +32,8 @@ def test_add_pipeline():
        if not tvm.codegen.enabled(device):
            return
        ctx = tvm.gpu(0) if device == "cuda" else tvm.cl(0)
        mhost = tvm.codegen.build_module(fsplits[0], host)
        mdev = tvm.codegen.build_module(fsplits[1:], device)
        mhost.import_module(mdev)
        code = mdev.get_source()
        f = mhost.entry_func
@@ -54,8 +54,8 @@ def test_add_pipeline():
            return
        ctx = tvm.gpu(0) if device == "cuda" else tvm.cl(0)
        fmt = "ptx" if device == "cuda" else "cl"
        mhost = tvm.codegen.build_module(fsplits[0], host)
        mdev = tvm.codegen.build_module(fsplits[1:], device)
        temp = testing.tempdir()
        mpath = temp.relpath("test.%s" % fmt)
        mdev.save(mpath)
...
@@ -5,7 +5,7 @@ def run_jit(fapi, check):
    for target in ["llvm", "stackvm"]:
        if not tvm.codegen.enabled(target):
            continue
        f = tvm.codegen.build_module(fapi, target)
        s = f.get_source()
        check(f)
...
@@ -29,6 +29,13 @@ def test_ir():
    stmt = tvm.make.Evaluate(z)
    assert isinstance(stmt, tvm.stmt.Evaluate)

def test_ir2():
    x = tvm.var("n")
    a = tvm.var("array", tvm.handle)
    st = tvm.make.Store(a, x + 1, 1)
    assert isinstance(st, tvm.stmt.Store)
    assert(st.buffer_var == a)

def test_let():
    x = tvm.var('x')
    y = tvm.var('y')
...
@@ -86,8 +86,8 @@ def test_vectorize():
    xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
    s[T].vectorize(yi)
    s[T].unroll(xi)
    UNROLL = tvm.schedule.IterVar.Unrolled
    VECTORIZE = tvm.schedule.IterVar.Vectorized
    assert s[T].iter_var_attrs[xi].iter_type == UNROLL
    assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
...
@@ -36,7 +36,7 @@ def test_dso_module_load():
                     tvm.make.Load(dtype, Ab.data, i) + 1,
                     i + 1))
    fapi = tvm.ir_pass.MakeAPI(stmt, "ramp", [Ab], 0)
    m = tvm.codegen.build_module(fapi, "llvm")
    for name in names:
        m.save(name)
...
@@ -48,8 +48,8 @@ def test_add_pipeline():
        if not tvm.codegen.enabled(device):
            return
        ctx = tvm.vpi(0)
        mhost = tvm.codegen.build_module(fsplits[0], host)
        mdev = tvm.codegen.build_module(fsplits[1:], device)
        mhost.import_module(mdev)
        code = mdev.get_source()
        f = mhost.entry_func
...
Tutorials
=========
These tutorials are generated by sphinx-gallery.