Commit 6ab6bb3f by Tianqi Chen Committed by GitHub

[PYTHON] addon->contrib add docs (#107)

parent 535a97c9
Contrib APIs
------------
.. automodule:: tvm.contrib
tvm.contrib.nvcc_compiler
~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.nvcc_compiler
:members:
tvm.contrib.cc_compiler
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.cc_compiler
:members:
tvm.contrib.util
~~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.util
:members:
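For orientation, a minimal sketch of how these contrib modules are typically combined; it relies only on the tempdir/relpath and create_shared helpers documented here, and the file names are illustrative:

.. code-block:: python

   from tvm.contrib import cc_compiler as cc, util

   temp = util.tempdir()               # scratch directory, cleaned up automatically
   obj_path = temp.relpath("myadd.o")  # path inside the scratch directory
   # link object files into a shared library with the system C++ compiler
   cc.create_shared(temp.relpath("myadd.so"), [obj_path])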
......@@ -13,4 +13,5 @@ Python API
ndarray
collection
function
contrib
dev
......@@ -16,6 +16,7 @@ The user facing API for computation declaration.
tvm.decl_buffer
tvm.reduce_axis
tvm.thread_axis
tvm.comm_reducer
tvm.sum
tvm.min
tvm.max
......@@ -32,6 +33,7 @@ The user facing API for computation declaration.
.. autofunction:: tvm.decl_buffer
.. autofunction:: tvm.reduce_axis
.. autofunction:: tvm.thread_axis
.. autofunction:: tvm.comm_reducer
.. autofunction:: tvm.sum
.. autofunction:: tvm.min
.. autofunction:: tvm.max
"""Addon utilities to the TVM python package.
These features are useful to have but not essential to TVM.
"""
......@@ -437,15 +437,17 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
Returns
-------
reducer : function
A function which creates a reduce expression over axis.
There are two ways to use it:
1. accept (expr, axis, where) to produce a Reduce Expr on
   specified axis;
2. simply use it with multiple Exprs.
Example
-------
.. code-block:: python
n = tvm.var('n')
m = tvm.var('m')
mysum = tvm.comm_reducer(lambda x, y: x+y,
......@@ -480,15 +482,15 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
def reducer(expr, axis, where=None, *args):
    if isinstance(axis, (_schedule.IterVar, list)):
        assert not args
        return _make_reduce(expr, axis, where)
    if where is None:
        assert not args
        return _reduce_directly(expr, axis)
    return _reduce_directly(expr, axis, where, *args)
doc_str = """Create a {0} expression over axis.
Parameters
----------
expr : Expr
......@@ -505,6 +507,7 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
Example
-------
.. code-block:: python
m = tvm.var("m")
n = tvm.var("n")
A = tvm.placeholder((m, n), name="A")
......
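Putting the pieces above together, a hedged end-to-end sketch of defining and applying a custom reducer; the tensor and axis names are illustrative:

import tvm

n = tvm.var('n')
m = tvm.var('m')
# fcombine and fidentity define the commutative reduction
mysum = tvm.comm_reducer(lambda x, y: x + y,
                         lambda t: tvm.const(0, dtype=t),
                         name="mysum")
A = tvm.placeholder((n, m), name='A')
k = tvm.reduce_axis((0, m), name='k')
# usage 1: reduce over a specified axis
B = tvm.compute((n,), lambda i: mysum(A[i, k], axis=k), name='B')
# usage 2: combine plain expressions directly
x, y = tvm.var('x'), tvm.var('y')
z = mysum(x, y)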
......@@ -81,7 +81,7 @@ def lower(sch,
def build(sch,
args=None,
target="llvm",
target_host=None,
name="default_function",
binds=None,
max_auto_unroll_step=8,
......@@ -101,6 +101,12 @@ def build(sch,
target_host : str, optional
Host compilation target, if the target is a device.
When TVM compiles a device specific program such as CUDA,
we also need the host (CPU) side code to interact with the driver
to set up the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled;
otherwise a stackvm interpreter is used.
name : str, optional
The name of the result function.
......@@ -142,6 +148,8 @@ def build(sch,
fsplits = [s for s in ir_pass.SplitHostDevice(fapi)]
if len(fsplits) > 1:
    if not target_host:
        target_host = "llvm" if codegen.enabled("llvm") else "stackvm"
    mhost = codegen.build_module(fsplits[0], target_host)
    if target:
        mdev = codegen.build_module(fsplits[1:], target)
......
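To illustrate the new default, a hedged sketch of two build calls; the schedule s and tensors A, B, C are assumed to be defined as in the tests below:

# target_host left unset: llvm is picked if it is enabled, otherwise stackvm
fadd = tvm.build(s, [A, B, C], target="cuda", name="myadd")

# an explicit host-side target can still be requested
fadd2 = tvm.build(s, [A, B, C], target="cuda",
                  target_host="llvm", name="myadd")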
"""Contrib APIs of TVM python package.
Contrib APIs provide many useful features that are not part of the core.
Some of these are utilities to interact with
third-party libraries and tools.
"""
"""Util to invoke C++ compilers in the system."""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
import sys
......
# pylint: disable=invalid-name
"""Utility to invoke the nvcc compiler in the system."""
from __future__ import absolute_import as _abs
import os
import sys
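For context, a hedged sketch of how this module is commonly hooked into CUDA code generation; the compile_source helper and its exact signature are assumptions, based on the tests below that import nvcc_compiler:

import tvm
from tvm.contrib import nvcc_compiler

@tvm.register_func("tvm_callback_cuda_compile")
def tvm_callback_cuda_compile(code):
    # compile the generated CUDA source down to PTX for tvm.build to load
    return nvcc_compiler.compile_source(code, target="ptx")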
......
"""Common system utilities."""
from __future__ import absolute_import as _abs
import os
import tempfile
import shutil
class TempDirectory(object):
"""Helper object to manage a temp directory during testing.
Automatically removes the directory when it goes out of scope.
"""
def __init__(self):
self.temp_dir = tempfile.mkdtemp()
......@@ -19,15 +22,26 @@ class TempDirectory(object):
----------
name : str
The name of the file.
Returns
-------
path : str
The concatenated path.
"""
return os.path.join(self.temp_dir, name)
def listdir(self):
"""List contents in the directory.
Returns
-------
names : list
The contents of the directory.
"""
return os.listdir(self.temp_dir)
def tempdir():
"""Create a temp dir which deletes its contents on exit.
Returns
-------
......
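A minimal usage sketch of the utilities documented above; the file name is illustrative:

from tvm.contrib import util

temp = util.tempdir()              # removed when temp goes out of scope
path = temp.relpath("mylib.so")    # path inside the temp directory
print(temp.listdir())              # list the directory contents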
......@@ -10,7 +10,7 @@ from .. import _api_internal
from .._ffi.base import string_types
from .._ffi.node import NodeBase, register_node
from .._ffi.function import register_func
from . import util
@register_node
class VPISession(NodeBase):
......@@ -201,8 +201,7 @@ def session(file_names, codes=None):
"""
if isinstance(file_names, string_types):
file_names = [file_names]
path = util.tempdir()
if codes:
if isinstance(codes, (list, tuple)):
......
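A hedged sketch of starting a session; the testbench path is illustrative, and running it requires the Verilog simulator that verilog.session drives:

from tvm.contrib import verilog

# session() accepts a single file name or a list of file names
sess = verilog.session("test_counter.v")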
......@@ -79,15 +79,12 @@ def test_add():
s[C].vectorize(x)
# one line to build the function.
def check_device(device):
    if not tvm.codegen.enabled(device):
        print("skip because %s is not enabled.." % device)
        return
    fadd = tvm.build(s, [A, B, C],
                     device,
                     name="myadd")
    ctx = tvm.gpu(0) if device == "cuda" else tvm.cl(0)
    # launch the kernel.
......@@ -101,7 +98,7 @@ def test_add():
if tvm.module.enabled("opencl"):
tvm.module.init_opencl()
check_device("cuda")
check_device("opencl")
......
import tvm
from tvm.contrib import nvcc_compiler
import numpy as np
def test_gemm():
......
import tvm
import os
from tvm.contrib import nvcc_compiler
import numpy as np
TASK="gemm"
......
......@@ -11,7 +11,7 @@ import tvm
import time
import os
import argparse
from tvm.contrib import nvcc_compiler
import numpy as np
# Quick knobs
......
import tvm
from tvm.contrib import util
import numpy as np
def test_add_pipeline():
......@@ -57,7 +57,7 @@ def test_add_pipeline():
fmt = "ptx" if device == "cuda" else "cl"
mhost = tvm.codegen.build_module(fsplits[0], host)
mdev = tvm.codegen.build_module(fsplits[1:], device)
temp = util.tempdir()
mpath = temp.relpath("test.%s" % fmt)
mdev.save(mpath)
mdev2 = tvm.module.load(mpath)
......
......@@ -5,15 +5,16 @@ def test_add_pipeline():
nn = 1024
n = tvm.convert(nn)
A = tvm.placeholder((n,), name='A')
def extern_generator(ins, outs):
    """Manually write the IR for the extern function, add pipeline"""
    ib = tvm.ir_builder.create()
    dout = ib.buffer_ptr(outs[0])
    din = ib.buffer_ptr(ins[0])
    with ib.for_range(0, n) as i:
        dout[i] = din[i] + 1
    return ib.get()
C = tvm.extern(A.shape, [A], extern_generator, name='C')
s = tvm.create_schedule(C.op)
......
import tvm
from tvm.contrib import cc_compiler as cc, util
import os
import numpy as np
import subprocess
......@@ -25,7 +25,7 @@ def test_dso_module_load():
if not tvm.codegen.enabled("llvm"):
return
dtype = 'int64'
temp = util.tempdir()
def save_object(names):
n = tvm.var('n')
......
import tvm
from tvm.contrib import verilog
import numpy as np
def lower(s, args, name):
......
import tvm
import numpy as np
from tvm.contrib import verilog
def test_buffer_doublebuff():
# Test the tvm_buffer.v module as a double buffer
......
import tvm
import numpy as np
from tvm.contrib import verilog
def test_buffer_fifo():
# Test the tvm_buffer.v module as a fifo
......
import tvm
import numpy as np
from tvm.contrib import verilog
def test_buffer_linebuff():
# Test the tvm_buffer.v module as a line buffer
......
import tvm
from tvm.contrib import verilog
from testing_util import FIFODelayedWriter, FIFODelayedReader
def run_with_lag(n, read_lag, write_lag):
......
import tvm
from tvm.contrib import verilog
def test_counter():
# Start a new session by run simulation on test_counter.v
......
import tvm
from tvm.contrib import verilog
def test_loop():
sess = verilog.session([
......
import tvm
import numpy as np
from tvm.contrib import verilog
class FIFOReader(object):
"""Auxiliary class to read from FIFO """
......
import tvm
import numpy as np
from tvm.contrib import verilog
def test_mmap():
n = 10
......
......@@ -175,10 +175,10 @@ print(dev_module.get_source())
# - Then it saves the device module into a ptx file.
# - cc.create_shared calls an env compiler (gcc) to create a shared library
#
from tvm.contrib import cc_compiler as cc
from tvm.contrib import util
temp = util.tempdir()
fadd_cuda.save(temp.relpath("myadd.o"))
fadd_cuda.imported_modules[0].save(temp.relpath("myadd.ptx"))
cc.create_shared(temp.relpath("myadd.so"), [temp.relpath("myadd.o")])
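As a hedged sketch, not necessarily the tutorial's next step, the saved pieces could be loaded back and recombined roughly as follows; the import_module call is an assumption:

# load the host shared library and the saved device PTX back
fadd_loaded = tvm.module.load(temp.relpath("myadd.so"))
fadd_dev = tvm.module.load(temp.relpath("myadd.ptx"))
# re-attach the device code to the host module
fadd_loaded.import_module(fadd_dev)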
......