Unverified commit 78fa1d5e by ANSHUMAN TRIPATHY, committed by GitHub

Docs and Readme updated as per new namespace change (#4989)

parent 7eed17b9
@@ -27,16 +27,16 @@ We use two python scripts for this tutorial.
 - build.py - a script to synthesize FPGA bitstream.
 ```
 import tvm
+from tvm import te
 tgt_host="llvm"
 tgt="aocl_sw_emu"
-n = tvm.var("n")
-A = tvm.placeholder((n,), name='A')
-B = tvm.placeholder((n,), name='B')
-C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
+n = te.var("n")
+A = te.placeholder((n,), name='A')
+B = te.placeholder((n,), name='B')
+C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
-s = tvm.create_schedule(C.op)
+s = te.create_schedule(C.op)
 px, x = s[C].split(C.op.axis[0], nparts=1)
 s[C].bind(px, tvm.thread_axis("pipeline"))
@@ -27,16 +27,17 @@ We use two python scripts for this tutorial.
 - build.py - a script to synthesize FPGA bitstream.
 ```python
 import tvm
+from tvm import te
 tgt_host="llvm"
 tgt="sdaccel"
-n = tvm.var("n")
-A = tvm.placeholder((n,), name='A')
-B = tvm.placeholder((n,), name='B')
-C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
+n = te.var("n")
+A = te.placeholder((n,), name='A')
+B = te.placeholder((n,), name='B')
+C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
-s = tvm.create_schedule(C.op)
+s = te.create_schedule(C.op)
 px, x = s[C].split(C.op.axis[0], nparts=1)
 s[C].bind(px, tvm.thread_axis("pipeline"))
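Both hunks above make the same migration and differ only in the target string (`aocl_sw_emu` for the Intel FPGA emulation flow, `sdaccel` for the Xilinx one). For orientation, here is a minimal sketch of the post-change snippet assembled as a runnable whole; the final `tvm.build` call is an assumption based on the usual TVM vector-add flow rather than part of this diff, the FPGA targets need their vendor SDKs installed, and the sketch spells `thread_axis` under `te` (the post-change namespace) while the hunk's unchanged context line still shows `tvm.thread_axis`:

```python
import tvm
from tvm import te

tgt_host = "llvm"
tgt = "aocl_sw_emu"  # or "sdaccel", as in the second hunk

n = te.var("n")
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")

s = te.create_schedule(C.op)
# nparts=1 peels off a single-iteration outer loop, which is then bound to
# the FPGA "pipeline" axis to mark the kernel for pipelined execution.
px, x = s[C].split(C.op.axis[0], nparts=1)
s[C].bind(px, te.thread_axis("pipeline"))

# Assumed completion: synthesize the kernel for the chosen target.
fadd = tvm.build(s, [A, B, C], tgt, target_host=tgt_host, name="myadd")
```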
@@ -51,9 +51,9 @@ We use a simple example that uses the low level TVM API directly. The example is
 ::
    n = 1024
-   A = tvm.placeholder((n,), name='A')
-   B = tvm.placeholder((n,), name='B')
-   C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
+   A = tvm.te.placeholder((n,), name='A')
+   B = tvm.te.placeholder((n,), name='B')
+   C = tvm.te.compute(A.shape, lambda i: A[i] + B[i], name="C")
 Here, types of ``A``, ``B``, ``C`` are ``tvm.tensor.Tensor``, defined in ``python/tvm/te/tensor.py``. The Python ``Tensor`` is backed by C++ ``Tensor``, implemented in ``include/tvm/te/tensor.h`` and ``src/te/tensor.cc``. All Python types in TVM can be thought of as a handle to the underlying C++ type with the same name. If you look at the definition of Python ``Tensor`` type below, you can see it is a subclass of ``Object``.
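The handle relationship described in that paragraph is easy to poke at interactively; a minimal sketch (exact `repr` output varies by TVM version):

```python
import tvm
from tvm import te

n = 1024
A = te.placeholder((n,), name='A')

print(type(A))                            # the Python-side Tensor handle
print(isinstance(A, tvm.runtime.Object))  # True: Tensor subclasses Object
print(A.op)                               # the C++-backed PlaceholderOp behind A
```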
@@ -623,15 +623,16 @@ Above, we discussed the behavior of PassUpDomain on Split relations only. In the
 ::
    import tvm
+   from tvm import te
    n = 4
    m = 4
-   A = tvm.placeholder((n, m), name='A')
-   B = tvm.compute((n, m), lambda bi, bj: A[bi, bj]+2, name='B')
-   C = tvm.compute((n, m), lambda ci, cj: B[ci, cj]*3, name='C')
+   A = te.placeholder((n, m), name='A')
+   B = te.compute((n, m), lambda bi, bj: A[bi, bj]+2, name='B')
+   C = te.compute((n, m), lambda ci, cj: B[ci, cj]*3, name='C')
-   s = tvm.create_schedule(C.op)
+   s = te.create_schedule(C.op)
    fused_axes = s[C].fuse(C.op.axis[0], C.op.axis[1])
    xo, xi = s[C].split(fused_axes, 4)
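To see what bound inference (and hence `PassUpDomain`) produces for the fused-then-split iteration space, one can lower the schedule and read the loop extents off the printed IR; a minimal sketch using the standard `tvm.lower` entry point:

```python
import tvm
from tvm import te

n, m = 4, 4
A = te.placeholder((n, m), name='A')
B = te.compute((n, m), lambda bi, bj: A[bi, bj] + 2, name='B')
C = te.compute((n, m), lambda ci, cj: B[ci, cj] * 3, name='C')

s = te.create_schedule(C.op)
fused_axes = s[C].fuse(C.op.axis[0], C.op.axis[1])
xo, xi = s[C].split(fused_axes, 4)

# The bounds that PassUpDomain propagated back through the fuse/split
# relations show up as the loop extents in the lowered IR.
print(tvm.lower(s, [A, B, C], simple_mode=True))
```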
@@ -260,8 +260,9 @@ For example, in the following code, we accessed the op field of the TensorNode.
 .. code:: python
    import tvm
+   from tvm import te
-   x = tvm.placeholder((3,4), name="x")
+   x = te.placeholder((3,4), name="x")
    # access the op field of TensorNode
    print(x.op.name)
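Since `x` is a placeholder, the op behind it is a `PlaceholderOp` that carries the name given at construction; a short sketch of the neighboring `TensorNode` fields one can read the same way:

```python
import tvm
from tvm import te

x = te.placeholder((3, 4), name="x")

print(x.op.name)      # "x": the PlaceholderOp keeps the tensor's name
print(x.shape)        # [3, 4]
print(x.dtype)        # "float32", the placeholder default
print(x.value_index)  # 0: which output of x.op this tensor is
```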
@@ -64,8 +64,8 @@ The current parse interface looks like:
 .. code-block:: python
-   a = tvm.placeholder((100, ), name='a')
-   b = tvm.placeholder((99, ), name='b')
+   a = tvm.te.placeholder((100, ), name='a')
+   b = tvm.te.placeholder((99, ), name='b')
    parser = tvm.hybrid.parse(outer_product, [a, b]) # return the parser of this function
@@ -74,8 +74,8 @@ or ``tvm.container.Array``, to this function, it returns a op node:
 .. code-block:: python
-   a = tvm.placeholder((100, ), name='a')
-   b = tvm.placeholder((99, ), name='b')
+   a = tvm.te.placeholder((100, ), name='a')
+   b = tvm.te.placeholder((99, ), name='b')
    c = outer_product(a, b, c) # return the output tensor(s) of the operator
 You can use any methods that can be applied on a TVM ``OpNode``, like create_schedule, although
@@ -90,7 +90,7 @@ Follow up the example above, you can use some tvm like interfaces to tune the code:
 .. code-block:: python
    i, j = c.op.axis
-   sch = tvm.create_schedule(op)
+   sch = te.create_schedule(op)
    jo, ji = sch.split(j, 4)
    sch.vectorize(ji)
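Pulling the three hybrid-script hunks together, here is a self-contained sketch under the post-change namespaces. The `outer_product` body is hypothetical (its real definition appears earlier in that document), and the schedule calls are written in the stage form `sch[c]`, the standard TE spelling:

```python
import tvm
from tvm import te

# Hypothetical stand-in for the document's earlier outer_product definition;
# output_tensor is the hybrid-script intrinsic that allocates the result.
@te.hybrid.script
def outer_product(a, b):
    c = output_tensor((a.shape[0], b.shape[0]), 'float32')
    for i in range(a.shape[0]):
        for j in range(b.shape[0]):
            c[i, j] = a[i] * b[j]
    return c

a = te.placeholder((100,), name='a')
b = te.placeholder((99,), name='b')
c = outer_product(a, b)      # returns the output tensor of the operator

i, j = c.op.axis
sch = te.create_schedule(c.op)
jo, ji = sch[c].split(j, 4)  # stage form of the doc's sch.split(j, 4)
sch[c].vectorize(ji)
```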
@@ -96,14 +96,15 @@ There's nothing special for this part. The following Python snippet generate add
 ```python
 import os
 import tvm
+from tvm import te
 from tvm.contrib import cc, util
 def test_add(target_dir):
-    n = tvm.var("n")
-    A = tvm.placeholder((n,), name='A')
-    B = tvm.placeholder((n,), name='B')
-    C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
-    s = tvm.create_schedule(C.op)
+    n = te.var("n")
+    A = te.placeholder((n,), name='A')
+    B = te.placeholder((n,), name='B')
+    C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
+    s = te.create_schedule(C.op)
     fadd = tvm.build(s, [A, B, C], "llvm", target_host="llvm", name="myadd")
     fadd.save(os.path.join(target_dir, "add_cpu.o"))
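The hunk stops right after `fadd.save`; presumably the snippet continues by linking the object file into a shared library with the `tvm.contrib.cc` helper imported at the top. A sketch of that assumed continuation (not part of this diff):

```python
import os
from tvm.contrib import cc

def link_shared(target_dir):
    # Link the saved object into a .so the JVM-side TVM runtime can dlopen.
    cc.create_shared(os.path.join(target_dir, "add_cpu.so"),
                     [os.path.join(target_dir, "add_cpu.o")])
```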
@@ -122,17 +122,18 @@ One can use the following Python snippet to generate `add_gpu.so` which add two
 ```python
 import os
 import tvm
+from tvm import te
 from tvm.contrib import cc
 def test_add(target_dir):
     if not tvm.runtime.enabled("cuda"):
         print("skip {__file__} because cuda is not enabled...".format(__file__=__file__))
         return
-    n = tvm.var("n")
-    A = tvm.placeholder((n,), name='A')
-    B = tvm.placeholder((n,), name='B')
-    C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
-    s = tvm.create_schedule(C.op)
+    n = te.var("n")
+    A = te.placeholder((n,), name='A')
+    B = te.placeholder((n,), name='B')
+    C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
+    s = te.create_schedule(C.op)
     bx, tx = s[C].split(C.op.axis[0], factor=64)
     s[C].bind(bx, tvm.thread_axis("blockIdx.x"))
     s[C].bind(tx, tvm.thread_axis("threadIdx.x"))
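This hunk likewise ends before the build step. A sketch of the assumed completion, mirroring the CPU snippet; with a CUDA target the module splits into a host part and an imported device module, which is saved separately as PTX:

```python
# Assumed continuation inside test_add, after the thread bindings above.
fadd_cuda = tvm.build(s, [A, B, C], "cuda", target_host="llvm", name="myadd")
fadd_cuda.save(os.path.join(target_dir, "add_gpu.o"))
# imported_modules[0] holds the generated device code.
fadd_cuda.imported_modules[0].save(os.path.join(target_dir, "add_gpu.ptx"))
```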
@@ -87,16 +87,17 @@ the compilation process.
 ```python
 import tvm
+from tvm import te
 from tvm.contrib import emscripten
 import os
 def prepare_test_libs(base_path):
     target = "llvm -target=asmjs-unknown-emscripten -system-lib"
     if not tvm.runtime.enabled(target):
         raise RuntimeError("Target %s is not enabled" % target)
-    n = tvm.var("n")
-    A = tvm.placeholder((n,), name='A')
-    B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
-    s = tvm.create_schedule(B.op)
+    n = te.var("n")
+    A = te.placeholder((n,), name='A')
+    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
+    s = te.create_schedule(B.op)
     fadd1 = tvm.build(s, [A, B], target, name="add_one")
     obj_path = os.path.join(base_path, "test_add_one.bc")
     fadd1.save(obj_path)
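After saving the bitcode, the snippet presumably hands `obj_path` to the `emscripten` helper imported at the top to produce JavaScript; the exact call below is an assumption about `tvm.contrib.emscripten`, not something shown in the diff:

```python
# Assumed continuation of prepare_test_libs: compile the saved LLVM
# bitcode into asm.js with the emscripten toolchain.
emscripten.create_js(os.path.join(base_path, "test_module.js"), [obj_path])
```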