Commit 836cf13a by Pariksheet Pinjari Committed by Tianqi Chen

Correction in documentation (#1810)

parent 9afde69b
...@@ -18,7 +18,7 @@ def test_matmul_add(): ...@@ -18,7 +18,7 @@ def test_matmul_add():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.cblas.matmul", True): if not tvm.get_global_func("tvm.contrib.cblas.matmul", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], target) f = tvm.build(s, [A, B, D, bias], target)
......
...@@ -16,7 +16,7 @@ def test_matmul_add(): ...@@ -16,7 +16,7 @@ def test_matmul_add():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.cublas.matmul", True): if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.gpu(0) ctx = tvm.gpu(0)
f = tvm.build(s, [A, B, C], target) f = tvm.build(s, [A, B, C], target)
......
...@@ -33,7 +33,7 @@ def test_matmul(): ...@@ -33,7 +33,7 @@ def test_matmul():
def verify(A, B, D, s, target="metal"): def verify(A, B, D, s, target="metal"):
if not tvm.get_global_func("tvm.contrib.mps.matmul", True): if not tvm.get_global_func("tvm.contrib.mps.matmul", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.metal(0) ctx = tvm.metal(0)
f = tvm.build(s, [A, B, D], "metal") f = tvm.build(s, [A, B, D], "metal")
...@@ -64,7 +64,7 @@ def test_conv2d(): ...@@ -64,7 +64,7 @@ def test_conv2d():
def verify(A, B, C, target="llvm"): def verify(A, B, C, target="llvm"):
if not tvm.get_global_func("tvm.contrib.mps.conv2d", True): if not tvm.get_global_func("tvm.contrib.mps.conv2d", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.metal(0) ctx = tvm.metal(0)
f = tvm.build(s1, [A, B, C], "metal") f = tvm.build(s1, [A, B, C], "metal")
......
...@@ -19,7 +19,7 @@ def test_fully_connected_output(): ...@@ -19,7 +19,7 @@ def test_fully_connected_output():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_output", True): if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_output", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], target) f = tvm.build(s, [A, B, D, bias], target)
...@@ -49,7 +49,7 @@ def test_fully_connected_inference(): ...@@ -49,7 +49,7 @@ def test_fully_connected_inference():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], target) f = tvm.build(s, [A, B, D, bias], target)
...@@ -128,7 +128,7 @@ def test_convolution_inference(): ...@@ -128,7 +128,7 @@ def test_convolution_inference():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [data, kernel, bias, output], target) f = tvm.build(s, [data, kernel, bias, output], target)
...@@ -173,7 +173,7 @@ def test_convolution_output(): ...@@ -173,7 +173,7 @@ def test_convolution_output():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [data, kernel, bias, output], target) f = tvm.build(s, [data, kernel, bias, output], target)
......
...@@ -13,7 +13,7 @@ def test_randint(): ...@@ -13,7 +13,7 @@ def test_randint():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.random.randint", True): if not tvm.get_global_func("tvm.contrib.random.randint", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A], target) f = tvm.build(s, [A], target)
...@@ -37,7 +37,7 @@ def test_uniform(): ...@@ -37,7 +37,7 @@ def test_uniform():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.random.uniform", True): if not tvm.get_global_func("tvm.contrib.random.uniform", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A], target) f = tvm.build(s, [A], target)
...@@ -61,7 +61,7 @@ def test_normal(): ...@@ -61,7 +61,7 @@ def test_normal():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.random.normal", True): if not tvm.get_global_func("tvm.contrib.random.normal", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.cpu(0) ctx = tvm.cpu(0)
f = tvm.build(s, [A], target) f = tvm.build(s, [A], target)
......
...@@ -16,7 +16,7 @@ def test_matmul_add(): ...@@ -16,7 +16,7 @@ def test_matmul_add():
print("skip because %s is not enabled..." % target) print("skip because %s is not enabled..." % target)
return return
if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True): if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
print("skip because extern function is not avalable") print("skip because extern function is not available")
return return
ctx = tvm.rocm(0) ctx = tvm.rocm(0)
f = tvm.build(s, [A, B, C], target) f = tvm.build(s, [A, B, C], target)
......
...@@ -64,7 +64,7 @@ inline tvm::Tensor relu(const tvm::Tensor& t, ...@@ -64,7 +64,7 @@ inline tvm::Tensor relu(const tvm::Tensor& t,
* \param name The name of the operation * \param name The name of the operation
* \param tag The tag to mark the operation * \param tag The tag to mark the operation
* *
* \return A Tensor whose op member is the relu operation * \return A Tensor whose op member is the leaky relu operation
*/ */
inline tvm::Tensor leaky_relu(const tvm::Tensor& t, inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
double alpha = 0.1, double alpha = 0.1,
...@@ -90,7 +90,7 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t, ...@@ -90,7 +90,7 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
* \param name The name of the operation * \param name The name of the operation
* \param tag The tag to mark the operation * \param tag The tag to mark the operation
* *
* \return A Tensor whose op member is the relu operation * \return A Tensor whose op member is the parametric relu operation
*/ */
inline tvm::Tensor prelu(const tvm::Tensor &x, inline tvm::Tensor prelu(const tvm::Tensor &x,
const tvm::Tensor &slope, const tvm::Tensor &slope,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment