Commit ad8733ea by Tianqi Chen Committed by GitHub

[DOCS][APP] Add Example for C++ deployment (#398)

* [DOCS][APP] Add Example for C++ deployment

* fix lint
parent 0eec95bf
# TVM Application Extensions
# TVM Application Extensions and Examples
This folder contains various extension projects using TVM,
they also serve as examples on how to use TVM in your own project.
......@@ -6,3 +6,7 @@ If you are interested in writing optimized kernels with TVM, checkout [TOPI: TVM
- [extension](extension) How to extend TVM C++ api along with python API.
- [graph_executor](graph_executor) Build nnvm graph executor with TVM.
- [ios_rpc](ios_rpc) iOS RPC server.
- [android_rpc](android_rpc) Android RPC server.
- [cpp_deploy](cpp_deploy) Example to deploy with C++ runtime.
# Minimal Makefile for the cpp_deploy example package.
TVM_ROOT=$(shell cd ../..; pwd)
NNVM_PATH=nnvm
DMLC_CORE=${TVM_ROOT}/dmlc-core

# Compiler flags: C++11, optimized, position-independent code,
# plus the TVM / dmlc-core / dlpack include paths.
# NOTE: no trailing backslash after the last include path — a stray
# continuation there would swallow the PKG_LDFLAGS line below.
PKG_CFLAGS = -std=c++11 -O2 -fPIC\
	-I${TVM_ROOT}/include\
	-I${DMLC_CORE}/include\
	-I${TVM_ROOT}/dlpack/include

# Link against the TVM runtime shared library.
PKG_LDFLAGS = -L${TVM_ROOT}/lib -ltvm_runtime

# Build the example binary from the C++ source plus the
# system-lib object produced by prepare_test_libs.py.
lib/cpp_deploy: cpp_deploy.cc lib/test_addone_sys.o
	@mkdir -p $(@D)
	$(CXX) $(PKG_CFLAGS) -o $@ $^ $(PKG_LDFLAGS)

# Generate the test libraries (dynamic .so and system-lib .o) via TVM.
lib/test_addone_sys.o: prepare_test_libs.py
	python prepare_test_libs.py
C++ Deployment API Example
==========================
This folder contains an example to demonstrate how to use TVM C++ Runtime
API to load and run modules built by TVM.
Type the following command to run the example in the current folder (you need to build TVM first).
```bash
./run_example.sh
```
/*!
* Copyright (c) 2017 by Contributors
 * \brief Example code on how to load and run TVM modules.
 * \file cpp_deploy.cc
*/
#include <cstdio>
#include <dlpack/dlpack.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/registry.h>
#include <tvm/runtime/packed_func.h>
/*!
 * \brief Load function `fname` from `mod`, run it on a small float32
 *        vector and check that it adds one to every element.
 * \param mod The TVM runtime module to look the function up in.
 * \param fname Name of the packed function inside the module.
 */
void Verify(tvm::runtime::Module mod, std::string fname) {
  // Get the function from the module.
  tvm::runtime::PackedFunc f = mod.GetFunction(fname);
  CHECK(f != nullptr);
  // Allocate the DLPack data structures.
  //
  // Note that we use TVM runtime API to allocate the DLTensor in this example.
  // TVM accept DLPack compatible DLTensors, so function can be invoked
  // as long as we pass correct pointer to DLTensor array.
  //
  // For more information please refer to dlpack.
  // One thing to notice is that DLPack contains alignment requirement for
  // the data pointer and TVM takes advantage of that.
  // If you plan to use your customized data container, please
  // make sure the DLTensor you pass in meet the alignment requirement.
  //
  DLTensor* x;
  DLTensor* y;
  int ndim = 1;
  int dtype_code = kFloat;
  int dtype_bits = 32;
  int dtype_lanes = 1;
  int device_type = kCPU;
  int device_id = 0;
  int64_t shape[1] = {10};
  // TVMArrayAlloc returns 0 on success; fail loudly otherwise.
  CHECK_EQ(TVMArrayAlloc(shape, ndim, dtype_code, dtype_bits, dtype_lanes,
                         device_type, device_id, &x), 0);
  CHECK_EQ(TVMArrayAlloc(shape, ndim, dtype_code, dtype_bits, dtype_lanes,
                         device_type, device_id, &y), 0);
  // Fill the input with 0, 1, ..., shape[0]-1.
  for (int i = 0; i < shape[0]; ++i) {
    static_cast<float*>(x->data)[i] = i;
  }
  // Invoke the function
  // PackedFunc is a function that can be invoked via positional argument.
  // The signature of the function is specified in tvm.build
  f(x, y);
  // Check the output: the module is expected to compute y = x + 1.
  for (int i = 0; i < shape[0]; ++i) {
    CHECK_EQ(static_cast<float*>(y->data)[i], i + 1.0f);
  }
  // Release the tensors allocated above (the original leaked them).
  TVMArrayFree(x);
  TVMArrayFree(y);
  LOG(INFO) << "Finish verification...";
}
int main(void) {
// Normally we can directly
tvm::runtime::Module mod_dylib =
tvm::runtime::Module::LoadFromFile("lib/test_addone_dll.so");
LOG(INFO) << "Verify dynamic loading from test_addone_dll.so";
Verify(mod_dylib, "addone");
// For libraries that are directly packed as system lib and linked together with the app
// We can directly use GetSystemLib to get the system wide library.
LOG(INFO) << "Verify load function from system lib";
tvm::runtime::Module mod_syslib = (*tvm::runtime::Registry::Get("module._GetSystemLib"))();
Verify(mod_syslib, "addonesys");
return 0;
}
"""Script to prepare test_addone.so"""
import tvm
import os
def prepare_test_libs(base_path):
    """Build the add-one test modules and write them under *base_path*.

    Produces two artifacts used by the C++ deployment example:
    - ``test_addone_dll.so``: a dynamically loadable shared library.
    - ``test_addone_sys.o``: an object file built with ``--system-lib``
      so it can be linked directly into the executable.
    """
    # Symbolic length so the kernel works for any 1-D input size.
    n = tvm.var("n")
    A = tvm.placeholder((n,), name='A')
    # Element-wise add-one computation: B[i] = A[i] + 1.0.
    B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
    s = tvm.create_schedule(B.op)
    # Same schedule built two ways: as a regular dylib and as a system lib.
    fadd_dylib = tvm.build(s, [A, B], "llvm", name="addone")
    fadd_syslib = tvm.build(s, [A, B], "llvm --system-lib", name="addonesys")
    dylib_path = os.path.join(base_path, "test_addone_dll.so")
    syslib_path = os.path.join(base_path, "test_addone_sys.o")
    fadd_dylib.export_library(dylib_path)
    fadd_syslib.save(syslib_path)

if __name__ == "__main__":
    # Emit the libraries into ./lib next to this script.
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    prepare_test_libs(os.path.join(curr_path, "./lib"))
#!/bin/bash
# Build the example and its test libraries, then run the resulting binary.
echo "Build the libraries.."
make
echo "Run the example"
# Make libtvm_runtime discoverable at load time
# (LD_LIBRARY_PATH for Linux, DYLD_LIBRARY_PATH for macOS).
export LD_LIBRARY_PATH=../../lib
export DYLD_LIBRARY_PATH=../../lib
lib/cpp_deploy
\ No newline at end of file
......@@ -20,7 +20,6 @@ Index
topi.min
topi.nn.relu
topi.nn.dilate
topi.nn.scale_shift
topi.nn.conv2d_nchw
topi.nn.conv2d_hwcn
topi.nn.depthwise_conv2d_nchw
......@@ -55,7 +54,6 @@ topi.nn
~~~~~~~
.. autofunction:: topi.nn.relu
.. autofunction:: topi.nn.dilate
.. autofunction:: topi.nn.scale_shift
.. autofunction:: topi.nn.conv2d_nchw
.. autofunction:: topi.nn.conv2d_hwcn
.. autofunction:: topi.nn.depthwise_conv2d_nchw
......
......@@ -39,7 +39,7 @@ class Module {
* \return The result function.
* This function will return PackedFunc(nullptr) if function do not exist.
*/
PackedFunc GetFunction(const std::string& name, bool query_imports);
PackedFunc GetFunction(const std::string& name, bool query_imports = false);
/*!
* \brief Import another module into this module.
* \param other The module to be imported.
......@@ -56,7 +56,7 @@ class Module {
* Re-create import relationship by calling Import.
*/
static Module LoadFromFile(const std::string& file_name,
const std::string& format);
const std::string& format = "");
/*! \return internal container */
inline ModuleNode* operator->();
/*! \return internal container */
......
......@@ -44,6 +44,7 @@ def comm_reduce(data, axis=None, keepdims=False, func=tvm.sum):
Parameters
----------
data : tvm.Tensor
The input data
axis : None or int or tuple of int
Axis or axes along which a sum is performed.
......@@ -120,7 +121,7 @@ def sum(data, axis=None, keepdims=False):
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
with size one.
With this option, the result will broadcast correctly against the input array.
Returns
......@@ -145,7 +146,7 @@ def max(data, axis=None, keepdims=False):
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
with size one.
With this option, the result will broadcast correctly against the input array.
Returns
......@@ -170,7 +171,7 @@ def min(data, axis=None, keepdims=False):
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
with size one.
With this option, the result will broadcast correctly against the input array.
Returns
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment