Commit 40901446 by Tianqi Chen

[RUNTIME] Remove runtime, rely on tvm only (#38)

parent 3a5b9fcb
@@ -53,10 +53,10 @@ else
NO_WHOLE_ARCH= --no-whole-archive
endif

-all: lib/libnnvm.a lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX) lib/libnnvm_top_runtime.$(SHARED_LIBRARY_SUFFIX)
+all: lib/libnnvm.a lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX)

SRC = $(wildcard src/*.cc src/c_api/*.cc src/core/*.cc src/pass/*.cc)
-SRC_TOP = $(wildcard src/top/*/*.cc src/runtime/*.cc src/compiler/*.cc src/compiler/*/*.cc)
+SRC_TOP = $(wildcard src/top/*/*.cc src/compiler/*.cc src/compiler/*/*.cc)
ALL_OBJ = $(patsubst %.cc, build/%.o, $(SRC))
TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_TOP))
ALL_DEP = $(ALL_OBJ)
@@ -78,10 +78,6 @@ lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}

-lib/libnnvm_top_runtime.$(SHARED_LIBRARY_SUFFIX): deploy/nnvm_runtime.cc
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.cc, $^) $(LDFLAGS)

cython:
	cd python; python setup.py build_ext --inplace
...
All in One Deployment File
==========================
This folder contains an all-in-one deployment file that bundles the minimum
dependencies needed to run the nnvm top runtime.
\ No newline at end of file
/*!
* Copyright (c) 2017 by Contributors
 * All-in-one runtime
* \file nnvm_runtime.cc
*/
/*
#include "../src/core/graph.cc"
#include "../src/core/node.cc"
#include "../src/core/pass.cc"
#include "../src/core/op.cc"
#include "../src/pass/saveload_json.cc"
#include "../src/runtime/graph_executor.cc"*/
#include "../src/runtime/graph_runtime.cc"
@@ -174,6 +174,10 @@ class Graph(object):
        check_call(_LIB.NNGraphGetSymbol(self.handle, ctypes.byref(shandle)))
        return Symbol(shandle)

+    def _tvm_graph_json(self):
+        """Get TVM graph json"""
+        return self.apply("SaveJSON").json_attr("json")
+
    @property
    def index(self):
        if not self._index:
...
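The `_tvm_graph_json` helper added above is shorthand for the SaveJSON pass; the runtime `create` function shown later in this commit performs the same conversion when handed a `Graph` instead of a string. A minimal sketch of the equivalence, assuming `g` is an `nnvm.graph.Graph`:

```python
# Sketch only: `g` is assumed to be an nnvm Graph.
json_str = g.apply("SaveJSON").json_attr("json")  # what create() does internally
assert json_str == g._tvm_graph_json()            # the new shorthand
```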
"""Runtime environment for nnvm relies on TVM."""
import tvm
from tvm.contrib import rpc
class Module(object):
"""Wrapper runtime module.
This is a thin wrapper around the underlying TVM module;
you can also directly call set_input, run, and get_output
on the underlying module functions.
Parameters
----------
tvm_module : tvm.Module
The internal TVM module.
"""
def __init__(self, tvm_module):
self.tvm_module = tvm_module
self._set_input = tvm_module["set_input"]
self._run = tvm_module["run"]
self._get_output = tvm_module["get_output"]
def set_input(self, key=None, value=None, **params):
"""Set inputs to the module via kwargs
Parameters
----------
key : int or str
    The input key.
value : the input value
    The input value.
params : dict of str to NDArray
    Additional named arguments.
"""
if key:
self._set_input(key, tvm.nd.array(value))
for k, v in params.items():
self._set_input(k, tvm.nd.array(v))
return self
def run(self, **input_dict):
"""Run forward execution of the graph
Parameters
----------
input_dict : dict of str to NDArray
    Input values to be fed to the module before running.
"""
if input_dict:
self.set_input(**input_dict)
self._run()
def get_output(self, index, out):
"""Get index-th output to out
Parameters
----------
index : int
The output index.
out : tvm.NDArray
The output array container
"""
self._get_output(index, out)
return out
def __getitem__(self, key):
"""Get internal module function
Parameters
----------
key : str
The key to the module.
"""
return self.tvm_module[key]
def create(graph, libmod, ctx):
"""Create a runtime executor module given the graph and module.
Parameters
----------
graph : Graph or str
    The graph to be deployed, either as a Graph or a serialized JSON string.
libmod : tvm.Module
The module of the corresponding function
ctx : TVMContext
The context to deploy the module on; it can be local or remote.
Returns
-------
graph_module : tvm.Module
Runtime graph module to execute the graph.
"""
json_str = graph if isinstance(graph, str) else graph.apply("SaveJSON").json_attr("json")
device_type = ctx.device_type
device_id = ctx.device_id
if device_type >= rpc.RPC_SESS_MASK:
assert libmod.type_key == "rpc"
assert rpc._SessTableIndex(libmod) == ctx._rpc_sess._tbl_index
hmod = rpc._ModuleHandle(libmod)
fcreate = ctx._rpc_sess.get_function("nnvm.runtime.remote_create")
device_type = device_type % rpc.RPC_SESS_MASK
return Module(fcreate(json_str, hmod, device_type, device_id))
fcreate = tvm.get_global_func("nnvm.runtime.create")
return Module(fcreate(json_str, libmod, device_type, device_id))
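This whole module is what the commit deletes: the same functionality now lives in `tvm.contrib.graph_runtime`. A minimal migration sketch, assuming `graph` and `lib` come from `nnvm.compiler.build` and `x_np` is a NumPy input (the names are illustrative):

```python
import tvm
from tvm.contrib import graph_runtime  # replaces nnvm.runtime

# Assumed to exist: graph, lib from nnvm.compiler.build(...), x_np a NumPy array.
m = graph_runtime.create(graph, lib, tvm.cpu(0))  # was: nnvm.runtime.create(...)
m.set_input(x=x_np)                               # kwargs map input names to arrays
m.run()
out = m.get_output(0, tvm.nd.empty(x_np.shape, "float32"))
```

The wrapper API (`set_input`, `run`, `get_output`) is unchanged; only the import path moves, as the test updates below show.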
@@ -14,11 +14,31 @@
#include <tvm/runtime/packed_func.h>
#include <tvm/lowered_func.h>
#include "./compile_engine.h"
-#include "../runtime/graph_executor.h"
+#include "../../tvm/src/runtime/graph/graph_runtime.h"

namespace nnvm {
namespace compiler {

+using tvm::runtime::TVMOpParam;
+
+// parser
+inline void TVMOpParamParser(nnvm::NodeAttrs* attrs) {
+  TVMOpParam param;
+  param.Init(attrs->dict);
+  attrs->parsed = std::move(param);
+}
+
+NNVM_REGISTER_OP(tvm_op)
+.set_attr_parser(TVMOpParamParser)
+.set_num_inputs([](const NodeAttrs& attrs) {
+    const TVMOpParam& param = nnvm::get<TVMOpParam>(attrs.parsed);
+    return param.num_inputs;
+  })
+.set_num_outputs([](const NodeAttrs& attrs) {
+    const TVMOpParam& param = nnvm::get<TVMOpParam>(attrs.parsed);
+    return param.num_outputs;
+  });
+
using namespace tvm;
// The single fuse rule.
...
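With this registration, the fused graph carries generic `tvm_op` nodes whose arity is read from their attributes rather than a fixed operator schema: `TVMOpParamParser` parses each node's string attribute dict into the `TVMOpParam` defined in `graph_runtime.h` below. A hypothetical attribute dict for one such node (values arrive as strings from the graph JSON; the function name is illustrative):

```python
# Hypothetical attrs dict for a single tvm_op node, as the parser sees it.
attrs = {
    "func_name": "fuse_elemwise_add",  # symbol looked up in the compiled tvm.Module
    "num_inputs": "2",                 # becomes TVMOpParam.num_inputs
    "num_outputs": "1",                # becomes TVMOpParam.num_outputs
    "flatten_data": "0",               # nonzero would flatten tensors to 1-D for the call
}
```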
/*!
* Copyright (c) 2017 by Contributors
*
* Runtime module for graph deployment.
*
* \file graph_executor.h
*/
#ifndef NNVM_RUNTIME_GRAPH_EXECUTOR_H_
#define NNVM_RUNTIME_GRAPH_EXECUTOR_H_
#include <dmlc/io.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/module.h>
#include <nnvm/graph.h>
#include <nnvm/graph_attr_types.h>
#include <nnvm/tuple.h>
#include <nnvm/pass.h>
#include <vector>
#include <string>
#include "./graph_runtime.h"
namespace nnvm {
namespace runtime {
/*!
* \brief TVM Graph Executor.
 *  This is a minimal graph executor, embedded in the TVM runtime
 *  without any framework dependency.
 *
 *  This runtime can be accessed in various languages via the
 *  TVM runtime PackedFunc API.
*/
class GraphExecutor : public ::tvm::runtime::ModuleNode {
public:
/*!
* \return The type key of the executor.
*/
const char* type_key() const final {
return "GraphExecutor";
}
/*!
* \brief Get member function to front-end
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
tvm::runtime::PackedFunc GetFunction(
const std::string& name,
const std::shared_ptr<ModuleNode>& sptr_to_self) final;
/*! \brief destructor */
~GraphExecutor();
/*!
* \brief Initialize the graph executor with graph and context.
* \param graph The execution graph.
* \param module The module containing the compiled functions.
 * \param ctx The context on which the graph should run.
*/
void Init(Graph graph,
tvm::runtime::Module module,
TVMContext ctx);
/*!
* \brief Get the input index given the name of input.
* \param name The name of the input.
* \return The index of input.
*/
int GetInputIndex(const std::string& name);
/*!
* \brief set index-th input to the graph.
* \param index The input index.
* \param data The input data.
*/
void SetInput(int index, DLTensor* data);
/*!
* \brief Copy index-th output to data_out.
* \param index The output index.
* \param data_out the output data.
*/
void GetOutput(int index, DLTensor* data_out);
/*!
* \brief Load parameters from binary stream
* \param strm The input stream.
*/
void LoadParams(dmlc::Stream* strm);
/*!
* \brief Load parameters from parameter blob.
* \param param_blob A binary blob of parameter.
*/
void LoadParams(const std::string& param_blob);
/*!
* \brief Execute the graph, update output.
*/
void Run();
private:
/*! \brief Set up the temporary storage. */
void SetupStorage();
/*! \brief Set up the operator executors. */
void SetupOpExecs();
/*!
 * \brief Create an execution function given input.
* \param attrs The node attributes
* \param args The arguments to the functor, including inputs and outputs.
* \param num_inputs Number of inputs
* \return The created executor.
*/
std::function<void()> CreateTVMOp(const NodeAttrs& attrs,
const std::vector<DLTensor>& args,
size_t num_inputs);
/*! \brief The graph */
Graph graph_;
/*! \brief The code module */
tvm::runtime::Module module_;
/*! \brief execution context */
TVMContext ctx_;
/*! \brief common storage pool */
std::vector<DLTensor*> storage_pool_;
/*! \brief data shape of each node entry */
std::vector<TShape> data_shape_;
/*! \brief data entry of each node */
std::vector<DLTensor> data_entry_;
/*! \brief operator on each node */
std::vector<std::function<void()> > op_execs_;
};
} // namespace runtime
} // namespace nnvm
#endif // NNVM_RUNTIME_GRAPH_EXECUTOR_H_
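Because `GraphExecutor` is a `tvm::runtime::ModuleNode`, everything it exposes is surfaced through `GetFunction` as PackedFuncs keyed by name, which is why the Python wrapper fetches members with string keys rather than calling ordinary methods. A short sketch of that access pattern, assuming `m` was created as in the tests below and `x_np` is a NumPy array:

```python
# PackedFunc access by string key, mirroring GetFunction on the C++ side.
set_input = m["set_input"]          # a tvm PackedFunc, not a Python method
run = m["run"]
get_output = m["get_output"]
set_input("x", tvm.nd.array(x_np))  # "x" is an illustrative input name
run()
```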
/*!
* Copyright (c) 2017 by Contributors
*
* Runtime module for graph deployment.
*
 * \file graph_runtime.h
*/
#ifndef NNVM_RUNTIME_GRAPH_RUNTIME_H_
#define NNVM_RUNTIME_GRAPH_RUNTIME_H_
#include <dmlc/parameter.h>
#include <string>
namespace nnvm {
namespace runtime {
/*! \brief Magic number for NDArray file */
constexpr uint64_t kTVMNDArrayMagic = 0xDD5E40F096B4A13F;
/*! \brief Magic number for NDArray list file */
constexpr uint64_t kTVMNDArrayListMagic = 0xF7E58D4F05049CB7;
/*! \brief Operator attributes of a tvm op */
struct TVMOpParam : public dmlc::Parameter<TVMOpParam> {
std::string func_name;
uint32_t num_inputs;
uint32_t num_outputs;
uint32_t flatten_data;
DMLC_DECLARE_PARAMETER(TVMOpParam) {
DMLC_DECLARE_FIELD(func_name);
DMLC_DECLARE_FIELD(num_inputs).set_default(1);
DMLC_DECLARE_FIELD(num_outputs).set_default(1);
DMLC_DECLARE_FIELD(flatten_data).set_default(0);
}
};
} // namespace runtime
} // namespace nnvm
#endif // NNVM_RUNTIME_GRAPH_RUNTIME_H_
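The two magic numbers above tag serialized tensors and parameter lists so that `LoadParams` can validate a blob before decoding it. A minimal sketch of such a check, assuming the blob begins with the little-endian `uint64` list magic (the layout beyond the first word is not defined in this header):

```python
import struct

K_TVM_ND_ARRAY_LIST_MAGIC = 0xF7E58D4F05049CB7  # kTVMNDArrayListMagic above

def looks_like_param_blob(blob: bytes) -> bool:
    """Heuristic: check the leading magic word of a parameter blob."""
    if len(blob) < 8:
        return False
    (magic,) = struct.unpack("<Q", blob[:8])
    return magic == K_TVM_ND_ARRAY_LIST_MAGIC
```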
import numpy as np
import tvm
+from tvm.contrib import graph_runtime
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
from nnvm.compiler.build_module import _run_graph, precompute_prune

def test_compile():
@@ -14,7 +14,7 @@ def test_compile():
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    def verify(graph, lib):
-        m = nnvm.runtime.create(graph, lib, tvm.cpu(0))
+        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # get member functions
        set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
@@ -67,7 +67,7 @@ def test_precompute_prune():
    graph, lib, params = nnvm.compiler.build(
        z, "llvm", shape={"y": ny.shape}, params=params)
    assert graph.index.num_nodes == 4
-    m = nnvm.runtime.create(graph, lib, tvm.cpu(0))
+    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    params["y"] = ny
    res = tvm.nd.empty(shape)
    m.run(**params)
...
import numpy as np
import tvm
+from tvm.contrib import graph_runtime
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime

def test_compile_cache():
    x = sym.Variable("x")
@@ -12,7 +12,7 @@ def test_compile_cache():
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    def verify(graph, lib):
-        m = nnvm.runtime.create(graph, lib, tvm.cpu(0))
+        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # get member functions
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        nb = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
...
@@ -2,6 +2,7 @@ import nnvm
import numpy as np
import tvm
import topi
+from tvm.contrib import graph_runtime
from nnvm import symbol as sym
from nnvm.compiler import graph_util, graph_attr
from nnvm.testing import ctx_list
@@ -17,7 +18,7 @@ def test_ewise_injective():
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        assert graph.index.num_nodes == 2
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        x_np = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=x_np)
        out = m.get_output(0, tvm.nd.empty((10, 6)))
@@ -39,7 +40,7 @@ def test_conv_ewise_injective():
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        # print(graph.ir(join_entry_attrs=["shape"]))
        assert graph.index.num_nodes == 5
        # set input
@@ -66,7 +67,7 @@ def test_injective_reduce_injective():
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        assert graph.index.num_nodes == 2
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
...
-from tvm.contrib import util, rpc
import tvm
+from tvm.contrib import util, rpc, graph_runtime
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
import numpy as np

def test_rpc_executor():
@@ -29,7 +28,7 @@ def test_rpc_executor():
    rlib = remote.load_module("net.o")
    # Create remote module
-    m = nnvm.runtime.create(graph, rlib, remote.cpu(0))
+    m = graph_runtime.create(graph, rlib, remote.cpu(0))
    # get member functions
    set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
    na = tvm.nd.array(np.ones(shape).astype(dtype), ctx)
...
import numpy as np
import tvm
+from tvm.contrib import graph_runtime
import topi
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
from nnvm.testing.config import ctx_list

def test_relu():
@@ -15,7 +15,7 @@ def test_relu():
    oshape = dshape
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        data = (data < 0) * data * 0.3 + (data>0) * data - 0.2
@@ -32,7 +32,7 @@ def test_exp():
    oshape = dshape
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -49,7 +49,7 @@ def test_log():
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=1):
            graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -66,7 +66,7 @@ def test_tanh():
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=1):
            graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -82,7 +82,7 @@ def test_sigmoid():
    oshape = dshape
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -99,7 +99,7 @@ def test_softmax():
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=1):
            graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -116,7 +116,7 @@ def test_log_softmax():
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=1):
            graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -136,7 +136,7 @@ def test_dense():
    }
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        x_np = np.random.uniform(size=shape["x"]).astype(dtype)
        w_np = np.random.uniform(size=shape["dense_weight"]).astype(dtype)
        b_np = np.random.uniform(size=shape["dense_bias"]).astype(dtype)
@@ -162,7 +162,7 @@ def test_batchnorm():
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, "llvm", {"x": shape})
-        m = nnvm.runtime.create(graph, lib, tvm.cpu(0))
+        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        x_np = np.random.uniform(size=shape).astype(dtype)
        mean_np = np.random.uniform(size=shape[1]).astype(dtype)
        var_np = np.random.uniform(size=shape[1]).astype(dtype)
...
import numpy as np
import tvm
+from tvm.contrib import graph_runtime
import topi
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
from nnvm.testing.config import ctx_list
@@ -19,7 +19,7 @@ def test_conv2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
@@ -42,7 +42,7 @@ def test_grouped_conv2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
@@ -63,7 +63,7 @@ def test_max_pool2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -80,7 +80,7 @@ def test_avg_pool2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -97,7 +97,7 @@ def test_global_max_pool2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
@@ -114,7 +114,7 @@ def test_global_avg_pool2d():
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
...
import numpy as np
import tvm
+from tvm.contrib import graph_runtime
import topi
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
from nnvm.testing.config import ctx_list

def verify_transpose(dshape, axes):
@@ -16,7 +16,7 @@ def verify_transpose(dshape, axes):
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
@@ -31,7 +31,7 @@ def verify_reduce(dshape, fnp, fsym, **kwargs):
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
-        m = nnvm.runtime.create(graph, lib, ctx)
+        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = np.random.uniform(size=dshape).astype(dtype)
        out_np = fnp(data + 1, **kwargs)
...
@@ -2,9 +2,9 @@ import numpy as np
import topi
import tvm
+from tvm.contrib import graph_runtime
import nnvm.symbol as sym
import nnvm.compiler
-import nnvm.runtime
from nnvm.testing.config import ctx_list
from nnvm import frontend
import mxnet as mx
@@ -28,7 +28,7 @@ def test_mxnet_frontend_impl(mx_symbol, data_shape=(1, 3, 224, 224), out_shape=(
    dshape = x.shape
    shape_dict = {'data': dshape}
    graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
-    m = nnvm.runtime.create(graph, lib, ctx)
+    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    m.set_input("data", tvm.nd.array(x.astype(dtype)))
    m.set_input(**params)
...
@@ -8,10 +8,9 @@ This is an example of using NNVM to compile MobileNet model and deploy its infer
To begin with, we import nnvm (for compilation) and TVM (for deployment).
"""
import tvm
+from tvm.contrib import nvcc, graph_runtime
import nnvm.compiler
-import nnvm.runtime
import nnvm.testing
-from tvm.contrib import nvcc

######################################################################
# Register the NVCC Compiler Option
@@ -66,12 +65,14 @@ graph, lib, params = nnvm.compiler.build(
# Run the Compiled Module
# -----------------------
#
-# To deploy the module, we call :any:`nnvm.runtime.create` passing in the graph the lib and context.
+# To deploy the module, we call :any:`tvm.contrib.graph_runtime.create`, passing in the graph, the lib, and the context.
# Thanks to TVM, we can deploy the compiled module to many platforms and languages.
# The deployment module is designed to contain minimum dependencies.
# This example runs on the same machine.
+#
+# Note that the code below no longer depends on NNVM; it relies only on TVM's runtime to run (deploy).

-module = nnvm.runtime.create(graph, lib, ctx)
+module = graph_runtime.create(graph, lib, ctx)
# set input
module.set_input(**params)
# run
...