Commit 7fffbeb3 by Tianqi Chen

[DOCS] Add install docs, fix Jenkins (#57)

parent d25138e6
@@ -4,7 +4,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 // nnvm libraries
-nnvm_lib = "tvm/lib/libtvm.so, tvm/lib/libtvm_runtime.so, lib/libnnvm_top.so, config.mk"
+nnvm_lib = "tvm/lib/libtvm.so, tvm/lib/libtvm_runtime.so, lib/libnnvm_compiler.so"
 // command to start a docker container
 docker_run = 'tests/ci_build/ci_build.sh'
@@ -47,11 +47,11 @@ stage("Sanity Check") {
 def make(docker_type, make_flag) {
   timeout(time: max_time, unit: 'MINUTES') {
     try {
-      sh "${docker_run} ${docker_type} ./tests/script/task_build.sh ${make_flag}"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
     } catch (exc) {
       echo 'Incremental compilation failed. Fall back to build from scratch'
-      sh "${docker_run} ${docker_type} ./tests/script/task_clean.sh"
-      sh "${docker_run} ${docker_type} ./tests/script/task_build.sh ${make_flag}"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
+      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
     }
   }
 }
@@ -119,7 +119,7 @@ stage('Deploy') {
   ws('workspace/nnvm/deploy-docs') {
     if (env.BRANCH_NAME == "master") {
       unpack_lib('mydocs', 'docs.tgz')
-      sh "tar xf docs.tgz -C /var/docs"
+      sh "tar xf docs.tgz -C /var/nnvm-docs"
     }
   }
 }
...
@@ -53,12 +53,12 @@ else
 NO_WHOLE_ARCH= --no-whole-archive
 endif

-all: lib/libnnvm.a lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX)
+all: lib/libnnvm.a lib/libnnvm_compiler.$(SHARED_LIBRARY_SUFFIX)

 SRC = $(wildcard src/*.cc src/c_api/*.cc src/core/*.cc src/pass/*.cc)
-SRC_TOP = $(wildcard src/top/*/*.cc src/compiler/*.cc src/compiler/*/*.cc)
+SRC_COMPILER = $(wildcard src/top/*/*.cc src/compiler/*.cc src/compiler/*/*.cc)
 ALL_OBJ = $(patsubst %.cc, build/%.o, $(SRC))
-TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_TOP))
+TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_COMPILER))
 ALL_DEP = $(ALL_OBJ)

 include tests/cpp/unittest.mk
@@ -74,7 +74,7 @@ lib/libnnvm.a: $(ALL_DEP)
 	@mkdir -p $(@D)
 	ar crv $@ $(filter %.o, $?)

-lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
+lib/libnnvm_compiler.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
 	@mkdir -p $(@D)
 	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}
...
NNVM Change Log
===============
This file records the changes in the NNVM library in reverse chronological order.

## 0.8rc
- This is a major change in NNVM that introduces an end-to-end compiler stack.
- The NNVM compiler stack is ready:
  - Core tensor operators
  - Integration of the compiler with TVM
- libnnvm.a is still independent of the compiler modules.

## 0.7
- NNVM graph
- Basic passes: serialization, gradient, infer_shape, place_device, plan_memory
-# NNVM: Graph IR Stack for Deep Learning Systems
+# NNVM: Open Compiler for AI Frameworks

 [![Build Status](https://travis-ci.org/dmlc/nnvm.svg?branch=master)](https://travis-ci.org/dmlc/nnvm)
 [![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)

-NNVM is a reusable computational graph compilation stack for deep learning systems. It provides modules to:
+NNVM compiler offers reusable computation graph optimization and compilation for deep learning systems.
+It is backed by the [TVM stack](http://tvmlang.org) and provides modules to:

 - Represent deep learning workloads from front-end frameworks via a graph IR.
 - Optimize computation graphs to improve performance.
 - Compile into executable modules and deploy to different hardware backends with minimum dependency.

-NNVM is designed to add new frontend, operators and graph optimizations in a decentralized fashion without changing the core interface. It is part of [TVM stack](https://github.com/dmlc/tvm). The compiler toolchain can target hardware backends supported by TVM.
+NNVM is designed to add new frontend, operators and graph optimizations in a decentralized fashion without changing the core interface.

 The compiled module can be deployed to server, mobile, embedded devices and browsers with minimum dependency, in languages including c++, python, javascript, java, objective-c.

-The following code snippet demonstrates the general workflow of nnvm.
+The following code snippet demonstrates the general workflow of nnvm compiler.

 ```python
 import tvm
...
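A minimal sketch of this workflow, assuming the 0.8-era `nnvm.compiler.build` API and TVM's `graph_runtime` (the operator and shapes here are chosen only for illustration):

```python
import numpy as np
import tvm
from tvm.contrib import graph_runtime
import nnvm.compiler
import nnvm.symbol as sym

# declare a tiny computation graph
x = sym.Variable("x")
y = sym.relu(x)

# compile the graph for a CPU target; returns the final graph,
# the compiled module and the (possibly pre-computed) parameters
graph, lib, params = nnvm.compiler.build(
    y, target="llvm", shape={"x": (2, 4)})

# run the compiled module through the TVM graph runtime
module = graph_runtime.create(graph, lib, tvm.cpu(0))
module.set_input("x", np.random.uniform(size=(2, 4)).astype("float32"))
module.run()
print(module.get_output(0, tvm.nd.empty((2, 4))).asnumpy())
```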
# Contribute to NNVM

NNVM has been developed by community members.
Everyone is more than welcome to contribute.
It is a way to make the project better and more accessible to more users.
NNVM compiler relies on TVM to deploy to different hardware backends.
You can improve the compiler performance by contributing to [TVM](https://github.com/dmlc/tvm).
- Please update [NEWS.md](https://github.com/dmlc/nnvm/blob/master/NEWS.md) to
  add a note when you change the API or add a new document.
## Guidelines
* [Submit Pull Request](#submit-pull-request)
* [Git Workflow Howtos](#git-workflow-howtos)
- [How to resolve conflict with master](#how-to-resolve-conflict-with-master)
- [How to combine multiple commits into one](#how-to-combine-multiple-commits-into-one)
- [What is the consequence of force push](#what-is-the-consequence-of-force-push)
* [Document](#document)
* [Testcases](#testcases)
* [Examples](#examples)
* [Core Library](#core-library)
* [Python Package](#python-package)
## Submit Pull Request
* Before submitting, please rebase your code on the most recent version of master. You can do it by
```bash
git remote add upstream [url to nnvm repo]
git fetch upstream
git rebase upstream/master
```
* If you have multiple small commits,
  it might be good to merge them together (use git rebase then squash) into more meaningful groups.
* Send the pull request!
  - Fix the problems reported by automatic checks.
  - If you are contributing a new module or new function, add a test.
## Git Workflow Howtos

### How to resolve conflict with master
- First, rebase to the most recent master
```bash
# The first two steps can be skipped after you do it once.
git remote add upstream [url to nnvm repo]
git fetch upstream
git rebase upstream/master
```
- Git may show some conflicts it cannot merge automatically, say ```conflicted.py```.
- Manually modify the file to resolve the conflict.
- After you have resolved the conflict, mark it as resolved by
```bash
git add conflicted.py
```
- Then you can continue the rebase by
```bash
git rebase --continue
```
- Finally, push to your fork; you may need to force push here.
```bash
git push --force
```
### How to combine multiple commits into one
Sometimes we want to combine multiple commits, especially when later commits are only fixes to previous ones,
to create a PR with a set of meaningful commits. You can do it with the following steps.
- Before doing so, configure the default editor of git if you haven't done so.
```bash
git config core.editor the-editor-you-like
```
- Assuming we want to combine the last 3 commits, type the following command
```bash
git rebase -i HEAD~3
```
- It will pop up a text editor. Set the first commit to ```pick```, and change the later ones to ```squash```.
- After you save the file, another text editor will pop up asking you to modify the combined commit message.
- Push the changes to your fork; you need to force push.
```bash
git push --force
```
### Reset to the most recent master
You can always use git reset to reset your version to the most recent master.
Note that all your ***local changes will get lost***,
so only do it when you have no local changes or when your pull request has just been merged.
```bash
git reset --hard [hash tag of master]
git push --force
```

### What is the consequence of force push
The previous two tips require a force push because we altered the commit history.
It is fine to force push to your own fork, as long as only your own commits are changed.
## Testcases
- All the testcases are in the ```tests``` folder.

## Core Library
- Follow the Google C++ style for C++ code.
- We use doxygen to document all the interface code.
- You can reproduce the linter checks by typing ```make lint```.

## Python Package
- Always add a docstring to new functions, in numpydoc format; see the sketch after this list.
- You can reproduce the linter checks by typing ```make lint```.
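A hypothetical example of a function documented in numpydoc format (the function itself is made up for illustration):

```python
def clip(data, a_min, a_max):
    """Clip all elements of an array to the range [a_min, a_max].

    Parameters
    ----------
    data : list of float
        The input values.
    a_min : float
        Lower bound of the range.
    a_max : float
        Upper bound of the range.

    Returns
    -------
    out : list of float
        The clipped values.
    """
    return [min(max(x, a_min), a_max) for x in data]
```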
Deploy Compiled Modules
=======================
NNVM compiled modules are fully embedded in the TVM runtime as long as the ```GRAPH_RUNTIME``` option
is enabled when building the TVM runtime. Check out the [TVM documentation](http://docs.tvmlang.org/) for
how to deploy the TVM runtime to your system.

In a nutshell, we need three items to deploy a compiled module
(see the sketch after this list for how they are exported).
Check out our tutorials on getting started with NNVM compiler for more details.

- The graph JSON data, which contains the execution graph.
- The TVM module library of compiled functions.
- The parameter blobs for the stored parameters.
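All three come out of `nnvm.compiler.build`; a minimal sketch of exporting them (the operator, shapes and file names are only examples):

```python
import nnvm.compiler
import nnvm.symbol as sym

# compile a tiny graph; a real deployment would compile an imported model
x = sym.Variable("x")
graph, lib, params = nnvm.compiler.build(
    sym.relu(x), target="llvm", shape={"x": (1, 10)})

lib.export_library("deploy.so")                      # compiled functions
with open("deploy.json", "w") as f:
    f.write(graph.json())                            # execution graph
with open("deploy.params", "wb") as f:
    f.write(nnvm.compiler.save_param_dict(params))   # parameter blobs
```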
We can then use TVM's runtime API to deploy the compiled module.
Here is an example in python.
```python
import tvm

# tvm module for compiled functions
loaded_lib = tvm.module.load("deploy.so")
# json graph
loaded_json = open("deploy.json").read()
# parameters in binary
loaded_params = bytearray(open("deploy.params", "rb").read())

fcreate = tvm.get_global_func("tvm.graph_runtime.create")
ctx = tvm.gpu(0)
gmodule = fcreate(loaded_json, loaded_lib, ctx.device_type, ctx.device_id)
set_input, get_output, run = gmodule["set_input"], gmodule["get_output"], gmodule["run"]

# x_np is the input array and shape the output shape; both are model-specific
set_input("x", tvm.nd.array(x_np))
gmodule["load_params"](loaded_params)
run()
out = tvm.nd.empty(shape)
get_output(0, out)
print(out.asnumpy())
```
Installation Guide
==================
This page gives instructions on how to build and install the nnvm compiler package from
scratch on various systems. It consists of two steps:

1. First build the shared library from the C++ code (`libnnvm_compiler.so` for linux/osx and `libnnvm_compiler.dll` for windows).
2. Setup for the language packages (e.g. the Python package).

To get started, clone the nnvm repo from github. It is important to also clone the submodules, using the ```--recursive``` option.
```bash
git clone --recursive https://github.com/dmlc/nnvm
```
For windows users who use github tools, you can open the git shell and type the following commands.
```bash
git submodule init
git submodule update --recursive
```
NNVM compiler depends on TVM and TOPI, so make sure you install them by following the [TVM documentation](http://docs.tvmlang.org/).
## Contents
- [Build the Shared Library](#build-the-shared-library)
- [Python Package Installation](#python-package-installation)

## Build the Shared Library
Our goal is to build the shared library:
- On Linux/OSX the target library is `libnnvm_compiler.so`
- On Windows the target library is `libnnvm_compiler.dll`

The minimal build requirement is
- A recent C++ compiler supporting C++11 (g++ 4.8 or higher)

You can edit `make/config.mk` to change the compile options, and then build by
`make`. If everything goes well, we can go on to the specific language installation section.
## Python Package Installation
The python package is located in the `python` directory.
There are several ways to install the package:

1. Set the environment variable `PYTHONPATH` to tell python where to find
   the library. For example, assume we cloned `nnvm` in the home directory
   `~`; then we can add the following line to `~/.bashrc`.
   This is ***recommended for developers*** who may change the code:
   the changes are reflected immediately once you pull the code and rebuild the project (no need to call ```setup``` again).
```bash
export PYTHONPATH=/path/to/nnvm/python:${PYTHONPATH}
```

2. Install the nnvm python bindings via `setup.py`:
```bash
# install the nnvm package for the current user
# NOTE: if you installed python via homebrew, --user is not needed during installation,
# as the package will be automatically installed to your user directory;
# providing the --user flag may trigger an error during installation in that case.
cd python; python setup.py install --user; cd ..
```
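To sanity-check either installation method, a quick import check (assuming TVM and TOPI are already installed):

```python
# verify that the nnvm package and the shared library can be loaded
import nnvm
import nnvm.compiler
print(nnvm.__file__)
```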
@@ -10,6 +10,9 @@ Contents
    self
    top
+   how_to/install
    tutorials/index
+   how_to/contribute
+   how_to/deploy
    api/python/index
    dev/index
 # pylint: disable=invalid-name
 """Helper utility to save parameter dict"""
+import ctypes
 import tvm
+from tvm._ffi.runtime_ctypes import TVMArrayHandle

 _save_param_dict = tvm.get_global_func("nnvm.compiler._save_param_dict")
 _load_param_dict = tvm.get_global_func("nnvm.compiler._load_param_dict")
@@ -62,6 +64,6 @@ def load_param_dict(param_bytes):
     param_dict = {}
     for i in range(size):
         key = load_mod(1, i)
-        dltensor_handle = load_mod(2, i)
+        dltensor_handle = ctypes.cast(load_mod(2, i), TVMArrayHandle)
         param_dict[key] = tvm.nd.NDArray(dltensor_handle, False)
     return param_dict
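For context, a round-trip through these two globals via the public wrappers exported by this module (a sketch; the array contents are arbitrary):

```python
import numpy as np
import tvm
import nnvm.compiler

params = {"w": tvm.nd.array(np.ones((2, 2), dtype="float32"))}
blob = nnvm.compiler.save_param_dict(params)   # serialize to a bytearray
loaded = nnvm.compiler.load_param_dict(blob)   # relies on the ctypes cast above
np.testing.assert_allclose(loaded["w"].asnumpy(), params["w"].asnumpy())
```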
@@ -26,7 +26,7 @@ def find_lib_path():
     if hasattr(__builtin__, "NNVM_LIBRARY_NAME"):
         lib_name = __builtin__.NNVM_LIBRARY_NAME
     else:
-        lib_name = "libnnvm_top"
+        lib_name = "libnnvm_compiler"

     api_path = os.path.join(base_path, '../../lib/')
     cmake_build_path = os.path.join(base_path, '../../build/Release/')
...
@@ -281,7 +281,7 @@ nnvm::Graph GraphFuseCompile(nnvm::Graph g) {
       }
     }
     // schedule on root node, and use master's schedule
-    if (nid != root_id) {
+    if (static_cast<int>(nid) != root_id) {
       for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
         uint32_t eid = idx.entry_id(nid, index);
         subgraph_vec[eid] = NodeEntry{gnode, index, 0};
@@ -300,7 +300,7 @@ nnvm::Graph GraphFuseCompile(nnvm::Graph g) {
     const auto& inode = idx[nid];
     if (inode.source->is_variable()) continue;
     int root_id = group_vec[nid];
-    if (nid != root_id) continue;
+    if (static_cast<int>(nid) != root_id) continue;
     int master = master_vec[root_id];
     FuseEntry& fe = fuse_vec[root_id];
@@ -336,7 +336,7 @@ nnvm::Graph GraphFuseCompile(nnvm::Graph g) {
       continue;
     }
     int root_id = group_vec[nid];
-    if (nid != root_id) continue;
+    if (static_cast<int>(nid) != root_id) continue;
     FuseEntry& fe = fuse_vec[root_id];
     const IndexedGraph& subidx = fe.subgraph.indexed_graph();
     nnvm::NodePtr np = nnvm::Node::Create();
...
@@ -226,7 +226,7 @@ inline bool SplitInferShape(const NodeAttrs& attrs,
   CHECK_LT(param.axis, dshape.ndim());
   TShape oshape = dshape;
   dim_t begin = 0;
-  for (size_t i = 0; i < num_outputs - 1; ++i) {
+  for (dim_t i = 0; i < num_outputs - 1; ++i) {
     CHECK_GT(param.indices_or_sections[i], begin)
         << "indices_or_sections need to be a sorted ascending list";
     oshape[param.axis] = param.indices_or_sections[i] - begin;
...
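For reference, the check above guards the `split` operator's sections list; a sketch of a frontend call that satisfies the sorted-ascending requirement (assuming `nnvm.symbol.split` with the same `indices_or_sections` parameter):

```python
import nnvm.symbol as sym

x = sym.Variable("x")  # e.g. an input of shape (2, 6)
# cut axis 1 at indices 2 and 5 -> pieces of width 2, 3 and 1
y = sym.split(x, indices_or_sections=[2, 5], axis=1)
```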
@@ -35,6 +35,8 @@ RUN bash /install/ubuntu_install_mxnet.sh
 COPY install/ubuntu_install_onnx.sh /install/ubuntu_install_onnx.sh
 RUN bash /install/ubuntu_install_onnx.sh

+RUN pip install Pillow
+
 # Environment variables
 ENV PATH=/usr/local/nvidia/bin:${PATH}
 ENV PATH=/usr/local/cuda/bin:${PATH}
...
@@ -26,7 +26,7 @@ import numpy as np
 # In this section, we download a pretrained imagenet model and classify an image.
 from mxnet.gluon.model_zoo.vision import get_model
 from mxnet.gluon.utils import download
-import Image
+from PIL import Image
 from matplotlib import pyplot as plt

 block = get_model('resnet18_v1', pretrained=True)
 img_name = 'cat.jpg'
...
@@ -41,7 +41,7 @@ sym, params = nnvm.frontend.from_onnx(onnx_graph)
 # Load a test image
 # ---------------------------------------------
 # A single cat dominates the examples!
-import Image
+from PIL import Image
 img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
 with open('cat.jpg', 'w') as f:
     f.write(urllib2.urlopen(img_url).read())
...