Commit bb2b8620 by tqchen, committed by Tianqi Chen

[DOCS] documentation merge

parent 891b4e06
......@@ -7,7 +7,8 @@
tvm_runtime = "lib/libtvm_runtime.so, config.mk"
tvm_lib = "lib/libtvm.so, " + tvm_runtime
// LLVM upstream lib
tvm_multilib = "lib/libtvm_llvm40.so, lib/libtvm_llvm50.so, lib/libtvm_llvm60.so, lib/libtvm_topi.so, " + tvm_runtime
tvm_multilib = "lib/libtvm_llvm40.so, lib/libtvm_llvm50.so, lib/libtvm_llvm60.so, " +
"lib/libtvm_topi.so, nnvm/lib/libnnvm_compiler.so, " + tvm_runtime
// command to start a docker container
docker_run = 'tests/ci_build/ci_build.sh'
......@@ -50,11 +51,11 @@ stage("Sanity Check") {
def make(docker_type, make_flag) {
timeout(time: max_time, unit: 'MINUTES') {
try {
sh "${docker_run} ${docker_type} make ${make_flag}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
} catch (exc) {
echo 'Incremental compilation failed. Fall back to build from scratch'
sh "${docker_run} ${docker_type} make clean"
sh "${docker_run} ${docker_type} make ${make_flag}"
sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
}
}
}
......@@ -231,6 +232,7 @@ stage('Integration Test') {
sh "${docker_run} gpu ./tests/scripts/task_python_integration.sh"
sh "${docker_run} gpu ./tests/scripts/task_python_topi.sh"
sh "${docker_run} gpu ./tests/scripts/task_cpp_topi.sh"
sh "${docker_run} gpu ./tests/scripts/task_python_nnvm.sh"
}
}
}
......
......@@ -10,7 +10,8 @@ endif
include $(config)
.PHONY: clean install installdev all test doc pylint cpplint lint verilog cython cython2 cython3 web runtime
.PHONY: clean install installdev all test doc pylint cpplint lint\
verilog cython cython2 cython3 web topi nnvm runtime
ifndef DMLC_CORE_PATH
DMLC_CORE_PATH = $(ROOTDIR)/dmlc-core
......@@ -36,6 +37,7 @@ EMCC_FLAGS= -std=c++11 -DDMLC_LOG_STACK_TRACE=0\
-s EXTRA_EXPORTED_RUNTIME_METHODS="['cwrap','getValue','setValue','addFunction']"\
-s USE_GLFW=3 -s USE_WEBGL2=1 -lglfw\
$(INCLUDE_FLAGS)
# llvm configuration
ifdef LLVM_CONFIG
LLVM_VERSION=$(shell $(LLVM_CONFIG) --version| cut -b 1,3)
......@@ -62,6 +64,8 @@ SGX_SRC = $(wildcard src/runtime/sgx/untrusted/*.cc)
RPC_SRC = $(wildcard src/runtime/rpc/*.cc)
GRAPH_SRC = $(wildcard src/runtime/graph/*.cc)
RUNTIME_SRC = $(wildcard src/runtime/*.cc)
# TOPI
TOPI_SRC = $(wildcard topi/src/*.cc)
# Objectives
......@@ -78,7 +82,7 @@ RPC_OBJ = $(patsubst src/%.cc, build/%.o, $(RPC_SRC))
GRAPH_OBJ = $(patsubst src/%.cc, build/%.o, $(GRAPH_SRC))
CC_OBJ = $(patsubst src/%.cc, build/%.o, $(CC_SRC)) $(LLVM_OBJ)
RUNTIME_OBJ = $(patsubst src/%.cc, build/%.o, $(RUNTIME_SRC))
TOPI_OBJ = $(patsubst topi/%.cc, build/%.o, $(TOPI_SRC))
TOPI_OBJ = $(patsubst topi/src/%.cc, build/topi/%.o, $(TOPI_SRC))
CONTRIB_OBJ =
# Deps
......@@ -253,6 +257,7 @@ runtime: lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX)
web: lib/libtvm_web_runtime.js lib/libtvm_web_runtime.bc
topi: lib/libtvm_topi.$(SHARED_LIBRARY_SUFFIX)
include tests/cpp/unittest.mk
test: $(TEST)
......@@ -291,9 +296,9 @@ build/%.o: src/%.cc
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) -c $< -o $@
build/src/%.o: topi/src/%.cc
build/topi/%.o: topi/src/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build/src/$*.o $< >build/src/$*.d
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) -c $< -o $@
lib/libtvm.${SHARED_LIBRARY_SUFFIX}: $(ALL_DEP) $(RUNTIME_DEP)
......@@ -326,12 +331,14 @@ LIBHALIDEIR:
cpplint:
python dmlc-core/scripts/lint.py topi cpp topi/include;
python dmlc-core/scripts/lint.py nnvm cpp nnvm/include nnvm/src;
python dmlc-core/scripts/lint.py tvm cpp include src verilog\
examples/extension/src examples/graph_executor/src
pylint:
pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc
pylint nnvm/python/nnvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
jnilint:
python dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src
......
......@@ -753,7 +753,7 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = include/tvm topi/include/topi
INPUT = include/tvm topi/include/topi nnvm/include/nnvm
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
......
......@@ -25,6 +25,7 @@ from recommonmark.transform import AutoStructify
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../python/'))
sys.path.insert(0, os.path.join(curr_path, '../topi/python'))
sys.path.insert(0, os.path.join(curr_path, '../nnvm/python'))
# -- General configuration ------------------------------------------------
......@@ -40,6 +41,7 @@ source_parsers = {
'.md': CommonMarkParser
}
os.environ['TVM_BUILD_DOC'] = '1'
os.environ['NNVM_BUILD_DOC'] = '1'
# Version information.
import tvm
version = tvm.__version__
......@@ -187,7 +189,8 @@ gallery_dirs = ['tutorials']
subsection_order = ExplicitOrder(
['../tutorials/language',
'../tutorials/optimize',
'../tutorials/deployment'])
'../tutorials/deployment',
'../tutorials/nnvm'])
def generate_doxygen_xml(app):
......
/*!
* Copyright (c) 2017 by Contributors
* \file api_registry.h
* \file tvm/api_registry.h
* \brief This file includes the necessary headers
*  used to register a global API function.
*/
......
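For orientation, a minimal sketch (not part of this commit) of what registering a global API function through this header looks like; the function name "_example.add_one" is hypothetical, and the sketch assumes the TVM_REGISTER_API macro and the runtime::TVMArgs / runtime::TVMRetValue types that tvm/api_registry.h brings in:
#include <tvm/api_registry.h>

namespace tvm {

// Hypothetical registration, for illustration only: exposes a global
// function "_example.add_one" that reads one integer argument and
// returns it incremented.
TVM_REGISTER_API("_example.add_one")
.set_body([](runtime::TVMArgs args, runtime::TVMRetValue* rv) {
    int value = args[0];
    *rv = value + 1;
  });

}  // namespace tvm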
/*!
* Copyright (c) 2016 by Contributors
* \file arithmetic.h
* \file tvm/arithmetic.h
* \brief Algebra and set operations and simplifications.
*/
#ifndef TVM_ARITHMETIC_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file base.h
* \file tvm/base.h
* \brief Defines the base data structure
*/
#ifndef TVM_BASE_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file buffer.h
* \file tvm/buffer.h
* \brief Symbolic n-dimensional array, to represent a memory buffer.
*/
#ifndef TVM_BUFFER_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file build_module.h
* \file tvm/build_module.h
* \brief Functions for compiling ops.
*/
#ifndef TVM_BUILD_MODULE_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file c_dsl_api.h
* \file tvm/c_dsl_api.h
*
* \brief TVM DSL Node C API, used to interact with DSL compilation.
*
......
/*!
* Copyright (c) 2017 by Contributors
* \file channel.h
* \file tvm/channel.h
* \brief Channel object for pipeline.
*/
#ifndef TVM_CHANNEL_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file codegen.h
* \file tvm/codegen.h
* \brief Collection of low-level IR passes for codegen.
*/
#ifndef TVM_CODEGEN_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file expr.h
* \file tvm/expr.h
* \brief The Expr and related elements in DataFlow construction.
*/
#ifndef TVM_EXPR_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file ir.h
* \file tvm/ir.h
* \brief Additional high level nodes in the IR
*/
#ifndef TVM_IR_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file ir_functor_ext.h
* \file tvm/ir_functor_ext.h
* \brief More powerful visitor that allows defining function signatures.
*/
#ifndef TVM_IR_FUNCTOR_EXT_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file ir_mutator.h
* \file tvm/ir_mutator.h
* \brief Defines general IRMutation pass
*/
#ifndef TVM_IR_MUTATOR_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file ir_operator.h
* \file tvm/ir_operator.h
* \brief Common operators of Expr
*/
#ifndef TVM_IR_OPERATOR_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file ir_pass.h
* \file tvm/ir_pass.h
* \brief Collection of IR pass functions
*
* When the pass functions in this file are for Stmt,
......
/*!
* Copyright (c) 2016 by Contributors
* \file ir_visitor.h
* \file tvm/ir_visitor.h
* \brief Visitor to quickly visit IR trees
*/
#ifndef TVM_IR_VISITOR_H_
......
/*!
* Copyright (c) 2018 by Contributors
* \file logging.h
* \file tvm/logging.h
* \brief logging utilities on top of dmlc-core
*/
#ifndef TVM_LOGGING_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file lowered_func.h
* \file tvm/lowered_func.h
* \brief Information about a lowered TVM function.
*  This data structure is the final step toward codegen.
*/
......
/*!
* Copyright (c) 2016 by Contributors
* \file operation.h
* \file tvm/operation.h
* \brief Operation node can generate one or multiple Tensors
*/
#ifndef TVM_OPERATION_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file packed_func_ext.h
* \file tvm/packed_func_ext.h
* \brief Extension package to PackedFunc
*  This enables passing NodeRef types into/from PackedFunc.
*/
......
/*!
* Copyright (c) 2017 by Contributors
* \file c_backend_api.h
* \file tvm/runtime/c_backend_api.h
* \brief TVM runtime backend API.
*
* The functions defined in this header are intended to be
......
/*!
* Copyright (c) 2016 by Contributors
* \file c_runtime_api.h
* \file tvm/runtime/c_runtime_api.h
* \brief TVM runtime library.
*
* The philosophy of TVM project is to customize the compilation
......
/*!
* Copyright (c) 2017 by Contributors
* \file config.h
* \file tvm/runtime/config.h
* \brief Runtime library related configurations.
*/
#ifndef TVM_RUNTIME_CONFIG_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file device_api.h
* \file tvm/runtime/device_api.h
* \brief Abstract device memory management API
*/
#ifndef TVM_RUNTIME_DEVICE_API_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file module.h
* \file tvm/runtime/module.h
* \brief Runtime container of the functions generated by TVM.
*  This is used to support dynamically linking, loading and saving
*  functions from different conventions under a unified API.
......
/*!
* Copyright (c) 2017 by Contributors
* \file packed_func.h
* \file tvm/runtime/packed_func.h
* \brief Type-erased function used across TVM API.
*/
#ifndef TVM_RUNTIME_PACKED_FUNC_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file registry.h
* \file tvm/runtime/registry.h
* \brief This file defines the TVM global function registry.
*
* The registered functions will be made available to front-end
......
/*!
* Copyright (c) 2018 by Contributors
* \file threading_backend.h
* \file tvm/runtime/threading_backend.h
* \brief Utilities for manipulating thread pool threads.
*/
#ifndef TVM_RUNTIME_THREADING_BACKEND_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file util.h
* \file tvm/runtime/util.h
* \brief Useful runtime util.
*/
#ifndef TVM_RUNTIME_UTIL_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file schedule.h
* \file tvm/schedule.h
* \brief Define a schedule.
*/
#ifndef TVM_SCHEDULE_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file schedule_pass.h
* \file tvm/schedule_pass.h
* \brief Collection of Schedule pass functions.
*
* These passes work on the schedule hyper-graph
......
/*!
* Copyright (c) 2017 by Contributors
* \file target_info.h
* \file tvm/target_info.h
* \brief Various information about target.
*/
#ifndef TVM_TARGET_INFO_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file tensor.h
* \file tvm/tensor.h
* \brief Dataflow tensor object
*/
#ifndef TVM_TENSOR_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file tensor_intrin.h
* \file tvm/tensor_intrin.h
* \brief Tensor intrinsic operations.
*/
#ifndef TVM_TENSOR_INTRIN_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file tvm.h
* \file tvm/tvm.h
* \brief Header to include all C++ API.
*/
#ifndef TVM_TVM_H_
......
......@@ -19,10 +19,10 @@ endif()
# include path
include_directories(BEFORE "include")
include_directories("tvm/include")
include_directories("tvm/dlpack/include")
include_directories("tvm/HalideIR/src")
include_directories("tvm/topi/include")
include_directories("../include")
include_directories("../dlpack/include")
include_directories("../HalideIR/src")
include_directories("../topi/include")
set(NNVM_LINKER_LIBS "")
set(NNVM_COMPILER_LINKER_LIBS "")
......@@ -48,8 +48,7 @@ if(MSVC)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
find_library(TVM_LIB tvm
HINTS ${CMAKE_CURRENT_SOURCE_DIR}/../tvm/build/Release
HINTS ${CMAKE_CURRENT_SOURCE_DIR}/tvm/build/Release)
HINTS ${CMAKE_CURRENT_SOURCE_DIR}/../build/Release)
message(STATUS "Build with TVM library: " ${TVM_LIB})
list(APPEND NNVM_COMPILER_LINKER_LIBS ${TVM_LIB})
else(MSVC)
......@@ -86,8 +85,8 @@ file(GLOB_RECURSE COMPILER_SRCS
src/top/*.cc
)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../dmlc-core/CMakeLists.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../dmlc-core/include)
elseif(DMLC_CORE_PATH)
include_directories(${DMLC_CORE_PATH}/include)
endif()
......
#!groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// nnvm libraries
nnvm_lib = "tvm/lib/libtvm.so, tvm/lib/libtvm_runtime.so, lib/libnnvm_compiler.so"
// command to start a docker container
docker_run = 'tests/ci_build/ci_build.sh'
// timeout in minutes
max_time = 60
// initialize source codes
def init_git() {
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
sh 'git submodule update --init --recursive'
}
}
}
def init_git_win() {
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
bat 'git submodule update --init --recursive'
}
}
}
stage("Sanity Check") {
timeout(time: max_time, unit: 'MINUTES') {
node('linux') {
ws('workspace/tvm/sanity') {
init_git()
sh "${docker_run} lint ./tests/scripts/task_lint.sh"
}
}
}
}
// Run make. First try an incremental make from a previous workspace in hopes of
// accelerating the compilation. If anything goes wrong, clean the workspace and then
// build from scratch.
def make(docker_type, make_flag) {
timeout(time: max_time, unit: 'MINUTES') {
try {
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
} catch (exc) {
echo 'Incremental compilation failed. Fall back to build from scratch'
sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
}
}
}
// pack libraries for later use
def pack_lib(name, libs) {
sh """
echo "Packing ${libs} into ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
stash includes: libs, name: name
}
// unpack libraries saved before
def unpack_lib(name, libs) {
unstash name
sh """
echo "Unpacked ${libs} from ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
}
stage('Build') {
timeout(time: max_time, unit: 'MINUTES') {
node('GPU' && 'linux') {
ws('workspace/nnvm/build-gpu') {
init_git()
make('gpu', '-j2')
pack_lib('gpu', nnvm_lib)
}
}
}
}
stage('Tests') {
parallel 'python': {
node('GPU' && 'linux') {
ws('workspace/nnvm/it-python-gpu') {
init_git()
unpack_lib('gpu', nnvm_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} gpu ./tests/scripts/task_python_test.sh"
sh "${docker_run} gpu ./tests/scripts/task_frontend_test.sh"
}
}
}
},
'docs': {
node('GPU' && 'linux') {
ws('workspace/nnvm/docs-python-gpu') {
init_git()
unpack_lib('gpu', nnvm_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} gpu ./tests/scripts/task_python_docs.sh"
}
pack_lib('mydocs', 'docs.tgz')
}
}
}
}
stage('Deploy') {
node('docker' && 'doc') {
ws('workspace/nnvm/deploy-docs') {
if (env.BRANCH_NAME == "master") {
unpack_lib('mydocs', 'docs.tgz')
sh "tar xf docs.tgz -C /var/nnvm-docs"
}
}
}
}
......@@ -9,20 +9,16 @@ endif
endif
include $(config)
TVMPATH = ..
export LDFLAGS = -pthread -lm
export CFLAGS = -std=c++11 -Wall -O2 -Iinclude -fPIC
CFLAGS += -Itvm/include -Itvm/dlpack/include -Itvm/HalideIR/src -Itvm/topi/include
ifdef DMLC_CORE_PATH
CFLAGS += -I$(DMLC_CORE_PATH)/include
else
CFLAGS += -I$(ROOTDIR)/dmlc-core/include
endif
CFLAGS += -I$(TVMPATH)/include -I$(TVMPATH)/dlpack/include -I$(TVMPATH)/HalideIR/src -I$(TVMPATH)/topi/include
ifdef DMLC_CORE_PATH
CFLAGS += -I$(DMLC_CORE_PATH)/include
else
CFLAGS += -I$(ROOTDIR)/dmlc-core/include
CFLAGS += -I$(ROOTDIR)/../dmlc-core/include
endif
ifneq ($(ADD_CFLAGS), NONE)
......@@ -89,15 +85,15 @@ cyclean:
lint: pylint cpplint
doc:
doxygen docs/Doxyfile
cpplint:
python dmlc-core/scripts/lint.py nnvm cpp include src
python ../dmlc-core/scripts/lint.py nnvm cpp include src
pylint:
pylint python/nnvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
doc:
doxygen docs/Doxyfile
clean:
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o cli_test
......
NNVM Change Log
===============
This file records the changes in the NNVM library in reverse chronological order.
## 0.8rc
- This is a major change in NNVM that introduces the end-to-end compiler stack.
- The NNVM compiler stack is ready
  - Core tensor operators
  - Integrates the compiler with TVM
- libnnvm.a is still independent from the compiler modules.
## 0.7
- NNVM graph
- Basic passes for serialization, gradient, infer_shape, place_device, plan_memory
/*!
* Copyright (c) 2016 by Contributors
* \file base.h
* \file nnvm/base.h
* \brief Configuration of nnvm as well as basic data structure.
*/
#ifndef NNVM_BASE_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file c_api.h
* \file nnvm/c_api.h
* \brief C API of NNVM symbolic construction and pass.
* Enables construction and transformation of Graph
*  in any other host language.
......
/*!
* Copyright (c) 2017 by Contributors
* \file packed_func_ext.h
* \file nnvm/compiler/packed_func_ext.h
* \brief Extension to enable packed functions for nnvm types
*/
#ifndef NNVM_COMPILER_PACKED_FUNC_EXT_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file util.h
* \file nnvm/compiler/util.h
* \brief Utility functions for nnvm compiler
*/
#ifndef NNVM_COMPILER_UTIL_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file graph.h
* \file nnvm/graph.h
* \brief Configuration of nnvm as well as basic data structure.
*/
#ifndef NNVM_GRAPH_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file graph_attr_types.h
* \file nnvm/graph_attr_types.h
* \brief Data structures that can appear in graph attributes.
*/
#ifndef NNVM_GRAPH_ATTR_TYPES_H_
......
/*!
* Copyright (c) 2018 by Contributors
* \file layout.h
* \file nnvm/layout.h
* \brief Layout expression.
* The layout is composed of upper cases, lower cases and numbers,
* where upper case indicates a (super-)dimension and
......
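To make the layout notation above concrete, a minimal sketch (illustrative only, not part of this commit) assuming the nnvm::Layout string constructor plus the ndim(), sublayout() and reverse() accessors that DotCorrectLayout uses later in this diff; the "NCHW16c" string is just an example value:
#include <nnvm/layout.h>

int main() {
  // "NCHW16c": upper-case letters name primary (super-)dimensions,
  // and "16c" names a sub-dimension that splits C into blocks of 16
  // (assumed reading of the comment in nnvm/layout.h).
  nnvm::Layout layout("NCHW16c");
  // Accessors also used by DotCorrectLayout: drop the last axis and
  // flip the axis order.
  nnvm::Layout head = layout.sublayout(0, layout.ndim() - 1);
  nnvm::Layout flipped = layout.reverse();
  (void)head;
  (void)flipped;
  return 0;
}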
/*!
* Copyright (c) 2016 by Contributors
* \file node.h
* \file nnvm/node.h
* \brief Graph node data structure.
*/
#ifndef NNVM_NODE_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file op.h
* \file nnvm/op.h
* \brief Operator information structure.
*/
#ifndef NNVM_OP_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file pass.h
* \file nnvm/pass.h
* \brief Pass that can be applied to a graph.
*/
#ifndef NNVM_PASS_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file pass_functions.h
* \file nnvm/pass_functions.h
* \brief Pass functions that simply redirect the calls to ApplyPass
*
* This file serves as documentation on how to use functions implemented in "src/pass".
......
/*!
* Copyright (c) 2016 by Contributors
* \file symbolic.h
* \file nnvm/symbolic.h
* \brief Symbolic graph construction API
*
* This API is optional, but useful to allow user
......
/*!
* Copyright (c) 2017 by Contributors
* \file nn.h
* \file nnvm/top/nn.h
* \brief Auxiliary param for tensor primitive.
*/
#ifndef NNVM_TOP_NN_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file tensor.h
* \file nnvm/top/tensor.h
* \brief Auxiliary param for tensor primitive.
*/
#ifndef NNVM_TOP_TENSOR_H_
......
/*!
* Copyright (c) 2016 by Contributors
* \file tuple.h
* \file nnvm/tuple.h
* \brief Data structure Tuple and TShape to store dynamic sized shapes.
*/
#ifndef NNVM_TUPLE_H_
......
......@@ -59,8 +59,8 @@ inline bool DotCorrectLayout(const NodeAttrs& attrs,
// concat lhs and rhs layout
const Layout& lhs_out = param.transpose_a ? lhs.reverse() : lhs;
const Layout& rhs_out = param.transpose_b ? rhs.reverse() : rhs;
Layout out = std::move(lhs_out.sublayout(0, lhs_out.ndim()-1) +
rhs_out.sublayout(1, rhs_out.ndim()-1));
Layout out = lhs_out.sublayout(0, lhs_out.ndim()-1) +
rhs_out.sublayout(1, rhs_out.ndim()-1);
NNVM_ASSIGN_LAYOUT(*olayouts, 0, out);
}
return true;
......
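A side note on the change above (illustrative, not part of the diff): the sublayout concatenation already yields a temporary, so wrapping it in std::move is redundant, suppresses copy elision, and can trip clang's -Wpessimizing-move; dropping it changes nothing else. A minimal standalone sketch with std::vector standing in for Layout:
#include <utility>
#include <vector>

std::vector<int> concat() { return {1, 2, 3}; }

int main() {
  // Preferred: the temporary returned by concat() initializes `out`
  // directly, allowing copy elision.
  std::vector<int> out = concat();
  // Redundant: std::move on a temporary blocks elision and is exactly
  // the pattern -Wpessimizing-move warns about.
  std::vector<int> moved = std::move(concat());
  (void)out;
  (void)moved;
  return 0;
}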
......@@ -28,6 +28,23 @@ RUN cd recommonmark; python setup.py install
# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz libprotobuf-dev protobuf-compiler
COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
RUN bash /install/ubuntu_install_java.sh
COPY install/ubuntu_install_nodejs.sh /install/ubuntu_install_nodejs.sh
RUN bash /install/ubuntu_install_nodejs.sh
COPY install/ubuntu_install_rocm.sh /install/ubuntu_install_rocm.sh
RUN bash /install/ubuntu_install_rocm.sh
COPY install/ubuntu_install_opengl.sh /install/ubuntu_install_opengl.sh
RUN bash /install/ubuntu_install_opengl.sh
COPY install/ubuntu_install_vulkan.sh /install/ubuntu_install_vulkan.sh
RUN bash /install/ubuntu_install_vulkan.sh
# DL Frameworks
COPY install/ubuntu_install_mxnet.sh /install/ubuntu_install_mxnet.sh
RUN bash /install/ubuntu_install_mxnet.sh
......@@ -53,3 +70,10 @@ ENV CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${CPLUS_INCLUDE_PATH}
ENV C_INCLUDE_PATH=/usr/local/cuda/include:${C_INCLUDE_PATH}
ENV LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LIBRARY_PATH}
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
ENV LD_LIBRARY_PATH=/opt/rocm/lib:${LD_LIBRARY_PATH}
ENV PATH=/node_modules/.bin:${PATH}
ENV VULKAN_SDK=/usr/local/VulkanSDK/1.0.65.0/x86_64
ENV PATH=${PATH}:${VULKAN_SDK}/bin
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${VULKAN_SDK}/lib
ENV VK_LAYER_PATH=${VULKAN_SDK}/etc/explicit_layer.d
#!/bin/bash
echo "Build TVM..."
cd tvm
cp make/config.mk .
echo USE_CUDNN=1 >> config.mk
echo USE_CUDA=1 >> config.mk
echo USE_OPENCL=1 >> config.mk
echo LLVM_CONFIG=llvm-config-4.0 >> config.mk
echo USE_RPC=1 >> config.mk
echo USE_BLAS=openblas >> config.mk
echo USE_GRAPH_RUNTIME=1 >> config.mk
make "$@"
cd ..
echo "Build NNVM..."
make "$@"
#!/bin/bash
export PYTHONPATH=python:tvm/python:tvm/topi/python
echo "Running unittest..."
python -m nose -v tests/python/unittest || exit -1
python3 -m nose -v tests/python/unittest || exit -1
echo "Running compiler test..."
python -m nose -v tests/python/compiler || exit -1
python3 -m nose -v tests/python/compiler || exit -1
Tutorials
=========
This page contains tutorials for NNVM.
FROM nvidia/cuda:8.0-cudnn7-devel
# Base scripts
RUN apt-get update --fix-missing
COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh
......@@ -11,8 +12,8 @@ RUN bash /install/ubuntu_install_python.sh
COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh
COPY install/ubuntu_install_iverilog.sh /install/ubuntu_install_iverilog.sh
RUN bash /install/ubuntu_install_iverilog.sh
COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
RUN bash /install/ubuntu_install_opencl.sh
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
......@@ -24,42 +25,31 @@ RUN bash /install/ubuntu_install_sphinx.sh
RUN git clone https://github.com/rtfd/recommonmark
RUN cd recommonmark; python setup.py install
COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
RUN bash /install/ubuntu_install_java.sh
# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz libprotobuf-dev protobuf-compiler
COPY install/ubuntu_install_nodejs.sh /install/ubuntu_install_nodejs.sh
RUN bash /install/ubuntu_install_nodejs.sh
# DL Frameworks
COPY install/ubuntu_install_mxnet.sh /install/ubuntu_install_mxnet.sh
RUN bash /install/ubuntu_install_mxnet.sh
COPY install/ubuntu_install_rocm.sh /install/ubuntu_install_rocm.sh
RUN bash /install/ubuntu_install_rocm.sh
COPY install/ubuntu_install_onnx.sh /install/ubuntu_install_onnx.sh
RUN bash /install/ubuntu_install_onnx.sh
COPY install/ubuntu_install_opengl.sh /install/ubuntu_install_opengl.sh
RUN bash /install/ubuntu_install_opengl.sh
COPY install/ubuntu_install_coreml.sh /install/ubuntu_install_coreml.sh
RUN bash /install/ubuntu_install_coreml.sh
COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
RUN bash /install/ubuntu_install_opencl.sh
COPY install/ubuntu_install_keras.sh /install/ubuntu_install_keras.sh
RUN bash /install/ubuntu_install_keras.sh
# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz
# Install vulkan
COPY install/ubuntu_install_vulkan.sh /install/ubuntu_install_vulkan.sh
RUN bash /install/ubuntu_install_vulkan.sh
COPY install/ubuntu_install_darknet.sh /install/ubuntu_install_darknet.sh
RUN bash /install/ubuntu_install_darknet.sh
RUN pip install Pillow
# Environment variables
ENV PATH=/node_modules/.bin:${PATH}
ENV PATH=/usr/local/nvidia/bin:${PATH}
ENV PATH=/usr/local/cuda/bin:${PATH}
ENV CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${CPLUS_INCLUDE_PATH}
ENV C_INCLUDE_PATH=/usr/local/cuda/include:${C_INCLUDE_PATH}
ENV LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LIBRARY_PATH}
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
ENV LD_LIBRARY_PATH=/opt/rocm/lib:${LD_LIBRARY_PATH}
ENV PATH=/node_modules/.bin:${PATH}
ENV VULKAN_SDK=/usr/local/VulkanSDK/1.0.65.0/x86_64
ENV PATH=${PATH}:${VULKAN_SDK}/bin
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${VULKAN_SDK}/lib
ENV VK_LAYER_PATH=${VULKAN_SDK}/etc/explicit_layer.d
# Install the necessary dependencies: cffi, opencv
wget 'https://github.com/siju-samuel/darknet/blob/master/lib/libdarknet.so?raw=true' -O libdarknet.so
pip2 install opencv-python cffi
pip3 install opencv-python cffi
pip2 install keras tensorflow h5py
pip2 install mxnet
pip3 install mxnet
pip2 install "onnx>=1.1.0"
pip3 install "onnx>=1.1.0"
pip2 install http://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp27-cp27mu-manylinux1_x86_64.whl
pip2 install torchvision
pip3 install http://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp35-cp35m-manylinux1_x86_64.whl
pip3 install torchvision
#!/bin/bash
echo "Build TVM..."
make "$@"
cd nnvm
echo "Build NNVM..."
make "$@"
#!/bin/bash
echo "Cleanup data..."
cd tvm
cd nnvm
make clean
cd ..
make clean
#!/bin/bash
export PYTHONPATH=nnvm/python:python:topi/python
echo "Running unittest..."
python -m nose -v nnvm/tests/python/unittest || exit -1
python3 -m nose -v nnvm/tests/python/unittest || exit -1
echo "Running compiler test..."
python -m nose -v nnvm/tests/python/compiler || exit -1
python3 -m nose -v nnvm/tests/python/compiler || exit -1
echo "Running ONNX frontend test..."
python -m nose -v tests/python/frontend/onnx || exit -1
echo "Running MXNet frontend test..."
python -m nose -v tests/python/frontend/mxnet || exit -1
echo "Running Keras frontend test..."
python -m nose -v tests/python/frontend/keras || exit -1
/*!
* Copyright (c) 2017 by Contributors
* \brief NN op constructions
* \file nn.h
* \file topi/nn.h
*/
#ifndef TOPI_NN_H_
#define TOPI_NN_H_
......
Compile Deep Learning Models
----------------------------