Commit e531d022 by tqchen Committed by Tianqi Chen

[BUILD][DOCS] Migrate VTA CI, test, build, docs

parent bc410130
......@@ -8,11 +8,6 @@ include(cmake/util/FindVulkan.cmake)
include(cmake/util/FindLLVM.cmake)
include(cmake/util/FindROCM.cmake)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake)
include(${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake)
endif()
if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
include(${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
else()
......@@ -40,6 +35,8 @@ tvm_option(USE_RTTI "Build with RTTI" ON)
tvm_option(USE_MSVC_MT "Build with MT" OFF)
tvm_option(INSTALL_DEV "Install compiler infrastructure" OFF)
tvm_option(USE_VTA_CFG "Use a specific json file for VTA runtime" "")
# Contrib library options
tvm_option(USE_BLAS "The blas library to be linked" none)
tvm_option(USE_MKL_PATH "MKL root path when use MKL blas" none)
......@@ -52,8 +49,9 @@ tvm_option(USE_NNPACK "Build with nnpack support" OFF)
tvm_option(USE_RANDOM "Build with random support" OFF)
# include directories
include_directories(BEFORE "nnvm/include")
include_directories("include")
include_directories("nnvm/include")
include_directories("dmlc-core/include")
include_directories("HalideIR/src")
include_directories("dlpack/include")
include_directories("topi/include")
......@@ -148,20 +146,8 @@ if(USE_GRAPH_RUNTIME)
endif(USE_GRAPH_RUNTIME_DEBUG)
endif(USE_GRAPH_RUNTIME)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include)
if (INSTALL_DEV)
install(
DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include/." DESTINATION "include"
FILES_MATCHING
PATTERN "*.h"
)
endif()
elseif(DMLC_CORE_PATH)
include_directories(${DMLC_CORE_PATH}/include)
endif()
# Module rules
include(cmake/modules/VTA.cmake)
include(cmake/modules/CUDA.cmake)
include(cmake/modules/OpenCL.cmake)
include(cmake/modules/OpenGL.cmake)
......@@ -174,7 +160,6 @@ include(cmake/modules/contrib/Random.cmake)
include(cmake/modules/contrib/Sort.cmake)
include(cmake/modules/contrib/NNPack.cmake)
# Target rules
add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS})
add_library(tvm_topi SHARED ${TOPI_SRCS})
add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
......@@ -207,7 +192,6 @@ endif()
# Custom targets
add_custom_target(runtime DEPENDS tvm_runtime)
# Installation rules
install(TARGETS tvm_runtime DESTINATION lib${LIB_SUFFIX})
if(WIN32)
......
......@@ -8,7 +8,7 @@ tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
tvm_lib = "build/libtvm.so, " + tvm_runtime
// LLVM upstream lib
tvm_multilib = "build/libtvm.so, " +
"build/libtvm_topi.so, build/libnnvm_compiler.so, " + tvm_runtime
"build/libvta.so, build/libtvm_topi.so, build/libnnvm_compiler.so, " + tvm_runtime
// command to start a docker container
docker_run = 'docker/build.sh'
......@@ -134,6 +134,7 @@ stage('Build') {
pack_lib('cpu', tvm_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ci_cpu ./tests/scripts/task_cpp_unittest.sh"
sh "${docker_run} ci_cpu ./tests/scripts/task_python_vta.sh"
}
}
}
......@@ -179,6 +180,7 @@ stage('Unit Test') {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ci_i386 ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ci_i386 ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ci_i386 ./tests/scripts/task_python_vta.sh"
}
}
}
......
ROOTDIR = $(CURDIR)
.PHONY: clean all test doc pylint cpplint lint\
cython cython2 cython3 web runtime
cython cython2 cython3 web runtime vta
ifndef DMLC_CORE_PATH
DMLC_CORE_PATH = $(ROOTDIR)/dmlc-core
......@@ -20,9 +20,11 @@ all:
@mkdir -p build && cd build && cmake .. && $(MAKE)
runtime:
@mkdir -p build && cd build && cmake .. && $(MAKE) runtime
vta:
@mkdir -p build && cd build && cmake .. && $(MAKE) vta
cpptest:
@mkdir -p build && cd build && cmake .. && $(MAKE) cpptest
......@@ -48,6 +50,7 @@ build/libtvm_web_runtime.js: build/libtvm_web_runtime.bc
# Lint scripts
cpplint:
python3 dmlc-core/scripts/lint.py vta cpp vta/include vta/src
python3 dmlc-core/scripts/lint.py topi cpp topi/include;
python3 dmlc-core/scripts/lint.py nnvm cpp nnvm/include nnvm/src;
python3 dmlc-core/scripts/lint.py tvm cpp include src verilog\
......@@ -57,6 +60,7 @@ pylint:
python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc
python3 -m pylint nnvm/python/nnvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
jnilint:
python3 dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src
......
#!/bin/bash
PROJROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" && pwd )"
export PYTHONPATH=${PYTHONPATH}:${PROJROOT}/python:${PROJROOT}/vta/python
python -m vta.exec.rpc_server
# CMake Build rules for VTA
find_program(PYTHON python)
if(MSVC)
message(STATUS "VTA build is skipped in Windows..")
elseif(PYTHON)
set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py)
if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
message(STATUS "Use VTA config " ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py
--use-cfg=${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
endif()
execute_process(COMMAND ${VTA_CONFIG} --target OUTPUT_VARIABLE __vta_target)
string(STRIP ${__vta_target} VTA_TARGET)
message(STATUS "Build VTA runtime with target: " ${VTA_TARGET})
execute_process(COMMAND ${VTA_CONFIG} --defs OUTPUT_VARIABLE __vta_defs)
string(REGEX MATCHALL "(^| )-D[A-Za-z0-9_=.]*" VTA_DEFINITIONS "${__vta_defs}")
file(GLOB VTA_RUNTIME_SRCS vta/src/*.cc)
file(GLOB __vta_target_srcs vta/src/${VTA_TARGET}/*.cc)
list(APPEND VTA_RUNTIME_SRCS ${__vta_target_srcs})
add_library(vta SHARED ${VTA_RUNTIME_SRCS})
target_include_directories(vta PUBLIC vta/include)
foreach(__def ${VTA_DEFINITIONS})
string(SUBSTRING ${__def} 3 -1 __strip_def)
target_compile_definitions(vta PUBLIC ${__strip_def})
endforeach()
if(APPLE)
set_target_properties(vta PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
endif(APPLE)
# PYNQ rules
if(${VTA_TARGET} STREQUAL "pynq")
find_library(__sds_lib NAMES sds_lib PATHS /usr/lib)
find_library(__dma_lib NAMES dma PATHS
"/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/"
"/opt/python3.6/lib/python3.6/site-packages/pynq/lib/")
target_link_libraries(vta ${__sds_lib} ${__dma_lib})
endif()
else()
message(STATUS "Cannot found python in env, VTA build is skipped..")
endif()
......@@ -753,7 +753,7 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = include/tvm topi/include/topi nnvm/include/nnvm
INPUT = include/tvm topi/include/topi nnvm/include/nnvm vta/include/vta
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
......
......@@ -46,6 +46,8 @@ help:
clean:
rm -rf $(BUILDDIR)/*
rm -rf gen_modules
rm -rf tutorials
rm -rf vta/tutorials
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
......
The documentation of tvm is generated with recommonmark and sphinx.
TVM Documentations
==================
This folder contains the source of TVM's documentation.
- A hosted version of the docs is at http://docs.tvm.ai
- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark
- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark Pillow
- Build tvm first in the root folder.
- To build locally, you need to enable USE_CUDA, USE_OPENCL, LLVM_CONFIG in config.mk and then type "make html" in this folder (see the sketch below).
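A minimal sketch of a local build, assuming TVM is already built and the pip packages above are installed:

```bash
# Run from this docs folder; the HTML output lands in _build/html
make html
```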
Only Execute Specified Tutorials
--------------------------------
The documentation build executes all the tutorials in the sphinx gallery.
This can fail when certain machines lack the necessary environment.
You can set ```TVM_TUTORIAL_EXEC_PATTERN``` to execute only the tutorials
whose path matches the regular expression pattern.
For example, to only build tutorials under /vta/tutorials, run
```bash
TVM_TUTORIAL_EXEC_PATTERN=/vta/tutorials make html
```
To only build one specific file, do
```bash
# The backslash \ escapes the . in the regular expression
TVM_TUTORIAL_EXEC_PATTERN=file_name\.py make html
```
......@@ -6,10 +6,6 @@ tvm.hybrid
tvm.hybrid.parse
tvm.hybrid.script
tvm.hybrid.popcount
tvm.hybrid.sigmoid
.. autofunction:: tvm.hybrid.parse
.. autofunction:: tvm.hybrid.script
.. autofunction:: tvm.hybrid.popcount
.. autofunction:: tvm.hybrid.sigmoid
......@@ -20,5 +20,6 @@ Python API
contrib
dev
topi
vta/index
nnvm/index
hybrid
Python API
==========
VTA API
=======
This document contains the Python API to the VTA compiler toolchain.
......
......@@ -26,6 +26,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../python/'))
sys.path.insert(0, os.path.join(curr_path, '../topi/python'))
sys.path.insert(0, os.path.join(curr_path, '../nnvm/python'))
sys.path.insert(0, os.path.join(curr_path, '../vta/python'))
# -- General configuration ------------------------------------------------
......@@ -184,16 +185,17 @@ intersphinx_mapping = {
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = ['../tutorials/']
gallery_dirs = ['tutorials']
examples_dirs = ["../tutorials/", "../vta/tutorials/"]
gallery_dirs = ["tutorials", "vta/tutorials"]
subsection_order = ExplicitOrder(
['../tutorials/language',
'../tutorials/optimize',
'../tutorials/vta',
'../tutorials/topi',
'../tutorials/deployment',
'../tutorials/nnvm'])
def generate_doxygen_xml(app):
"""Run the doxygen make commands if we're on the ReadTheDocs server"""
run_doxygen('..')
......@@ -220,7 +222,7 @@ sphinx_gallery_conf = {
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'subsection_order': subsection_order,
'filename_pattern': os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
'find_mayavi_figures': False,
'filename_pattern': '.py',
'expected_failing_examples': []
}
......@@ -8,6 +8,7 @@ Get Started
install/index
tutorials/index
vta/index
deploy/index
contribute/index
faq
......
Installation
============
To install TVM, please read :ref:`install-from-source`.
If you are interested in deploying to mobile/embedded devices,
you do not need to install the entire tvm stack on your device,
......
tutorials
\ No newline at end of file
VTA Hardware Design Overview
============================
VTA: Deep Learning Accelerator Stack
====================================
Specialized accelerators are key enablers of future deep learning workloads, and the TVM stack targets them directly.
VTA (Versatile Tensor Accelerator) is a generic, modular, open-source deep learning accelerator.
This page contains links to all the resources related to VTA:
.. toctree::
:maxdepth: 1
install
tutorials/index
hardware
Features
--------
VTA has the following key features:
- Generic, modular open-source hardware
- Streamlined workflow to deploy to FPGAs.
- Simulator support to prototype compilation passes on regular workstations.
- Driver and JIT runtime for both the simulated and FPGA hardware backends.
- End-to-end TVM stack integration
#!/bin/bash
export PYTHONPATH=python:nnvm/python:vta/python:topi/python
echo "Running unittest..."
python -m nose -v vta/tests/python/unittest || exit -1
python3 -m nose -v vta/tests/python/unittest || exit -1
echo "Running integration test..."
python -m nose -v vta/tests/python/integration || exit -1
python3 -m nose -v vta/tests/python/integration || exit -1
Contributing to VTA
===================
VTA is part of the TVM software/hardware stack.
We adopt an Apache-style committer model.
The package is developed and used by the community.
We actively seek committers from among community contributors who:
- Have made substantial contributions to the project.
- All forms of contribution are valued (see details in the next section).
- Are willing to spend time maintaining and leading the project.
Contributions
-------------
We value all forms of contribution. Here is a non-exhaustive
list of contributions that are welcome:
- Documentation and usage examples
- Hardware implementations of the design.
- Community participation, answering questions and issues.
- Code readability and developer guide
- We welcome contributions that add code comments
to improve readability
- We also welcome contributions to the docs that explain the
design choices of the internals.
- Test cases to make the codebase more robust
- Tutorials, blog posts, talks that promote the project.
How to Contribute
-----------------
See the [Contributor guide](docs/how_to/contribute.md) for how to contribute.
Committers
----------
Committers are people who have made substantial contributions to the project and have been granted write access to the project.
- [Thierry Moreau](http://homes.cs.washington.edu/~moreau/), University of Washington
- [Tianqi Chen](https://github.com/tqchen), University of Washington
#!groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// vta libraries
vta_lib = "lib/libvta.so, lib/libvta.so.json, config.json"
vta_lib += ", tvm/build/libtvm.so, tvm/build/libtvm_topi.so, tvm/build/libnnvm_compiler.so"
// command to start a docker container
docker_run = 'tests/ci_build/ci_build.sh'
// timeout in minutes
max_time = 60
// initialize source codes
def init_git() {
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
sh 'git submodule update --init --recursive'
}
}
}
def init_git_win() {
checkout scm
retry(5) {
timeout(time: 2, unit: 'MINUTES') {
bat 'git submodule update --init --recursive'
}
}
}
stage("Sanity Check") {
timeout(time: max_time, unit: 'MINUTES') {
node('linux') {
ws('workspace/vta/sanity') {
init_git()
sh "${docker_run} lint ./tests/scripts/task_lint.sh"
}
}
}
}
// Run make. First try an incremental make from a previous workspace in hopes of
// accelerating compilation. If something goes wrong, clean the workspace and then
// build from scratch.
def make(docker_type, make_flag) {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${docker_type} cp make/sim_sample.json config.json"
try {
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
} catch (exc) {
echo 'Incremental compilation failed. Fall back to build from scratch'
sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
}
}
}
// pack libraries for later use
def pack_lib(name, libs) {
sh """
echo "Packing ${libs} into ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
stash includes: libs, name: name
}
// unpack libraries saved before
def unpack_lib(name, libs) {
unstash name
sh """
echo "Unpacked ${libs} from ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
}
stage('Build') {
timeout(time: max_time, unit: 'MINUTES') {
node('linux') {
ws('workspace/vta/build') {
init_git()
make('cpu', '-j2')
pack_lib('cpu', vta_lib)
}
}
}
}
stage('Tests') {
parallel 'python': {
node('linux') {
ws('workspace/vta/it-python') {
init_git()
unpack_lib('cpu', vta_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} cpu ./tests/scripts/task_python_test.sh"
}
}
}
},
'docs': {
node('linux') {
ws('workspace/vta/docs-python') {
init_git()
unpack_lib('cpu', vta_lib)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} cpu ./tests/scripts/task_python_docs.sh"
}
pack_lib('mydocs', 'docs.tgz')
}
}
}
}
stage('Deploy') {
node('docker' && 'doc') {
ws('workspace/vta/deploy-docs') {
if (env.BRANCH_NAME == "master") {
unpack_lib('mydocs', 'docs.tgz')
sh "tar xf docs.tgz -C /var/vta-docs"
}
}
}
}
ROOTDIR = $(CURDIR)
export LDFLAGS = -pthread -lm
export CFLAGS = -std=c++11 -Wall -O2 -Iinclude -fPIC
VTA_CONFIG = python make/vta_config.py
CFLAGS += `${VTA_CONFIG} --cflags`
LDFLAGS += `${VTA_CONFIG} --ldflags`
VTA_TARGET := $(shell ${VTA_CONFIG} --target)
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S), Darwin)
SHARED_LIBRARY_SUFFIX := dylib
WHOLE_ARCH= -all_load
NO_WHOLE_ARCH= -noall_load
LDFLAGS += -undefined dynamic_lookup
else
SHARED_LIBRARY_SUFFIX := so
WHOLE_ARCH= --whole-archive
NO_WHOLE_ARCH= --no-whole-archive
endif
VTA_LIB_SRC = $(wildcard src/*.cc)
ifeq (${VTA_TARGET}, pynq)
VTA_LIB_SRC += $(wildcard src/pynq/*.cc)
endif
ifeq (${VTA_TARGET}, sim)
VTA_LIB_SRC += $(wildcard src/sim/*.cc)
endif
VTA_LIB_OBJ = $(patsubst src/%.cc, build/%.o, $(VTA_LIB_SRC))
all: lib/libvta.so lib/libvta.so.json
build/%.o: src/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) $(CFLAGS) -c $< -o $@
lib/libvta.so.json: lib/libvta.so
@mkdir -p $(@D)
${VTA_CONFIG} --cfg-json > $@
lib/libvta.so: $(VTA_LIB_OBJ)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS)
lint: pylint cpplint
cpplint:
python3 tvm/dmlc-core/scripts/lint.py vta cpp include src
pylint:
python3 -m pylint python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
doc:
doxygen docs/Doxyfile
clean:
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o
-include build/*.d
-include build/*/*.d
-include build/*/*/*.d
-include build/*/*/*/*.d
VTA Change Log
==============
This file records the changes in the VTA stack in reverse chronological order.
## Initial version
- Vivado based hardware.
- Driver for PYNQ board.
- Runtime library.
- TVM compiler stack.
- Resnet-18 example.
VTA: Open, Modular, Deep Learning Accelerator Stack
===================================================
[![Build Status](http://mode-gpu.cs.washington.edu:8080/buildStatus/icon?job=uwsaml/vta/master)](http://mode-gpu.cs.washington.edu:8080/job/uwsaml/job/vta/job/master/)
[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
VTA (Versatile Tensor Accelerator) is an open-source deep learning accelerator stack.
It is not just open-source hardware, but an end-to-end solution that includes
the entire software stack on top of the VTA open-source hardware.
......@@ -18,16 +15,4 @@ The key features include:
- Customized and extendible TVM compiler backend.
- Flexible RPC support to ease the deployment, and program FPGAs with Python
VTA is part of our effort on [TVM Stack](http://www.tvmlang.org/).
VTA Installation
----------------
To get started with VTA, please follow the [Installation Guide](docs/how_to/install.md)
ResNet-18 Inference Example
---------------------------
To offload ResNet-18 inference, follow the [ResNet-18 Guide](examples/resnet18/pynq/README.md)
License
-------
© Contributors, 2018. Licensed under an [Apache-2.0](https://github.com/tmoreau89/vta/blob/master/LICENSE) license.
VTA is part of our effort on TVM Stack.
#!/bin/bash
export PYTHONPATH=${PYTHONPATH}:/home/xilinx/vta/tvm/python:/home/xilinx/vta/python
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/
python -m vta.exec.rpc_server
# VTA Configuration
Each VTA runtime/hardware configuration is specified by config.json file.
You can copy the config.json to project root and modify the configuration
Each VTA runtime/hardware configuration is specified by the vta_config.json file.
You can copy vta_config.json to the TVM project root and modify the configuration
before you type make.
The config affects the behavior of the Python package as well as
the hardware runtime build.
\ No newline at end of file
the hardware runtime build.
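For example, a typical workflow might look like this (a minimal sketch, assuming the vta tree lives inside the tvm repo as in this commit):

```bash
# Copy the default config to the TVM project root and customize it
cp vta/config/vta_config.json vta_config.json
# ... edit vta_config.json, e.g. set TARGET to "sim" or "pynq" ...
# Rebuild so the Python package and hardware runtime pick up the new config
make
```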
......@@ -7,8 +7,8 @@ import argparse
def get_pkg_config(cfg):
"""Get the pkg config object."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../"))
pkg_config_py = os.path.join(proj_root, "python/vta/pkg_config.py")
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
pkg_config_py = os.path.join(proj_root, "vta/python/vta/pkg_config.py")
libpkg = {"__file__": pkg_config_py}
exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
PkgConfig = libpkg["PkgConfig"]
......@@ -18,14 +18,22 @@ def get_pkg_config(cfg):
def main():
"""Main funciton"""
parser = argparse.ArgumentParser()
parser.add_argument("--use-cfg", type=str, default="",
help="path to the config json")
parser.add_argument("--cflags", action="store_true",
help="print the cflags")
parser.add_argument("--defs", action="store_true",
help="print the macro defs")
parser.add_argument("--sources", action="store_true",
help="print the source file paths")
parser.add_argument("--update", action="store_true",
help="Print out the json option.")
parser.add_argument("--ldflags", action="store_true",
help="print the cflags")
parser.add_argument("--cfg-json", action="store_true",
help="print all the config json")
parser.add_argument("--save-cfg-json", type=str, default="",
help="save config json to file")
parser.add_argument("--target", action="store_true",
help="print the target")
parser.add_argument("--cfg-str", action="store_true",
......@@ -66,11 +74,14 @@ def main():
curr_path = os.path.dirname(
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../"))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
path_list = [
os.path.join(proj_root, "config.json"),
os.path.join(proj_root, "make/config.json")
os.path.join(proj_root, "vta_config.json"),
os.path.join(proj_root, "build", "vta_config.json"),
os.path.join(proj_root, "vta/config/vta_config.json")
]
if args.use_cfg:
path_list = [args.use_cfg]
ok_path_list = [p for p in path_list if os.path.exists(p)]
if not ok_path_list:
raise RuntimeError("Cannot find config in %s" % str(path_list))
......@@ -82,6 +93,12 @@ def main():
if args.target:
print(pkg.target)
if args.defs:
print(" ".join(pkg.macro_defs))
if args.sources:
print(" ".join(pkg.lib_source))
if args.cflags:
cflags_str = " ".join(pkg.cflags)
if cfg["TARGET"] == "pynq":
......@@ -94,6 +111,10 @@ def main():
if args.cfg_json:
print(pkg.cfg_json)
if args.save_cfg_json:
with open(args.save_cfg_json, "w") as fo:
fo.write(pkg.cfg_json)
if args.cfg_str:
# Needs to match the BITSTREAM string in python/vta/environment.py
cfg_str = "{}x{}x{}_{}bx{}b_{}_{}_{}_{}_{}MHz_{}ns_v{}".format(
......
doxygen
modules
tutorials
_build
gen_modules
\ No newline at end of file
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = python3 -m sphinx
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
#ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
#$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
#endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " coverage to run coverage check of the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
rm -rf gen_modules
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rabit.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rabit.qhc"
applehelp:
$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
@echo
@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
@echo "N.B. You won't be able to view it unless you put it in" \
"~/Library/Documentation/Help or install it in your application" \
"bundle."
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/rabit"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rabit"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
coverage:
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
@echo "Testing of coverage in the sources finished, look at the " \
"results in $(BUILDDIR)/coverage/python.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
The documentation of vta is generated with recommonmark and sphinx.
- pip install sphinx>=1.5.5 sphinx-gallery sphinx_rtd_theme matplotlib Image recommonmark
- Type "make html" to generate the doc
- To build only the doxygen docs, type "make doc" at the project root (see the sketch below)
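A minimal sketch of the two build paths described above:

```bash
make html   # run in this docs folder: builds the full sphinx docs
make doc    # run at the project root: builds only the doxygen C++ docs
```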
.rst-content .hidden-section {
display: none;
}
.rst-toc .hidden-section {
display: none;
}
nav .hidden-section {
display: inherit;
}
Links to API References
=======================
This page contains links to API references that are built with different doc build systems.
* `C++ doxygen API <doxygen/index.html>`_
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from tvm.contrib import rpc, graph_runtime
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../python/'))
# -- General configuration ------------------------------------------------
# General information about the project.
project = u'vta'
author = u'%s developers' % project
copyright = u'2018, %s' % author
github_doc_root = 'https://github.com/uwsaml/vta/tree/master/docs/'
# add markdown parser
CommonMarkParser.github_doc_root = github_doc_root
source_parsers = {
'.md': CommonMarkParser
}
os.environ['VTA_BUILD_DOC'] = '1'
# Version information.
import vta
version = vta.__version__
release = vta.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery',
]
breathe_projects = {'vta' : 'doxygen/xml/'}
breathe_default_project = 'vta'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get('VTA_THEME', 'rtd')
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == 'rtd':
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%s.tex' % project, project,
author, 'manual'),
]
# hook for doxygen
def run_doxygen(folder):
"""Run the doxygen make command in the designated folder."""
try:
retcode = subprocess.call("cd %s; make doc" % folder, shell=True)
retcode = subprocess.call("rm -rf _build/html/doxygen", shell=True)
retcode = subprocess.call("mkdir -p _build/html", shell=True)
retcode = subprocess.call("cp -rf doxygen/html _build/html/doxygen", shell=True)
if retcode < 0:
sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org/', None),
'tvm': ('https://docs.tvm.ai/', None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = ['../tutorials/']
gallery_dirs = ['tutorials']
subsection_order = ExplicitOrder([])
def generate_doxygen_xml(app):
"""Run the doxygen make commands if we're on the ReadTheDocs server"""
run_doxygen('..')
def setup(app):
# Add hook for building doxygen xml when needed
# no c++ API for now
app.connect("builder-inited", generate_doxygen_xml)
app.add_stylesheet('css/tvm_theme.css')
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_doc_ref': True
}, True)
app.add_transform(AutoStructify)
sphinx_gallery_conf = {
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('vta', 'numpy'),
'reference_url': {
'vta': None,
'tvm': 'https://docs.tvm.ai',
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'subsection_order': subsection_order,
'find_mayavi_figures': False,
'filename_pattern': '.py',
'expected_failing_examples': []
}
VTA Design and Developer Guide
==============================
Building a hardware stack for deep learning involves
many systems-level design decisions.
In this part of the documentation, we share the rationale for the specific choices made when designing VTA.
.. toctree::
:maxdepth: 2
runtime
# VTA Runtime System
TODO: Document the hardware runtime system.
\ No newline at end of file
# Contribute to VTA
VTA has been developed by community members.
Everyone is more than welcome to contribute.
It is a way to make the project better and more accessible to more users.
VTA is part of the TVM software/hardware stack;
you can improve compiler performance by contributing to [TVM](https://github.com/dmlc/tvm)
- Please add your name to [CONTRIBUTORS.md](https://github.com/dmlc/vta/blob/master/CONTRIBUTORS.md)
- Please update [NEWS.md](https://github.com/dmlc/vta/blob/master/NEWS.md) to note your changes to the API or any newly added documents.
## Guidelines
* [Submit Pull Request](#submit-pull-request)
* [Git Workflow Howtos](#git-workflow-howtos)
- [How to resolve conflict with master](#how-to-resolve-conflict-with-master)
- [How to combine multiple commits into one](#how-to-combine-multiple-commits-into-one)
- [What is the consequence of force push](#what-is-the-consequence-of-force-push)
* [Document](#document)
* [Testcases](#testcases)
* [Core Library](#core-library)
* [Python Package](#python-package)
## Submit Pull Request
* Before submitting, please rebase your code on the most recent version of master. You can do so by
```bash
git remote add upstream [url to vta repo]
git fetch upstream
git rebase upstream/master
```
* If you have multiple small commits,
it might be good to merge them together (use git rebase then squash) into more meaningful groups.
* Send the pull request!
- Fix the problems reported by automatic checks
- If you are contributing a new module or new function, add a test.
## Git Workflow Howtos
### How to resolve conflict with master
- First rebase to most recent master
```bash
# The first two steps can be skipped after you do it once.
git remote add upstream [url to vta repo]
git fetch upstream
git rebase upstream/master
```
- Git may show some conflicts it cannot merge, say ```conflicted.py```.
- Manually modify the file to resolve the conflict.
- After you have resolved the conflict, mark it as resolved by
```bash
git add conflicted.py
```
- Then you can continue the rebase by
```bash
git rebase --continue
```
- Finally, push to your fork; you may need to force push here.
```bash
git push --force
```
### How to combine multiple commits into one
Sometimes we want to combine multiple commits, especially when later commits are only fixes to previous ones,
to create a PR with a set of meaningful commits. You can do so with the following steps.
- Before doing so, configure the default editor of git if you haven't done so before.
```bash
git config core.editor the-editor-you-like
```
- Assuming we want to merge the last 3 commits, type the following commands
```bash
git rebase -i HEAD~3
```
- It will pop up a text editor. Set the first commit as ```pick```, and change later ones to ```squash```.
- After you save the file, another text editor will pop up asking you to modify the combined commit message.
- Push the changes to your fork; you need to force push.
```bash
git push --force
```
### Reset to the most recent master
You can always use git reset to reset your version to the most recent master.
Note that all your ***local changes will get lost***.
So only do it when you have no local changes or when your pull request has just been merged.
```bash
git reset --hard [hash tag of master]
git push --force
```
### What is the consequence of force push
The previous two tips require a force push because we altered the commit history.
It is fine to force push to your own fork, as long as the changed commits are only yours.
## Testcases
- All the testcases are in the tests directory.
## Core Library
- Follow Google C style for C++.
- We use doxygen to document all the interface code.
- You can reproduce the linter checks by typing ```make lint```
## Python Package
- Always add docstring to the new functions in numpydoc format.
- You can reproduce the linter checks by typing ```make lint```
VTA Documentation
=================
Welcome to VTA documentation.
Contents
--------
.. toctree::
:maxdepth: 1
self
how_to/install
tutorials/index
how_to/contribute
api/python/index
dev/index
api_links
quantize_graph.json
quantize_params.pkl
synset.txt
*.jpg
vta.bit
\ No newline at end of file
# Resnet-18 Example on Pynq-based VTA Design
Follow the first two parts of the [Installation Guide](../../../docs/how_to/install.md) to make sure that the VTA python libraries are installed, and that the RPC server is running on the Pynq FPGA dev board.
We recommend leaving `config.json` at its default parameterization (of course you can change the target between "sim" and "pynq").
Simply run the example program. We rely on pickle to store parameters, which currently only works with Python 2.
```bash
python2 imagenet_predict.py
```
The script will first download the following files into the `_data/` directory:
* `cat.jpg` which provides a test sample for the ImageNet classifier
* `quantize_graph.json` which describes the NNVM graph of the 8-bit ResNet-18
* `quantize_params.pkl` which contains the network parameters
* `synset.txt` which contains the ImageNet categories
Next, it will run ImageNet classification using the ResNet-18 architecture on a VTA design that performs 8-bit integer inference, classifying the cat image `cat.jpg`.
The script reports the runtime measured on the Pynq board (in seconds) and the top-1 result category:
```
('x', (1, 3, 224, 224))
Build complete...
('TVM prediction top-1:', 281, 'tabby, tabby cat')
t-cost=0.41906
```
......@@ -25,7 +25,7 @@ NO_DSP = false
NO_ALU = false
# Process VTA JSON config
VTA_CONFIG = python $(CURDIR)/../../make/vta_config.py
VTA_CONFIG = python $(CURDIR)/../../config/vta_config.py
CFLAGS := $(shell ${VTA_CONFIG} --cflags)
VTA_TARGET := $(shell ${VTA_CONFIG} --target)
......
......@@ -297,11 +297,12 @@ def _init_env():
"""Iniitalize the default global env"""
curr_path = os.path.dirname(
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))
path_list = [
os.path.join(curr_path, "config.json"),
os.path.join(proj_root, "config.json"),
os.path.join(proj_root, "make/config.json")
os.path.join(curr_path, "vta_config.json"),
os.path.join(proj_root, "build", "vta_config.json"),
os.path.join(proj_root, "vta_config.json"),
os.path.join(proj_root, "vta/config/vta_config.json")
]
path_list = [p for p in path_list if os.path.exists(p)]
if not path_list:
......
......@@ -11,23 +11,25 @@ import ctypes
import json
import tvm
from tvm._ffi.base import c_str
from tvm.contrib import rpc, cc
from tvm import rpc
from tvm.contrib import cc
from ..environment import get_env
from ..pkg_config import PkgConfig
from ..libinfo import find_libvta
@tvm.register_func("tvm.contrib.rpc.server.start", override=True)
@tvm.register_func("tvm.rpc.server.start", override=True)
def server_start():
"""VTA RPC server extension."""
# pylint: disable=unused-variable
curr_path = os.path.dirname(
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../.."))
dll_path = os.path.abspath(os.path.join(proj_root, "lib/libvta.so"))
cfg_path = os.path.abspath(os.path.join(proj_root, "lib/libvta.so.json"))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../../"))
dll_path = find_libvta()[0]
cfg_path = os.path.abspath(os.path.join(proj_root, "build/vta_config.json"))
runtime_dll = []
_load_module = tvm.get_global_func("tvm.contrib.rpc.server.load_module")
_load_module = tvm.get_global_func("tvm.rpc.server.load_module")
def load_vta_dll():
"""Try to load vta dll"""
......@@ -36,7 +38,7 @@ def server_start():
logging.info("Loading VTA library: %s", dll_path)
return runtime_dll[0]
@tvm.register_func("tvm.contrib.rpc.server.load_module", override=True)
@tvm.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
load_vta_dll()
return _load_module(file_name)
......@@ -48,11 +50,11 @@ def server_start():
@tvm.register_func("tvm.contrib.vta.init", override=True)
def program_fpga(file_name):
path = tvm.get_global_func("tvm.contrib.rpc.server.workpath")(file_name)
path = tvm.get_global_func("tvm.rpc.server.workpath")(file_name)
load_vta_dll().VTAProgram(c_str(path))
logging.info("Program FPGA with %s", file_name)
@tvm.register_func("tvm.contrib.rpc.server.shutdown", override=True)
@tvm.register_func("tvm.rpc.server.shutdown", override=True)
def server_shutdown():
if runtime_dll:
runtime_dll[0].VTARuntimeShutdown()
......
"""Library information."""
from __future__ import absolute_import
import sys
import os
def _get_lib_name():
if sys.platform.startswith('win32'):
return "vta.dll"
if sys.platform.startswith('darwin'):
return "libvta.dylib"
return "libvta.so"
def find_libvta(optional=False):
"""Find VTA library"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_search = [curr_path]
lib_search += [os.path.join(curr_path, "..", "..", "..", "build",)]
lib_search += [os.path.join(curr_path, "..", "..", "..", "build", "Release")]
lib_name = _get_lib_name()
lib_path = [os.path.join(x, lib_name) for x in lib_search]
lib_found = [x for x in lib_path if os.path.exists(x)]
if not lib_found and not optional:
raise RuntimeError("Cannot find libvta: candidates are: " % str(lib_path))
return lib_found
......@@ -41,14 +41,14 @@ class PkgConfig(object):
# include path
self.include_path = [
"-I%s/include" % proj_root,
"-I%s/tvm/include" % proj_root,
"-I%s/tvm/dlpack/include" % proj_root,
"-I%s/tvm/dmlc-core/include" % proj_root
"-I%s/vta/include" % proj_root,
"-I%s/dlpack/include" % proj_root,
"-I%s/dmlc-core/include" % proj_root
]
# List of source files that can be used to build standalone library.
self.lib_source = []
self.lib_source += glob.glob("%s/src/*.cc" % proj_root)
self.lib_source += glob.glob("%s/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
self.lib_source += glob.glob("%s/vta/src/*.cc" % proj_root)
self.lib_source += glob.glob("%s/vta/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
# macro keys
self.macro_defs = []
self.cfg_dict = {}
......
"""Utilities to start simulator."""
import os
import ctypes
import json
import tvm
from ..libinfo import find_libvta
def _load_lib():
"""Load local library, assuming they are simulator."""
# pylint: disable=unused-variable
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
os.path.abspath(os.path.join(curr_path, "../../../lib/libvta.so")),
]
runtime_dll = []
if not all(os.path.exists(f) for f in dll_path):
lib_path = find_libvta(optional=True)
if not lib_path:
return []
try:
for fname in dll_path:
runtime_dll.append(ctypes.CDLL(fname, ctypes.RTLD_GLOBAL))
return runtime_dll
return [ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)]
except OSError:
return []
......
......@@ -2,7 +2,7 @@
from __future__ import absolute_import as _abs
import os
from tvm.contrib import rpc
from tvm import rpc
from ..environment import get_env
from . import simulator
......
......@@ -8,6 +8,7 @@ import tvm
import topi
from nnvm.top import registry as reg, OpPattern
from nnvm.top import nn as _nn
from ..environment import get_env
......@@ -238,9 +239,9 @@ def is_packed_layout(layout):
"""Check if layout is packed layout"""
if layout == "NCHW":
return False
assert "n" in layout
assert "c" in layout
return True
if "n" in layout and "c" in layout:
return True
return False
@reg.register_alter_op_layout("conv2d", level=15)
def alter_conv2d_layout(*_):
......@@ -255,27 +256,18 @@ def compute_conv2d(attrs, inputs, out):
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
channels = attrs.get_int("channels")
layout = attrs["layout"]
out_dtype = attrs['out_dtype']
assert dilation == (1, 1), "not support dilate now"
assert attrs.get_bool("use_bias") is False
if is_packed_layout(layout):
assert groups == 1
return packed_conv2d(inputs[0], inputs[1],
padding, strides, out_dtype=out_dtype)
if groups == 1:
out = topi.nn.conv2d(inputs[0], inputs[1], strides, padding, out_dtype=out_dtype)
elif groups == get_const_int(inputs[0].shape[1]) and groups == channels:
out = topi.nn.depthwise_conv2d_nchw(
inputs[0], inputs[1], strides, padding, out_dtype=out_dtype)
else:
raise ValueError("not support arbitrary group number for now")
return out
return _nn.compute_conv2d(attrs, inputs, out)
@reg.register_schedule("conv2d", level=15)
def schedule_quantized_conv2d(attrs, outs, target):
def schedule_conv2d(attrs, outs, target):
""" 2D convolution schedule.
"""
layout = attrs["layout"]
......@@ -288,8 +280,7 @@ def schedule_quantized_conv2d(attrs, outs, target):
return tvm.create_schedule([x.op for x in outs])
else:
raise RuntimeError("not support target %s" % target)
with tvm.target.create(target):
return topi.generic.schedule_conv2d_nchw(outs)
return _nn.schedule_conv2d(attrs, outs, target)
def _get_workload(data, pad_data, kernel, output):
......
......@@ -8,7 +8,7 @@
#include <dmlc/thread_local.h>
#include <vta/runtime.h>
#include "../tvm/src/runtime/workspace_pool.h"
#include "../../src/runtime/workspace_pool.h"
namespace tvm {
......
......@@ -155,8 +155,8 @@ class UopKernel {
le.dst_factor = dst_factor;
le.src_factor = src_factor;
le.wgt_factor = wgt_factor;
assert(seq_.size() == 0);
assert(loop_.size() < 2);
CHECK_EQ(seq_.size(), 0U);
CHECK_LT(loop_.size(), 2U);
loop_.push_back(le);
++loop_ptr_;
}
......@@ -196,13 +196,13 @@ class UopKernel {
if (mode_ == 0xFFFFFFFF) {
mode_ = mode;
} else {
assert(mode_ == mode);
CHECK(mode_ == mode);
}
// Set reset_out field if unset
if (reset_out_ == 0xFFFFFFFF) {
reset_out_ = reset_out;
} else {
assert(reset_out_ == reset_out);
CHECK(reset_out_ == reset_out);
}
// Check kernel op and imm/imm_val in ALU mode
if (mode == 1) {
......@@ -211,9 +211,9 @@ class UopKernel {
use_imm_ = use_imm;
imm_val_ = imm_val;
} else {
assert(opcode_ == opcode);
assert(use_imm_ == use_imm);
assert(imm_val_ == imm_val);
CHECK(opcode_ == opcode);
CHECK(use_imm_ == use_imm);
CHECK(imm_val_ == imm_val);
}
}
}
......@@ -244,7 +244,7 @@ class UopKernel {
void VerifyDep(uint32_t dst_index) {
size_t step = std::min(static_cast<size_t>(2U), seq_.size());
for (size_t i = seq_.size() - step; i < seq_.size(); ++i) {
assert(seq_[i].dst_idx != dst_index);
CHECK(seq_[i].dst_idx != dst_index);
}
}
// The uop buffer
......@@ -293,7 +293,7 @@ class BaseQueue {
elem_bytes_ = elem_bytes;
dram_buffer_ = static_cast<char*>(VTAMemAlloc(
max_bytes, coherent || always_cache_));
assert(dram_buffer_ != nullptr);
CHECK(dram_buffer_ != nullptr);
dram_phy_addr_ = VTAMemGetPhyAddr(dram_buffer_);
}
/*!
......@@ -363,9 +363,9 @@ class UopQueue : public BaseQueue {
size_t num_op = kernel->size();
if (dram_end_ + num_op > kMaxElems) {
fautosync();
assert(dram_end_ <= kMaxElems);
CHECK(dram_end_ <= kMaxElems);
}
assert(num_op <= kMaxNumUop);
CHECK(num_op <= kMaxNumUop);
uint32_t uop_begin = 0;
if (sram_end_ + num_op > kMaxNumUop) {
// Need to evict
......@@ -390,7 +390,7 @@ class UopQueue : public BaseQueue {
kernel->sram_begin_ = uop_begin;
kernel->sram_end_ = sram_end_;
CHECK(kernel->cached());
assert(uop_begin != sram_end_);
CHECK(uop_begin != sram_end_);
cache_.insert(cache_.begin() + cache_ptr_, kernel);
cache_.erase(cache_.begin() + evict_begin, cache_.begin() + cache_ptr_);
cache_ptr_ = evict_begin + 1;
......@@ -398,7 +398,7 @@ class UopQueue : public BaseQueue {
// Flush as weight load
void FlushUopLoad(VTAMemInsn* insn) {
if (sram_begin_ != sram_end_) {
assert((dram_end_ - dram_begin_) == (sram_end_ - sram_begin_));
CHECK((dram_end_ - dram_begin_) == (sram_end_ - sram_begin_));
insn->memory_type = VTA_MEM_ID_UOP;
insn->sram_base = sram_begin_;
insn->dram_base = dram_phy_addr_ / kElemBytes + dram_begin_;
......@@ -433,12 +433,12 @@ class UopKernelMap {
UopKernel** Get(void* signature,
int nbytes) {
uint32_t key = 0;
assert(nbytes == 0 || nbytes == sizeof(int));
CHECK(nbytes == 0 || nbytes == sizeof(int));
if (nbytes == sizeof(int)) {
memcpy(&key, signature, sizeof(int));
key = key + 1;
}
assert(key < 100);
CHECK_LT(key, 100);
if (kmap_.size() <= key) {
kmap_.resize(key + 1, nullptr);
}
......@@ -490,8 +490,8 @@ class InsnQueue : public BaseQueue {
pending_pop_next_[to] = 1;
}
// Impossible condition
assert(from != kLoadStage || to != kStoreStage);
assert(to != kLoadStage || to != kComputeStage);
CHECK(from != kLoadStage || to != kStoreStage);
CHECK(to != kLoadStage || to != kComputeStage);
}
// Insert dependency push of load
void DepPush(int from, int to) {
......@@ -636,15 +636,15 @@ class InsnQueue : public BaseQueue {
// Count status in queues
if (c.mem.opcode == VTA_OPCODE_LOAD || c.mem.opcode == VTA_OPCODE_STORE) {
if (c.mem.opcode == VTA_OPCODE_STORE) {
assert(c.mem.pop_next_dep == false);
assert(c.mem.push_next_dep == false);
CHECK(c.mem.pop_next_dep == false);
CHECK(c.mem.push_next_dep == false);
if (c.mem.pop_prev_dep) g2s_queue--;
if (c.mem.push_prev_dep) s2g_queue++;
} else if (c.mem.opcode == VTA_OPCODE_LOAD &&
(c.mem.memory_type == VTA_MEM_ID_INP ||
c.mem.memory_type == VTA_MEM_ID_WGT) ) {
assert(c.mem.pop_prev_dep == false);
assert(c.mem.push_prev_dep == false);
CHECK(c.mem.pop_prev_dep == false);
CHECK(c.mem.push_prev_dep == false);
if (c.mem.pop_next_dep) g2l_queue--;
if (c.mem.push_next_dep) l2g_queue++;
} else {
......@@ -742,15 +742,15 @@ class InsnQueue : public BaseQueue {
// Count status in queues
if (c.mem.opcode == VTA_OPCODE_LOAD || c.mem.opcode == VTA_OPCODE_STORE) {
if (c.mem.opcode == VTA_OPCODE_STORE) {
assert(c.mem.pop_next_dep == false);
assert(c.mem.push_next_dep == false);
CHECK(c.mem.pop_next_dep == false);
CHECK(c.mem.push_next_dep == false);
if (c.mem.pop_prev_dep) g2s_queue--;
if (c.mem.push_prev_dep) s2g_queue++;
} else if (c.mem.opcode == VTA_OPCODE_LOAD &&
(c.mem.memory_type == VTA_MEM_ID_INP ||
c.mem.memory_type == VTA_MEM_ID_WGT) ) {
assert(c.mem.pop_prev_dep == false);
assert(c.mem.push_prev_dep == false);
CHECK(c.mem.pop_prev_dep == false);
CHECK(c.mem.push_prev_dep == false);
if (c.mem.pop_next_dep) g2l_queue--;
if (c.mem.push_next_dep) l2g_queue++;
} else {
......@@ -776,7 +776,7 @@ class InsnQueue : public BaseQueue {
void CommitPendingPop(int stage) {
// Handle the LD<->compute queue
// NOTE: pop executes on target(stage)
assert(stage > 0 && stage < 4);
CHECK(stage > 0 && stage < 4);
if (pending_pop_prev_[stage] ||
pending_pop_next_[stage]) {
PushNoop(stage, false, false,
......@@ -806,7 +806,7 @@ class InsnQueue : public BaseQueue {
VTAGenericInsn* NextInsn() {
VTAGenericInsn* insn = data() + dram_end_;
++dram_end_;
assert(dram_end_ < kMaxElems);
CHECK(dram_end_ < kMaxElems);
return insn;
}
// Create a new instruction for a given stage
......@@ -840,10 +840,10 @@ class InsnQueue : public BaseQueue {
if (insn->opcode == VTA_OPCODE_STORE) {
// FIXME: Right now memory_type is a 2-bit field which means that
// VTA_MEM_ID_OUT will appear as 0. For now we'll refrain from
// checking the memory_type to avoid an assertion error...
// checking the memory_type to avoid a CHECK failure...
return kStoreStage;
}
assert(false);
LOG(FATAL) << "not reached";
return kNoneStage;
}
// Push no-op
......@@ -888,7 +888,7 @@ class CommandQueue {
uop_queue_.InitSpace();
insn_queue_.InitSpace();
device_ = VTADeviceAlloc();
assert(device_ != nullptr);
CHECK(device_ != nullptr);
printf("Initialize VTACommandHandle...\n");
}
......@@ -906,8 +906,7 @@ class CommandQueue {
case VTA_MEM_ID_OUT: return VTA_INP_ELEM_BYTES;
default: break;
}
printf("Memory id not recognized: %d\n", memory_id);
assert(false);
LOG(FATAL) << "Memory id not recognized:" << memory_id;
return 0;
}
......@@ -999,7 +998,7 @@ class CommandQueue {
// NOTE: FINISH cannot contain pop
VTAGemInsn* insn = insn_queue_.CreateGemInsn();
insn->opcode = VTA_OPCODE_FINISH;
assert(!insn_queue_.PendingPop());
CHECK(!insn_queue_.PendingPop());
// Check if there are no instructions to execute at all
if (insn_queue_.count() == 0) return;
// Synchronization for the queues
......@@ -1010,17 +1009,17 @@ class CommandQueue {
insn_queue_.DumpInsn();
}
// Make sure that the last instruction is a finish instruction
assert(reinterpret_cast<VTAMemInsn*>(
CHECK(reinterpret_cast<VTAMemInsn*>(
insn_queue_.data())[insn_queue_.count()-1].opcode == VTA_OPCODE_FINISH);
// Make sure that we don't exceed contiguous physical memory limits
assert(insn_queue_.count() * sizeof(VTAGenericInsn) < VTA_MAX_XFER);
CHECK(insn_queue_.count() * sizeof(VTAGenericInsn) < VTA_MAX_XFER);
int timeout = VTADeviceRun(
device_,
insn_queue_.dram_phy_addr(),
insn_queue_.count(),
wait_cycles);
assert(timeout == 0);
CHECK_EQ(timeout, 0);
// Reset buffers
uop_queue_.Reset();
insn_queue_.Reset();
......@@ -1028,7 +1027,7 @@ class CommandQueue {
// Get record kernel
UopKernel* record_kernel() const {
assert(record_kernel_ != nullptr);
CHECK(record_kernel_ != nullptr);
return record_kernel_;
}
......@@ -1048,7 +1047,7 @@ class CommandQueue {
UopKernel** kptr = uptr[0]->Get(signature, nbytes);
if (kptr[0] == nullptr) {
record_kernel_ = new UopKernel(static_cast<char*>(signature), nbytes);
assert((*finit)(signature) == 0);
CHECK_EQ((*finit)(signature), 0);
kptr[0] = static_cast<UopKernel*>(record_kernel_);
if (debug_flag_ & VTA_DEBUG_DUMP_UOP) {
record_kernel_->Dump();
......@@ -1070,7 +1069,7 @@ class CommandQueue {
UopKernel** kptr = uptr[0]->Get(signature, nbytes);
if (kptr[0] == nullptr) {
record_kernel_ = new UopKernel(static_cast<char*>(signature), nbytes);
assert((*finit)(signature) == 0);
CHECK_EQ((*finit)(signature), 0);
kptr[0] = static_cast<UopKernel*>(record_kernel_);
if (debug_flag_ & VTA_DEBUG_DUMP_UOP) {
record_kernel_->Dump();
......
# For CPU
FROM ubuntu:16.04
RUN apt-get update --fix-missing
COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh
COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh
COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh
COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh
COPY install/ubuntu_install_sphinx.sh /install/ubuntu_install_sphinx.sh
RUN bash /install/ubuntu_install_sphinx.sh
# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz
# Upgrade recommonmark to the latest version (installed from source below)
RUN git clone https://github.com/rtfd/recommonmark
RUN cd recommonmark; python3 setup.py install
# For lint test
FROM ubuntu:16.04
RUN apt-get update && apt-get install -y sudo wget
COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh
RUN apt-get install -y doxygen graphviz
RUN pip3 install cpplint pylint
# CI Build Scripts
This directory contains the files and setup instructions to run all tests.
## Run locally
To run locally, we need to first install
[docker](https://docs.docker.com/engine/installation/).
Then we can run the tasks defined in the [Jenkinsfile](../../Jenkinsfile) by
using [`ci_build.sh`](./ci_build.sh). For example:
- lint the Python code
```bash
./ci_build.sh lint make pylint
```
- build with CUDA support
```bash
./ci_build.sh gpu tests/scripts/task_build.sh
```
- run the Python unit tests
```bash
./ci_build.sh gpu tests/scripts/task_python_test.sh
```
- build the documentation; the results will be available at `docs/_build/html`
```bash
tests/ci_build/ci_build.sh gpu tests/scripts/task_python_docs.sh
```
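- open an interactive shell in a container for debugging. The `-it` flag is
  parsed by `ci_build.sh` (see below); this is a hypothetical session and
  assumes a `Dockerfile.cpu` exists in this directory
```bash
./ci_build.sh cpu -it bash
```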
#!/usr/bin/env bash
#
# Execute command within a docker container
#
# Usage: ci_build.sh <CONTAINER_TYPE> [--dockerfile <DOCKERFILE_PATH>] [-it]
# <COMMAND>
#
# CONTAINER_TYPE: Type of the docker container used to run the build: e.g.,
# (cpu | gpu)
#
# DOCKERFILE_PATH: (Optional) Path to the Dockerfile used for docker build. If
# this value is not supplied (via the --dockerfile
# flag), Dockerfile.CONTAINER_TYPE is used by default
#
# COMMAND: Command to be executed in the docker container
#
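# Example invocations (for illustration; the first two mirror the README in
# this directory, the third opens a hypothetical interactive debugging shell):
#
#   ci_build.sh lint make pylint
#   ci_build.sh gpu tests/scripts/task_build.sh
#   ci_build.sh cpu -it bash
#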
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Get the command line arguments.
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1
# Dockerfile to be used in docker build
DOCKERFILE_PATH="${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}"
DOCKER_CONTEXT_PATH="${SCRIPT_DIR}"
if [[ "$1" == "--dockerfile" ]]; then
DOCKERFILE_PATH="$2"
DOCKER_CONTEXT_PATH=$(dirname "${DOCKERFILE_PATH}")
echo "Using custom Dockerfile path: ${DOCKERFILE_PATH}"
echo "Using custom docker build context path: ${DOCKER_CONTEXT_PATH}"
shift 2
fi
if [[ "$1" == "-it" ]]; then
CI_DOCKER_EXTRA_PARAMS+=('-it')
shift 1
fi
if [[ ! -f "${DOCKERFILE_PATH}" ]]; then
echo "Invalid Dockerfile path: \"${DOCKERFILE_PATH}\""
exit 1
fi
COMMAND=("$@")
# Validate command line arguments.
if [ "$#" -lt 1 ] || [ ! -e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then
supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \
sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' )
echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND"
echo " CONTAINER_TYPE can be one of [${supported_container_types}]"
echo " COMMAND is a command (with arguments) to run inside"
echo " the container."
exit 1
fi
# Use nvidia-docker if the container is GPU.
if [[ "${CONTAINER_TYPE}" == *"gpu"* ]]; then
DOCKER_BINARY="nvidia-docker"
else
DOCKER_BINARY="docker"
fi
# Helper function to traverse directories up until given file is found.
function upsearch () {
test / == "$PWD" && return || \
test -e "$1" && echo "$PWD" && return || \
cd .. && upsearch "$1"
}
# Set up WORKSPACE and BUILD_TAG. Jenkins will set them for you or we pick
# reasonable defaults if you run it outside of Jenkins.
WORKSPACE="${WORKSPACE:-${SCRIPT_DIR}/../../}"
BUILD_TAG="${BUILD_TAG:-nnvm-ci}"
# Determine the docker image name
DOCKER_IMG_NAME="${BUILD_TAG}.${CONTAINER_TYPE}"
# Under Jenkins matrix build, the build tag may contain characters such as
# commas (,) and equal signs (=), which are not valid inside docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g')
# Convert to all lower-case, as per requirement of Docker image names
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')
# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[@]}"
echo "COMMAND: ${COMMAND[@]}"
echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo "DOCKER CONTAINER NAME: ${DOCKER_IMG_NAME}"
echo ""
# Build the docker container.
echo "Building container (${DOCKER_IMG_NAME})..."
docker build -t ${DOCKER_IMG_NAME} \
-f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}"
# Check docker build status
if [[ $? != "0" ]]; then
echo "ERROR: docker build failed."
exit 1
fi
# Run the command inside the container.
echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."
# By default we clean up: remove the container once it finishes running (--rm)
# and share the PID namespace (--pid=host) so the process inside does not have
# pid 1 and SIGKILL is propagated to the process inside (Jenkins can kill it).
echo ${DOCKER_BINARY}
${DOCKER_BINARY} run --rm --pid=host \
-v ${WORKSPACE}:/workspace \
-w /workspace \
-e "CI_BUILD_HOME=/workspace" \
-e "CI_BUILD_USER=$(id -u -n)" \
-e "CI_BUILD_UID=$(id -u)" \
-e "CI_BUILD_GROUP=$(id -g -n)" \
-e "CI_BUILD_GID=$(id -g)" \
${CI_DOCKER_EXTRA_PARAMS[@]} \
${DOCKER_IMG_NAME} \
bash tests/ci_build/with_the_same_user \
${COMMAND[@]}
# install libraries for building c++ core on ubuntu
apt-get update && apt-get install -y --no-install-recommends --force-yes \
git make libgtest-dev cmake wget unzip libtinfo-dev libz-dev \
libcurl4-openssl-dev libopenblas-dev g++ sudo
cd /usr/src/gtest && cmake CMakeLists.txt && make && cp *.a /usr/lib
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
>> /etc/apt/sources.list.d/llvm.list
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
>> /etc/apt/sources.list.d/llvm.list
echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
>> /etc/apt/sources.list.d/llvm.list
wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
apt-get update && apt-get install -y --force-yes llvm-4.0 llvm-5.0 llvm-6.0 clang-6.0
# Install Python and pip; do not modify this script, modify install_python_package.sh instead
apt-get update && apt-get install -y python-dev
# python 3.6
apt-get update && yes | apt-get install software-properties-common
add-apt-repository ppa:jonathonf/python-3.6 &&\
apt-get update && apt-get install -y python-pip python-dev python3.6 python3.6-dev
rm -f /usr/bin/python3 && ln -s /usr/bin/python3.6 /usr/bin/python3
# Install pip
cd /tmp && wget https://bootstrap.pypa.io/get-pip.py && python2 get-pip.py && python3.6 get-pip.py
# install libraries for python package on ubuntu
pip2 install nose pylint numpy nose-timer cython decorator scipy tornado
pip3 install nose pylint numpy nose-timer cython decorator scipy tornado typed_ast
pip3 install sphinx sphinx-gallery sphinx_rtd_theme matplotlib Image "commonmark>=0.7.3" "docutils>=0.11"
#!/usr/bin/env bash
# This script is a wrapper that creates the same user inside the container as
# the one running ci_build.sh outside the container. It also sets the home
# directory for the user inside the container to match the absolute path of
# the workspace outside the container. Do not run this manually; it is
# intended to be called by ci_build.sh only.
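#
# For reference, ci_build.sh invokes this wrapper inside the container roughly
# as follows (with the CI_BUILD_* variables passed in via `docker run -e`):
#   bash tests/ci_build/with_the_same_user <command...>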
set -e
COMMAND=("$@")
if ! touch /this_is_writable_file_system; then
echo "You can't write to your filesystem!"
echo "If you are in Docker you should check you do not have too many images" \
"with too many files in them. Docker has some issue with it."
exit 1
else
rm /this_is_writable_file_system
fi
getent group "${CI_BUILD_GID}" || addgroup --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}"
getent passwd "${CI_BUILD_UID}" || adduser --gid "${CI_BUILD_GID}" --uid "${CI_BUILD_UID}" \
--gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \
--disabled-password --home "${CI_BUILD_HOME}" --quiet "${CI_BUILD_USER}"
usermod -a -G sudo "${CI_BUILD_USER}"
echo "${CI_BUILD_USER} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-nopasswd-sudo
HOME=${CI_BUILD_HOME} \
sudo -u "#${CI_BUILD_UID}" --preserve-env \
PATH=${PATH} \
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
HOME=${CI_BUILD_HOME} \
${COMMAND[@]}
......@@ -262,5 +262,4 @@ def test_vta_conv2d():
if __name__ == "__main__":
test_cpu_conv2d()
exit(0)
test_vta_conv2d()
......@@ -5,7 +5,8 @@ import vta
import numpy as np
import topi
from collections import namedtuple
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
import pandas as pd
host = os.environ.get("VTA_PYNQ_RPC_HOST", "pynq")
......
import os
import tvm
from tvm.contrib import rpc
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime
host = os.environ.get("VTA_PYNQ_RPC_HOST", "pynq")
......
......@@ -2,7 +2,7 @@
import tvm
import numpy as np
import topi
from tvm.contrib import rpc, util
from tvm.contrib import util
import vta
import vta.testing
......
#!/bin/bash
echo "Build TVM..."
cd tvm
cp cmake/config.cmake .
echo set\(USE_LLVM llvm-config-5.0\) >> config.cmake
echo set\(USE_RPC ON\) >> config.cmake
echo set\(USE_BLAS openblas\) >> config.cmake
echo set\(USE_GRAPH_RUNTIME ON\) >> config.cmake
make "$@"
make cython
make cython3
cd ..
echo "Build VTA..."
make "$@"
#!/bin/bash
echo "Cleanup data..."
cd tvm
make clean
cd ..
make clean
#!/bin/bash
echo "Check codestyle of c++ code..."
make cpplint || exit -1
echo "Check codestyle of python code..."
make pylint || exit -1
echo "Check documentations of c++ code..."
make doc 2>log.txt
(cat log.txt| grep -v ENABLE_PREPROCESSING |grep -v "unsupported tag") > logclean.txt
echo "---------Error Log----------"
cat logclean.txt
echo "----------------------------"
(cat logclean.txt|grep warning) && exit -1
(cat logclean.txt|grep error) && exit -1
rm logclean.txt
rm log.txt
#!/bin/bash
cd tvm
make cython
make cython3
cd ../
mkdir -p docs/_build/html
# C++ doc
make doc
rm -rf python/vta/*.pyc python/vta/*/*.pyc
cd docs
PYTHONPATH=../python:../tvm/python:../tvm/topi/python:../tvm/nnvm/python make html || exit -1
cd _build/html
tar czf docs.tgz *
mv docs.tgz ../../../
#!/bin/bash
export PYTHONPATH=python:tvm/nnvm/python:tvm/python:tvm/topi/python
echo "Running unittest..."
python -m nose -v tests/python/unittest || exit -1
python3 -m nose -v tests/python/unittest || exit -1
echo "Running integration test..."
python -m nose -v tests/python/integration || exit -1
python3 -m nose -v tests/python/integration || exit -1
Tutorials
=========
This page contains Python tutorials on how to use TVM to program VTA.
VTA Tutorials
=============
......@@ -5,7 +5,7 @@
This tutorial provides an overview of how to use TVM to map a 2D convolution
workload efficiently on the VTA design.
We recommend covering the :ref:`mat-mult-opt` tutorial first.
We recommend covering the :ref:`vta-mat-mult-opt` tutorial first.
2D convolution is dominant in most computer vision deep neural networks.
In this tutorial, we will demonstrate TVM schedule optimizations to map
......@@ -26,7 +26,8 @@ import tvm
import vta
import numpy as np
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the config.json file
......@@ -423,5 +424,3 @@ print("Successful 2D convolution test!")
# use of hardware specific optimizations, such as latency hiding with
# virtual threading.
#
......@@ -5,7 +5,7 @@ Simple Matrix Multiply
======================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
In this tutorial, we will build on top of the :ref:`get-started` tutorial
In this tutorial, we will build on top of the :ref:`vta-get-started` tutorial
and introduce additional concepts required to implement matrix multiplication
on VTA with the TVM workflow.
"""
......@@ -22,7 +22,8 @@ import os
import tvm
import vta
import numpy as np
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the config.json file
......@@ -107,7 +108,7 @@ elif env.TARGET == "sim":
# Tiling by a (2, 2) tile shape ensures that data within each tile is
# contiguous.
# The resulting tiled tensor has a shape of (2, 4, 2, 2).
#
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/data_tiling.png
# :align: center
# :width: 480px
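The (2, 2) tiling just described can be sketched in plain numpy (a minimal
illustration, assuming a (4, 8) input array; this is independent of the
tutorial's TVM code):

```python
import numpy as np

A = np.arange(4 * 8).reshape(4, 8)           # original (4, 8) matrix
# Split rows into (2, 2) and columns into (4, 2), then move the two outer
# axes to the front so each (2, 2) tile becomes a contiguous block.
A_tiled = A.reshape(2, 2, 4, 2).transpose(0, 2, 1, 3).copy()
assert A_tiled.shape == (2, 4, 2, 2)
assert (A_tiled[0, 0] == A[0:2, 0:2]).all()  # tile (0, 0) is the top-left 2x2
```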
......@@ -451,4 +452,3 @@ print("Successful matrix multiply test!")
# - Compiling the function to the VTA target.
# - Running the compiled module and verifying it against a numpy implementation.
#
"""
.. _mat-mult-opt:
.. _vta-mat-mult-opt:
Matrix Multiply Blocking
========================
......@@ -7,7 +7,7 @@ Matrix Multiply Blocking
This tutorial provides an overview of how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the :ref:`basic-mat-mult` tutorial first.
We recommend covering the :ref:`vta-basic-mat-mult` tutorial first.
In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within
......@@ -25,7 +25,8 @@ import os
import tvm
import vta
import numpy as np
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the config.json file
......@@ -183,7 +184,7 @@ print(tvm.lower(s, [data, weight, res], simple_mode=True))
# :width: 480px
#
# .. note::
#
#
# The code after loop splitting and reordering is equivalent to the following
# pseudo-code. We ignore the batch axis since we are only performing single-batch
# inference in this example:
......@@ -359,5 +360,3 @@ print("Successful blocked matrix multiply test!")
# This allows us to map arbitrarily large computation onto limited
# hardware accelerator resources.
#
......@@ -29,7 +29,8 @@ import requests
import time
from nnvm.compiler import graph_attr
from tvm.contrib import graph_runtime, rpc, util
from tvm import rpc
from tvm.contrib import graph_runtime, util
from tvm.contrib.download import download
from vta.testing import simulator
......
"""
.. _get-started:
.. _vta-get-started:
Get Started with VTA
====================
......@@ -49,7 +49,8 @@ env = vta.get_env()
# the board with a VTA bitstream.
# We'll need the TVM RPC module and the VTA simulator module
from tvm.contrib import rpc, util
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# We read the Pynq RPC host IP address and port number from the OS environment
......@@ -384,4 +385,3 @@ print("Successful vector add test!")
# to learn more about the supported operations, schedule primitives
# and other features supported by TVM to program VTA.
#