Unverified commit 683ed4a3 by Thierry Moreau, committed by GitHub

[VTA] VTA hardware/software codebase re-org (#5037)

parent 14ba49c6
......@@ -81,7 +81,7 @@ jnilint:
python3 3rdparty/dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src
scalalint:
make -C vta/hardware/chisel lint
make -C vta/vta-hw/hardware/chisel lint
lint: cpplint pylint jnilint scalalint
......
......@@ -18,7 +18,7 @@
PROJROOT="$( cd "$( dirname '${BASH_SOURCE[0]}' )/../../" && pwd )"
# Derive target specified by vta_config.json
VTA_CONFIG=${PROJROOT}/vta/config/vta_config.py
VTA_CONFIG=${PROJROOT}/vta/vta-hw/config/vta_config.py
TARGET=$(python ${VTA_CONFIG} --target)
export PYTHONPATH=${PYTHONPATH}:${PROJROOT}/python:${PROJROOT}/vta/python
......
......@@ -18,14 +18,17 @@
# CMake Build rules for VTA
find_program(PYTHON NAMES python python3 python3.6)
# VTA sources directory
set(VTA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vta/vta-hw)
if(MSVC)
message(STATUS "VTA build is skipped in Windows..")
elseif(PYTHON)
set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py)
set(VTA_CONFIG ${PYTHON} ${VTA_DIR}/config/vta_config.py)
if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
message(STATUS "Use VTA config " ${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
set(VTA_CONFIG ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/vta/config/vta_config.py
set(VTA_CONFIG ${PYTHON} ${VTA_DIR}/config/vta_config.py
--use-cfg=${CMAKE_CURRENT_BINARY_DIR}/vta_config.json)
endif()
......@@ -40,18 +43,18 @@ elseif(PYTHON)
# Fast simulator driver build
if(USE_VTA_FSIM)
# Add fsim driver sources
file(GLOB FSIM_RUNTIME_SRCS vta/src/*.cc)
list(APPEND FSIM_RUNTIME_SRCS vta/src/sim/sim_driver.cc)
list(APPEND FSIM_RUNTIME_SRCS vta/src/vmem/virtual_memory.cc vta/src/vmem/virtual_memory.h)
list(APPEND FSIM_RUNTIME_SRCS vta/src/sim/sim_tlpp.cc)
file(GLOB FSIM_RUNTIME_SRCS ${VTA_DIR}/src/*.cc)
file(GLOB FSIM_RUNTIME_SRCS vta/runtime/*.cc)
list(APPEND FSIM_RUNTIME_SRCS ${VTA_DIR}/src/sim/sim_driver.cc)
list(APPEND FSIM_RUNTIME_SRCS ${VTA_DIR}/src/sim/sim_tlpp.cc)
list(APPEND FSIM_RUNTIME_SRCS ${VTA_DIR}/src/vmem/virtual_memory.cc)
# Target lib: vta_fsim
add_library(vta_fsim SHARED ${FSIM_RUNTIME_SRCS})
target_include_directories(vta_fsim PUBLIC vta/include)
target_include_directories(vta_fsim PUBLIC ${VTA_DIR}/include)
foreach(__def ${VTA_DEFINITIONS})
string(SUBSTRING ${__def} 3 -1 __strip_def)
target_compile_definitions(vta_fsim PUBLIC ${__strip_def})
endforeach()
include_directories("vta/include")
if(APPLE)
set_target_properties(vta_fsim PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
endif(APPLE)
......@@ -61,18 +64,18 @@ elseif(PYTHON)
# Cycle accurate simulator driver build
if(USE_VTA_TSIM)
# Add tsim driver sources
file(GLOB TSIM_RUNTIME_SRCS vta/src/*.cc)
list(APPEND TSIM_RUNTIME_SRCS vta/src/tsim/tsim_driver.cc)
list(APPEND TSIM_RUNTIME_SRCS vta/src/dpi/module.cc)
list(APPEND TSIM_RUNTIME_SRCS vta/src/vmem/virtual_memory.cc vta/src/vmem/virtual_memory.h)
file(GLOB TSIM_RUNTIME_SRCS ${VTA_DIR}/src/*.cc)
file(GLOB TSIM_RUNTIME_SRCS vta/runtime/*.cc)
list(APPEND TSIM_RUNTIME_SRCS ${VTA_DIR}/src/tsim/tsim_driver.cc)
list(APPEND TSIM_RUNTIME_SRCS ${VTA_DIR}/src/dpi/module.cc)
list(APPEND TSIM_RUNTIME_SRCS ${VTA_DIR}/src/vmem/virtual_memory.cc)
# Target lib: vta_tsim
add_library(vta_tsim SHARED ${TSIM_RUNTIME_SRCS})
target_include_directories(vta_tsim PUBLIC vta/include)
target_include_directories(vta_tsim PUBLIC ${VTA_DIR}/include)
foreach(__def ${VTA_DEFINITIONS})
string(SUBSTRING ${__def} 3 -1 __strip_def)
target_compile_definitions(vta_tsim PUBLIC ${__strip_def})
endforeach()
include_directories("vta/include")
if(APPLE)
set_target_properties(vta_tsim PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
endif(APPLE)
......@@ -80,15 +83,15 @@ elseif(PYTHON)
# VTA FPGA driver sources
if(USE_VTA_FPGA)
file(GLOB FPGA_RUNTIME_SRCS vta/src/*.cc)
file(GLOB FPGA_RUNTIME_SRCS ${VTA_HW_DIR}/src/*.cc)
# Rules for Zynq-class FPGAs with pynq OS support (see pynq.io)
if(${VTA_TARGET} STREQUAL "pynq" OR
${VTA_TARGET} STREQUAL "ultra96")
list(APPEND FPGA_RUNTIME_SRCS vta/src/pynq/pynq_driver.cc)
list(APPEND FPGA_RUNTIME_SRCS ${VTA_HW_DIR}/src/pynq/pynq_driver.cc)
# Rules for Pynq v2.4
find_library(__cma_lib NAMES cma PATH /usr/lib)
elseif(${VTA_TARGET} STREQUAL "de10nano") # DE10-Nano rules
file(GLOB FPGA_RUNTIME_SRCS vta/src/de10nano/*.cc vta/src/*.cc)
file(GLOB FPGA_RUNTIME_SRCS ${VTA_HW_DIR}/src/de10nano/*.cc ${VTA_HW_DIR}/src/*.cc)
endif()
# Target lib: vta
add_library(vta SHARED ${FPGA_RUNTIME_SRCS})
......@@ -102,7 +105,7 @@ elseif(PYTHON)
target_link_libraries(vta ${__cma_lib})
elseif(${VTA_TARGET} STREQUAL "de10nano") # DE10-Nano rules
#target_compile_definitions(vta PUBLIC VTA_MAX_XFER=2097152) # (1<<21)
target_include_directories(vta PUBLIC vta/src/de10nano)
target_include_directories(vta PUBLIC ${VTA_HW_DIR}/src/de10nano)
target_include_directories(vta PUBLIC 3rdparty)
target_include_directories(vta PUBLIC
"/usr/local/intelFPGA_lite/18.1/embedded/ds-5/sw/gcc/arm-linux-gnueabihf/include")
......
......@@ -770,7 +770,7 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = include/tvm topi/include/topi vta/include/vta
INPUT = include/tvm topi/include/topi vta/vta-hw/include/vta
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
......
......@@ -21,7 +21,7 @@ VTA Configuration
The VTA stack incorporates both a hardware accelerator stack and
a TVM-based software stack.
VTA offers flexibility out of the box: by modifying the
``vta/config/vta_config.json`` high-level configuration file,
``vta/vta-hw/config/vta_config.json`` high-level configuration file,
the user can change the shape of the tensor intrinsic,
clock frequency, pipelining, data type width, and on-chip buffer sizes.
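For a quick sanity check, these parameters surface in Python through
``vta.get_env()``. A minimal sketch follows; the attribute names below are
the usual ``Environment`` fields, but treat them as assumptions that may
vary across VTA versions.

.. code:: python

   import vta

   # A minimal sketch: inspect the configuration parsed from
   # vta/vta-hw/config/vta_config.json. Attribute names here are
   # assumptions and may differ across VTA versions.
   env = vta.get_env()
   print(env.TARGET)                              # compiler target, e.g. "sim"
   print(env.BATCH, env.BLOCK_IN, env.BLOCK_OUT)  # tensor intrinsic shape
   print(env.INP_WIDTH, env.WGT_WIDTH)            # data type widths in bits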
......
......@@ -53,17 +53,17 @@ HLS Hardware Source Organization
The VTA design is currently specified in Vivado HLS C++, which is only supported
by Xilinx toolchains.
The VTA hardware sources are contained under ``vta/hardware/xilinx/sources``:
The VTA hardware sources are contained under ``vta/vta-hw/hardware/xilinx/sources``:
- ``vta.cc`` contains the definitions for each VTA module, as well as a top
level behavioral model for the top-level VTA design.
- ``vta.h`` contains type definitions using Xilinx ``ap_int`` types, and
function prototype declarations.
In addition, preprocessor macros are defined under ``vta/include/vta/hw_spec.h``.
In addition, preprocessor macros are defined under ``vta/vta-hw/include/vta/hw_spec.h``.
Many of these macro definitions are derived from the parameters listed in the
``vta/config/vta_config.json`` file.
The json file is processed by ``vta/config/vta_config.py`` to produce a string of
``vta/vta-hw/config/vta_config.json`` file.
The json file is processed by ``vta/vta-hw/config/vta_config.py`` to produce a string of
compile flags that define the preprocessor macros.
That string is used by the makefile in order to set those high-level
parameters in both the HLS hardware synthesis compiler, and the C++
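To make that flow concrete, here is a hedged sketch of querying
``vta_config.py`` the way the build scripts do. Only the ``--target``
flag appears elsewhere in this change; treat any other flag as an
assumption to be checked against the script's argument parser.

.. code:: python

   import subprocess

   # A minimal sketch: invoke vta_config.py as a subprocess and capture
   # the string it prints, as the makefiles do via shell substitution.
   VTA_CONFIG = "vta/vta-hw/config/vta_config.py"
   target = subprocess.check_output(
       ["python3", VTA_CONFIG, "--target"], text=True).strip()
   print("VTA target:", target)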
......@@ -220,7 +220,7 @@ Microarchitectural Overview
---------------------------
We describe the modules that compose the VTA design.
The module definitions are contained in ``vta/hardware/xilinx/sources/vta.cc``.
The module definitions are contained in ``vta/vta-hw/hardware/xilinx/sources/vta.cc``.
Fetch Module
~~~~~~~~~~~~
......
......@@ -52,7 +52,7 @@ You are invited to try out our [VTA programming tutorials](https://docs.tvm.ai/v
### Advanced Configuration (optional)
VTA is a generic configurable deep learning accelerator.
The configuration is specified by `vta_config.json` under the TVM root folder.
The configuration is specified by `vta_config.json` under `vta/vta-hw/config`.
This file provides an architectural specification of the VTA accelerator to parameterize the TVM compiler stack and the VTA hardware stack.
The VTA configuration file also specifies the TVM compiler target.
......@@ -62,9 +62,9 @@ To do so,
```bash
cd <tvm root>
vim vta/config/vta_config.json
vim vta/vta-hw/config/vta_config.json
# edit vta_config.json
make vta
make
```
## VTA Pynq-Based Test Setup
......@@ -122,7 +122,7 @@ echo 'set(USE_VTA_FSIM OFF)' >> build/config.cmake
echo 'set(USE_VTA_TSIM OFF)' >> build/config.cmake
echo 'set(USE_VTA_FPGA ON)' >> build/config.cmake
# Copy pynq specific configuration
cp vta/config/pynq_sample.json vta/config/vta_config.json
cp vta/vta-hw/config/pynq_sample.json vta/vta-hw/config/vta_config.json
cd build
cmake ..
make runtime vta -j2
......@@ -156,7 +156,7 @@ In addition, you'll need to edit the `vta_config.json` file on the host to indic
```bash
# On the Host-side
cd <tvm root>
cp vta/config/pynq_sample.json vta/config/vta_config.json
cp vta/vta-hw/config/pynq_sample.json vta/vta-hw/config/vta_config.json
```
Once again, we will run the 2D convolution testbench.
......@@ -347,11 +347,11 @@ For this custom VTA bitstream compilation exercise, we'll change the frequency o
* Set the `HW_FREQ` field to `142`. The Pynq board supports 100, 142, 167, and 200 MHz clocks. Note that the higher the frequency, the harder it will be to close timing. Increasing the frequency can lead to timing violations and thus faulty hardware execution.
* Set the `HW_CLK_TARGET` to `6`. This parameter refers to the target clock period in nanoseconds for HLS: a lower clock period leads to more aggressive pipelining to achieve timing closure at higher frequencies. Technically a 142 MHz clock would require a 7 ns target, but we intentionally lower the clock target to 6 ns to pipeline the design more aggressively (a scripted version of this edit is sketched below).
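The same edit can be scripted. A minimal sketch, assuming `HW_FREQ` and `HW_CLK_TARGET` are top-level keys of `vta_config.json` as the bullets above describe:

```python
import json

# A minimal sketch of the edit described above; key names follow the
# bullets in this guide, and the file is assumed to be flat JSON.
path = "vta/vta-hw/config/vta_config.json"
with open(path) as f:
    cfg = json.load(f)
cfg["HW_FREQ"] = 142       # MHz; the Pynq board supports 100/142/167/200
cfg["HW_CLK_TARGET"] = 6   # HLS target clock period in nanoseconds
with open(path, "w") as f:
    json.dump(cfg, f, indent=2)
```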
Bitstream generation is driven by a top-level `Makefile` under `<tvm root>/vta/hardware/xilinx/`.
Bitstream generation is driven by a top-level `Makefile` under `<tvm root>/vta/vta-hw/hardware/xilinx/`.
If you just want to simulate the VTA design in software emulation to make sure that it is functional, enter:
```bash
cd <tvm root>/vta/hardware/xilinx
cd <tvm root>/vta/vta-hw/hardware/xilinx
make ip MODE=sim
```
......@@ -359,7 +359,7 @@ If you just want to generate the HLS-based VTA IP cores without launching the en
```bash
make ip
```
You'll be able to view the HLS synthesis reports under `<tvm root>/vta/build/hardware/xilinx/hls/<configuration>/<block>/solution0/syn/report/<block>_csynth.rpt`
You'll be able to view the HLS synthesis reports under `<tvm root>/vta/vta-hw/build/hardware/xilinx/hls/<configuration>/<block>/solution0/syn/report/<block>_csynth.rpt`
> Note: The `<configuration>` name is a string that summarizes the VTA configuration parameters listed in `vta_config.json`. The `<block>` name refers to the specific module (or HLS function) that composes the high-level VTA pipeline.
Finally to run the full hardware compilation and generate the VTA bitstream, run:
......@@ -371,20 +371,20 @@ make
This process is lengthy and can take up to an hour to complete, depending on your machine's specs.
We recommend setting the `VTA_HW_COMP_THREADS` variable in the Makefile to take full advantage of all the cores on your development machine.
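For example, the variable can be overridden on the `make` command line instead of editing the Makefile; a sketch (the override relies on standard `make` behavior, and the one-job-per-core heuristic is ours):

```python
import os
import subprocess

# A minimal sketch: launch the bitstream build with one compilation job
# per core by overriding VTA_HW_COMP_THREADS. Replace <tvm root> with
# the path to your TVM checkout.
subprocess.run(
    ["make", f"VTA_HW_COMP_THREADS={os.cpu_count()}"],
    cwd="<tvm root>/vta/vta-hw/hardware/xilinx",
    check=True,
)
```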
Once the compilation completes, the generated bitstream can be found under `<tvm root>/vta/build/hardware/xilinx/vivado/<configuration>/export/vta.bit`.
Once the compilation completes, the generated bitstream can be found under `<tvm root>/vta/vta-hw/build/hardware/xilinx/vivado/<configuration>/export/vta.bit`.
### Chisel-based Custom VTA Bitstream Compilation for DE10-Nano
Similar to the HLS-based design, high-level hardware parameters in the Chisel-based design are listed in the VTA configuration file [Configs.scala](https://github.com/apache/incubator-tvm/blob/master/vta/hardware/chisel/src/main/scala/core/Configs.scala), and they can be customized by the user.
Similar to the HLS-based design, high-level hardware parameters in the Chisel-based design are listed in the VTA configuration file [Configs.scala](https://github.com/apache/incubator-tvm/blob/master/vta/vta-hw/hardware/chisel/src/main/scala/core/Configs.scala), and they can be customized by the user.
For Intel FPGA, bitstream generation is driven by a top-level `Makefile` under `<tvmroot>/vta/hardware/intel`.
For Intel FPGA, bitstream generation is driven by a top-level `Makefile` under `<tvmroot>/vta/vta-hw/hardware/intel`.
If you just want to generate the Chisel-based VTA IP core for the DE10-Nano board without compiling the design for the FPGA hardware, enter:
```bash
cd <tvmroot>/vta/hardware/intel
cd <tvmroot>/vta/vta-hw/hardware/intel
make ip
```
Then you'll be able to locate the generated Verilog file at `<tvmroot>/vta/build/hardware/intel/chisel/<configuration>/VTA.DefaultDe10Config.v`.
Then you'll be able to locate the generated Verilog file at `<tvmroot>/vta/vta-hw/build/hardware/intel/chisel/<configuration>/VTA.DefaultDe10Config.v`.
If you would like to run the full hardware compilation for the `de10nano` board:
```bash
......@@ -393,14 +393,14 @@ make
This process is lengthy and can take up to half an hour to complete, depending on the performance of your PC. The Quartus Prime software automatically detects the number of cores available on your PC and tries to utilize all of them for the compilation.
Once the compilation completes, the generated bitstream can be found under `<tvmroot>/vta/build/hardware/intel/quartus/<configuration>/export/vta.rbf`. You can also open the Quartus project file (.qpf) available at `<tvmroot>/vta/build/hardware/intel/quartus/<configuration>/de10_nano_top.qpf` to look around the generated reports.
Once the compilation completes, the generated bitstream can be found under `<tvmroot>/vta/vta-hw/build/hardware/intel/quartus/<configuration>/export/vta.rbf`. You can also open the Quartus project file (.qpf) available at `<tvmroot>/vta/vta-hw/build/hardware/intel/quartus/<configuration>/de10_nano_top.qpf` to look around the generated reports.
### Use the Custom Bitstream
We can program the new VTA FPGA bitstream by setting the bitstream path of the `vta.program_fpga()` function in the tutorial examples, or in the `test_program_rpc.py` script.
```python
vta.program_fpga(remote, bitstream="<tvm root>/vta/build/hardware/xilinx/vivado/<configuration>/export/vta.bit")
vta.program_fpga(remote, bitstream="<tvm root>/vta/vta-hw/build/hardware/xilinx/vivado/<configuration>/export/vta.bit")
```
Instead of downloading a pre-built bitstream from the VTA bitstream repository, TVM will use the new bitstream you just generated, which is a VTA design clocked at a higher frequency.
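For reference, here is a hedged sketch of obtaining the `remote` handle used above. The `VTA_RPC_HOST`/`VTA_RPC_PORT` variable names follow the VTA tutorials; the default values are placeholders for your board's address:

```python
import os

import vta
from tvm import rpc

# A minimal sketch: connect to the RPC server running on the board and
# flash the custom bitstream. Host/port defaults are placeholders.
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
remote = rpc.connect(host, port)
vta.program_fpga(
    remote,
    bitstream="<tvm root>/vta/vta-hw/build/hardware/xilinx/vivado/"
              "<configuration>/export/vta.bit")
```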
......
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
set -u
export PYTHONPATH=python:vta/python:topi/python
# cleanup pycache
find . -type f -path "*.pyc" | xargs rm -f
rm -rf ~/.tvm
# Rebuild cython
make cython3
# Reset default fsim simulation
cp vta/config/fsim_sample.json vta/config/vta_config.json
# Run unit tests in functional/fast simulator
echo "Running unittest in fsim..."
python3 -m pytest -v vta/tests/python/unittest
# Run unit tests in functional/fast simulator
echo "Running integration test in fsim..."
python3 -m pytest -v vta/tests/python/integration
......@@ -30,7 +30,7 @@ rm -rf ~/.tvm
make cython3
# Reset default fsim simulation
cp vta/config/fsim_sample.json vta/config/vta_config.json
cp vta/vta-hw/config/fsim_sample.json vta/vta-hw/config/vta_config.json
# Run unit tests in functional/fast simulator
echo "Running unittest in fsim..."
......
......@@ -30,16 +30,22 @@ rm -rf ~/.tvm
make cython3
# Set default VTA config to use TSIM cycle accurate sim
cp vta/config/tsim_sample.json vta/config/vta_config.json
cp vta/vta-hw/config/tsim_sample.json vta/vta-hw/config/vta_config.json
# Build and run the TSIM apps (disable until refactor is complete)
# echo "Test the TSIM apps..."
# make -C vta/vta-hw/apps/tsim_example/ run_verilog
# make -C vta/vta-hw/apps/tsim_example/ run_chisel
# make -C vta/vta-hw/apps/gemm/ default
# Check style of scala code
echo "Check style of scala code..."
make -C vta/hardware/chisel lint
make -C vta/vta-hw/hardware/chisel lint
# Build VTA chisel design and verilator simulator
echo "Building VTA chisel design..."
make -C vta/hardware/chisel cleanall
make -C vta/hardware/chisel USE_THREADS=0 lib
make -C vta/vta-hw/hardware/chisel cleanall
make -C vta/vta-hw/hardware/chisel USE_THREADS=0 lib
# Run unit tests in cycle accurate simulator
echo "Running unittest in tsim..."
......@@ -50,4 +56,4 @@ echo "Running integration test in tsim..."
python3 -m pytest -v vta/tests/python/integration
# Reset default fsim simulation
cp vta/config/fsim_sample.json vta/config/vta_config.json
cp vta/vta-hw/config/fsim_sample.json vta/vta-hw/config/vta_config.json
......@@ -312,7 +312,7 @@ def _init_env():
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))
path_list = [
os.path.join(proj_root, "vta/config/vta_config.json")
os.path.join(proj_root, "vta/vta-hw/config/vta_config.json")
]
path_list = [p for p in path_list if os.path.exists(p)]
if not path_list:
......
......@@ -40,7 +40,7 @@ def _get_lib_name(lib_name):
def find_libvta(lib_vta, optional=False):
"""Find VTA library
"""Find VTA Chisel-based library
Returns
-------
......@@ -56,10 +56,8 @@ def find_libvta(lib_vta, optional=False):
Enable error check
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_search = [curr_path]
lib_search += [os.path.join(curr_path, "..", "..", "build",)]
lib_search += [os.path.join(curr_path, "..", "..", "..", "build",)]
lib_search += [os.path.join(curr_path, "..", "..", "..", "build", "Release")]
lib_search = [os.path.join(curr_path, "..", "..", "..", "build",)]
lib_search += [os.path.join(curr_path, "..", "..", "vta-hw", "build")]
lib_name = _get_lib_name(lib_vta)
lib_path = [os.path.join(x, lib_name) for x in lib_search]
lib_found = [x for x in lib_path if os.path.exists(x)]
......
......@@ -66,21 +66,21 @@ class PkgConfig(object):
# Include path
self.include_path = [
"-I%s/include" % proj_root,
"-I%s/vta/include" % proj_root,
"-I%s/vta/vta-hw/include" % proj_root,
"-I%s/3rdparty/dlpack/include" % proj_root,
"-I%s/3rdparty/dmlc-core/include" % proj_root
]
# List of source files that can be used to build standalone library.
self.lib_source = []
self.lib_source += glob.glob("%s/vta/src/*.cc" % proj_root)
self.lib_source += glob.glob("%s/vta/vta-hw/src/*.cc" % proj_root)
if self.TARGET in ["pynq", "ultra96"]:
# add pynq drivers for any board that uses pynq driver stack (see pynq.io)
self.lib_source += glob.glob("%s/vta/src/pynq/*.cc" % (proj_root))
self.lib_source += glob.glob("%s/vta/vta-hw/src/pynq/*.cc" % (proj_root))
elif self.TARGET in ["de10nano"]:
self.lib_source += glob.glob("%s/vta/src/de10nano/*.cc" % (proj_root))
self.lib_source += glob.glob("%s/vta/vta-hw/src/de10nano/*.cc" % (proj_root))
self.include_path += [
"-I%s/vta/src/de10nano" % proj_root,
"-I%s/vta/vta-hw/src/de10nano" % proj_root,
"-I%s/3rdparty" % proj_root
]
......
......@@ -37,7 +37,7 @@ def _load_sw():
if env.TARGET == "tsim":
lib_hw = find_libvta("libvta_hw", optional=True)
assert lib_hw # make sure to build vta/hardware/chisel
assert lib_hw # make sure to build vta/vta-hw/hardware/chisel
try:
f = tvm.get_global_func("vta.tsim.init")
m = tvm.runtime.load_module(lib_hw[0], "vta-tsim")
......
......@@ -24,8 +24,8 @@
#include <tvm/runtime/registry.h>
#include <dmlc/thread_local.h>
#include <vta/runtime.h>
#include "runtime.h"
#include "../../src/runtime/workspace_pool.h"
......
......@@ -26,15 +26,17 @@
*/
#include <vta/driver.h>
#include <vta/hw_spec.h>
#include <vta/runtime.h>
#include <dmlc/logging.h>
#include <tvm/runtime/c_runtime_api.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <vector>
#include <memory>
#include "runtime.h"
namespace vta {
// Avoid bad configurations.
......
......@@ -22,15 +22,15 @@
* \brief VTA runtime library.
*/
#ifndef VTA_RUNTIME_H_
#define VTA_RUNTIME_H_
#ifndef VTA_RUNTIME_RUNTIME_H_
#define VTA_RUNTIME_RUNTIME_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <tvm/runtime/c_runtime_api.h>
#include "driver.h"
#include <vta/driver.h>
#define VTA_MEMCPY_H2D 1
#define VTA_MEMCPY_D2H 2
......@@ -291,4 +291,4 @@ TVM_DLL void VTASynchronize(VTACommandHandle cmd, uint32_t wait_cycles);
#ifdef __cplusplus
}
#endif
#endif // VTA_RUNTIME_H_
#endif // VTA_RUNTIME_RUNTIME_H_
......@@ -181,7 +181,7 @@ def compile_network(env, target, model, start_pack, stop_pack):
tracker_host = os.environ.get("TVM_TRACKER_HOST", '0.0.0.0')
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
......
......@@ -68,7 +68,7 @@ assert tvm.runtime.enabled("rpc")
# -------------------------------------
# Execute on CPU vs. VTA, and define the model.
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# Set ``device=arm_cpu`` to run inference on the CPU
......
......@@ -111,7 +111,7 @@ names = [x.strip() for x in content]
# --------------------------------------
# Execute on CPU vs. VTA, and define the model.
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
......
......@@ -43,7 +43,7 @@ from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
......
......@@ -47,7 +47,7 @@ from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
......
......@@ -46,7 +46,7 @@ from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the vta/config/vta_config.json file
# Load VTA parameters from the vta/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
......
......@@ -18,13 +18,13 @@
cmake_minimum_required(VERSION 3.2)
project(tsim C CXX)
set(TVM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../)
set(VTA_DIR ${TVM_DIR}/vta)
set(TVM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../)
set(VTA_HW_DIR ${TVM_DIR}/3rdparty/vta-hw)
include_directories("${TVM_DIR}/include")
include_directories("${TVM_DIR}/3rdparty/dlpack/include")
include_directories("${TVM_DIR}/3rdparty/dmlc-core/include")
include_directories("${TVM_DIR}/vta/src/dpi")
include_directories("${VTA_HW_DIR}/src/dpi")
set(CMAKE_C_FLAGS "-O2 -Wall -fPIC -fvisibility=hidden")
set(CMAKE_CXX_FLAGS "-O2 -Wall -fPIC -fvisibility=hidden -std=c++11")
......@@ -35,11 +35,11 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND
endif()
file(GLOB TSIM_SW_SRC src/driver.cc)
list(APPEND TSIM_SW_SRC ${VTA_DIR}/src/vmem/virtual_memory.cc)
list(APPEND TSIM_SW_SRC ${VTA_DIR}/src/dpi/module.cc)
list(APPEND TSIM_SW_SRC ${VTA_HW_DIR}/src/vmem/virtual_memory.cc)
list(APPEND TSIM_SW_SRC ${VTA_HW_DIR}/src/dpi/module.cc)
add_library(sw SHARED ${TSIM_SW_SRC})
target_include_directories(sw PRIVATE ${VTA_DIR}/include ${VTA_DIR}/src)
target_include_directories(sw PRIVATE ${VTA_HW_DIR}/include ${VTA_HW_DIR}/src)
if(APPLE)
set_target_properties(sw PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
......
......@@ -15,13 +15,13 @@
# specific language governing permissions and limitations
# under the License.
export PYTHONPATH:=$(PWD)/python:$(PYTHONPATH)
export PYTHONPATH:=$(abspath .)/python:$(PYTHONPATH)
export PYTHONPATH:=$(abspath .)/../../../../python:$(PYTHONPATH)
BUILD_NAME = build
build_dir = $(abspath .)/$(BUILD_NAME)
default: chisel driver
python3 tests/python/chisel_accel.py serial
default: chisel driver serial parallel
serial:
python3 tests/python/chisel_accel.py serial
......
......@@ -17,7 +17,7 @@
VTA TSIM Application
======================
Prior to this application, please take a look at `<tvm-root>/vta/apps/tsim_example` for installation instructions
Prior to this application, please take a look at `<tvm-root>/vta/vta-hw/apps/tsim_example` for installation instructions
This is an application that performs Bit Serial Multiplication for GEMM utilizing TSIM.
**Bit Serial Multiplication for GEMM:**
......@@ -28,7 +28,7 @@ We approach this operation with slicing and shifting, like how basic multiplicat
We can substantially reduce the cycles required to perform a GEMM given that the data bit width is small. This GEMM application uses TSIM for future accelerator prototypes.
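To ground the idea, here is a pure-Python model of the scalar recurrence; the Chisel accelerator pipelines this shift-and-accumulate across a whole GEMM tile, but the arithmetic is the same:

```python
def bit_serial_mul(a: int, b: int, bits: int = 8) -> int:
    """Multiply a * b by slicing one bit of b per step and accumulating
    shifted copies of a (valid for non-negative b < 2**bits)."""
    acc = 0
    for i in range(bits):
        if (b >> i) & 1:      # slice bit i of the multiplier
            acc += a << i     # shift-and-accumulate the partial product
    return acc

assert bit_serial_mul(13, 11) == 13 * 11  # sanity check
```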
* Test Chisel3 backend with bit serial GEMM
* Go to `<tvm-root>/vta/apps/gemm`
* Go to `<tvm-root>/vta/vta-hw/apps/gemm`
* Run `make`
* If you have already compiled chisel backend (i.e. ran `make`)
......@@ -36,15 +36,15 @@ We can sufficiently reduce the cycles required to perform a gemm given that the
* Bit parallel test with another input set, run `make parallel`
* Some steps for creating your own custom TSIM application
* Go to `<tvm-root>/vta/apps/gemm`
* Go to `<tvm-root>/vta/vta-hw/apps/gemm`
* Create custom circuit within `./hardware/chisel/src/scala.main/accel/Compute.scala`
* Map the according Registers in `./hardware/chisel/src/scala.main/accel/RegFile.scala`
* Create your test script
* Map the registers in `./src/driver.cc` and link it with both `RegFile.scala` and the test script
* An understanding of `<tvm-root>/vta/apps/tsim_example`, which performs an add-by-one on a vector, is highly encouraged before creating a more complex application
* An understanding of `<tvm-root>/vta/vta-hw/apps/tsim_example`, which performs an add-by-one on a vector, is highly encouraged before creating a more complex application
* Some pointers
* Chisel3 tests in `<tvm-root>/vta/apps/gemm/tests/python`
* Chisel3 accelerator backend `<tvm-root>/vta/apps/gemm/hardware/chisel`
* Software C++ driver (backend) that handles the accelerator `<tvm-root>/vta/apps/gemm/src/driver.cc`
* Software Python driver (frontend) that handles the accelerator `<tvm-root>/vta/apps/gemm/python/accel`
* Chisel3 tests in `<tvm-root>/vta/vta-hw/apps/gemm/tests/python`
* Chisel3 accelerator backend `<tvm-root>/vta/vta-hw/apps/gemm/hardware/chisel`
* Software C++ driver (backend) that handles the accelerator `<tvm-root>/vta/vta-hw/apps/gemm/src/driver.cc`
* Software Python driver (frontend) that handles the accelerator `<tvm-root>/vta/vta-hw/apps/gemm/python/accel`
......@@ -38,7 +38,7 @@ USE_TRACE = 1
LIBNAME = libhw
vta_dir = $(abspath ../../../../)
tvm_dir = $(abspath ../../../../../)
tvm_dir = $(abspath ../../../../../../)
build_dir = $(abspath .)/$(BUILD_NAME)
verilator_build_dir = $(build_dir)/verilator
chisel_build_dir = $(build_dir)/chisel
......
......@@ -18,13 +18,13 @@
cmake_minimum_required(VERSION 3.2)
project(tsim C CXX)
set(TVM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../)
set(VTA_DIR ${TVM_DIR}/vta)
set(TVM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../)
set(VTA_HW_DIR ${TVM_DIR}/vta/vta-hw)
include_directories("${TVM_DIR}/include")
include_directories("${TVM_DIR}/3rdparty/dlpack/include")
include_directories("${TVM_DIR}/3rdparty/dmlc-core/include")
include_directories("${TVM_DIR}/vta/src/dpi")
include_directories("${VTA_HW_DIR}/src/dpi")
set(CMAKE_C_FLAGS "-O2 -Wall -fPIC -fvisibility=hidden")
set(CMAKE_CXX_FLAGS "-O2 -Wall -fPIC -fvisibility=hidden -std=c++11")
......@@ -35,11 +35,11 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND
endif()
file(GLOB TSIM_SW_SRC src/driver.cc)
list(APPEND TSIM_SW_SRC ${VTA_DIR}/src/vmem/virtual_memory.cc)
list(APPEND TSIM_SW_SRC ${VTA_DIR}/src/dpi/module.cc)
list(APPEND TSIM_SW_SRC ${VTA_HW_DIR}/src/vmem/virtual_memory.cc)
list(APPEND TSIM_SW_SRC ${VTA_HW_DIR}/src/dpi/module.cc)
add_library(sw SHARED ${TSIM_SW_SRC})
target_include_directories(sw PRIVATE ${VTA_DIR}/include ${VTA_DIR}/src)
target_include_directories(sw PRIVATE ${VTA_HW_DIR}/include ${VTA_HW_DIR}/src)
if(APPLE)
set_target_properties(sw PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
......
......@@ -20,7 +20,9 @@ export PYTHONPATH:=$(PWD)/python:$(PYTHONPATH)
BUILD_NAME = build
build_dir = $(abspath .)/$(BUILD_NAME)
default: verilog driver
default: run_verilog
run_verilog: verilog driver
python3 tests/python/verilog_accel.py
run_chisel: chisel driver
......
......@@ -72,16 +72,16 @@ The default target language for these two implementations is Verilog. The follow
how to run both of them:
* Test Verilog backend
* Go to `<tvm-root>/vta/apps/tsim_example`
* Go to `<tvm-root>/vta/vta-hw/apps/tsim_example`
* Run `make`
* Test Chisel3 backend
* Go to `<tvm-root>/vta/apps/tsim_example`
* Go to `<tvm-root>/vta/vta-hw/apps/tsim_example`
* Run `make run_chisel`
* Some pointers
* Verilog and Chisel3 tests in `<tvm-root>/vta/apps/tsim_example/tests/python`
* Verilog accelerator backend `<tvm-root>/vta/apps/tsim_example/hardware/verilog`
* Chisel3 accelerator backend `<tvm-root>/vta/apps/tsim_example/hardware/chisel`
* Software C++ driver (backend) that handles the accelerator `<tvm-root>/vta/apps/tsim_example/src/driver.cc`
* Software Python driver (frontend) that handles the accelerator `<tvm-root>/vta/apps/tsim_example/python/accel`
* Verilog and Chisel3 tests in `<tvm-root>/vta/vta-hw/apps/tsim_example/tests/python`
* Verilog accelerator backend `<tvm-root>/vta/vta-hw/apps/tsim_example/hardware/verilog`
* Chisel3 accelerator backend `<tvm-root>/vta/vta-hw/apps/tsim_example/hardware/chisel`
* Software C++ driver (backend) that handles the accelerator `<tvm-root>/vta/vta-hw/apps/tsim_example/src/driver.cc`
* Software Python driver (frontend) that handles the accelerator `<tvm-root>/vta/vta-hw/apps/tsim_example/python/accel`
......@@ -38,7 +38,7 @@ USE_TRACE = 0
LIBNAME = libhw
vta_dir = $(abspath ../../../../)
tvm_dir = $(abspath ../../../../../)
tvm_dir = $(abspath ../../../../../../)
build_dir = $(abspath .)/$(BUILD_NAME)
verilator_build_dir = $(build_dir)/verilator
chisel_build_dir = $(build_dir)/chisel
......
......@@ -38,7 +38,7 @@ USE_TRACE = 0
LIBNAME = libhw
vta_dir = $(abspath ../../../../)
tvm_dir = $(abspath ../../../../../)
tvm_dir = $(abspath ../../../../../../)
build_dir = $(abspath .)/$(BUILD_NAME)
verilator_opt = --cc
......
......@@ -23,7 +23,7 @@ import argparse
def get_pkg_config(cfg):
"""Get the pkg config object."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))
pkg_config_py = os.path.join(proj_root, "vta/python/vta/pkg_config.py")
libpkg = {"__file__": pkg_config_py}
exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
......@@ -107,9 +107,9 @@ def main():
curr_path = os.path.dirname(
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))
path_list = [
os.path.join(proj_root, "vta/config/vta_config.json")
os.path.join(proj_root, "vta/vta-hw/config/vta_config.json")
]
if args.use_cfg:
path_list = [args.use_cfg]
......
......@@ -49,7 +49,7 @@ USE_TRACE_FST = 0
# This will significantly increase the trace size and should only be used
# on a per need basis for difficult debug problems.
USE_TRACE_DETAILED = 0
USE_THREADS = $(shell nproc)
USE_THREADS = 0
VTA_LIBNAME = libvta_hw
UNITTEST_NAME = all
CXX = g++
......@@ -65,7 +65,7 @@ CXX_HAS_ALIGN_NEW := $(shell [ $(CXX_MAJOR) -ge 7 ] && echo true)
config_test = $(TOP_TEST)$(CONFIG)
vta_dir = $(abspath ../../)
tvm_dir = $(abspath ../../../)
tvm_dir = $(abspath ../../../../)
verilator_build_dir = $(vta_dir)/$(BUILD_NAME)/verilator
chisel_build_dir = $(vta_dir)/$(BUILD_NAME)/chisel
......@@ -193,4 +193,5 @@ clean:
cleanall:
-rm -rf $(vta_dir)/$(BUILD_NAME)/chisel
-rm -rf $(vta_dir)/$(BUILD_NAME)/libvta_hw.so
-rm -rf $(vta_dir)/$(BUILD_NAME)/libvta_hw.dylib
-rm -rf $(vta_dir)/$(BUILD_NAME)/verilator
......@@ -62,7 +62,7 @@ resolvers ++= Seq(
val defaultVersions = Map(
"chisel3" -> "3.1.7",
"chisel-iotesters" -> "[1.2.5,1.3-SNAPSHOT["
"chisel-iotesters" -> "1.2.4"
)
libraryDependencies ++= Seq("chisel3","chisel-iotesters").map {
......
......@@ -17,4 +17,4 @@
* under the License.
*/
sbt.version = 1.1.1
sbt.version = 1.3.2