Unverified commit dc5f70ad by Tianqi Chen, committed by GitHub

[CI][DOCKER] Add ONNX runtime dep (#4314)

* [DOCKER] Add ONNX runtime dep

* Improve ci script

parent dab7172a
@@ -64,6 +64,8 @@ docker_run = 'docker/bash.sh'
// timeout in minutes
max_time = 120
workspace = "workspace/exec_${EXECUTOR_NUMBER}"
// initialize source codes
def init_git() {
checkout scm
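A minimal sketch, not part of this commit, of how a per-executor workspace path like the new workspace variable is consumed in a Jenkins scripted pipeline. Groovy only expands ${...} inside double-quoted strings, EXECUTOR_NUMBER is an environment variable Jenkins provides on the executor, and the per_exec_ws name below is illustrative:

```groovy
// Sketch only, assuming a standard Jenkins scripted pipeline (not the actual TVM Jenkinsfile).
// EXECUTOR_NUMBER is set by Jenkins inside a node block; per_exec_ws is a hypothetical name.
node('CPU') {
  def per_exec_ws = "workspace/exec_${env.EXECUTOR_NUMBER}"  // e.g. workspace/exec_0
  ws("${per_exec_ws}/tvm/sanity") {
    checkout scm   // same role as init_git() above
    sh 'pwd'       // runs in an executor-specific directory
  }
}
```

Giving each executor its own checkout keeps parallel builds on the same agent from overwriting one another's workspaces.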
@@ -86,7 +88,7 @@ def init_git_win() {
stage("Sanity Check") {
timeout(time: max_time, unit: 'MINUTES') {
node('CPU') {
ws('workspace/tvm/sanity') {
ws("${workspace}/tvm/sanity") {
init_git()
sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
}
@@ -134,7 +136,7 @@ def unpack_lib(name, libs) {
stage('Build') {
parallel 'BUILD: GPU': {
node('GPUBUILD') {
ws('workspace/tvm/build-gpu') {
ws("${workspace}/tvm/build-gpu") {
init_git()
sh """
mkdir -p build
@@ -182,7 +184,7 @@ stage('Build') {
},
'BUILD: CPU': {
node('CPU') {
ws('workspace/tvm/build-cpu') {
ws("${workspace}/tvm/build-cpu") {
init_git()
sh """
mkdir -p build
@@ -214,7 +216,7 @@ stage('Build') {
},
'BUILD : i386': {
node('CPU') {
ws('workspace/tvm/build-i386') {
ws("${workspace}/tvm/build-i386") {
init_git()
sh """
mkdir -p build
@@ -239,7 +241,7 @@ stage('Build') {
stage('Unit Test') {
parallel 'python3: GPU': {
node('TensorCore') {
ws('workspace/tvm/ut-python-gpu') {
ws("${workspace}/tvm/ut-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -251,7 +253,7 @@ stage('Unit Test') {
},
'python3: i386': {
node('CPU') {
ws('workspace/tvm/ut-python-i386') {
ws("${workspace}/tvm/ut-python-i386") {
init_git()
unpack_lib('i386', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -264,7 +266,7 @@ stage('Unit Test') {
},
'java: GPU': {
node('GPU') {
ws('workspace/tvm/ut-java') {
ws("${workspace}/tvm/ut-java") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -278,7 +280,7 @@ stage('Unit Test') {
stage('Integration Test') {
parallel 'topi: GPU': {
node('GPU') {
ws('workspace/tvm/topi-python-gpu') {
ws("${workspace}/tvm/topi-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -289,7 +291,7 @@ stage('Integration Test') {
},
'frontend: GPU': {
node('GPU') {
ws('workspace/tvm/frontend-python-gpu') {
ws("${workspace}/tvm/frontend-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -300,7 +302,7 @@ stage('Integration Test') {
},
'legacy: GPU': {
node('GPU') {
ws('workspace/tvm/legacy-python-gpu') {
ws("${workspace}/tvm/legacy-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -311,7 +313,7 @@ stage('Integration Test') {
},
'docs: GPU': {
node('GPU') {
ws('workspace/tvm/docs-python-gpu') {
ws("${workspace}/tvm/docs-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
@@ -343,7 +345,7 @@ stage('Build packages') {
stage('Deploy') {
node('doc') {
ws('workspace/tvm/deploy-docs') {
ws("${workspace}/tvm/deploy-docs") {
if (env.BRANCH_NAME == "master") {
unpack_lib('mydocs', 'docs.tgz')
sh "tar xf docs.tgz -C /var/docs"
...
@@ -22,6 +22,7 @@ set -o pipefail
# fix to certain version for now
pip3 install onnx==1.5.0
pip3 install onnxruntime==1.0.0
# torch depends on a number of other packages, but unhelpfully, does
# not expose that in the wheel!!!
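The hunk above pins onnx and onnxruntime to exact versions so the ONNX frontend tests run against a known combination. A hedged sketch of how such pins could be sanity-checked from the pipeline; the stage is hypothetical and the ci_gpu image variable is assumed to exist in the Jenkinsfile alongside the ci_lint one used earlier:

```groovy
// Hypothetical stage, not part of this commit: verify the pinned ONNX packages
// inside the CI image before the frontend tests run. ci_gpu is assumed to be
// the GPU Docker image variable, analogous to ci_lint above.
node('GPU') {
  ws("${workspace}/tvm/onnx-dep-check") {
    init_git()
    sh "${docker_run} ${ci_gpu} python3 -c 'import onnx, onnxruntime; print(onnx.__version__, onnxruntime.__version__)'"
  }
}
```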
...
@@ -21,6 +21,7 @@ set -u
export PYTHONPATH=nnvm/python:python:topi/python
# to avoid openblas threading error
export TVM_BIND_THREADS=0
export OMP_NUM_THREADS=1
# Rebuild cython
...
@@ -21,6 +21,8 @@ set -u
export PYTHONPATH=python:topi/python:apps/extension/python
export LD_LIBRARY_PATH="build:${LD_LIBRARY_PATH:-}"
export TVM_BIND_THREADS=0
export TVM_NUM_THREADS=2
rm -rf python/tvm/*.pyc python/tvm/*/*.pyc python/tvm/*/*/*.pyc
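The two script hunks above tighten threading for the test runs: TVM_BIND_THREADS=0 stops the TVM runtime from pinning worker threads to cores, OMP_NUM_THREADS=1 keeps OpenMP/OpenBLAS from oversubscribing the container, and TVM_NUM_THREADS=2 caps TVM's own thread pool. An illustrative snippet, in the same Jenkinsfile style, of those limits applied to a pytest invocation; the node label and test path are examples only:

```groovy
// Illustrative only: thread limits applied to a unit-test run on a CI node.
node('CPU') {
  ws("${workspace}/tvm/ut-python-cpu") {
    sh """
      export TVM_BIND_THREADS=0   # do not bind worker threads to cores
      export TVM_NUM_THREADS=2    # cap TVM's runtime thread pool
      export OMP_NUM_THREADS=1    # avoid OpenMP/OpenBLAS oversubscription
      python3 -m pytest -v tests/python/unittest
    """
  }
}
```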