#!groovy
// -*- mode: groovy -*-

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Jenkins pipeline
// See documentation at https://jenkins.io/doc/book/pipeline/jenkinsfile/

// Docker env used for testing
// Different images may have different version tags
// because some of them are more stable than others.
//
// Docker images are maintained by the PMC, cached on Docker Hub,
// and remain relatively stable over time.
// Flow for upgrading the docker env (requires committer access):
//
// - Send a PR to upgrade the build script in the repo
// - Build the new docker image
// - Tag the docker image with a new version and push it to tvmai
// - Update the version in the Jenkinsfile and send a PR (see the example below)
// - Fix any issues with the new image version in the PR
// - Merge the PR; the CI is now on the new version
// - Tag the new version as the latest
// - Periodically clean up the old versions on local workers
//
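// For illustration only (the bumped version number below is hypothetical),
// the "update the version" step above changes a line such as
//   ci_cpu = "tvmai/ci-cpu:v0.54"
// to point at the newly pushed tag, e.g.
//   ci_cpu = "tvmai/ci-cpu:v0.55"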

// Commit hash in the source used to build the current CI docker images
//
// - ci-cpu:v0.54: e7c88a99f830de30814df14eaa980547ecbd61c1
//

ci_lint = "tvmai/ci-lint:v0.51"
ci_gpu = "tvmai/ci-gpu:v0.56"
ci_cpu = "tvmai/ci-cpu:v0.54"
ci_i386 = "tvmai/ci-i386:v0.52"

// tvm libraries
tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
tvm_lib = "build/libtvm.so, " + tvm_runtime
// LLVM upstream lib
tvm_multilib = "build/libtvm.so, " +
               "build/libvta_tsim.so, " +
               "build/libvta_fsim.so, " +
               "build/libtvm_topi.so, " +
               "build/libnnvm_compiler.so, " + tvm_runtime

// command to start a docker container
docker_run = 'docker/bash.sh'
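// For example, the Sanity Check stage below runs the lint script inside the
// ci_lint container via:
//   sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"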
// timeout in minutes
max_time = 120

workspace = "workspace/exec_${env.EXECUTOR_NUMBER}"

// initialize source code
def init_git() {
  checkout scm
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      sh 'git submodule update --init'
    }
  }
}

def init_git_win() {
    checkout scm
    retry(5) {
        timeout(time: 2, unit: 'MINUTES') {
            bat 'git submodule update --init'
        }
    }
}

stage("Sanity Check") {
  timeout(time: max_time, unit: 'MINUTES') {
90
    node('CPU') {
91
      ws("${workspace}/tvm/sanity") {
92
        init_git()
93
        sh "${docker_run} ${ci_lint}  ./tests/scripts/task_lint.sh"
94 95 96 97 98 99 100 101
      }
    }
  }
}

// Run make. First try an incremental make from a previous workspace in the hope
// of accelerating the compilation. If something goes wrong, clean the workspace
// and build from scratch.
def make(docker_type, path, make_flag) {
  timeout(time: max_time, unit: 'MINUTES') {
    try {
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
      // always run the cpp unit tests after a build
      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
    } catch (exc) {
      echo 'Incremental compilation failed. Falling back to building from scratch'
      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
    }
  }
}

// pack libraries for later use
def pack_lib(name, libs) {
  sh """
     echo "Packing ${libs} into ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
  stash includes: libs, name: name
}


// unpack libraries saved before
def unpack_lib(name, libs) {
  unstash name
  sh """
     echo "Unpacked ${libs} from ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
}
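// Illustrative pairing (mirroring the stages below): a build node calls
// pack_lib('gpu', tvm_multilib) after compiling, and a test node later calls
// unpack_lib('gpu', tvm_multilib) to fetch the same artifacts from the stash.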

stage('Build') {
  parallel 'BUILD: GPU': {
    node('GPUBUILD') {
      ws("${workspace}/tvm/build-gpu") {
        init_git()
        sh """
           mkdir -p build
           cd build
           cp ../cmake/config.cmake .
           echo set\\(USE_CUBLAS ON\\) >> config.cmake
           echo set\\(USE_CUDNN ON\\) >> config.cmake
           echo set\\(USE_CUDA ON\\) >> config.cmake
           echo set\\(USE_OPENGL ON\\) >> config.cmake
           echo set\\(USE_MICRO ON\\) >> config.cmake
           echo set\\(USE_MICRO_STANDALONE_RUNTIME ON\\) >> config.cmake
           echo set\\(USE_LLVM llvm-config-9\\) >> config.cmake
           echo set\\(USE_NNPACK ON\\) >> config.cmake
           echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
           echo set\\(USE_RPC ON\\) >> config.cmake
           echo set\\(USE_SORT ON\\) >> config.cmake
           echo set\\(USE_GRAPH_RUNTIME ON\\) >> config.cmake
           echo set\\(USE_STACKVM_RUNTIME ON\\) >> config.cmake
           echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
           echo set\\(USE_VM_PROFILER ON\\) >> config.cmake
           echo set\\(USE_ANTLR ON\\) >> config.cmake
           echo set\\(USE_BLAS openblas\\) >> config.cmake
           echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
           echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
           """
        make(ci_gpu, 'build', '-j2')
        pack_lib('gpu', tvm_multilib)
        // compiler test
        sh """
           mkdir -p build2
           cd build2
           cp ../cmake/config.cmake .
           echo set\\(USE_OPENCL ON\\) >> config.cmake
           echo set\\(USE_ROCM ON\\) >> config.cmake
           echo set\\(USE_VULKAN ON\\) >> config.cmake
           echo set\\(USE_MICRO ON\\) >> config.cmake
           echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
           echo set\\(USE_VM_PROFILER ON\\) >> config.cmake
           echo set\\(CMAKE_CXX_COMPILER clang-7\\) >> config.cmake
           echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
           """
        make(ci_gpu, 'build2', '-j2')
      }
    }
  },
  'BUILD: CPU': {
    node('CPU') {
      ws("${workspace}/tvm/build-cpu") {
        init_git()
        sh """
           mkdir -p build
           cd build
           cp ../cmake/config.cmake .
           echo set\\(USE_SORT ON\\) >> config.cmake
           echo set\\(USE_MICRO ON\\) >> config.cmake
           echo set\\(USE_MICRO_STANDALONE_RUNTIME ON\\) >> config.cmake
           echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
           echo set\\(USE_VM_PROFILER ON\\) >> config.cmake
           echo set\\(USE_LLVM llvm-config-8\\) >> config.cmake
           echo set\\(USE_NNPACK ON\\) >> config.cmake
           echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
           echo set\\(USE_ANTLR ON\\) >> config.cmake
           echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
           echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
           echo set\\(HIDE_PRIVATE_SYMBOLS ON\\) >> config.cmake
           """
        make(ci_cpu, 'build', '-j2')
        pack_lib('cpu', tvm_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
        }
      }
    }
  },
  'BUILD : i386': {
    node('CPU') {
      ws("${workspace}/tvm/build-i386") {
        init_git()
        sh """
           mkdir -p build
           cd build
           cp ../cmake/config.cmake .
           echo set\\(USE_SORT ON\\) >> config.cmake
           echo set\\(USE_RPC ON\\) >> config.cmake
           echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
           echo set\\(USE_MICRO_STANDALONE_RUNTIME ON\\) >> config.cmake
           echo set\\(USE_VM_PROFILER ON\\) >> config.cmake
           echo set\\(USE_LLVM llvm-config-4.0\\) >> config.cmake
           echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
           echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
           """
        make(ci_i386, 'build', '-j2')
        pack_lib('i386', tvm_multilib)
      }
    }
  }
}

stage('Unit Test') {
  parallel 'python3: GPU': {
    node('TensorCore') {
      ws("${workspace}/tvm/ut-python-gpu") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration.sh"
        }
      }
    }
  },
  'python3: i386': {
    node('CPU') {
      ws("${workspace}/tvm/ut-python-i386") {
        init_git()
        unpack_lib('i386', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta.sh"
        }
      }
    }
  },
  'java: GPU': {
    node('GPU') {
      ws("${workspace}/tvm/ut-java") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
        }
      }
    }
  }
}

stage('Integration Test') {
  parallel 'topi: GPU': {
    node('GPU') {
      ws("${workspace}/tvm/topi-python-gpu") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
        }
      }
    }
  },
  'frontend: GPU': {
    node('GPU') {
      ws("${workspace}/tvm/frontend-python-gpu") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
        }
      }
    }
  },
  'legacy: GPU': {
    node('GPU') {
      ws("${workspace}/tvm/legacy-python-gpu") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_legacy.sh"
        }
      }
    }
  },
  'docs: GPU': {
    node('GPU') {
      ws("${workspace}/tvm/docs-python-gpu") {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh"
        }
        pack_lib('mydocs', 'docs.tgz')
      }
    }
  }
}

/*
stage('Build packages') {
  parallel 'conda CPU': {
    node('CPU') {
      sh "${docker_run} tvmai/conda-cpu ./conda/build_cpu.sh"
    }
  },
  'conda cuda': {
    node('CPU') {
      sh "${docker_run} tvmai/conda-cuda90 ./conda/build_cuda.sh"
      sh "${docker_run} tvmai/conda-cuda100 ./conda/build_cuda.sh"
    }
  }
  // Here we could upload the packages to anaconda for releases
  // and/or the master branch
}
*/

stage('Deploy') {
    node('doc') {
      ws("${workspace}/tvm/deploy-docs") {
        if (env.BRANCH_NAME == "master") {
           unpack_lib('mydocs', 'docs.tgz')
           sh "tar xf docs.tgz -C /var/docs"
        }
      }
    }
}