#!groovy
// -*- mode: groovy -*-

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Jenkins pipeline
// See documentation at https://jenkins.io/doc/book/pipeline/jenkinsfile/

// Docker env used for testing
// Different images may have different version tags
// because some of them are more stable than others.
//
// Docker images are maintained by the PMC, cached on Docker Hub,
// and remain relatively stable over time.
// Flow for upgrading the docker env (requires a committer):
//
// - Send a PR to upgrade the build script in the repo
// - Build the new docker image
// - Tag the docker image with a new version and push it to tvmai
//   (see the illustrative commands below)
// - Update the version in the Jenkinsfile and send a PR
// - Fix any issues with the new image version in the PR
// - Merge the PR; CI is now on the new version
// - Tag the new version as the latest
// - Periodically clean up the old versions on the local workers
//
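// An illustrative sketch of the tag-and-push step above; the image name,
// version number, and Dockerfile path here are examples only, not the
// actual values to use:
//
//   docker build -t tvmai/ci-cpu:v0.62 -f docker/Dockerfile.ci_cpu docker/
//   docker push tvmai/ci-cpu:v0.62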

// Commit hash of the source used to build the current CI docker images
//
//

ci_lint = "tvmai/ci-lint:v0.60"
ci_gpu = "tvmai/ci-gpu:v0.61"
ci_cpu = "tvmai/ci-cpu:v0.61"
ci_i386 = "tvmai/ci-i386:v0.52"

// tvm libraries
tvm_runtime = "build/libtvm_runtime.so, build/config.cmake"
tvm_lib = "build/libtvm.so, " + tvm_runtime
// LLVM upstream lib
tvm_multilib = "build/libtvm.so, " +
               "build/libvta_tsim.so, " +
               "build/libvta_fsim.so, " +
               "build/libtvm_topi.so, " +
               tvm_runtime

// command to start a docker container
docker_run = 'docker/bash.sh'
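// docker/bash.sh is the repo's helper script that runs a command inside the
// given docker image; throughout this file it is invoked as, for example:
//   sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"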
// timeout in minutes
max_time = 120

def per_exec_ws(folder) {
  return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
}
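// For example, on executor 2 per_exec_ws("tvm/sanity") evaluates to
// "workspace/exec_2/tvm/sanity", so concurrent executors on the same node
// work in separate checkouts.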

// initialize the source code
def init_git() {
  // Add more info about job node
  sh """
     echo "INFO: NODE_NAME=${NODE_NAME} EXECUTOR_NUMBER=${EXECUTOR_NUMBER}"
     """
  checkout scm
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      sh 'git submodule update --init'
    }
  }
}

def init_git_win() {
  checkout scm
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      bat 'git submodule update --init'
    }
  }
}

stage("Sanity Check") {
  timeout(time: max_time, unit: 'MINUTES') {
    node('CPU') {
      ws(per_exec_ws("tvm/sanity")) {
        init_git()
        sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
      }
    }
  }
}

// Run make. First try an incremental build from a previous workspace in the
// hope of speeding up compilation. If something goes wrong, clean the
// workspace and build from scratch.
def make(docker_type, path, make_flag) {
  timeout(time: max_time, unit: 'MINUTES') {
    try {
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
      // always run the C++ unit tests after a build
      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
    } catch (exc) {
      echo 'Incremental compilation failed. Falling back to building from scratch.'
      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_cpp_unittest.sh"
    }
  }
}
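// Example usage (see the Build stage below): make(ci_cpu, 'build', '-j2')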

// pack libraries for later use
def pack_lib(name, libs) {
  sh """
     echo "Packing ${libs} into ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
  stash includes: libs, name: name
}


// unpack libraries saved before
def unpack_lib(name, libs) {
  unstash name
  sh """
     echo "Unpacked ${libs} from ${name}"
     echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
     """
}
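// pack_lib/unpack_lib pass build artifacts between nodes via Jenkins
// stash/unstash: the Build stage calls e.g. pack_lib('gpu', tvm_multilib) and
// the test stages later call unpack_lib('gpu', tvm_multilib) on another node.
// The md5sum echoes on both sides make it easy to verify the files match.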

stage('Build') {
  parallel 'BUILD: GPU': {
    node('GPUBUILD') {
      ws(per_exec_ws("tvm/build-gpu")) {
        init_git()
        sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
        make(ci_gpu, 'build', '-j2')
        pack_lib('gpu', tvm_multilib)
        // compiler test
        sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
        make(ci_gpu, 'build2', '-j2')
      }
    }
  },
  'BUILD: CPU': {
    node('CPU') {
      ws(per_exec_ws("tvm/build-cpu")) {
        init_git()
        sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
        make(ci_cpu, 'build', '-j2')
        pack_lib('cpu', tvm_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh"
        }
      }
    }
  },
  'BUILD: i386': {
    node('CPU') {
      ws(per_exec_ws("tvm/build-i386")) {
        init_git()
        sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
        make(ci_i386, 'build', '-j2')
        pack_lib('i386', tvm_multilib)
      }
    }
  }
}

stage('Unit Test') {
  parallel 'python3: GPU': {
    node('TensorCore') {
      ws(per_exec_ws("tvm/ut-python-gpu")) {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh"
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration.sh"
        }
      }
    }
  },
  'python3: i386': {
    node('CPU') {
      ws(per_exec_ws("tvm/ut-python-i386")) {
        init_git()
        unpack_lib('i386', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh"
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh"
          sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh"
        }
      }
    }
  },
  'java: GPU': {
    node('GPU') {
      ws(per_exec_ws("tvm/ut-java")) {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
        }
      }
    }
  }
}

stage('Integration Test') {
  parallel 'topi: GPU': {
    node('GPU') {
      ws(per_exec_ws("tvm/topi-python-gpu")) {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh"
        }
      }
    }
  },
  'frontend: GPU': {
    node('GPU') {
      ws(per_exec_ws("tvm/frontend-python-gpu")) {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
        }
      }
    }
  },
  'docs: GPU': {
    node('GPU') {
      ws(per_exec_ws("tvm/docs-python-gpu")) {
        init_git()
        unpack_lib('gpu', tvm_multilib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh"
        }
        pack_lib('mydocs', 'docs.tgz')
      }
    }
  }
}

/*
stage('Build packages') {
  parallel 'conda CPU': {
    node('CPU') {
      sh "${docker_run} tvmai/conda-cpu ./conda/build_cpu.sh
    }
  },
  'conda cuda': {
    node('CPU') {
      sh "${docker_run} tvmai/conda-cuda90 ./conda/build_cuda.sh
      sh "${docker_run} tvmai/conda-cuda100 ./conda/build_cuda.sh
    }
  }
  // Here we could upload the packages to anaconda for releases
  // and/or the master branch
}
*/

stage('Deploy') {
  node('doc') {
    ws(per_exec_ws("tvm/deploy-docs")) {
      if (env.BRANCH_NAME == "master") {
        unpack_lib('mydocs', 'docs.tgz')
        sh "cp docs.tgz /var/docs/docs.tgz"
        sh "tar xf docs.tgz -C /var/docs"
      }
    }
  }
}