diff --git a/Jenkinsfile b/Jenkinsfile
index c10b5c88abbe..3ed24b152568 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -42,6 +42,7 @@
 // Hashtag in the source to build current CI docker builds
 //
 //
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 // NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
 ci_lint = "tlcpack/ci-lint:v0.67"
@@ -103,23 +104,23 @@ def init_git() {
 }
 
 def init_git_win() {
-    checkout scm
-    retry(5) {
-        timeout(time: 2, unit: 'MINUTES') {
-            bat 'git submodule update --init -f'
-        }
-    }
+  checkout scm
+  retry(5) {
+    timeout(time: 2, unit: 'MINUTES') {
+      bat 'git submodule update --init -f'
+    }
+  }
 }
 
 def cancel_previous_build() {
-    // cancel previous build if it is not on main.
-    if (env.BRANCH_NAME != "main") {
-        def buildNumber = env.BUILD_NUMBER as int
-        // Milestone API allows us to cancel previous build
-        // with the same milestone number
-        if (buildNumber > 1) milestone(buildNumber - 1)
-        milestone(buildNumber)
-    }
+  // cancel previous build if it is not on main.
+  if (env.BRANCH_NAME != "main") {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
 }
 
 cancel_previous_build()
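The milestone pair above is what actually cancels superseded runs. An annotated sketch of the same idiom (illustrative only; `milestone` is provided by the Pipeline Milestone Step plugin):

    // Build #N passes milestone N-1 and then milestone N as soon as it starts.
    // Passing milestone N aborts any older build whose last passed milestone
    // was N-1, i.e. the still-running previous build of the same branch.
    def buildNumber = env.BUILD_NUMBER as int
    if (buildNumber > 1) {
      milestone(buildNumber - 1)  // pass the previous build's milestone
    }
    milestone(buildNumber)        // register this build's own milestone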
@@ -148,11 +149,15 @@ stage('Prepare') {
   }
 }
 
-stage("Sanity Check") {
+stage('Sanity Check') {
   timeout(time: max_time, unit: 'MINUTES') {
     node('CPU') {
-      ws(per_exec_ws("tvm/sanity")) {
+      ws(per_exec_ws('tvm/sanity')) {
         init_git()
+        is_docs_only_build = sh (returnStatus: true, script: '''
+          ./tests/scripts/git_change_docs.sh
+          '''
+        )
         sh "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh"
       }
     }
@@ -190,7 +195,6 @@ def pack_lib(name, libs) {
   stash includes: libs, name: name
 }
 
-
 // unpack libraries saved before
 def unpack_lib(name, libs) {
   unstash name
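In the Sanity Check stage above, `sh(returnStatus: true, ...)` returns the script's exit code instead of failing the stage on a non-zero exit, so the result can drive the conditionals added below. A minimal sketch of the pattern, assuming the exit-code contract of tests/scripts/git_change_docs.sh (0 = non-docs changes present, 1 = docs-only):

    // returnStatus: true makes sh() yield the exit code rather than abort,
    // so a non-zero exit is data here, not a build failure.
    is_docs_only_build = sh(
      returnStatus: true,
      script: './tests/scripts/git_change_docs.sh'
    )
    if (is_docs_only_build == 1) {
      echo 'Docs-only change: build and test stages will be marked skipped.'
    }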
"${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" - sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" - junit "build/pytest-results/*.xml" + parallel 'python3: GPU': { + if (is_docs_only_build != 1) { + node('TensorCore') { + ws(per_exec_ws('tvm/ut-python-gpu')) { + init_git() + unpack_lib('gpu', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_sphinx_precheck.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh" + sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh" + junit "build/pytest-results/*.xml" + } + } } + } else { + Utils.markStageSkippedForConditional('python3: i386') } - } - }, - 'python3: CPU': { - node('CPU') { - ws(per_exec_ws("tvm/ut-python-cpu")) { - init_git() - unpack_lib('cpu', tvm_multilib_tsim) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" - junit "build/pytest-results/*.xml" + }, + 'python3: CPU': { + if (is_docs_only_build != 1) { + node('CPU') { + ws(per_exec_ws("tvm/ut-python-cpu")) { + init_git() + unpack_lib('cpu', tvm_multilib_tsim) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh" + junit "build/pytest-results/*.xml" + } + } } + } else { + Utils.markStageSkippedForConditional('python3: i386') } - } - }, - 'python3: i386': { - node('CPU') { - ws(per_exec_ws("tvm/ut-python-i386")) { - init_git() - unpack_lib('i386', tvm_multilib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" - sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" - junit "build/pytest-results/*.xml" + }, + 'python3: i386': { + if (is_docs_only_build != 1) { + node('CPU') { + ws(per_exec_ws('tvm/ut-python-i386')) { + init_git() + unpack_lib('i386', tvm_multilib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} ${ci_i386} ./tests/scripts/task_ci_setup.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration.sh" + sh "${docker_run} ${ci_i386} ./tests/scripts/task_python_vta_fsim.sh" + junit "build/pytest-results/*.xml" + } + } } + } else { + Utils.markStageSkippedForConditional('python3: i386') } } }, @@ -351,15 +384,17 @@ stage('Unit Test') { sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh" sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh" } + } else { + Utils.markStageSkippedForConditional('java: GPU') } } - } } stage('Integration Test') { parallel 'topi: GPU': { + if (is_docs_only_build != 1) { node('GPU') { - ws(per_exec_ws("tvm/topi-python-gpu")) { + ws(per_exec_ws('tvm/topi-python-gpu')) { init_git() unpack_lib('gpu', tvm_multilib) timeout(time: max_time, unit: 'MINUTES') { @@ -369,31 +404,42 @@ stage('Integration Test') { } } } + } else { + Utils.markStageSkippedForConditional('topi: GPU') + } }, 'frontend: GPU': { - node('GPU') { - ws(per_exec_ws("tvm/frontend-python-gpu")) { - init_git() - unpack_lib('gpu', tvm_multilib) - timeout(time: 
@@ -351,15 +384,17 @@
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh"
         }
+      } else {
+        Utils.markStageSkippedForConditional('java: GPU')
+      }
     }
-    }
   }
 }
 
 stage('Integration Test') {
   parallel 'topi: GPU': {
+    if (is_docs_only_build != 1) {
     node('GPU') {
-      ws(per_exec_ws("tvm/topi-python-gpu")) {
+      ws(per_exec_ws('tvm/topi-python-gpu')) {
         init_git()
         unpack_lib('gpu', tvm_multilib)
         timeout(time: max_time, unit: 'MINUTES') {
@@ -369,31 +404,42 @@ stage('Integration Test') {
         }
       }
     }
+    } else {
+      Utils.markStageSkippedForConditional('topi: GPU')
+    }
   },
   'frontend: GPU': {
-    node('GPU') {
-      ws(per_exec_ws("tvm/frontend-python-gpu")) {
-        init_git()
-        unpack_lib('gpu', tvm_multilib)
-        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
-          junit "build/pytest-results/*.xml"
+    if (is_docs_only_build != 1) {
+      node('GPU') {
+        ws(per_exec_ws('tvm/frontend-python-gpu')) {
+          init_git()
+          unpack_lib('gpu', tvm_multilib)
+          timeout(time: max_time, unit: 'MINUTES') {
+            sh "${docker_run} ${ci_gpu} ./tests/scripts/task_ci_setup.sh"
+            sh "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh"
+            junit "build/pytest-results/*.xml"
+          }
         }
       }
+    } else {
+      Utils.markStageSkippedForConditional('frontend: GPU')
     }
   },
   'frontend: CPU': {
-    node('CPU') {
-      ws(per_exec_ws("tvm/frontend-python-cpu")) {
-        init_git()
-        unpack_lib('cpu', tvm_multilib)
-        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
-          sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh"
-          junit "build/pytest-results/*.xml"
+    if (is_docs_only_build != 1) {
+      node('CPU') {
+        ws(per_exec_ws('tvm/frontend-python-cpu')) {
+          init_git()
+          unpack_lib('cpu', tvm_multilib)
+          timeout(time: max_time, unit: 'MINUTES') {
+            sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
+            sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh"
+            junit "build/pytest-results/*.xml"
+          }
         }
       }
+    } else {
+      Utils.markStageSkippedForConditional('frontend: CPU')
    }
  }
 // 'docs: GPU': {
@@ -437,19 +483,19 @@ stage('Build packages') {
       sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh"
     }
   }
-  // Here we could upload the packages to anaconda for releases
-  // and/or the main branch
+// Here we could upload the packages to anaconda for releases
+// and/or the main branch
 }
 */
 
 /*
 stage('Deploy') {
   node('doc') {
-    ws(per_exec_ws("tvm/deploy-docs")) {
-      if (env.BRANCH_NAME == "main") {
-        unpack_lib('mydocs', 'docs.tgz')
-        sh "cp docs.tgz /var/docs/docs.tgz"
-        sh "tar xf docs.tgz -C /var/docs"
+    ws(per_exec_ws('tvm/deploy-docs')) {
+      if (env.BRANCH_NAME == 'main') {
+        unpack_lib('mydocs', 'docs.tgz')
+        sh 'cp docs.tgz /var/docs/docs.tgz'
+        sh 'tar xf docs.tgz -C /var/docs'
       }
     }
   }
diff --git a/tests/scripts/git_change_docs.sh b/tests/scripts/git_change_docs.sh
new file mode 100755
index 000000000000..e623b5d92511
--- /dev/null
+++ b/tests/scripts/git_change_docs.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -eux
+
+# The exit status is the result consumed by the Jenkinsfile:
+#   0 -> at least one non-docs file changed (run the full CI)
+#   1 -> only files under docs/ changed (docs-only build)
+OTHER_DIR=0
+DOC_DIR="docs/"
+
+changed_files=$(git diff --no-commit-id --name-only -r origin/main)
+
+for file in $changed_files; do
+    # Prefix match, so paths that merely contain "docs/" do not count.
+    if [[ "$file" != ${DOC_DIR}* ]]; then
+        OTHER_DIR=1
+        break
+    fi
+done
+
+if [ ${OTHER_DIR} -eq 1 ]; then
+    exit 0
+else
+    exit 1
+fi
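A quick way to sanity-check the script's contract locally (an illustrative invocation, not part of the change; it assumes origin/main has been fetched, and `set -x` tracing will be visible):

    # On a branch that only touches docs/, expect "docs-only" (exit 1);
    # on any other branch, expect "full CI" (exit 0).
    git fetch origin main
    ./tests/scripts/git_change_docs.sh && echo "full CI" || echo "docs-only"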