Commit ad90147

Merge remote-tracking branch 'upstream/master'
ZhennanQin committed Jun 29, 2018
2 parents: a35bc9a + 2594fca; commit: ad90147
Showing 125 changed files with 6,895 additions and 1,468 deletions.
79 changes: 40 additions & 39 deletions Jenkinsfile
@@ -93,22 +93,27 @@ echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
}

def publish_test_coverage() {
sh 'curl -s https://codecov.io/bash | bash -s -'
sh 'curl --retry 10 -s https://codecov.io/bash | bash -s -'
}
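
(Aside, not part of this diff: a minimal sketch of how the same codecov upload could additionally be wrapped in Jenkins' built-in retry step, so a failed download of the uploader script re-runs the whole shell step; the helper name below is hypothetical.)

def publish_test_coverage_retrying() {
    // Hypothetical variant: pipeline-level retry on top of curl's --retry,
    // re-running the entire pipe if any part of it fails.
    retry(3) {
        sh 'curl --retry 10 -s https://codecov.io/bash | bash -s -'
    }
}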

def collect_test_results_unix(original_file_name, new_file_name) {
echo 'Saving python test results for ' + new_file_name
// Rename file to make it distinguishable. Unfortunately, it's not possible to get STAGE_NAME in a parallel stage
sh 'cp ' + original_file_name + ' ' + new_file_name
archiveArtifacts artifacts: new_file_name
if (fileExists(original_file_name)) {
// Rename file to make it distinguishable. Unfortunately, it's not possible to get STAGE_NAME in a parallel stage
// Thus, we have to pick a name manually and rename the files so that they can be stored separately.
sh 'cp ' + original_file_name + ' ' + new_file_name
archiveArtifacts artifacts: new_file_name
}
}

def collect_test_results_windows(original_file_name, new_file_name) {
echo 'Saving python test results for ' + new_file_name
// Rename file to make it distinguishable. Unfortunately, it's not possible to get STAGE_NAME in a parallel stage
bat 'xcopy ' + original_file_name + ' ' + new_file_name
archiveArtifacts artifacts: new_file_name
}
// Thus, we have to pick a name manually and rename the files so that they can be stored separately.
if (fileExists(original_file_name)) {
bat 'xcopy ' + original_file_name + ' ' + new_file_name + '*'
archiveArtifacts artifacts: new_file_name
}
}
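
(Aside, not part of this diff: a minimal usage sketch of the guarded collector from a test stage's finally block; the node label is an assumption here, while the batch script and result file names mirror the stages shown further down.)

node('mxnetwindows-cpu') {   // node label assumed for illustration
    try {
        bat 'C:\\mxnet\\test_cpu.bat'
    } finally {
        // The fileExists() guard inside the helper keeps this post step from
        // failing when the test runner aborted before writing any XML.
        collect_test_results_windows('nosetests_unittest.xml', 'nosetests_unittest_windows_python3_cpu.xml')
    }
}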


def docker_run(platform, function_name, use_nvidia, shared_mem = '500m') {
def command = "ci/build.py --docker-registry ${env.DOCKER_CACHE_REGISTRY} %USE_NVIDIA% --platform %PLATFORM% --shm-size %SHARED_MEM% /work/runtime_functions.sh %FUNCTION_NAME%"
@@ -489,7 +494,7 @@ try {
}
} // End of stage('Build')

stage('Unit Test') {
stage('Tests') {
parallel 'Python2: CPU': {
node('mxnetlinux-cpu') {
ws('workspace/ut-python2-cpu') {
@@ -786,8 +791,7 @@ try {
del /S /Q ${env.WORKSPACE}\\pkg_vc14_cpu\\python\\*.pyc
C:\\mxnet\\test_cpu.bat"""
} finally {
// We are unable to modify test_cpu.bat, so we can't track test failures on Windows
// collect_test_results_windows('nosetests.xml', 'nosetests_windows_python2_cpu.xml')
collect_test_results_windows('nosetests_unittest.xml', 'nosetests_unittest_windows_python2_cpu.xml')
}
}
}
@@ -809,8 +813,7 @@ try {
del /S /Q ${env.WORKSPACE}\\pkg_vc14_cpu\\python\\*.pyc
C:\\mxnet\\test_cpu.bat"""
} finally {
// We are unable to modify test_cpu.bat, so we can't track test failures on Windows
// collect_test_results_windows('nosetests.xml', 'nosetests_windows_python3_cpu.xml')
collect_test_results_windows('nosetests_unittest.xml', 'nosetests_unittest_windows_python3_cpu.xml')
}
}
}
@@ -832,8 +835,8 @@ try {
del /S /Q ${env.WORKSPACE}\\pkg_vc14_gpu\\python\\*.pyc
C:\\mxnet\\test_gpu.bat"""
} finally {
// We are unable to modify test_cpu.bat, so we can't track test failures on Windows
// collect_test_results_windows('nosetests.xml', 'nosetests_windows_python2_gpu.xml')
collect_test_results_windows('nosetests_gpu_forward.xml', 'nosetests_gpu_forward_windows_python2_gpu.xml')
collect_test_results_windows('nosetests_gpu_operator.xml', 'nosetests_gpu_operator_windows_python2_gpu.xml')
}
}
}
@@ -855,8 +858,8 @@ try {
del /S /Q ${env.WORKSPACE}\\pkg_vc14_gpu\\python\\*.pyc
C:\\mxnet\\test_gpu.bat"""
} finally {
// We are unable to modify test_cpu.bat, so we can't track test failures on Windows
// collect_test_results_windows('nosetests.xml', 'nosetests_windows_python3_gpu.xml')
collect_test_results_windows('nosetests_gpu_forward.xml', 'nosetests_gpu_forward_windows_python3_gpu.xml')
collect_test_results_windows('nosetests_gpu_operator.xml', 'nosetests_gpu_operator_windows_python3_gpu.xml')
}
}
}
@@ -878,17 +881,14 @@ try {
del /S /Q ${env.WORKSPACE}\\pkg_vc14_gpu_mkldnn\\python\\*.pyc
C:\\mxnet\\test_gpu.bat"""
} finally {
// We are unable to modify test_cpu.bat, so we can't track test failures on Windows
// collect_test_results_windows('nosetests.xml', 'nosetests_windows_python3_mkldnn_Gpu.xml')
collect_test_results_windows('nosetests_gpu_forward.xml', 'nosetests_gpu_forward_windows_python3_gpu_mkldnn.xml')
collect_test_results_windows('nosetests_gpu_operator.xml', 'nosetests_gpu_operator_windows_python3_gpu_mkldnn.xml')
}
}
}
}
}
}

stage('Integration Test') {
parallel 'Onnx CPU': {
},
'Onnx CPU': {
node('mxnetlinux-cpu') {
ws('workspace/it-onnx-cpu') {
timeout(time: max_time, unit: 'MINUTES') {
@@ -945,19 +945,20 @@ try {
}
}
}
},
'dist-kvstore tests GPU': {
node('mxnetlinux-gpu') {
ws('workspace/it-dist-kvstore') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
unpack_lib('gpu')
docker_run('ubuntu_gpu', 'integrationtest_ubuntu_gpu_dist_kvstore', true)
publish_test_coverage()
}
}
}
}
// Disable until fixed https://github.com/apache/incubator-mxnet/issues/11441
// 'dist-kvstore tests GPU': {
// node('mxnetlinux-gpu') {
// ws('workspace/it-dist-kvstore') {
// timeout(time: max_time, unit: 'MINUTES') {
// init_git()
// unpack_lib('gpu')
// docker_run('ubuntu_gpu', 'integrationtest_ubuntu_gpu_dist_kvstore', true)
// publish_test_coverage()
// }
// }
// }
//}
}

stage('Deploy') {
@@ -982,8 +983,8 @@ try {
}
} finally {
node("mxnetlinux-cpu") {
// Only send email if master failed
if (currentBuild.result == "FAILURE" && env.BRANCH_NAME == "master") {
// Only send email if master or release branches failed
if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) {
emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}'
}
// Remember to rethrow so the build is marked as failing
Expand Down
52 changes: 50 additions & 2 deletions LICENSE
@@ -298,8 +298,6 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



=======================================================================================
Other Licenses
=======================================================================================
@@ -512,3 +510,53 @@
For details, see, 3rdparty/dmlc-core/include/dmlc/concurrentqueue.h

=======================================================================================

11. ONNX Export module
For details, see, python/mxnet/contrib/onnx/_export/LICENSE

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Based on
# https://github.com/NVIDIA/mxnet_to_onnx/blob/master/mx2onnx_converter/#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


17 changes: 7 additions & 10 deletions Makefile
@@ -66,11 +66,8 @@ $(warning "USE_MKL2017 is deprecated. We will switch to USE_MKLDNN.")
endif

ifeq ($(USE_MKLDNN), 1)
RETURN_STRING := $(shell ./prepare_mkldnn.sh $(MKLDNN_ROOT))
LAST_WORD_INDEX := $(words $(RETURN_STRING))
# fetch the 2nd last word as MKLDNNROOT
MKLDNNROOT := $(word $(shell echo $$(($(LAST_WORD_INDEX) - 1))),$(RETURN_STRING))
MKLROOT := $(lastword $(RETURN_STRING))
MKLDNNROOT = $(ROOTDIR)/3rdparty/mkldnn/install
MKLROOT = $(ROOTDIR)/3rdparty/mkldnn/install
export USE_MKLML = 1
endif

@@ -434,11 +431,11 @@ endif
# For quick compile test, used smaller subset
ALLX_DEP= $(ALL_DEP)

build/src/%.o: src/%.cc
build/src/%.o: src/%.cc | mkldnn
@mkdir -p $(@D)
$(CXX) -std=c++11 -c $(CFLAGS) -MMD -c $< -o $@

build/src/%_gpu.o: src/%.cu
build/src/%_gpu.o: src/%.cu | mkldnn
@mkdir -p $(@D)
$(NVCC) $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" -M -MT build/src/$*_gpu.o $< >build/src/$*_gpu.d
$(NVCC) -c -o $@ $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" $<
@@ -507,6 +504,7 @@ ifeq ($(USE_CPP_PACKAGE), 1)
include cpp-package/cpp-package.mk
endif

include mkldnn.mk
include tests/cpp/unittest.mk

extra-packages: $(EXTRA_PACKAGES)
@@ -619,10 +617,9 @@ clean: cyclean $(EXTRA_PACKAGES_CLEAN)
$(RM) -r $(patsubst %, %/*.d, $(EXTRA_OPERATORS)) $(patsubst %, %/*/*.d, $(EXTRA_OPERATORS))
$(RM) -r $(patsubst %, %/*.o, $(EXTRA_OPERATORS)) $(patsubst %, %/*/*.o, $(EXTRA_OPERATORS))
else
clean: cyclean testclean $(EXTRA_PACKAGES_CLEAN)
clean: mkldnn_clean cyclean testclean $(EXTRA_PACKAGES_CLEAN)
$(RM) -r build lib bin *~ */*~ */*/*~ */*/*/*~ R-package/NAMESPACE R-package/man R-package/R/mxnet_generated.R \
R-package/inst R-package/src/image_recordio.h R-package/src/*.o R-package/src/*.so mxnet_*.tar.gz \
3rdparty/mkldnn/install/*
R-package/inst R-package/src/image_recordio.h R-package/src/*.o R-package/src/*.so mxnet_*.tar.gz
cd $(DMLC_CORE); $(MAKE) clean; cd -
cd $(PS_PATH); $(MAKE) clean; cd -
cd $(NNVM_PATH); $(MAKE) clean; cd -
4 changes: 2 additions & 2 deletions ci/Jenkinsfile_docker_cache
@@ -70,8 +70,8 @@ try {
} finally {
node("restricted-mxnetlinux-cpu") {
// Only send email if master failed
if (currentBuild.result == "FAILURE" && env.BRANCH_NAME == "master") {
emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}'
if (currentBuild.result == "FAILURE") {
emailext body: 'Generating the Docker Cache has failed. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[DOCKER CACHE FAILED] Run ${BUILD_NUMBER}', to: '${EMAIL}'
}
// Remember to rethrow so the build is marked as failing
if (err) {
39 changes: 39 additions & 0 deletions ci/docker/Dockerfile.build.ubuntu_blc
@@ -0,0 +1,39 @@
# -*- mode: dockerfile -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU

FROM ubuntu:16.04

WORKDIR /work/deps

COPY install/ubuntu_core.sh /work/
RUN /work/ubuntu_core.sh
COPY install/ubuntu_python.sh /work/
RUN /work/ubuntu_python.sh
COPY install/ubuntu_npm_blc.sh /work/
RUN /work/ubuntu_npm_blc.sh

ARG USER_ID=0
COPY install/ubuntu_adduser.sh /work/
RUN /work/ubuntu_adduser.sh

COPY runtime_functions.sh /work/

WORKDIR /work/mxnet
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
4 changes: 2 additions & 2 deletions ci/docker/Dockerfile.build.ubuntu_nightly_cpu
@@ -24,8 +24,8 @@ WORKDIR /work/deps

COPY install/ubuntu_core.sh /work/
RUN /work/ubuntu_core.sh
COPY install/ubuntu_ccache.sh /work/
RUN /work/ubuntu_ccache.sh
COPY install/deb_ubuntu_ccache.sh /work/
RUN /work/deb_ubuntu_ccache.sh
COPY install/ubuntu_python.sh /work/
RUN /work/ubuntu_python.sh
COPY install/ubuntu_scala.sh /work/
4 changes: 2 additions & 2 deletions ci/docker/Dockerfile.build.ubuntu_nightly_gpu
@@ -24,8 +24,8 @@ WORKDIR /work/deps

COPY install/ubuntu_core.sh /work/
RUN /work/ubuntu_core.sh
COPY install/ubuntu_ccache.sh /work/
RUN /work/ubuntu_ccache.sh
COPY install/deb_ubuntu_ccache.sh /work/
RUN /work/deb_ubuntu_ccache.sh
COPY install/ubuntu_python.sh /work/
RUN /work/ubuntu_python.sh
COPY install/ubuntu_scala.sh /work/
6 changes: 4 additions & 2 deletions ci/docker/install/ubuntu_nightly_tests.sh
@@ -21,8 +21,10 @@

set -ex

#Install for Compilation warning Nightly Test
add-apt-repository ppa:ubuntu-toolchain-r/test
# Install for Compilation warning Nightly Test
# Adding ppas frequently fails due to busy gpg servers, retry 5 times with 5 minute delays.
for i in 1 2 3 4 5; do add-apt-repository -y ppa:ubuntu-toolchain-r/test && break || sleep 300; done

apt-get update
apt-get -y install time

37 changes: 37 additions & 0 deletions ci/docker/install/ubuntu_npm_blc.sh
@@ -0,0 +1,37 @@
#!/bin/bash

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# build and install are separated so changes to build don't invalidate
# the whole docker cache for the image

set -ex
echo 'Installing npm...'
apt-get update
apt-get install -y npm

echo "Obtaining NodeJS version 8.x"
curl -sL https://deb.nodesource.com/setup_8.x | bash -

echo "Installing nodejs"
apt-get install -y nodejs

# Install broken link checker utility
echo "Installing broken link checker utility"
npm install broken-link-checker -g

