Skip to content

Commit

Permalink
remove MKL_EXPERIMENTAL and update make files for MKL-DNN (apache#9810)
Browse files Browse the repository at this point in the history
* replace MKL2017 references with MKL-DNN

* remove MKLML_ROOT

* MKL_README.md for Full MKL

* update test_mkldnn

* update Jenkinsfile

* update jenkins

* trigger Jenkins with new changes

* trigger Jenkins with new changes
  • Loading branch information
ashokei authored and marcoabreu committed Feb 25, 2018
1 parent c6a7aba commit 5c5a904
Show file tree
Hide file tree
Showing 9 changed files with 35 additions and 111 deletions.
18 changes: 18 additions & 0 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
mx_lib = 'lib/libmxnet.so, lib/libmxnet.a, dmlc-core/libdmlc.a, nnvm/lib/libnnvm.a'
// mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default.
mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, build/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, build/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, build/3rdparty/mkldnn/src/libmkldnn.so, build/3rdparty/mkldnn/src/libmkldnn.so.0'
mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, lib/libmklml_gnu.so, lib/libmkldnn.so, lib/libmkldnn.so.0, lib/libmklml_intel.so, dmlc-core/libdmlc.a, nnvm/lib/libnnvm.a'
// command to start a docker container
docker_run = 'tests/ci_build/ci_build.sh'
Expand Down Expand Up @@ -260,6 +261,23 @@ try {
}
}
},
'GPU: CMake MKLDNN': {
node('mxnetlinux-cpu') {
ws('workspace/build-cmake-mkldnn-gpu') {
init_git()
def defines = """ \
-DUSE_CUDA=1 \
-DUSE_CUDNN=1 \
-DUSE_MKLML_MKL=1 \
-DUSE_MKLDNN=1 \
-DCMAKE_BUILD_TYPE=Release \
"""
def flag = "-v"
cmake("build_cuda", defines, flag)
pack_lib('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib)
}
}
},
'GPU: CMake': {
node('mxnetlinux-cpu') {
ws('workspace/build-cmake-gpu') {
Expand Down
43 changes: 0 additions & 43 deletions MKL_README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,46 +17,3 @@ Installing and enabling the full MKL installation enables MKL support for all op

5. Run 'sudo python setup.py install'

# MKL2017 PLUGIN

MKL2017 is an INTEL released library to accelerate Deep Neural Network (DNN) applications on Intel architecture.

MKL2017_ML is a subset of MKL2017 and only contains the DNN acceleration features. The MKL2017 release cycle is longer than that of MKL2017_ML, and MKL2017_ML supports the latest features

This README shows the user how to set up and install the MKL2017 library with mxnet.

## Build/Install MXNet with MKL:

1. Enable USE_MKL2017=1 in make/config.mk

1.1 By default, USE_MKL2017_EXPERIMENTAL=0. If setting USE_MKL2017_EXPERIMENTAL=1, an MKL buffer will be created and transferred between layers to achieve much higher performance.

1.2 By default, MKLML_ROOT=/usr/local, MKL2017_ML will be used

1.2.1 When executing make, the Makefile will execute "prepare_mkl.sh" to download the MKL2017_ML library under <MKLML_ROOT>

1.2.2 Manual steps to download MKL2017_ML in case of problems

1.2.2.1 wget https://github.com/dmlc/web-data/raw/master/mxnet/mklml-release/mklml_lnx_<MKL VERSION>.tgz

1.2.2.2 tar zxvf mklml_lnx_<MKL VERSION>.tgz

1.2.2.3 cp -rf mklml_lnx_<MKL VERSION>/* <MKLML_ROOT>/

1.2.3 Set LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MKLML_ROOT/lib

1.3 If setting USE_BLAS=mkl

1.3.1 mshadow can also utilize the MKL BLAS functions in the mklml package

1.4 MKL version compatibility

1.4.1 If you already have MKL installed and MKLROOT set in your system, by default it will not attempt to download the latest mklml package unless you unset MKLROOT.

2. Run 'make -jX'

3. Navigate into the python directory

4. Run 'sudo python setup.py install'


20 changes: 1 addition & 19 deletions docker_multiarch/arm.crosscompile.android.mk
Original file line number Diff line number Diff line change
Expand Up @@ -82,21 +82,6 @@ USE_OPENCV = 0
# use openmp for parallelization
USE_OPENMP = 1

# MKL ML Library for Intel CPU/Xeon Phi
# Please refer to MKL_README.md for details

# MKL ML Library folder, need to be root for /usr/local
# Change to User Home directory for standard user
# For USE_BLAS!=mkl only
MKLML_ROOT=/usr/local

# whether use MKL2017 library
USE_MKL2017 = 0

# whether use MKL2017 experimental feature for high performance
# Prerequisite USE_MKL2017=1
USE_MKL2017_EXPERIMENTAL = 0

# whether use NNPACK library
USE_NNPACK = 0

Expand All @@ -115,13 +100,10 @@ USE_LAPACK_PATH =
USE_INTEL_PATH = NONE

# If use MKL only for BLAS, choose static link automatically to allow python wrapper
ifeq ($(USE_MKL2017), 0)
USE_STATIC_MKL = NONE
ifeq ($(USE_BLAS), mkl)
USE_STATIC_MKL = 1
endif
else
USE_STATIC_MKL = NONE
endif

#----------------------------
# distributed computing
Expand Down
22 changes: 2 additions & 20 deletions docker_multiarch/arm.crosscompile.mk
Original file line number Diff line number Diff line change
Expand Up @@ -82,21 +82,6 @@ USE_OPENCV = 0
# use openmp for parallelization
USE_OPENMP = 1

# MKL ML Library for Intel CPU/Xeon Phi
# Please refer to MKL_README.md for details

# MKL ML Library folder, need to be root for /usr/local
# Change to User Home directory for standard user
# For USE_BLAS!=mkl only
MKLML_ROOT=/usr/local

# whether use MKL2017 library
USE_MKL2017 = 0

# whether use MKL2017 experimental feature for high performance
# Prerequisite USE_MKL2017=1
USE_MKL2017_EXPERIMENTAL = 0

# whether use NNPACK library
USE_NNPACK = 0

Expand All @@ -115,13 +100,10 @@ USE_LAPACK_PATH =
USE_INTEL_PATH = NONE

# If use MKL only for BLAS, choose static link automatically to allow python wrapper
ifeq ($(USE_MKL2017), 0)
USE_STATIC_MKL = NONE
ifeq ($(USE_BLAS), mkl)
USE_STATIC_MKL = 1
endif
else
USE_STATIC_MKL = NONE
endif

#----------------------------
# distributed computing
Expand Down Expand Up @@ -176,4 +158,4 @@ USE_CPP_PACKAGE = 0
# whether to use sframe integration. This requires build sframe
# git@github.com:dato-code/SFrame.git
# SFRAME_PATH = $(HOME)/SFrame
# MXNET_PLUGINS += plugin/sframe/plugin.mk
# MXNET_PLUGINS += plugin/sframe/plugin.mk
5 changes: 1 addition & 4 deletions docs/faq/perf.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,7 @@ Performance is mainly affected by the following 4 factors:
## Intel CPU

For using Intel Xeon CPUs for training and inference, we suggest enabling
both `USE_MKL2017 = 1` and `USE_MKL2017_EXPERIMENTAL = 1` in
`config.mk`. Check
[MKL_README.md](https://github.com/dmlc/mxnet/blob/master/MKL_README.md) for
details.
`USE_MKLDNN = 1` in `config.mk`.

We also find that setting the following two environment variables can help:
- `export KMP_AFFINITY=granularity=fine,compact,1,0` if there are two physical CPUs
Expand Down
2 changes: 1 addition & 1 deletion example/image-classification/benchmark_score.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def score(network, dev, batch_size, num_batches):
if __name__ == '__main__':
networks = ['alexnet', 'vgg-16', 'inception-bn', 'inception-v3', 'resnet-50', 'resnet-152']
devs = [mx.gpu(0)] if len(get_gpus()) > 0 else []
# Enable USE_MKL2017_EXPERIMENTAL for better CPU performance
# Enable USE_MKLDNN for better CPU performance
devs.append(mx.cpu())

batch_sizes = [1, 2, 4, 8, 16, 32]
Expand Down
16 changes: 2 additions & 14 deletions make/config.mk
Original file line number Diff line number Diff line change
Expand Up @@ -95,20 +95,8 @@ USE_LIBJPEG_TURBO_PATH = NONE
# use openmp for parallelization
USE_OPENMP = 1

# MKL ML Library for Intel CPU/Xeon Phi
# Please refer to MKL_README.md for details

# MKL ML Library folder, need to be root for /usr/local
# Change to User Home directory for standard user
# For USE_BLAS!=mkl only
MKLML_ROOT=/usr/local

# whether use MKL2017 library
USE_MKL2017 = 0

# whether use MKL2017 experimental feature for high performance
# Prerequisite USE_MKL2017=1
USE_MKL2017_EXPERIMENTAL = 0
# whether use MKL-DNN library
USE_MKLDNN = 0

# whether use NNPACK library
USE_NNPACK = 0
Expand Down
2 changes: 1 addition & 1 deletion make/osx.mk
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ USE_BLAS = apple
USE_LAPACK = 1

# by default, disable lapack when using MKL
# switch on when there is a full installation of MKL available (not just MKL2017/MKL_ML)
# switch on when there is a full installation of MKL available (not just MKL_ML)
ifeq ($(USE_BLAS), mkl)
USE_LAPACK = 0
endif
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,24 +16,24 @@
# under the License.

"""
MKLML related test cases
MKL-DNN related test cases
"""

import logging
import os
from sys import platform

def test_mklml_install():
def test_mkldnn_install():
"""
This test will verify that MXNet is built/installed correctly when
compiled with Intel MKLML library. The method will try to import
the mxnet module and see if the mklml library is mapped to this
compiled with Intel MKL-DNN library. The method will try to import
the mxnet module and see if the mkldnn library is mapped to this
process's address space.
"""
logging.basicConfig(level=logging.INFO)

if not platform.startswith('linux'):
logging.info("Bypass mklml install test for non-Linux OS")
logging.info("Bypass mkldnn install test for non-Linux OS")
return

try:
Expand All @@ -45,14 +45,14 @@ def test_mklml_install():

pid = os.getpid()
rc = os.system("cat /proc/" + str(pid) + \
"/maps | grep libmklml_ > /dev/null")
"/maps | grep libmkldnn > /dev/null")

if rc == 0:
logging.info("MXNet is built/installed correctly with MKLML")
logging.info("MXNet is built/installed correctly with MKL-DNN")
else:
assert 0, "MXNet is built/installed incorrectly with MKLML, please " \
assert 0, "MXNet is built/installed incorrectly with MKL-DNN, please " \
"double check your build/install steps or environment " \
"variable settings"

if __name__ == '__main__':
test_mklml_install()
test_mkldnn_install()

0 comments on commit 5c5a904

Please sign in to comment.