V1.12.0 tf disable mkl #12

Closed
wants to merge 5 commits into from
30 changes: 16 additions & 14 deletions tensorflow/core/common_runtime/process_util.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tracing.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/util.h"

namespace tensorflow {

@@ -56,24 +57,25 @@ int32 NumInterOpThreadsFromSessionOptions(const SessionOptions& options) {
const int32 inter_op = options.config.inter_op_parallelism_threads();
if (inter_op != 0) return inter_op;
#ifdef INTEL_MKL
-  // MKL library executes ops in parallel using OMP threads
-  // Set inter_op conservatively to avoid thread oversubscription that could
-  // lead to severe perf degradations and OMP resource exhaustion
-  int mkl_intra_op = 1;
+  if (!DisableMKL()) {
+    // MKL library executes ops in parallel using OMP threads
+    // Set inter_op conservatively to avoid thread oversubscription that could
+    // lead to severe perf degradations and OMP resource exhaustion
+    int mkl_intra_op = 1;
#ifdef _OPENMP
-  mkl_intra_op = omp_get_max_threads();
+    mkl_intra_op = omp_get_max_threads();
#endif // _OPENMP
-  CHECK_GE(mkl_intra_op, 1);
-  const int32 mkl_inter_op = std::max(
-      (port::NumSchedulableCPUs() + mkl_intra_op - 1) / mkl_intra_op, 2);
-  VLOG(0) << "Creating new thread pool with default inter op setting: "
-          << mkl_inter_op
-          << ". Tune using inter_op_parallelism_threads for best performance.";
-  return mkl_inter_op;
-#else
+    DCHECK_GE(mkl_intra_op, 1);
+    const int32 mkl_inter_op = std::max(
+        (port::NumSchedulableCPUs() + mkl_intra_op - 1) / mkl_intra_op, 2);
+    VLOG(0) << "Creating new thread pool with default inter op setting: "
+            << mkl_inter_op
+            << ". Tune using inter_op_parallelism_threads for best performance.";
+    return mkl_inter_op;
+  }
+#endif // INTEL_MKL
  // Default to using the number of cores available in the process.
  return port::NumSchedulableCPUs();
-#endif // INTEL_MKL
}

thread::ThreadPool* NewThreadPoolFromSessionOptions(
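Note on the hunk above: the MKL default for inter_op is a ceiling division of the schedulable CPUs by the number of OMP threads MKL uses per op, floored at 2 so at least two ops can run concurrently. A minimal standalone sketch of that arithmetic (the CPU and OMP counts below are hypothetical, not values taken from this PR):

#include <algorithm>
#include <iostream>

// Default inter-op pool size used above: ceil(num_cpus / mkl_intra_op), never below 2.
int DefaultMklInterOp(int num_cpus, int mkl_intra_op) {
  return std::max((num_cpus + mkl_intra_op - 1) / mkl_intra_op, 2);
}

int main() {
  std::cout << DefaultMklInterOp(56, 14) << "\n";  // 56 CPUs, 14 OMP threads per op -> 4
  std::cout << DefaultMklInterOp(4, 4) << "\n";    // small VM hits the floor -> 2
  return 0;
}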
6 changes: 5 additions & 1 deletion tensorflow/core/common_runtime/threadpool_device.cc
@@ -29,6 +29,7 @@ limitations under the License.
#include "tensorflow/core/platform/tracing.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/util.h"

#ifdef INTEL_MKL
#ifdef _OPENMP
@@ -49,6 +50,8 @@ ThreadPoolDevice::ThreadPoolDevice(const SessionOptions& options,
allocator_(allocator),
scoped_allocator_mgr_(new ScopedAllocatorMgr(name)) {
#ifdef INTEL_MKL
// Early return when MKL is disabled
if (DisableMKL()) return;
#ifdef _OPENMP
const char* user_omp_threads = getenv("OMP_NUM_THREADS");
if (user_omp_threads == nullptr) {
@@ -114,7 +117,8 @@ class MklCPUAllocatorFactory : public AllocatorFactory {
};

#ifdef ENABLE_MKL
REGISTER_MEM_ALLOCATOR("MklCPUAllocator", 200, MklCPUAllocatorFactory);
REGISTER_MEM_ALLOCATOR("MklCPUAllocator", (DisableMKL() ? 50 : 200),
MklCPUAllocatorFactory);
#endif // ENABLE_MKL

} // namespace
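Note on the allocator change above: REGISTER_MEM_ALLOCATOR selects the factory registered with the highest priority, and the stock CPU allocator factory sits at an intermediate priority (100 in this tree, if the rest of the registry is unchanged). Dropping the MKL factory to 50 when DisableMKL() is true therefore hands CPU allocation back to the default allocator, while 200 keeps MklCPUAllocator in front otherwise. A toy sketch of that highest-priority-wins rule (illustrative only, not TensorFlow's actual registry code):

#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

// Toy registry keyed by priority; mirrors only the selection rule.
std::map<int, std::string>& Registry() {
  static std::map<int, std::string> r;
  return r;
}

void RegisterAllocator(const std::string& name, int priority) { Registry()[priority] = name; }

const std::string& BestAllocator() { return Registry().rbegin()->second; }  // highest priority wins

int main() {
  bool disable_mkl = std::getenv("TF_DISABLE_MKL") != nullptr;  // stand-in for DisableMKL()
  RegisterAllocator("DefaultCPUAllocator", 100);
  RegisterAllocator("MklCPUAllocator", disable_mkl ? 50 : 200);
  std::cout << BestAllocator() << "\n";  // MklCPUAllocator unless TF_DISABLE_MKL is set
  return 0;
}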
5 changes: 5 additions & 0 deletions tensorflow/core/graph/mkl_layout_pass.cc
@@ -38,6 +38,7 @@ limitations under the License.
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/util/util.h"

#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/graph/mkl_layout_pass.h"
@@ -4511,6 +4512,10 @@ Status MklLayoutRewritePass::Run(const GraphOptimizationPassOptions& options) {
if (options.graph == nullptr && options.partition_graphs == nullptr) {
return Status::OK();
}
if (DisableMKL()) {
VLOG(2) << "TF-MKL: Disabling MKL";
return Status::OK();
}

auto process_graph = [&](std::unique_ptr<Graph>* g) {
// Get the ownership of a graph
5 changes: 5 additions & 0 deletions tensorflow/core/graph/mkl_tfconversion_pass.cc
@@ -31,6 +31,7 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/util.h"

#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/graph/mkl_tfconversion_pass.h"
@@ -424,6 +425,10 @@ Status MklToTfConversionPass::Run(const GraphOptimizationPassOptions& options) {
if (options.graph == nullptr && options.partition_graphs == nullptr) {
return Status::OK();
}
if (DisableMKL()) {
VLOG(2) << "TF-MKL: Disabling MKL";
return Status::OK();
}

auto process_graph = [&](std::unique_ptr<Graph>* g) {
// Get the ownership of graph
16 changes: 16 additions & 0 deletions tensorflow/core/util/util.cc
@@ -120,4 +120,20 @@ string SliceDebugString(const TensorShape& shape, const int64 flat) {
return result;
}

#ifdef INTEL_MKL
bool DisableMKL() {
  enum MklStatus { MKL_DEFAULT = 0, MKL_ON = 1, MKL_OFF = 2 };
  static MklStatus status = MKL_DEFAULT;
  if (status == MKL_DEFAULT) {
    char* tf_disable_mkl = getenv("TF_DISABLE_MKL");
    if ((tf_disable_mkl != NULL) && (std::stoi(tf_disable_mkl) == 1)) {
      VLOG(2) << "TF-MKL: Disabling MKL";
      status = MKL_OFF;
    } else {
      status = MKL_ON;
    }
  }
  return status == MKL_OFF ? true : false;
}
#endif // INTEL_MKL
} // namespace tensorflow
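DisableMKL() reads TF_DISABLE_MKL once and caches the answer in a function-local static, so the variable only takes effect if it is set before the first call, i.e. before TensorFlow builds its devices, thread pools, and graph passes. A self-contained sketch of the same memoized-toggle pattern, using only the standard library plus POSIX setenv (the exact-match "1" comparison here stands in for the std::stoi check above):

#include <cstdlib>
#include <iostream>
#include <string>

// Memoized environment toggle, same shape as DisableMKL() above.
bool DisableMklFromEnv() {
  static const bool disabled = [] {
    const char* v = std::getenv("TF_DISABLE_MKL");
    return v != nullptr && std::string(v) == "1";
  }();
  return disabled;
}

int main() {
  setenv("TF_DISABLE_MKL", "1", /*overwrite=*/1);              // must happen before the first call
  std::cout << std::boolalpha << DisableMklFromEnv() << "\n";  // true
  setenv("TF_DISABLE_MKL", "0", 1);
  std::cout << DisableMklFromEnv() << "\n";                    // still true: the result is cached
  return 0;
}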
5 changes: 5 additions & 0 deletions tensorflow/core/util/util.h
@@ -56,6 +56,11 @@ string PrintMemory(const char* ptr, size_t n);
// "tensor", "tensor[i]", "tensor[i, j]", etc.
string SliceDebugString(const TensorShape& shape, const int64 flat);

// Returns true when MKL should be disabled at runtime (TF_DISABLE_MKL=1).
#ifdef INTEL_MKL
bool DisableMKL();
#endif // INTEL_MKL

} // namespace tensorflow

#endif // TENSORFLOW_CORE_UTIL_UTIL_H_