Skip to content

Commit

Permalink
Add channel_ to Shape2D calculation (apache#3181)
Browse files Browse the repository at this point in the history
* Add channel_ to Shape2D calculation

* scalapkg, add example multitask (apache#3186)

* RNN cell demo with ptb LSTM language model (apache#3197)

* rnn-cell demo (push to server for testing)

* a running example with cuDNN RNN cell

* Bulk lint fix (apache#3211)

* [TENSOR] Add FlatTo1D for all elementwise ops (apache#3238)

* Fix little bug on context (apache#3202)

* add PennTreeBank Language Model using lstm model in R (apache#2659)

* Add function 'print_summary' and some revise (apache#3161)

* Add function 'print_summary' and some revise

Add function 'print_summary' to print detailed information about a network; a format argument was also added to 'plot_network'.
You can use 'print_summary' like:
"""
net = get_symbol(1000)
shape = {'softmax_label': (64, 12), 'data': (64, 3, 224, 224)}
mx.viz.print_summary(net, shape=shape)
"""
If no shape is provided, the reported number of arguments is currently meaningless.

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Update visualization.py

* Added my CmakeLists.txt for caffe plugin, etc.

* Revert "fix travis scala test config" (apache#3246)

This reverts parts of commit 3e15f62.
Reenables testing the Julia bindings

* [Scala] Code generation for Symbol (apache#3217)


[scala] auto-generate Symbol functions

* fix spelling errors (apache#3258)

Also align grammar and punctuation in short descriptions of features

* fix typo in run_test.sh (apache#3260)

* Copy slice along arbitrary axis (apache#3259)

* rnn-cell demo (push to server for testing)

* a running example with cuDNN RNN cell

* add copyslice along arbitrary axis for NDArray

* copy_slice_to as an ndarray operator

* Python interface to the _copy_slice_to operator

* fix lint error

* Enable concatenation for dim-1 vectors (apache#3264)

* fix PReLU backward computing (apache#3277)

* Add `reverse` option in Reshape (apache#3280)

* add scala example, end2end neural-style (apache#3267)

add scala example, end2end neural-style

* Improve multi-GPU performance (apache#3241)

* update kvstore

* update model.py

* bandwidth tool

* update readme

* tiny

* fix lint

* fix batch size of dist_device_sync

* fix

* fix perf problem of kvstore when only using a single device

* roll back to the previous strategy for choosing update_on_kvstore

* add an optional MXNET_ENABLE_GPU_P2P to control whether or not to use p2p

* update dmlccore (apache#3293)

* Fix newer version of gtest and cpptest (apache#3294)

* when set use_global_stats then do not use cudnn (apache#3289)

* when set use_global_stats then do not use cudnn

* fix batch norm with use_global_stats

* Fix req+reserve_space in cudnn_rnn (apache#3274)

Fix req

Fix reserve_space

Allocate reserve_space using Storage

* add cudnn off option in Convolution (apache#3270)

* add support for building on power (apache#3302)

* add recent examples, collect some missing tutorials (apache#3340)

* CMake for caffe plugin
  • Loading branch information
cjolivier01 authored and piiswrong committed Sep 22, 2016
1 parent b3c4d25 commit 2d0e3ac
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 6 deletions.
54 changes: 50 additions & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,15 @@ else(MSVC)
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-std=c++11" SUPPORT_CXX11)
check_cxx_compiler_flag("-msse2" SUPPORT_MSSE2)
set(CMAKE_C_FLAGS "-O3 -Wall -msse2 -Wno-unknown-pragmas -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_C_FLAGS "-Wall -msse2 -Wno-unknown-pragmas -fPIC")
if(NDEBUG)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3")
else(NDEBUG)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -ggdb3")
endif(NDEBUG)
if(SUPPORT_CXX11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
endif(MSVC)

if(USE_OPENCV)
Expand Down Expand Up @@ -72,7 +79,7 @@ if(USE_CUDNN)
add_definitions(-DUSE_CUDNN)
include_directories(SYSTEM ${CUDNN_INCLUDE})
list(APPEND mxnet_LINKER_LIBS ${CUDNN_LIBRARY})
add_definitions(-DMSHADOW_USE_CUDNN=1)
add_definitions(-DMSHADOW_USE_CUDNN=1)
endif()
endif()

Expand Down Expand Up @@ -135,6 +142,37 @@ if(USE_PLUGINS_WARPCTC)
list(APPEND CUDA ${PLUGINS_CUSRC})
endif()

if(USE_PLUGIN_CAFFE)
  # Locate the caffe installation. If the user did not supply CAFFE_PATH,
  # fall back to a caffe checkout nested inside the mxnet source tree.
  if(NOT DEFINED CAFFE_PATH)
    if(EXISTS ${PROJECT_SOURCE_DIR}/caffe)
      # Need newer FindCUDA.cmake that correctly handles -std=c++11
      cmake_minimum_required(VERSION 3.3)
      set(CAFFE_PATH ${PROJECT_SOURCE_DIR}/caffe)
    else()
      # Fail *before* CAFFE_PATH is consumed below. The previous version only
      # checked this after include_directories()/link_directories() had
      # already been called with an undefined (empty) CAFFE_PATH.
      message(FATAL_ERROR "Please set CAFFE_PATH to point to the caffe source installation")
    endif()
  endif()
  # Make caffe's CMake modules, headers (source and generated), and built
  # libraries visible to the mxnet build.
  list(APPEND CMAKE_MODULE_PATH ${CAFFE_PATH}/cmake)
  include_directories(${CAFFE_PATH}/include)
  include_directories(${CAFFE_PATH}/build/src)
  include_directories(${CMAKE_BINARY_DIR}/caffe/include)
  link_directories(${CAFFE_PATH}/build/lib)
  # IDE source groups plus the actual source/CUDA file lists for the plugin.
  mxnet_source_group("Include\\plugin\\caffe" GLOB "plugin/caffe/*.h")
  mxnet_source_group("Source\\plugin\\caffe" GLOB "plugin/caffe/*.cc")
  mxnet_source_group("Cuda\\plugin\\caffe" GLOB "plugin/caffe/*.cu")
  FILE(GLOB_RECURSE PLUGINS_SOURCE "plugin/caffe/*.cc" "plugin/caffe/*.h")
  FILE(GLOB_RECURSE PLUGINS_CUSRC "plugin/caffe/*.cu")
  list(APPEND SOURCE ${PLUGINS_SOURCE})
  list(APPEND CUDA ${PLUGINS_CUSRC})
  include_directories(${CMAKE_BINARY_DIR}/include)
  # Caffe's link dependencies; Caffe_LINKER_LIBS may be set by caffe's own
  # CMake modules pulled in via CMAKE_MODULE_PATH above.
  list(APPEND mxnet_LINKER_LIBS
    protobuf boost_system boost_thread boost_filesystem
    gflags glog caffe
    ${Caffe_LINKER_LIBS}
  )
endif()

if (NOT (EXTRA_OPERATORS STREQUAL ""))
mxnet_source_group("Extra" GLOB_RECURSE "${EXTRA_OPERATORS}/*.cc")
mxnet_source_group("Extra\\Cuda" GLOB_RECURSE "${EXTRA_OPERATORS}/*.cu")
Expand Down Expand Up @@ -163,15 +201,23 @@ if(USE_CUDA)
list(APPEND mxnet_LINKER_LIBS ${CUDA_cuda_LIBRARY})
else(MSVC)
list(APPEND mxnet_LINKER_LIBS nvrtc cuda)
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
endif()
list(APPEND SOURCE ${cuda_objs} ${CUDA})
endif()

# unsupported: if caffe is a subdirectory of mxnet, load its CMakeLists.txt as well
if(USE_PLUGIN_CAFFE AND EXISTS ${PROJECT_SOURCE_DIR}/caffe)
  add_subdirectory(caffe)
endif()

if(NOT MSVC)
# Only add c++11 flags and definitions after cuda compiling
add_definitions(-DDMLC_USE_CXX11)
add_definitions(-DMSHADOW_IN_CXX11)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c++0x")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
Expand Down
5 changes: 3 additions & 2 deletions plugin/caffe/caffe_data_iter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ struct CaffeDataParam : public dmlc::Parameter<CaffeDataParam> {
template<typename Dtype>
class CaffeDataIter : public IIterator<TBlobBatch> {
public:
explicit CaffeDataIter(int type_flag) : batch_size_(0), channels_(0), width_(1), height_(1)
explicit CaffeDataIter(int type_flag) : batch_size_(0), channels_(1), width_(1), height_(1)
, type_flag_(type_flag), loc_(0)
{}
virtual ~CaffeDataIter(void) {}
Expand Down Expand Up @@ -100,7 +100,8 @@ class CaffeDataIter : public IIterator<TBlobBatch> {

if (top_size > DATA) {
if (param_.flat) {
batch_data_ = TBlob(nullptr, mshadow::Shape2(batch_size_, width_ * height_),
batch_data_ = TBlob(nullptr, mshadow::Shape2(batch_size_,
channels_ * width_ * height_),
cpu::kDevCPU, type_flag_);
} else {
batch_data_ = TBlob(nullptr, mxnet::TShape(top_[DATA]->shape().begin(),
Expand Down

0 comments on commit 2d0e3ac

Please sign in to comment.