From 3c66b307f7b6173a69cd4ccc9cf9f7541de964d2 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 19:57:02 +0800 Subject: [PATCH 01/27] Remove the pserver, trainer, evaluators and some useless gradientmachines when compile mobile inference library. --- CMakeLists.txt | 8 +++ cmake/util.cmake | 57 ++++++++++++------- paddle/CMakeLists.txt | 35 +++++++----- paddle/capi/CMakeLists.txt | 8 +-- paddle/gserver/CMakeLists.txt | 22 +++++++ .../gradientmachines/GradientMachine.cpp | 13 ++++- .../gradientmachines/GradientMachine.h | 7 ++- .../gradientmachines/NeuralNetwork.cpp | 18 ++++-- .../gserver/gradientmachines/NeuralNetwork.h | 3 + paddle/gserver/layers/Layer.cpp | 2 + 10 files changed, 128 insertions(+), 45 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4921226ec1c90a..ec4e6e2e868039 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -86,6 +86,14 @@ if(ANDROID OR IOS) "Disable MKLDNN when cross-compiling for Android and iOS" FORCE) set(WITH_MKLML OFF CACHE STRING "Disable MKLML package when cross-compiling for Android and iOS" FORCE) + + if(WITH_C_API) + # Compile PaddlePaddle mobile inference library + set(MOBILE_INFERENCE ON) + add_definitions(-DPADDLE_MOBILE_INFERENCE) + endif() + set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling + for Android and iOS" FORCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING diff --git a/cmake/util.cmake b/cmake/util.cmake index d1aee3e170a2d1..5ebfc0945fae4f 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -73,25 +73,44 @@ function(link_paddle_exe TARGET_NAME) generate_rdma_links() endif() - target_circle_link_libraries(${TARGET_NAME} - ARCHIVE_START - paddle_gserver - paddle_function - ARCHIVE_END - paddle_pserver - paddle_trainer_lib - paddle_network - paddle_math - paddle_utils - paddle_parameter - paddle_proto - paddle_cuda - paddle_optimizer - ${EXTERNAL_LIBS} - ${CMAKE_THREAD_LIBS_INIT} - ${CMAKE_DL_LIBS} - ${RDMA_LD_FLAGS} - ${RDMA_LIBS}) + if(MOBILE_INFERENCE) + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + paddle_optimizer + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + else() + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_pserver + paddle_trainer_lib + paddle_network + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + paddle_optimizer + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + endif() if(ANDROID) target_link_libraries(${TARGET_NAME} log) diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index b435de80a22457..3eb494ae473342 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -3,25 +3,30 @@ add_subdirectory(function) add_subdirectory(utils) add_subdirectory(testing) add_subdirectory(math) -add_subdirectory(parameter) add_subdirectory(gserver) -add_subdirectory(pserver) -add_subdirectory(trainer) add_subdirectory(scripts) add_subdirectory(string) +add_subdirectory(parameter) -if(Boost_FOUND) - add_subdirectory(memory) - add_subdirectory(platform) - add_subdirectory(framework) - add_subdirectory(operators) - add_subdirectory(pybind) -endif() - -if(WITH_C_API) +if(MOBILE_INFERENCE) add_subdirectory(capi) -endif() +else() + add_subdirectory(pserver) + add_subdirectory(trainer) + + 
if(WITH_C_API) + add_subdirectory(capi) + endif() + + if(Boost_FOUND) + add_subdirectory(memory) + add_subdirectory(platform) + add_subdirectory(framework) + add_subdirectory(operators) + add_subdirectory(pybind) + endif() -if(WITH_SWIG_PY) - add_subdirectory(api) + if(WITH_SWIG_PY) + add_subdirectory(api) + endif() endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index b9bbe58951c643..a19a19d7196e2e 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS paddle_cuda paddle_function paddle_gserver - paddle_proto - paddle_pserver - paddle_network) + paddle_proto) cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS}) @@ -50,7 +48,9 @@ if(NOT IOS) add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) - link_paddle_exe(paddle_capi_shared) + +link_paddle_exe(paddle_capi_shared) + endif() # install library & headers. diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index 62cff9361ccba3..cd469875df8abe 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -60,6 +60,28 @@ if(NOT WITH_PYTHON) dataproviders/PyDataProvider.h) endif() +if(MOBILE_INFERENCE) + # Remove evaluators + list(REMOVE_ITEM GSERVER_SOURCES + layers/ValidationLayer.cpp + evaluators/Evaluator.cpp + evaluators/DetectionMAPEvaluator.cpp + evaluators/CTCErrorEvaluator.cpp + evaluators/ChunkEvaluator.cpp) + + # Remove useless gradientmachines + list(REMOVE_ITEM GSERVER_SOURCES + gradientmachines/MultiNetwork.cpp + gradientmachines/RecurrentGradientMachine.cpp + gradientmachines/ParallelNeuralNetwork.cpp + gradientmachines/GradientMachineMode.cpp + gradientmachines/MultiGradientMachine.cpp) + + # Remove useless layers + list(REMOVE_ITEM GSERVER_SOURCES + layers/RecurrentLayerGroup.cpp) +endif() + if(WITH_GPU) cuda_add_library(paddle_gserver ${GSERVER_SOURCES}) else() diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp index b44e4dc202f019..de5faf5e1e2b3e 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.cpp +++ b/paddle/gserver/gradientmachines/GradientMachine.cpp @@ -17,12 +17,15 @@ limitations under the License. 
*/ #include #include "paddle/utils/Logging.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" + +#ifndef PADDLE_MOBILE_INFERENCE #include "GradientMachineMode.h" #include "MultiGradientMachine.h" #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" -#include "hl_gpu.h" +#endif namespace paddle { @@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create( const ModelConfig& config, int mode, const std::vector& parameterTypes) { +#ifndef PADDLE_MOBILE_INFERENCE if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) { return gm; } if (FLAGS_trainer_count > 1) { return new MultiGradientMachine(config, FLAGS_use_gpu); } +#endif if (FLAGS_trainer_count == 1) { // single +#ifndef PADDLE_MOBILE_INFERENCE NeuralNetwork* nn; if (config.type() == "multi_nn") { /* multi submodel calculate, thread(s) will be initialized inside */ @@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create( /* single thread calculate */ nn = NeuralNetwork::create(config); } +#else + NeuralNetwork* nn = NeuralNetwork::create(config); +#endif ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) { para->enableType(PARAMETER_VALUE); }; diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h index f9c82a2bef82b4..ebfe0573cfdbfb 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.h +++ b/paddle/gserver/gradientmachines/GradientMachine.h @@ -20,13 +20,16 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "TrainerConfig.pb.h" #include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/layers/Layer.h" #include "paddle/math/Matrix.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdaterBase.h" #include "paddle/utils/Thread.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "paddle/gserver/evaluators/Evaluator.h" +#endif + namespace paddle { /** * @brief A gradient machine is capable of calculating some outputs given @@ -147,6 +150,7 @@ class GradientMachine { virtual void onPassEnd() = 0; +#ifndef PADDLE_MOBILE_INFERENCE /** * Create an evaluator which can be used for eval() */ @@ -156,6 +160,7 @@ class GradientMachine { * evaluate using the given evaluator */ virtual void eval(Evaluator* evaluator) const = 0; +#endif std::vector& getParameters() { return parameters_; } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index 26cff3e67710b2..dcf0acb5a2cc76 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -14,15 +14,17 @@ limitations under the License. 
*/ #include "paddle/utils/Util.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" +#include "paddle/gserver/layers/AgentLayer.h" #include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" +#ifndef PADDLE_MOBILE_INFERENCE #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "RecurrentGradientMachine.h" -#include "hl_gpu.h" -#include "paddle/gserver/layers/AgentLayer.h" -#include "paddle/utils/Stat.h" +#endif namespace paddle { void parameterInitNN(int paramId, @@ -54,6 +56,7 @@ void parameterInitNN(int paramId, } NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { +#ifndef PADDLE_MOBILE_INFERENCE if (config.type() == "recurrent_nn") { return newNeuralNetwork("root"); } else if (config.type() == "multi_nn") { @@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { } else { return newNeuralNetwork(); } +#else + return new NeuralNetwork(); +#endif } std::map NeuralNetwork::dllInitMap; @@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() { } } +#ifndef PADDLE_MOBILE_INFERENCE + class CombinedEvaluator : public Evaluator { public: void addEvaluator(std::unique_ptr&& evaluator) { @@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const { void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); } +#endif + void NeuralNetwork::setOutputGrad(const std::vector& args) { CHECK_GE(outputLayers_.size(), args.size()); for (size_t i = 0; i < args.size(); ++i) { diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index 12810f642519b7..56a1ec78460731 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -97,9 +97,12 @@ class NeuralNetwork : public GradientMachine { virtual void onPassEnd(); +#ifndef PADDLE_MOBILE_INFERENCE virtual Evaluator* makeEvaluator() const; virtual void eval(Evaluator* evaluator) const; +#endif + virtual void resetState(); virtual void setOutputGrad(const std::vector& args); diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index e95f42c863b373..075e8166ef8e78 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -103,10 +103,12 @@ LayerPtr Layer::create(const LayerConfig& config) { return LayerPtr(new MultiClassCrossEntropy(config)); else if (type == "rank-cost") return LayerPtr(new RankingCost(config)); +#ifndef PADDLE_MOBILE_INFERENCE else if (type == "auc-validation") return LayerPtr(new AucValidation(config)); else if (type == "pnpair-validation") return LayerPtr(new PnpairValidation(config)); +#endif return LayerPtr(registrar_.createByType(config.type(), config)); } From bb07120b64528ba37de75c01ec2d1d71a2e9cb03 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:16:04 +0800 Subject: [PATCH 02/27] Remove dataproviders. 
--- paddle/gserver/CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index cd469875df8abe..5f39167afc34af 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -69,6 +69,14 @@ if(MOBILE_INFERENCE) evaluators/CTCErrorEvaluator.cpp evaluators/ChunkEvaluator.cpp) + # Remove dataproviders + list(REMOVE_ITEM GSERVER_SOURCES + dataproviders/DataProvider.cpp + dataproviders/MultiDataProvider.cpp + dataproviders/ProtoDataProvider.cpp + dataproviders/PyDataProvider2.cpp + dataproviders/PyDataProvider.cpp) + # Remove useless gradientmachines list(REMOVE_ITEM GSERVER_SOURCES gradientmachines/MultiNetwork.cpp From 33299ef972302c310cc2b117f4cb58377daa6bd1 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:39:36 +0800 Subject: [PATCH 03/27] Remove cuda. --- cmake/util.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/util.cmake b/cmake/util.cmake index 5ebfc0945fae4f..45a8d66120806d 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto - paddle_cuda paddle_optimizer ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} From ea4672bea0bdef1e73f18da8802cd8a467739299 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:47:51 +0800 Subject: [PATCH 04/27] Remove optimizer. --- CMakeLists.txt | 8 +++++--- cmake/util.cmake | 1 - 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ec4e6e2e868039..70b35154aaf1fa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -168,9 +168,11 @@ endif(USE_NNPACK) add_subdirectory(proto) -# "add_subdirectory(go)" should be placed after the following loine, -# because it depends on paddle/optimizer. -add_subdirectory(paddle/optimizer) +if(NOT MOBILE_INFERENCE) + # "add_subdirectory(go)" should be placed after the following loine, + # because it depends on paddle/optimizer. + add_subdirectory(paddle/optimizer) +endif() # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be # placed after this block, because they depends on it. diff --git a/cmake/util.cmake b/cmake/util.cmake index 45a8d66120806d..2ab1e8c8e4f4aa 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto - paddle_optimizer ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS} From d94dd75e3b932fb7e792d6d1b56701ac2d76bd16 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 21:19:26 +0800 Subject: [PATCH 05/27] Remove string, scripts and retain cuda. 
--- cmake/util.cmake | 1 + paddle/CMakeLists.txt | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmake/util.cmake b/cmake/util.cmake index 2ab1e8c8e4f4aa..117ab7f49cdf4a 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,6 +83,7 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto + paddle_cuda ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS} diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 3eb494ae473342..7d2becbdd77274 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,18 +1,18 @@ add_subdirectory(cuda) add_subdirectory(function) add_subdirectory(utils) -add_subdirectory(testing) add_subdirectory(math) add_subdirectory(gserver) -add_subdirectory(scripts) -add_subdirectory(string) add_subdirectory(parameter) +add_subdirectory(testing) if(MOBILE_INFERENCE) add_subdirectory(capi) else() add_subdirectory(pserver) add_subdirectory(trainer) + add_subdirectory(string) + add_subdirectory(scripts) if(WITH_C_API) add_subdirectory(capi) From 6627801a586bf93f1d872c643c121e19d5c2f1bf Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Sat, 30 Sep 2017 15:32:29 +0800 Subject: [PATCH 06/27] Follow comments. --- CMakeLists.txt | 12 ++++++++---- paddle/capi/CMakeLists.txt | 4 +--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70b35154aaf1fa..7d549b864b7a01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -87,11 +87,15 @@ if(ANDROID OR IOS) set(WITH_MKLML OFF CACHE STRING "Disable MKLML package when cross-compiling for Android and iOS" FORCE) - if(WITH_C_API) - # Compile PaddlePaddle mobile inference library - set(MOBILE_INFERENCE ON) - add_definitions(-DPADDLE_MOBILE_INFERENCE) + # Compile PaddlePaddle mobile inference library + if (NOT WITH_C_API) + set(WITH_C_API ON CACHE STRING + "Always compile the C_API when cross-compiling for Android and iOS" FORCE) endif() + set(MOBILE_INFERENCE ON) + add_definitions(-DPADDLE_MOBILE_INFERENCE) + + # TODO: Need Open the WITH_TESTING set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling for Android and iOS" FORCE) endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index a19a19d7196e2e..2c458a78c598bf 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -48,9 +48,7 @@ if(NOT IOS) add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) - -link_paddle_exe(paddle_capi_shared) - + link_paddle_exe(paddle_capi_shared) endif() # install library & headers. 
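
Net effect of patches 01-06 on the link line: the `MOBILE_INFERENCE` branch of `link_paddle_exe` in `cmake/util.cmake` ends up linking only the inference-side libraries — `paddle_pserver`, `paddle_trainer_lib`, `paddle_network`, and `paddle_optimizer` are dropped, while `paddle_cuda` is removed in patch 03 and restored in patch 05. The sketch below reconstructs that final state for orientation only; it is illustrative, not an additional patch.

```cmake
# Reconstructed MOBILE_INFERENCE branch of link_paddle_exe() after patches 01-06
# (illustrative sketch derived from the diffs above, not part of the patch series).
if(MOBILE_INFERENCE)
  target_circle_link_libraries(${TARGET_NAME}
    ARCHIVE_START
    paddle_gserver          # layers and gradient machines kept for inference
    paddle_function
    ARCHIVE_END
    paddle_math
    paddle_utils
    paddle_parameter
    paddle_proto
    paddle_cuda             # dropped in patch 03, restored in patch 05
    ${EXTERNAL_LIBS}
    ${CMAKE_THREAD_LIBS_INIT}
    ${CMAKE_DL_LIBS}
    ${RDMA_LD_FLAGS}
    ${RDMA_LIBS})
endif()
```

Patches 07 onward shift from the mobile build to the new `OpDescBind`-based backward pass in `paddle/framework`.
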
From ff7fdb7d705a34e224561cb53933b5477fef644b Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 10:28:52 -0700 Subject: [PATCH 07/27] Add `CreateBackwardOp` function --- paddle/framework/backward.cc | 56 +++++++++++++++++++++++++++++++++ paddle/framework/backward.h | 5 +++ paddle/framework/op_desc.cc | 20 ++++++++++++ paddle/framework/op_desc.h | 7 +++++ paddle/framework/op_registry.cc | 5 +++ paddle/framework/op_registry.h | 2 ++ 6 files changed, 95 insertions(+) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8a0e7..1b4c5c025e63fe 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -222,5 +222,61 @@ std::unique_ptr Backward( return BackwardRecursive(forwardOp, no_grad_names, uid); } +// ==================================== // + +static bool AllGradInSet(const std::vector& names, + const std::unordered_set& set) { + for (const std::string& name : names) { + if (!set.count(GradVarName(name))) { + return false; + } + } + return true; +} + +std::vector CreatBackwardOps( + const OpDescBind& op_desc, unordered_map& no_grad_vars) { + std::vector grad_op_descs; + // All input gradients of forwarding operator do not need to calculat. + if (AllGradInSet(op_desc_.InputNames(), kGradVarSuffix, no_grad_vars)) { + return grad_op_descs; // empty vector + } + // All output gradients of forwarding operator do not need to calculate. + const std::vector& outputs = op_desc_.OutputNames(); + if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { + for (const std::string& name : outputs) { + no_grad_vars.insert(GradVarName(name)); + } + return grad_op_descs; // empty vector + } + + grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); + + std::vector fill_zeros_ops; + for (OpDescBind& desc : grad_op_descs) { + for (const std::string& in_name : desc.InputNames()) { + if (no_grad_vars.count(in_name)) { + std::string prefix = in_name.substr( + 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); + std::string new_name = prefix + kZeroVarSuffix; + desc.Rename(in_name, new_name); + OpDescBind op_desc_bind( + {"fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}}); + fill_zeros_ops.push_back(op_desc_bind); + } + } + for (const std::string& out_name : desc.OutputName()) { + if (no_grad_vars.count(out_name)) { + desc.Rename(out_name, kEmptyVarName); + } + } + } + grad_op_descs.insert(grad_op_descs.begin(), fill_zeros_ops.begin(), + fill_zeros_ops.end()); + + // TODO (fengjiayi): RNN op + return grad_op_descs; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 1ecf69881b3126..6aeddafb41e01a 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -23,5 +23,10 @@ namespace framework { extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); + +extern void AppendBackwardOps( + BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 0c12c55dc09f6a..e98f8f11543bec 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -18,6 +18,15 @@ limitations under the License. 
*/ namespace paddle { namespace framework { +OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, + const AttributeMap &attrs) { + op_desc_.set_type(type); + inputs_ = inputs; + outputs_ = outputs; + attrs_ = attrs; +} + OpDesc *OpDescBind::Proto() { Sync(); return &op_desc_; @@ -112,6 +121,17 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } +void Rename(const std::string &old_name, const std::string &new_name) { + for (std : string &input : inputs_) { + std::replace(input.second.begin(), input.second.end(), old_name, new_name); + } + for (std::string &output : outputs_) { + std::repalce(output.second.begin(), output.second.end(), old_name, + new_name); + } + need_update_ = true; +} + void OpDescBind::Sync() { if (need_update_) { this->op_desc_.mutable_inputs()->Clear(); diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0cf7d13971675e..a32e6d03f716c6 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -26,6 +26,11 @@ class BlockDescBind; class OpDescBind { public: + OpDescBind() {} + + OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs); + OpDesc *Proto(); std::string Type() const { return op_desc_.type(); } @@ -67,6 +72,8 @@ class OpDescBind { int GetBlockAttr(const std::string &name) const; + void Rename(const std::string &old_name, const std::string &new_name); + // Only be used in C++ const std::unordered_map &GetAttrMap() const; diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index b0e85dd49f97da..fe3228ce5bf7ac 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,5 +57,10 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } +static std::vector CreateGradOpDescs(const OpDescBind& op_desc) { + auto& info = OpInfoMap::Instance().Get(op_desc.Type()); + return info.grad_op_maker_(op_desc); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d27561c3..c80b6e9630bf55 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -68,6 +68,8 @@ class OpRegistry { static std::unique_ptr CreateOp(const OpDesc& op_desc); static std::unique_ptr CreateGradOp(const OperatorBase& op); + + static std::vector CreateGradOpDescs(const OpDescBind& op_desc); }; class Registrar { From 9935fdd3dd92cf9930f88b070090925d2909ed1a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 14:14:42 -0700 Subject: [PATCH 08/27] Update --- paddle/framework/backward.cc | 57 ++++++++++++++++++++++++++++++++--- paddle/framework/block_desc.h | 4 +++ paddle/framework/op_desc.cc | 16 ++++++++++ paddle/framework/op_desc.h | 4 +++ 4 files changed, 77 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1b4c5c025e63fe..0f65478ef81d7e 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -235,14 +235,17 @@ static bool AllGradInSet(const std::vector& names, } std::vector CreatBackwardOps( - const OpDescBind& op_desc, unordered_map& no_grad_vars) { + const std::unique_ptr& op_desc_ptr, + unordered_map& no_grad_vars) { + const OpDescBind& op_desc = *op_desc_ptr; std::vector grad_op_descs; // All input gradients of forwarding operator do not need to calculat. 
- if (AllGradInSet(op_desc_.InputNames(), kGradVarSuffix, no_grad_vars)) { + if (AllGradInSet(op_desc_.InputArgumentNames(), kGradVarSuffix, + no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc_.OutputNames(); + const std::vector& outputs = op_desc_.OutputArugumentNames(); if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); @@ -254,7 +257,7 @@ std::vector CreatBackwardOps( std::vector fill_zeros_ops; for (OpDescBind& desc : grad_op_descs) { - for (const std::string& in_name : desc.InputNames()) { + for (const std::string& in_name : desc.InputArgumentNames()) { if (no_grad_vars.count(in_name)) { std::string prefix = in_name.substr( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); @@ -278,5 +281,51 @@ std::vector CreatBackwardOps( return grad_op_descs; } +void AppendBackwardOps(BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars) { + std::unordered_map> dup_out_ops; + size_t grad_desc_idx = 0; + std::deque> op_descs = block_desc.ops_; + std::vector> grad_op_descs; + for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { + std::vector op_grads = CreatBackwardOps(*it, no_grad_vars); + for (const OpDescBind& desc : op_grads) { + for (const std::string& out_name : desc.OutputArugumentNames()) { + dup_out_ops[out_name].emplace_back(grad_desc_idx); + } + ++grad_desc_idx; + } + grad_op_descs.insert(grad_op_descs.end(), op_grads.begin(), op_grads.end()); + } + // Check whether some variables are written more than once + std::list> pending_sum_ops; + for (const auto& dup : dup_out_ops) { + const std::string& out_name = dup.first; + const std::vector dup_op = dup.second; + if (out_name != kEmptyVarName && dup_op.size() > 1) { + std::vector sum_op_inputs; + for (size_t i = 0; i < dup_op.size(); ++i) { + std::string new_name = out_name + "@RENAME@" + std::to_string(i); + grad_op_descs[dup_op[i]].Rename(out_name, new_name); + sum_op_inputs.emplace_back(new_name); + } + pending_sum_ops.push_back( + {dup_op.back(), + OpDescBind( + {"sum", {{"X", {sum_op_inputs}}}, {{"Out", {out_name}}}, {}})}); + } + } + pending_sum_ops.sort( + [](const std::pair& a, + const std::pair& b) { return a.first > b.first; }); + for (auto& p : pending_sum_ops) { + grad_op_descs.insert(grad_op_descs.begin() + p.first + 1, + std::move(p.second)); + } + // Append grad_op_descs to BlockDescBind::ops_ + for () { + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 59513ede33ebb4..a171dfef30ad47 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,6 +32,10 @@ class ProgramDescBind; class BlockDescBind { public: + friend void AppendBackwardOps( + BlockDescBind &block_desc, + const std::unordered_set &no_grad_vars); + BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index f2e0c14fbdf0c0..e6c0cdacd96e11 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -49,6 +49,14 @@ std::vector OpDescBind::InputNames() const { return retv; } +std::vector InputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->inputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void 
OpDescBind::SetInput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; @@ -72,6 +80,14 @@ std::vector OpDescBind::OutputNames() const { return retv; } +std::vector OutputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->outputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void OpDescBind::SetOutput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 228065481246d0..e30c58632ea077 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -42,6 +42,8 @@ class OpDescBind { std::vector InputNames() const; + std::vector InputArgumentNames() const; + void SetInput(const std::string ¶m_name, const std::vector &args); @@ -49,6 +51,8 @@ class OpDescBind { std::vector OutputNames() const; + std::vector OutputArgumentNames() const; + void SetOutput(const std::string ¶m_name, const std::vector &args); From e47770bd27a20b2fa9bf2754d16b0e71008185e5 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 15:30:21 -0700 Subject: [PATCH 09/27] Update --- paddle/framework/backward.cc | 85 +++++++++++++++++---------------- paddle/framework/backward.h | 2 +- paddle/framework/op_desc.cc | 13 ++--- paddle/framework/op_registry.cc | 3 +- paddle/framework/op_registry.h | 3 +- 5 files changed, 57 insertions(+), 49 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0f65478ef81d7e..b4eb89e2d7b728 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -234,18 +234,17 @@ static bool AllGradInSet(const std::vector& names, return true; } -std::vector CreatBackwardOps( - const std::unique_ptr& op_desc_ptr, - unordered_map& no_grad_vars) { - const OpDescBind& op_desc = *op_desc_ptr; - std::vector grad_op_descs; +std::vector> MakeGradOpDescs( + const std::unique_ptr& op_desc, + unordered_set& no_grad_vars) { + std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - if (AllGradInSet(op_desc_.InputArgumentNames(), kGradVarSuffix, + if (AllGradInSet(op_desc->InputArgumentNames(), kGradVarSuffix, no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. 
- const std::vector& outputs = op_desc_.OutputArugumentNames(); + const std::vector& outputs = op_desc->OutputArugumentNames(); if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); @@ -255,50 +254,54 @@ std::vector CreatBackwardOps( grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); - std::vector fill_zeros_ops; - for (OpDescBind& desc : grad_op_descs) { - for (const std::string& in_name : desc.InputArgumentNames()) { + std::list> pending_fill_zeros_ops; + for (auto& desc : grad_op_descs) { + for (const std::string& in_name : desc->InputArgumentNames()) { if (no_grad_vars.count(in_name)) { std::string prefix = in_name.substr( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; - desc.Rename(in_name, new_name); - OpDescBind op_desc_bind( - {"fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}}); - fill_zeros_ops.push_back(op_desc_bind); + desc->Rename(in_name, new_name); + OpDescBind* fill_zeros_op = new OpDescBind( + "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}); + pending_fill_zeros_ops.push_back({fill_zeros_op}); } } - for (const std::string& out_name : desc.OutputName()) { + for (const std::string& out_name : desc->OutputArgumentName()) { if (no_grad_vars.count(out_name)) { - desc.Rename(out_name, kEmptyVarName); + desc->Rename(out_name, kEmptyVarName); } } } - grad_op_descs.insert(grad_op_descs.begin(), fill_zeros_ops.begin(), - fill_zeros_ops.end()); + grad_op_descs.insert(std::begin(grad_op_descs), + std::begin(pending_fill_zeros_ops), + std::end(pending_fill_zeros_ops)); // TODO (fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOps(BlockDescBind& block_desc, - const std::unordered_set& no_grad_vars) { +void AppendBackwardOpDescs( + BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars) { std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque> op_descs = block_desc.ops_; - std::vector> grad_op_descs; - for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { - std::vector op_grads = CreatBackwardOps(*it, no_grad_vars); - for (const OpDescBind& desc : op_grads) { - for (const std::string& out_name : desc.OutputArugumentNames()) { + std::deque> block_op_descs = block_desc.ops_; + std::vector> backward_descs; + for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { + std::vector> op_grads = + MakeGradOpDescs(*it, no_grad_vars); + for (const auto& desc : op_grads) { + for (const std::string& out_name : desc->OutputArugumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); } ++grad_desc_idx; } - grad_op_descs.insert(grad_op_descs.end(), op_grads.begin(), op_grads.end()); + backward_descs.insert(backward_descs.end(), op_grads.begin(), + op_grads.end()); } // Check whether some variables are written more than once - std::list> pending_sum_ops; + std::list>> pending_sum_ops; for (const auto& dup : dup_out_ops) { const std::string& out_name = dup.first; const std::vector dup_op = dup.second; @@ -306,25 +309,27 @@ void AppendBackwardOps(BlockDescBind& block_desc, std::vector sum_op_inputs; for (size_t i = 0; i < dup_op.size(); ++i) { std::string new_name = out_name + "@RENAME@" + std::to_string(i); - grad_op_descs[dup_op[i]].Rename(out_name, new_name); + backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - pending_sum_ops.push_back( - {dup_op.back(), - OpDescBind( - {"sum", {{"X", 
{sum_op_inputs}}}, {{"Out", {out_name}}}, {}})}); + OpDescBind* sum_op = new OpDescBind("sum", {{"X", sum_op_inputs}}, + {{"Out", {out_name}}}, {}); + pending_sum_ops.push_back({dup_op.back(), {sum_op}}); } } pending_sum_ops.sort( - [](const std::pair& a, - const std::pair& b) { return a.first > b.first; }); + [](const std::pair>& a, + const std::pair>& b) { + return a.first > b.first; + }); for (auto& p : pending_sum_ops) { - grad_op_descs.insert(grad_op_descs.begin() + p.first + 1, - std::move(p.second)); - } - // Append grad_op_descs to BlockDescBind::ops_ - for () { + backward_descs.insert(backward_descs.begin() + p.first + 1, + std::move(p.second)); } + // Append backward_descs to BlockDescBind::ops_ + block_op_descs.insert(std::end(block_op_descs), std::begin(backward_descs), + std::end(backward_descs)); + return; } } // namespace framework diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 6aeddafb41e01a..fb496c34c7dfb7 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -24,7 +24,7 @@ extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); -extern void AppendBackwardOps( +extern void AppendBackwardOpDescs( BlockDescBind& block_desc, const std::unordered_set& no_grad_vars); diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index e6c0cdacd96e11..2c6aec717bd29c 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -49,7 +49,7 @@ std::vector OpDescBind::InputNames() const { return retv; } -std::vector InputArgumentNames() const { +std::vector OpDescBind::InputArgumentNames() const { std::vector retv; for (auto &ipt : this->inputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -80,7 +80,7 @@ std::vector OpDescBind::OutputNames() const { return retv; } -std::vector OutputArgumentNames() const { +std::vector OpDescBind::OutputArgumentNames() const { std::vector retv; for (auto &ipt : this->outputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -137,12 +137,13 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } -void Rename(const std::string &old_name, const std::string &new_name) { - for (std : string &input : inputs_) { +void OpDescBind::Rename(const std::string &old_name, + const std::string &new_name) { + for (auto &input : inputs_) { std::replace(input.second.begin(), input.second.end(), old_name, new_name); } - for (std::string &output : outputs_) { - std::repalce(output.second.begin(), output.second.end(), old_name, + for (auto &output : outputs_) { + std::replace(output.second.begin(), output.second.end(), old_name, new_name); } need_update_ = true; diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index fe3228ce5bf7ac..d8851a8b42f26b 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,7 +57,8 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } -static std::vector CreateGradOpDescs(const OpDescBind& op_desc) { +static std::vector> OpRegistry::CreateGradOpDescs( + const OpDescBind& op_desc) { auto& info = OpInfoMap::Instance().Get(op_desc.Type()); return info.grad_op_maker_(op_desc); } diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index c80b6e9630bf55..e334cd592a262c 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -69,7 +69,8 @@ class OpRegistry { static std::unique_ptr 
CreateGradOp(const OperatorBase& op); - static std::vector CreateGradOpDescs(const OpDescBind& op_desc); + static std::vector> CreateGradOpDescs( + const OpDescBind& op_desc); }; class Registrar { From 7e4648338adbaa6c36f43706771901fc9158447a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 16:13:35 -0700 Subject: [PATCH 10/27] Fix bug --- paddle/framework/op_registry.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index d8851a8b42f26b..b28eb64af5fbe8 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,7 +57,7 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } -static std::vector> OpRegistry::CreateGradOpDescs( +std::vector> OpRegistry::CreateGradOpDescs( const OpDescBind& op_desc) { auto& info = OpInfoMap::Instance().Get(op_desc.Type()); return info.grad_op_maker_(op_desc); From 37b0bb15973632fca96fef31c8a5b30a78a80042 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 4 Oct 2017 11:25:17 -0700 Subject: [PATCH 11/27] Fix compile errors --- paddle/framework/backward.cc | 54 ++++++++++++++++++----------------- paddle/framework/block_desc.h | 5 ++-- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 4c76326e7cecf3..a84262e0075aa4 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -14,9 +14,11 @@ #include "paddle/framework/backward.h" +#include #include #include +#include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -254,23 +256,22 @@ static bool AllGradInSet(const std::vector& names, std::vector> MakeGradOpDescs( const std::unique_ptr& op_desc, - unordered_set& no_grad_vars) { + std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - if (AllGradInSet(op_desc->InputArgumentNames(), kGradVarSuffix, - no_grad_vars)) { + if (AllGradInSet(op_desc->InputArgumentNames(), no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. 
- const std::vector& outputs = op_desc->OutputArugumentNames(); - if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { + const std::vector& outputs = op_desc->OutputArgumentNames(); + if (AllGradInSet(outputs, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); } return grad_op_descs; // empty vector } - grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); + grad_op_descs = OpRegistry::CreateGradOpDescs(*op_desc); std::list> pending_fill_zeros_ops; for (auto& desc : grad_op_descs) { @@ -280,43 +281,43 @@ std::vector> MakeGradOpDescs( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; desc->Rename(in_name, new_name); - OpDescBind* fill_zeros_op = new OpDescBind( - "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}); - pending_fill_zeros_ops.push_back({fill_zeros_op}); + std::unique_ptr fill_zeros_op(new OpDescBind( + "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {})); + pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); } } - for (const std::string& out_name : desc->OutputArgumentName()) { + for (const std::string& out_name : desc->OutputArgumentNames()) { if (no_grad_vars.count(out_name)) { desc->Rename(out_name, kEmptyVarName); } } } - grad_op_descs.insert(std::begin(grad_op_descs), - std::begin(pending_fill_zeros_ops), - std::end(pending_fill_zeros_ops)); + for (auto& p : pending_fill_zeros_ops) { + grad_op_descs.push_back(std::move(p)); + } - // TODO (fengjiayi): RNN op + // TODO(fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOpDescs( - BlockDescBind& block_desc, - const std::unordered_set& no_grad_vars) { +void AppendBackwardOpDescs(BlockDescBind& block_desc, + std::unordered_set& no_grad_vars) { std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque> block_op_descs = block_desc.ops_; + std::deque>& block_op_descs = block_desc.ops_; std::vector> backward_descs; for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { std::vector> op_grads = MakeGradOpDescs(*it, no_grad_vars); for (const auto& desc : op_grads) { - for (const std::string& out_name : desc->OutputArugumentNames()) { + for (const std::string& out_name : desc->OutputArgumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); } ++grad_desc_idx; } - backward_descs.insert(backward_descs.end(), op_grads.begin(), - op_grads.end()); + std::transform( + op_grads.begin(), op_grads.end(), std::back_inserter(backward_descs), + [](std::unique_ptr& ptr) { return std::move(ptr); }); } // Check whether some variables are written more than once std::list>> pending_sum_ops; @@ -330,9 +331,9 @@ void AppendBackwardOpDescs( backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - OpDescBind* sum_op = new OpDescBind("sum", {{"X", sum_op_inputs}}, - {{"Out", {out_name}}}, {}); - pending_sum_ops.push_back({dup_op.back(), {sum_op}}); + std::unique_ptr sum_op(new OpDescBind( + "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); + pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); } } pending_sum_ops.sort( @@ -345,8 +346,9 @@ void AppendBackwardOpDescs( std::move(p.second)); } // Append backward_descs to BlockDescBind::ops_ - block_op_descs.insert(std::end(block_op_descs), std::begin(backward_descs), - std::end(backward_descs)); + for (std::unique_ptr& ptr : backward_descs) { + block_op_descs.push_back(std::move(ptr)); + } return; } diff --git a/paddle/framework/block_desc.h 
b/paddle/framework/block_desc.h index a171dfef30ad47..fd95ef19012754 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,9 +32,8 @@ class ProgramDescBind; class BlockDescBind { public: - friend void AppendBackwardOps( - BlockDescBind &block_desc, - const std::unordered_set &no_grad_vars); + friend void AppendBackwardOpDescs( + BlockDescBind &block_desc, std::unordered_set &no_grad_vars); BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} From a270dbb778091d983010298b7972e05bcd6fe22e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 4 Oct 2017 15:48:41 -0700 Subject: [PATCH 12/27] Add support for rnn_op --- paddle/framework/backward.cc | 53 +++++++++++++++++++++++++++-------- paddle/framework/block_desc.h | 9 ++++-- 2 files changed, 48 insertions(+), 14 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c4ede7d2fba380..d9a42be5a2d880 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -20,6 +20,7 @@ #include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/program_desc.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -254,7 +255,7 @@ static bool AllGradInSet(const std::vector& names, return true; } -std::vector> MakeGradOpDescs( +std::vector> MakeOpGrad( const std::unique_ptr& op_desc, std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; @@ -295,20 +296,35 @@ std::vector> MakeGradOpDescs( for (auto& p : pending_fill_zeros_ops) { grad_op_descs.push_back(std::move(p)); } - - // TODO(fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOpDescs(BlockDescBind& block_desc, - std::unordered_set& no_grad_vars) { +std::vector> MakeBlockBackward( + ProgramDescBind& program_desc, int block_idx, + std::unordered_set& no_grad_vars) { + BlockDescBind* cur_block = program_desc.Block(block_idx); + std::deque>& op_descs = cur_block->ops_; std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque>& block_op_descs = block_desc.ops_; std::vector> backward_descs; - for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { + for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { std::vector> op_grads = - MakeGradOpDescs(*it, no_grad_vars); + MakeOpGrad(*it, no_grad_vars); + + if ((*it)->Type() == "recurrent") { + PADDLE_ENFORCE_EQ( + op_grads.size(), size_t(1), + "rnn_op's gradient process should contain only one op."); + int step_block_idx = (*it)->GetBlockAttr("stop_block"); + auto backward_block_op_descs = + MakeBlockBackward(program_desc, step_block_idx, no_grad_vars); + BlockDescBind* backward_block = program_desc.AppendBlock(*cur_block); + for (auto& ptr : backward_block_op_descs) { + backward_block->ops_.push_back(std::move(ptr)); + } + op_grads[0]->SetBlockAttr("step_block", *backward_block); + } + for (const auto& desc : op_grads) { for (const std::string& out_name : desc->OutputArgumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); @@ -345,11 +361,24 @@ void AppendBackwardOpDescs(BlockDescBind& block_desc, backward_descs.insert(backward_descs.begin() + p.first + 1, std::move(p.second)); } - // Append backward_descs to BlockDescBind::ops_ - for (std::unique_ptr& ptr : backward_descs) { - block_op_descs.push_back(std::move(ptr)); + return backward_descs; +} + +void AppendBackward(ProgramDescBind& program_desc, + const std::unordered_set& no_grad_vars) { + std::unordered_set 
no_grad_var_names; + no_grad_var_names.reserve(no_grad_vars.size() + 1); + no_grad_var_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); + for (auto& name : no_grad_vars) { + no_grad_var_names.insert(GradVarName(name)); + } + const int root_block_idx = 0; + auto backward_op_descs = + MakeBlockBackward(program_desc, root_block_idx, no_grad_var_names); + auto& forw_op_descs = program_desc.Block(root_block_idx)->ops_; + for (auto& ptr : backward_op_descs) { + forw_op_descs.push_back(std::move(ptr)); } - return; } } // namespace framework diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index fd95ef19012754..aad1c3fef82031 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,8 +32,13 @@ class ProgramDescBind; class BlockDescBind { public: - friend void AppendBackwardOpDescs( - BlockDescBind &block_desc, std::unordered_set &no_grad_vars); + friend std::vector> MakeBlockBackward( + ProgramDescBind &program_desc, int block_idx, + std::unordered_set &no_grad_vars); + + friend void AppendBackward( + ProgramDescBind &program_desc, + const std::unordered_set &no_grad_vars); BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} From 4b07686aa89575442176ce056ef3551a2b31580e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 5 Oct 2017 14:42:17 -0700 Subject: [PATCH 13/27] Add unit tests --- paddle/framework/backward.cc | 5 +- paddle/framework/backward_test.cc | 206 ++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index efcdd1bc78d17c..c970e01dd19d80 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -289,7 +289,7 @@ std::vector> MakeOpGrad( std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. 
- const std::vector& inputs = op_desc->InArgumentNames(); + const std::vector& inputs = op_desc->InputArgumentNames(); if (AllGradInSet(inputs, no_grad_vars)) { return grad_op_descs; // empty vector } @@ -323,8 +323,9 @@ std::vector> MakeOpGrad( } } } + for (auto& p : pending_fill_zeros_ops) { - grad_op_descs.push_back(std::move(p)); + grad_op_descs.insert(grad_op_descs.begin(), std::move(p)); } return grad_op_descs; } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 9ea91358f47da4..30225a4a99d993 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -155,6 +155,18 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class MultInOutOpMaker : public OpProtoAndCheckerMaker { + public: + MultInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "x"); + AddInput("H", "h"); + AddOutput("Y", "y"); + AddOutput("Z", "z"); + AddComment(""); + } +}; + } // namespace framework } // namespace paddle @@ -172,6 +184,7 @@ REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); +REGISTER_OP(mult_in_out, f::NOP, f::MultInOutOpMaker, mult_in_out_grad, f::NOP); TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( @@ -487,4 +500,197 @@ TEST(Backward, simple_mult_op) { std::vector({f::GradVarName("out2")})); EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), std::vector({f::GradVarName("b3")})); +} + +TEST(Backward, intermedia_var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"x2"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out2"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + f::OpDescBind *op4 = block->AppendOp(); + op4->SetType("mul"); + op4->SetInput("X", {"out1"}); + op4->SetInput("Y", {"out3"}); + op4->SetOutput("Out", {"out4"}); + + AppendBackward(program, {"out3"}); + + ASSERT_EQ(block->AllOps().size(), 6UL); + f::OpDescBind *grad_op1 = block->AllOps()[5]; + EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + EXPECT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"out3"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out4"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out4")})); + 
EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::kEmptyVarName})); +} + +TEST(Backward, var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("mult_in_out"); + op1->SetInput("X", {"x1"}); + op1->SetInput("H", {"h1"}); + op1->SetOutput("Y", {"y1"}); + op1->SetOutput("Z", {"z1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mult_in_out"); + op2->SetInput("X", {"y1"}); + op2->SetInput("H", {"z1"}); + op2->SetOutput("Y", {"y2"}); + op2->SetOutput("Z", {"z2"}); + + AppendBackward(program, {"z1"}); + + ASSERT_EQ(block->AllOps().size(), 5UL); + f::OpDescBind *grad_op2 = block->AllOps()[2]; + ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op2->InputNames().size(), 6UL); + ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op2->Input("X"), std::vector({"y1"})); + EXPECT_EQ(grad_op2->Input("H"), std::vector({"z1"})); + EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op2->Input("Z"), std::vector({"z2"})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Z")), + std::vector({f::GradVarName("z2")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), + std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), + std::vector({f::kEmptyVarName})); + + f::OpDescBind *fill_zero_op = block->AllOps()[3]; + ASSERT_EQ(fill_zero_op->Type(), "fill_zeros_like"); + ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); + ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); + EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); + EXPECT_EQ(fill_zero_op->Output("Y"), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + + f::OpDescBind *grad_op1 = block->AllOps()[4]; + ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 6UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input("X"), std::vector({"x1"})); + EXPECT_EQ(grad_op1->Input("H"), std::vector({"h1"})); + EXPECT_EQ(grad_op1->Input("Y"), std::vector({"y1"})); + EXPECT_EQ(grad_op1->Input("Z"), std::vector({"z1"})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Y")), + std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Z")), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), + std::vector({f::GradVarName("h1")})); +} + +TEST(Backward, shared_var) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"out1"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out1"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + 
AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 7UL); + f::OpDescBind *grad_op3 = block->AllOps()[3]; + ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op3->InputNames().size(), 1UL); + ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out3")})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@0"})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b3")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + ASSERT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out2"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out2")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + + f::OpDescBind *sum_op = block->AllOps()[5]; + ASSERT_EQ(sum_op->Type(), "sum"); + ASSERT_EQ(sum_op->InputNames().size(), 1UL); + ASSERT_EQ(sum_op->OutputNames().size(), 1UL); + EXPECT_EQ(sum_op->Input("X"), + std::vector({f::GradVarName("out1") + "@RENAME@0", + f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(sum_op->Output("Out"), + std::vector({f::GradVarName("out1")})); + + f::OpDescBind *grad_op1 = block->AllOps()[6]; + ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); } \ No newline at end of file From 154a6ed29c13eeab9c4f785cf5ca1520ba8ca999 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 5 Oct 2017 17:52:10 -0700 Subject: [PATCH 14/27] Implementing tanhshrink operator --- paddle/operators/activation_op.cc | 14 +++++++++++++ paddle/operators/activation_op.h | 21 ++++++++++++++++++- .../v2/framework/tests/test_activation_op.py | 15 +++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 5f2ecc2673a5bf..66e9d2c40138c2 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -97,6 +97,17 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + TanhShrinkOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of TanhShrink operator"); + AddOutput("Y", "Output of TanhShrink operator"); + AddComment("TanhShrink activation operator, tanhshrink(x) = x - tanh(x)"); + } +}; + class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { public: SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -235,6 +246,9 @@ REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad, REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad, ops::ActivationOpGrad); 
+REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker, + tanh_shrink_grad, ops::ActivationOpGrad); + REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index dae66cc77d9103..245060174224c5 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -146,6 +146,24 @@ struct TanhGradFunctor : public BaseActivationFunctor { } }; +// tanhshrink(x) = x - tanh(x) +// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) +template +struct TanhShrinkFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x - x.tanh(); + } +}; + +template +struct TanhShrinkGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = dy * (x.tanh() * x.tanh()); + } +}; + // sqrt(x) = x^(1/2) template struct SqrtFunctor : public BaseActivationFunctor { @@ -407,4 +425,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ - __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor) + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ + __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index f232996a55da86..701e1a1aeec274 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -48,6 +48,21 @@ def test_check_grad(self): self.check_grad(['X'], 'Y', max_relative_error=0.007) +class TestTanhShrink(OpTest): + def setUp(self): + self.op_type = "tanh_shrink" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32") + } + self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + class TestSqrt(OpTest): def setUp(self): self.op_type = "sqrt" From 6dcbeb6125fbb0e34ab08a7ebbcc171da01514d8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 6 Oct 2017 17:29:43 -0700 Subject: [PATCH 15/27] Update if_else_op.md according to https://github.com/PaddlePaddle/Paddle/issues/4313 --- doc/design/if_else_op.md | 76 +++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/doc/design/if_else_op.md b/doc/design/if_else_op.md index 954a19c0733358..26d140f06db4ec 100644 --- a/doc/design/if_else_op.md +++ b/doc/design/if_else_op.md @@ -1,41 +1,51 @@ -IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output from false_bloack(). 
+# The `IfElse` Operator -```python -import paddle as pd +PaddlePaddle's `IfElse` operator differs from TensorFlow's: -x = var() -y = var() -cond = var() -default_value = var() -b = pd.create_ifelseop(inputs=[x], output_num=1) -with b.true_block(): - x = b.inputs(0) - z = operator.add(x, y) - b.set_output(0, operator.softmax(z)) - -with b.false_block(): - x = b.inputs(0) - z = layer.fc(x) - b.set_output(0, operator.softmax(z)) - -out = b(cond) -``` +- the TensorFlow version takes a scalar boolean value as the condition so that the whole mini-batch goes to either the true or the false branch, whereas +- the PaddlePaddle version takes a vector of boolean value as the condition, and instances corresponding to true values go to the true branch, those corresponding to false values go to the false branch. + +## Example + +The following PaddlePaddle program shows the usage of the IfElse operator: -If only true_block is set in an IfElseOp, a special case is that we can have a default value for false as: ```python import paddle as pd -x = var() -y = var() -cond = var() -default_value = var() -b = pd.create_ifelseop(inputs=[x], output_num=1, default_value) - -with b.true_block(): - x = b.inputs(0) - z = operator.add(x, y) - b.set_output(0, operator.softmax(z)) +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() +with ie.true_block(): + d = pd.layer.add(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` -out = b(cond) +A challenge to implement the `IfElse` operator is to infer those variables to be split, or, say, to identify the variable of the mini-batch or those derived from the mini-batch. + +An equivalent C++ program is as follows: + +```c++ +namespace pd = paddle; + +int x = 10; +int y = 1; +int z = 10; +bool cond = false; +int o1, o2; +if (cond) { + int d = x + y; + o1 = z; + o2 = pd::layer::softmax(z); +} else { + int d = pd::layer::fc(z); + o1 = d; + o2 = d+1; +} ``` -where default_value is a list of vars for `cond` == False. From 20a6ae7f1f7eb67eb9f5e7b6290aa81aa4536e0a Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 20:41:37 -0400 Subject: [PATCH 16/27] Feature/tensor array add python binding (#4616) --- paddle/framework/tensor_array.h | 3 + paddle/pybind/CMakeLists.txt | 2 +- paddle/pybind/pybind.cc | 51 +++++++++ .../v2/framework/tests/test_tensor_array.py | 106 ++++++++++++++++++ 4 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/framework/tests/test_tensor_array.py diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h index 22ae6a966f90c4..94a14c2df492b1 100644 --- a/paddle/framework/tensor_array.h +++ b/paddle/framework/tensor_array.h @@ -26,6 +26,9 @@ namespace framework { * in original lod-tensor. 
*/ struct DySeqMeta { + DySeqMeta(size_t begin, size_t end, size_t ori_idx) + : begin(begin), end(end), ori_idx(ori_idx) {} + size_t begin; size_t end; // not included size_t ori_idx; diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index 18ecbd1aa34c82..97364f2db9523c 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -1,6 +1,6 @@ if(WITH_PYTHON) cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc - DEPS pybind python backward proto_desc + DEPS pybind python backward proto_desc tensor_array ${GLOB_OP_LIB}) endif(WITH_PYTHON) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 38ba450447386b..356c4986e2e182 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/tensor_array.h" #include "paddle/operators/cond_op.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle. self->CompleteAddOp(); }); + py::class_(m, "TensorArray") + .def("__init__", + [](TensorArray &instance) { new (&instance) TensorArray(); }) + .def("read", + [](TensorArray &self, size_t index) { return self.Read(index); }) + .def("write", [](TensorArray &self, size_t index, + LoDTensor &value) { self.Write(index, value); }) + .def("write_shared", + [](TensorArray &self, size_t index, const LoDTensor &value) { + self.WriteShared(index, value); + }) + .def("size", [](TensorArray &self) { return self.size(); }) + .def("pack", + [](TensorArray &self, size_t level, + const std::vector> &meta_info, + const std::vector> &lod) { + std::vector meta; + for (auto &info : meta_info) { + PADDLE_ENFORCE_EQ(info.size(), 3UL); + meta.emplace_back(info[0], info[1], info[2]); + } +#ifndef PADDLE_WITH_CUDA + return self.Pack(level, meta, lod); +#else + LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + return self.Pack(level, meta, new_lod); +#endif + }) + .def("unpack", + [](TensorArray &self, const LoDTensor &source, int level, + bool length_descend) { + auto metas = self.Unpack(source, level, length_descend); + std::vector> meta_info; + for (auto meta : metas) { + meta_info.emplace_back( + std::vector({meta.begin, meta.end, meta.ori_idx})); + } + return meta_info; + }) + .def("stack", [](TensorArray &self) { return self.Stack(); }) + .def("unstack", + [](TensorArray &self, const LoDTensor &source) { + return self.Unstack(source); + }) + .def("unstack_shared", [](TensorArray &self, const LoDTensor &source) { + return self.UnstackShared(source); + }); + // recurrent_op py::class_(m, "RecurrentOp") .def_static( diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/framework/tests/test_tensor_array.py new file mode 100644 index 00000000000000..11f8a01f9224fc --- /dev/null +++ b/python/paddle/v2/framework/tests/test_tensor_array.py @@ -0,0 +1,106 @@ +import logging +import paddle.v2.framework.core as core +import unittest +import numpy as np + + +class TestTensorArray(unittest.TestCase): + def setUp(self): + self.ta = core.TensorArray() + + self.batch_size = 10 + self.dim = 2 + + # create a LoDTensor + self.scope = core.Scope() + var = self.scope.new_var("test_tensor") + self.place = core.CPUPlace() + tensor = var.get_tensor() + tensor.set_dims([self.batch_size, self.dim]) + 
tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + tensor_array[0, 0] = 0 + tensor_array[1, 0] = 1 + tensor_array[2, 0] = 2 + tensor_array[3, 0] = 3 + tensor_array[4, 0] = 4 + tensor_array[5, 0] = 5 + tensor_array[6, 0] = 6 + tensor_array[7, 0] = 7 + tensor_array[8, 0] = 8 + tensor_array[9, 0] = 9 + + lod_py = [[0, 2, 5, 10]] + lod_tensor = core.LoDTensor(lod_py) + lod_tensor.set(tensor_array, self.place) + + self.py_seq_meta = [[5, 10, 2], [2, 5, 1], [0, 2, 0]] + + self.tensor = lod_tensor + + def test_unstack(self): + self.ta.unstack(self.tensor) + self.assertEqual(self.tensor.get_dims()[0], self.ta.size()) + + def test_read(self): + self.ta.unstack(self.tensor) + for i in range(self.batch_size): + tensor = self.ta.read(i) + + def test_write(self): + self.ta.unstack(self.tensor) + + # create a tensor with shape of [1, self.dim] + var = self.scope.new_var("hell") + tensor = var.get_tensor() + tensor.set_dims([1, self.dim]) + tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + for i in range(self.dim): + tensor_array[0, i] = i + tensor.set(tensor_array, self.place) + + self.ta.write(2, tensor) + + ta_tensor = self.ta.read(2) + ta_tensor_array = np.array(ta_tensor) + self.assertEqual(ta_tensor.get_dims(), [1, self.dim]) + self.assertTrue((tensor_array == ta_tensor_array).all()) + + def test_write_shared(self): + self.ta.unstack(self.tensor) + + # create a tensor with shape of [1, self.dim] + var = self.scope.new_var("hell") + tensor = var.get_tensor() + tensor.set_dims([1, self.dim]) + tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + for i in range(self.dim): + tensor_array[0, i] = i + tensor.set(tensor_array, self.place) + + self.ta.write_shared(2, tensor) + + ta_tensor = self.ta.read(2) + ta_tensor_array = np.array(ta_tensor) + self.assertEqual(ta_tensor.get_dims(), [1, self.dim]) + self.assertTrue((tensor_array == ta_tensor_array).all()) + + def test_unpack(self): + meta = self.ta.unpack(self.tensor, 0, True) + self.assertEqual(self.ta.size(), 5) + self.assertEqual(meta, self.py_seq_meta) + + def test_pack(self): + meta = self.ta.unpack(self.tensor, 0, True) + print "meta", meta + tensor = self.ta.pack(0, meta, self.tensor.lod()) + print np.array(self.tensor) + print np.array(tensor) + self.assertTrue((np.array(self.tensor) == np.array(tensor)).all()) + self.assertTrue(tensor.lod(), self.tensor.lod()) + + +if __name__ == '__main__': + unittest.main() From 34193844adc9d67e4a9c8bb00ff9e67ffbc4ba3c Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 21:06:44 -0400 Subject: [PATCH 17/27] add background for TensorArray (#4564) --- doc/design/tensor_array.md | 268 ++++++++++++++++++++++++++++++++----- 1 file changed, 233 insertions(+), 35 deletions(-) diff --git a/doc/design/tensor_array.md b/doc/design/tensor_array.md index a0419ec0021598..8378e97bf7cfaa 100644 --- a/doc/design/tensor_array.md +++ b/doc/design/tensor_array.md @@ -1,39 +1,250 @@ # Design for TensorArray +This design doc presents the necessity of a new C++ class `TensorArray`. +In addition to the very simple C++ implementation + +```c++ +class TensorArray { + public: + explicit TensorArray(const LoDTensor&); + explicit TensorArray(size_t size); + + private: + vector values_; +}; +``` + +We also need to expose it to PaddlePaddle's Python API, +because users would want to use it with our very flexible operators `WhileLoop`. +An example for a RNN based on dynamic operators is + +```python +input = pd.data(...) 
+num_steps = Var(12) + +TensorArray states(size=num_steps) +TensorArray step_inputs(unstack_from=input) +TensorArray step_outputs(size=num_steps) + +W = Tensor(...) +U = Tensor(...) +default_state = some_op() + +step = Var(1) + +wloop = paddle.create_whileloop(loop_vars=[step]) +with wloop.frame(): + wloop.break_if(pd.equal(step, num_steps) + pre_state = states.read(step-1, default_state) + step_input = step_inputs.read(step) + state = pd.sigmoid(pd.matmul(U, pre_state) + pd.matmul(W, step_input)) + states.write(step, state) + step_outputs.write(step, state) # output state + step.update(state+1) + +output = step_outputs.stack() +``` + +## Background +Steps are one of the core concepts of RNN. In each time step of RNN, there should be several input segments, states, and output segments; all these components act like arrays, for example, call `states[step_id]` will get the state in `step_id`th time step. + +An RNN can be implemented with the following pseudocode + +```c++ +Array states; +Array input_segments; +Array output_segments; +Parameter W, U; + +step = 1 +seq_len = 12 +while_loop { + if (step == seq_len) break; + states[step] = sigmoid(W * states[step-1] + U * input_segments[step]); + output_segments[step] = states[step] // take state as output + step++; +} +``` +According to the [RNN roadmap](https://github.com/PaddlePaddle/Paddle/issues/4561), there are several different RNNs that PaddlePaddle will eventually support. + +Currently, the basic RNN implementation supported by PaddlePaddle is the `recurrent_op` which takes tensors as input and splits them into `input_segments`. + + +Since a tensor cannot store variable-length sequences directly, PaddlePaddle implements the tensor with level of details (`LoDTensor` for short). +Segmenting the `LoDTensor` is much more complicated than splitting a tensor, that makes it necessary to refactor the `recurrent_op` with `LoDTensor` segmenting support. + +As the next step in RNN support, `dynamic_recurrent_op` should be introduced to handle inputs with variable-length sequences. + +The implementation is similar to `recurrent_op`. +The key difference is the way **the original input `LoDTensors` and outupts are split to get the `input_segments` and the `output_segments`.** + + +Though it can't be built over `recurrent_op` or `dynamic_recurrent_op` directly, +the logic behind splitting a tensor or a LoD tensor into `input_segments` remains the same. + +## Why `TensorArray` +The logic behind splitting the inputs to segments, states and outputs is similar and can be shared in a seperate module. + +The array of `states`, `input_segments` and `output_segments` would be exposed to users when writing a dynamic RNN model similar to the above pseudo codes. + +So there should be an array-like container, which can store the segments of a tensor or LoD tensor. + +**This container can store an array of tensors and provides several methods to split a tensor or a LoD tensor** . +This is where the notion of `TensorArray` comes from. + +## Introduce TensorArray to uniform all the three RNNs TensorArray as a new concept is borrowed from TensorFlow, it is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`. This concept can be used to support our new design of dynamic operations, and help to refactor some existing variant-sentence-related layers, -such as `RecurrentGradientMachine`. +such as `recurrent_op`, `RecurrentGradientMachine`. 
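As a rough NumPy analogy for the pseudocode in the Background section above (fixed-length sequences only; variable-length inputs are exactly what the LoDTensor `pack`/`unpack` methods discussed below are for), the segment/state/output arrays behave like this:

```python
import numpy as np

# Toy shapes only; real inputs are LoDTensors holding variable-length sequences.
seq_len, batch, dim = 12, 4, 8
x = np.random.rand(seq_len, batch, dim).astype("float32")
W = np.random.rand(dim, dim).astype("float32")
U = np.random.rand(dim, dim).astype("float32")

input_segments = list(x)              # "unstack": one [batch, dim] array per time step
state = np.zeros((batch, dim), "float32")
output_segments = []

for step_input in input_segments:
    state = 1.0 / (1.0 + np.exp(-(step_input.dot(W) + state.dot(U))))
    output_segments.append(state)     # take the state as this step's output

output = np.stack(output_segments)    # "stack": back to one [seq_len, batch, dim] tensor
```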
In [our design for dynamic RNN](https://github.com/PaddlePaddle/Paddle/pull/4401), `TensorArray` is used to segment inputs and store states in all time steps. By providing some methods similar to a C++ array, -the definition of some state-based dynamic models such as RNN could be more natural and highly flexible. - -## Dynamic-Related Methods -Some basic methods should be proposed as follows: - -### stack() -Pack the values in a `TensorArray` into a tensor with rank one higher than each tensor in `values`. -### unstack(axis=0) -Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. -### concat() -Return the values in the `TensorArray` as a concatenated Tensor. -### write(index, value, data_shared=true) -Write value into index of the TensorArray. -### read(index) -Read the value at location `index` in the `TensorArray`. -### size() -Return the number of values. +the definition of some state-based dynamic models such as RNN can be more natural and highly flexible. + +## Dynamic-operations on TensorArray + +`TensorArray` will be used directly when defining dynamic models, so some operators listed below should be implemented + +```python +# several helper operators for TensorArray +def tensor_array_stack(ta, tensor): + ''' + get a tensor array `ta`, return a packed `tensor`. + ''' + pass + +def tensor_array_unstack(tensor, ta): + ''' + get a `tensor`, unstack it and get a tensor array `ta`. + ''' + pass + +def tensor_array_write(ta, index, tensor, data_shared): + ''' + get a `tensor` and a scalar tensor `index`, write `tensor` into index-th + value of the tensor array `ta`. + `data_shared` is an attribute that specifies whether to copy or reference the tensors. + ''' + pass + +def tensor_array_read(ta, index, tensor): + ''' + get a tensor array `ta`, a scalar tensor `index`, read the index-th value of + `ta` and return as the `tensor`. + ''' + pass + +def tensor_array_size(ta, tensor): + ''' + get a tensor array `ta`, return the size of `ta` and return as the scalar `tensor`. + ''' + pass +``` + +It is trivial for users to use so many low-level operators, so some helper methods should be proposed in python wrapper to make `TensorArray` easier to use, +for example + +```python +class TensorArray: + def __init__(self, name): + self.name = name + self.desc = TensorArrayDesc() + + def stack(self, name=None): + ''' + Pack the values in a `TensorArray` into a tensor with rank one higher + than each tensor in `values`. + `stack` can be used to split tensor into time steps for RNN or whileloop. + + @name: str + the name of the variable to output. + ''' + tensor = NewVar(name) + tensor_array_stack(self.name, tensor) + return tensor + + def unstack(self, input): + ''' + Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. + `unstack` can be used to concatenate all the time steps for RNN or whileloop. + + @input: str + the name of input tensor + ''' + tensor_array_unstack(tensor, self.name) + + def write(self, index, value, data_shared=True): + ''' + Write value into index of the TensorArray. + If `data_shared` is set to True, than the index-th value in TensorArray will + be shared with the tensor passed in. + + @index: str + name of a scalar tensor + @value: str + name of a tensor + @data_shared: bool + ''' + tensor_array_write(self.name, index, value, data_shared) + + def read(self, index, output): + ''' + Read the value at location `index` in the `TensorArray`. 
+ + @index: str + name of a scalar tensor + @output: + name of a output variable + ''' + tensor_array_read(self.name, index, output) + + + def size(self, output): + ''' + Return the number of values. + + @output: str + name of a scalar tensor + ''' + tensor_array_size(self.name, output) +``` ## LoDTensor-related Supports -The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes variant length sequences as input, -because each step of RNN could only take a tensor-represented batch of data as input, +The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes varience-length sequences as input, and output sequences too. + +Since each step of RNN can only take a tensor-represented batch of data as input, some preprocess should be taken on the inputs such as sorting the sentences by their length in descending order and cut each word and pack to new batches. -Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`. +Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`, +these two operations are similar to `stack` and `unstack` except that they operate on variable-length sequences formated as a LoD tensor rather than a tensor. + +Some definitions are like + +```python +def unpack(level): + ''' + Split LodTensor in some `level` and generate batches, if set `sort_by_length`, + will sort by length. -With these two methods, a variant-sentence-RNN can be implemented like + Returns: + - a new `TensorArray`, whose values are LodTensors and represents batches + of data. + - an int32 Tensor, which stores the map from the new batch's indices to + original LoDTensor + ''' + pass + +def pack(level, indices_map): + ''' + Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` + and `level` and `indices_map`. + ''' + pass +``` + +With these two methods, a varience-length sentence supported RNN can be implemented like ```c++ // input is the varient-length data @@ -58,16 +269,3 @@ LoDTensor rnn_output = ta.pack(ta, indice_map); ``` the code above shows that by embedding the LoDTensor-related preprocess operations into `TensorArray`, the implementation of a RNN that supports varient-length sentences is far more concise than `RecurrentGradientMachine` because the latter mixes all the codes together, hard to read and extend. - - -some details are as follows. - -### unpack(level, sort_by_length) -Split LodTensor in some `level` and generate batches, if set `sort_by_length`, will sort by length. - -Returns: - -- a new `TensorArray`, whose values are LodTensors and represents batches of data. -- an int32 Tensor, which stores the map from the new batch's indices to original LoDTensor -### pack(level, indices_map) -Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` and `level` and `indices_map`. 
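Taken together, the binding in `pybind.cc` and the unit test above suggest the following minimal usage sketch. It is not an official API walkthrough; it assumes the `paddle.v2.framework.core` module layout used in the test and only the `unstack`/`read`/`write`/`stack`/`size` methods shown there:

```python
import numpy as np
import paddle.v2.framework.core as core

place = core.CPUPlace()

# A [4, 2] float32 batch wrapped in a LoDTensor with a single LoD level.
data = np.arange(8).reshape(4, 2).astype("float32")
lod_tensor = core.LoDTensor([[0, 2, 4]])
lod_tensor.set(data, place)

ta = core.TensorArray()
ta.unstack(lod_tensor)        # split along the first dimension
assert ta.size() == 4

step0 = ta.read(0)            # each element is itself a LoDTensor
ta.write(3, step0)            # overwrite the last step with a copy of step 0

restacked = ta.stack()        # pack the steps back into a single tensor
print(np.array(restacked))
```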
From bfe6dcb5d345b3a734c2d496a97bffa184dfbbd4 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 23:48:44 -0400 Subject: [PATCH 18/27] fix RNN and IfElse syntax in Block design (#4210) * fix block syntax * add builder * fix argument names * uniform all Var to var * fix c demo * factor set_outputs * update block * delete as_ifelse_input interface from block design * simplify set_outputs and output_num * make the c++ codes compatible with demo * fix compatible * fix syntax * update ie * update syntax --- doc/design/block.md | 96 ++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/doc/design/block.md b/doc/design/block.md index be880012203598..4d5dd4ba95a686 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -55,17 +55,23 @@ Let us consolidate the discussion by presenting some examples. The following C++ programs shows how blocks are used with the `if-else` structure: ```c++ +namespace pd = paddle; + int x = 10; -int y = 20; -int out; +int y = 1; +int z = 10; bool cond = false; +int o1, o2; if (cond) { int z = x + y; - out = softmax(z); + o1 = z; + o2 = pd::layer::softmax(z); } else { - int z = fc(x); - out = z; + int d = pd::layer::fc(z); + o1 = d; + o2 = d+1; } + ``` An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows: @@ -73,57 +79,55 @@ An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator ```python import paddle as pd -x = var(10) -y = var(20) -cond = var(false) -ie = pd.create_ifelseop(inputs=[x], output_num=1) +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() with ie.true_block(): - x = ie.inputs(true, 0) - z = operator.add(x, y) - ie.set_output(true, 0, operator.softmax(z)) + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) with ie.false_block(): - x = ie.inputs(false, 0) - z = layer.fc(x) - ie.set_output(true, 0, operator.softmax(z)) -out = b(cond) + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) ``` -In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`. +In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `x+1` and `fc(x)`. A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values. 
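The per-instance routing described above can be pictured with plain NumPy: rows of the mini-batch are selected by `cond`, each subset is processed by its branch, and the results are scattered back into the original row order. This sketches the intended semantics only, not how the operator is implemented:

```python
import numpy as np

x = np.array([[10.], [20.], [30.]])   # mini-batch, shape [3, 1]
cond = (x > 15).reshape(-1)           # [False, True, True]

true_in = x[cond]                     # instances routed to the true branch
false_in = x[~cond]                   # instances routed to the false branch

true_out = true_in + 1.0              # stand-in for the true block
false_out = false_in * 2.0            # stand-in for the false block

out = np.empty_like(x)                # merge back in original row order
out[cond] = true_out
out[~cond] = false_out
print(out)                            # [[20.], [21.], [31.]]
```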
+ ### Blocks with `for` and `RNNOp` The following RNN model from the [RNN design doc](./rnn.md) ```python -x = sequence([10, 20, 30]) -m = var(0) -W = tensor() -U = tensor() - -rnn = create_rnn(inputs=[input]) -with rnn.stepnet() as net: - x = net.set_inputs(0) - h = net.add_memory(init=m) - fc_out = pd.matmul(W, x) - hidden_out = pd.matmul(U, h.pre(n=1)) - sum = pd.add_two(fc_out, hidden_out) - act = pd.sigmoid(sum) - h.update(act) # update memory with act - net.set_outputs(0, act, hidden_out) # two outputs - +x = sequence([10, 20, 30]) # shape=[None, 1] +m = var(0) # shape=[1] +W = var(0.314, param=true) # shape=[1] +U = var(0.375, param=true) # shape=[1] + +rnn = pd.rnn() +with rnn.step(): + h = rnn.memory(init = m) + hh = rnn.previous_memory(h) + a = layer.fc(W, x) + b = layer.fc(U, hh) + s = pd.add(a, b) + act = pd.sigmoid(s) + rnn.update_memory(h, act) + rnn.output(a, b) o1, o2 = rnn() -print o1, o2 ``` - has its equivalent C++ program as follows ```c++ int* x = {10, 20, 30}; -int m = 0; -int W = some_value(); -int U = some_other_value(); +int* m = {0}; +int* W = {0.314}; +int* U = {0.375}; int mem[sizeof(x) / sizeof(x[0]) + 1]; int o1[sizeof(x) / sizeof(x[0]) + 1]; @@ -131,20 +135,16 @@ int o2[sizeof(x) / sizeof(x[0]) + 1]; for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) { int x = x[i-1]; if (i == 1) mem[0] = m; - int fc_out = W * x; - int hidden_out = Y * mem[i-1]; - int sum = fc_out + hidden_out; + int a = W * x; + int b = Y * mem[i-1]; + int s = fc_out + hidden_out; int act = sigmoid(sum); mem[i] = act; o1[i] = act; o2[i] = hidden_out; } - -print_array(o1); -print_array(o2); ``` - ## Compilation and Execution Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference. @@ -210,11 +210,11 @@ a = pd.Varaible(shape=[20, 20]) b = pd.fc(a, params=["fc.w", "fc.b"]) rnn = pd.create_rnn() -with rnn.stepnet() as net: - x = net.set_inputs(a) +with rnn.stepnet() + x = a.as_step_input() # reuse fc's parameter fc_without_b = pd.get_variable("fc.w") - net.set_outputs(fc_without_b) + rnn.output(fc_without_b) out = rnn() ``` From cb1baa3be739023a0a72dc36a45c71f4fc4545f4 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 6 Oct 2017 22:54:35 -0700 Subject: [PATCH 19/27] Update program.md (#4618) * Update program.md * Update * Update --- doc/design/program.md | 104 +++++++++++++++++++++++++++---- paddle/framework/framework.proto | 3 + 2 files changed, 94 insertions(+), 13 deletions(-) diff --git a/doc/design/program.md b/doc/design/program.md index fb8f86ac07af40..bd2456787c4e33 100644 --- a/doc/design/program.md +++ b/doc/design/program.md @@ -1,8 +1,10 @@ -# Design Doc: ProgramDesc +# Design Doc: PaddlePaddle Programs -The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program. +## Compile and Execution + +A PaddlePaddle program consists of two parts -- the first generates a `ProgramDesc` protobuf message that describes the program, and the second runs this message using a C++ class `Executor`. 
-As described in [graph.md](./graph.md), the first five lines of the following PaddlePaddle program +A simple example PaddlePaddle program can be found in [graph.md](./graph.md): ```python x = layer.data("images") @@ -13,36 +15,112 @@ optimize(cost) train(cost, reader=mnist.train()) ``` -generates, or compiles, a PaddelPaddle program, which is represented by the following protobuf message: +The first five lines of the following PaddlePaddle program generates, or, compiles, the `ProgramDesc` message. The last line runs it. -```protobuf -message ProgramDesc { - repeated BlockDesc blocks = 1; +## Programs and Blocks + +The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program. + +- program: some nested blocks +- [block](./block.md): + - some local variable definitions, and + - a sequence of operators + +The concept of block comes from usual programs. For example, the following C++ program has three blocks: + +```c++ +int main() { // block 0 + int i = 0; + if (i < 10) { // block 1 + for (int j = 0; j < 10; j++) { // block 2 + } + } + return 0; } +``` + +The following PaddlePaddle program has three blocks: + +```python +import paddle as pd // block 0 + +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] +ie = pd.ifelse() +with ie.true_block(): // block 1 + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): // block 2 + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` + +## `BlockDesc` and `ProgramDesc` + +All protobuf messages are defined in `framework.proto`. + +`BlockDesc` is straight-forward -- it includes local variable definitions, `vars`, and a sequence of operators, `ops`. + +```protobuf message BlockDesc { required int32 parent = 1; repeated VarDesc vars = 2; repeated OpDesc ops = 3; } +``` + +The parent ID indicates the parent block so that operators in a block can refer to variables defined locally and also those defined in their ancestor blocks. + +All hierarchical blocks in a program are flattened and stored in an array. The block ID is the index of the block in this array. + +```protobuf +message ProgramDesc { + repeated BlockDesc blocks = 1; +} +``` + + +### Global Block +The global block is the first one in the above array. + +## Operators that Use Blocks + +In the above example, the operator `IfElseOp` has two blocks -- the true branch and the false branch. + +The definition of `OpDesc` shows that an operator could have some attributes: + +```protobuf message OpDesc { AttrDesc attrs = 1; ... } +``` + +and an attribute could be of type block, which is, in fact, a block ID as described above: +``` message AttrDesc { - required AttrType type = 1; + required string name = 1; - // index into ProgramDesc::blocks when type==BLOCK - optional int32 block = 2; + enum AttrType { + INT = 1, + STRING = 2, + ... + BLOCK = ... + } + required AttrType type = 2; + + optional int32 block = 10; // when type == BLOCK ... } ``` -When each of the first five lines runs, related Python function, e.g., `layer.fc`, calls C++ InferShape functions. This InferShape function needs to access the properties of VarDesc's accessed by the current OpDesc. These VarDesc's might not be defined in the current block, but in some ancestor blocks. This requires that we can trace the parent of a block. - -A nested block is often an attribute of an operator, most likely, an IfElseOp or a WhileOp. 
In above solution, all blocks are in `ProgramDesc::blocks`, this implicitly assigns a zero-based ID to each block -- the index of the block in `ProgramDesc::blocks`. So that `AttrDesc::block` could be an integer block ID. +## InferShape With this design, the InferShape function should take the following parameters: diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 9a3077fe6e5eb5..ac2827e54773f8 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -115,4 +115,7 @@ message BlockDesc { repeated OpDesc ops = 4; } +// Please refer to +// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md +// for more details. message ProgramDesc { repeated BlockDesc blocks = 1; } From adad8d9ed2cd722e6ac45b18596099b31fdb9929 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 9 Oct 2017 11:20:09 +0800 Subject: [PATCH 20/27] Open WITH_TESTING option. --- CMakeLists.txt | 4 -- paddle/capi/tests/CMakeLists.txt | 17 ++--- paddle/gserver/tests/CMakeLists.txt | 70 +++++++++++-------- paddle/gserver/tests/LayerGradUtil.h | 1 - paddle/gserver/tests/test_ActivationGrad.cpp | 1 - paddle/gserver/tests/test_BatchNorm.cpp | 1 - paddle/gserver/tests/test_CRFLayerGrad.cpp | 1 - paddle/gserver/tests/test_ConvTrans.cpp | 1 - paddle/gserver/tests/test_ConvUnify.cpp | 1 - .../tests/test_CrossEntropyOverBeamGrad.cpp | 1 - paddle/gserver/tests/test_KmaxSeqScore.cpp | 1 - paddle/gserver/tests/test_LayerGrad.cpp | 1 - .../gserver/tests/test_SelectiveFCLayer.cpp | 1 - .../gserver/tests/test_SeqSliceLayerGrad.cpp | 1 - 14 files changed, 48 insertions(+), 54 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7d549b864b7a01..4783095194dc9c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -94,10 +94,6 @@ if(ANDROID OR IOS) endif() set(MOBILE_INFERENCE ON) add_definitions(-DPADDLE_MOBILE_INFERENCE) - - # TODO: Need Open the WITH_TESTING - set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling - for Android and iOS" FORCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index 8208808b94f54f..bb38ace62808db 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH}) target_link_libraries(capi_test_mats paddle_capi) - -add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) -target_include_directories(capi_test_gradientMachine PUBLIC - ${PADDLE_CAPI_INC_PATH}) -target_link_libraries(capi_test_gradientMachine paddle_capi) -add_test(NAME capi_test_gradientMachine - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) + target_include_directories(capi_test_gradientMachine PUBLIC + ${PADDLE_CAPI_INC_PATH}) + target_link_libraries(capi_test_gradientMachine paddle_capi) + add_test(NAME capi_test_gradientMachine + COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +endif() diff --git a/paddle/gserver/tests/CMakeLists.txt 
b/paddle/gserver/tests/CMakeLists.txt index de9b8e63dfc429..fcee19415c13e9 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -1,15 +1,17 @@ # gserver pacakge unittests +if(NOT MOBILE_INFERENCE) ################### test_ProtoDataProvider ############ -add_unittest_without_exec(test_ProtoDataProvider - test_ProtoDataProvider.cpp) - -# test_ProtoDataProvider will mkdir as same name, -# so if WORKING_DIRECTORY is default directory, then -# mkdir will get error. -add_test(NAME test_ProtoDataProvider - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + add_unittest_without_exec(test_ProtoDataProvider + test_ProtoDataProvider.cpp) + + # test_ProtoDataProvider will mkdir as same name, + # so if WORKING_DIRECTORY is default directory, then + # mkdir will get error. + add_test(NAME test_ProtoDataProvider + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() ################# test_LayerGrad ####################### add_unittest_without_exec(test_LayerGrad @@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore add_test(NAME test_KmaxSeqScore COMMAND test_KmaxSeqScore) +if(NOT MOBILE_INFERENCE) ################## test_Evaluator ####################### -add_unittest(test_Evaluator - test_Evaluator.cpp) + add_unittest(test_Evaluator + test_Evaluator.cpp) +endif() ################ test_LinearChainCRF #################### add_simple_unittest(test_LinearChainCRF) @@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE) WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() +if(NOT MOBILE_INFERENCE) ############### test_RecurrentGradientMachine ############### -# TODO(yuyang18): There is some bug in test_RecurrentGradientMachine -# I will fix it. -add_unittest_without_exec(test_RecurrentGradientMachine - test_RecurrentGradientMachine.cpp) -add_test(NAME test_RecurrentGradientMachine - COMMAND .set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) - -add_unittest_without_exec(test_NetworkCompare - test_NetworkCompare.cpp) -if(WITH_GPU) - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) -else() - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine + # I will fix it. 
+ add_unittest_without_exec(test_RecurrentGradientMachine + test_RecurrentGradientMachine.cpp) + add_test(NAME test_RecurrentGradientMachine + COMMAND .set_python_path.sh -d + ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests + ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() + +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(test_NetworkCompare + test_NetworkCompare.cpp) + if(WITH_GPU) + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + else() + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + endif() endif() diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 88e831f78bd165..e10a27eedfa3d2 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -15,7 +15,6 @@ limitations under the License. */ #pragma once #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index de93972a588051..f4c2a07c4426da 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 659eefa31bdb1f..38bcbb880d6fc8 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index df14449291e9ec..f010066ebc6c33 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -16,7 +16,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/LinearChainCRF.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 6035a866b4eee4..5f2f9665478ad4 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -18,7 +18,6 @@ limitations under the License. 
*/ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index e7325e0cc3b719..bcc10a6197e5cf 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp index c922237d33da5d..477638426fe91f 100644 --- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp +++ b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 308abe6816428b..483e382f6d3f42 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 090bde7b203652..876a935fb0784b 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -21,7 +21,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index ab23d00a2cb607..1da935dfc9f328 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -24,7 +24,6 @@ limitations under the License. */ #include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h" #include "paddle/math/CpuSparseMatrix.h" -#include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index e1d4ae16176433..8e04ccd16a8860 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -15,7 +15,6 @@ limitations under the License. 
*/ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" From 8f2c48cf4c46b3b3d1b58fb8fec13a9c95d2327c Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 9 Oct 2017 11:32:17 +0800 Subject: [PATCH 21/27] Fix Layer.cpp --- paddle/gserver/layers/Layer.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index 075e8166ef8e78..01f2aae6cf88d4 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -15,11 +15,14 @@ limitations under the License. */ #include "paddle/utils/Util.h" #include "CostLayer.h" -#include "ValidationLayer.h" #include "paddle/math/SparseMatrix.h" #include "paddle/utils/Error.h" #include "paddle/utils/Logging.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "ValidationLayer.h" +#endif + DEFINE_bool(log_error_clipping, false, "enable log error clipping or not"); namespace paddle { From 707d144c93aa6053cd02c58bc92bf1d7306c95c3 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 9 Oct 2017 14:45:01 +0800 Subject: [PATCH 22/27] Unify Reduce functions and simplify register code --- paddle/operators/activation_op.cc | 8 +++---- paddle/operators/activation_op.cu | 10 ++++----- paddle/operators/reduce_op.cc | 34 +++++++++-------------------- paddle/operators/reduce_op.cu | 36 ++++++++----------------------- paddle/operators/reduce_op.h | 6 ++++++ 5 files changed, 33 insertions(+), 61 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 66e9d2c40138c2..2afa8a68b005f8 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -285,11 +285,9 @@ REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad, #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_CPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::CPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 93e9f1c694bacb..7b7644519d4e9c 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -15,14 +15,14 @@ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" +namespace ops = paddle::operators; + #define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_GPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_GPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::GPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 3ef443d1c7f475..87f66e1e93ece3 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -168,36 +168,22 @@ namespace ops = paddle::operators; REGISTER_OP(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, reduce_sum_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_mean, 
ops::ReduceOp, ops::ReduceMeanOpMaker, reduce_mean_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); + +#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_CPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_CPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index 595127b858ea8e..d306e1a24096d7 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -17,30 +17,12 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); +#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_GPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_GPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index ba3f3db81dc625..45043c440bc801 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -198,3 +198,9 @@ class ReduceGradKernel : public framework::OpKernel { } // namespace operators } // namespace paddle + +#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ + __macro(reduce_sum, SumFunctor, SumGradFunctor); \ + __macro(reduce_mean, MeanFunctor, MeanGradFunctor); \ + __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \ + __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); From 3f874143fe62062607f341f2559840fc23f4bbd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Mon, 9 Oct 2017 14:55:03 +0800 Subject: [PATCH 23/27] fix grad debug event (#4536) --- python/paddle/v2/event.py | 14 +++++++++++++- python/paddle/v2/trainer.py | 9 +++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index e66bf67d794905..a0ffd31c545eb1 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -10,7 +10,8 @@ * EndPass """ __all__ = [ - 'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult' + 'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult', + 'EndForwardBackward' ] @@ -73,6 +74,17 @@ def __init__(self, pass_id, batch_id): self.batch_id = batch_id +class EndForwardBackward(object): + """ + Event On One Batch ForwardBackward Complete. 
+    """
+
+    def __init__(self, pass_id, batch_id, gm):
+        self.pass_id = pass_id
+        self.batch_id = batch_id
+        self.gm = gm
+
+
 class EndIteration(WithMetric):
     """
     Event On One Batch Training Complete.
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index ca95ef13bd440a..076e7559399141 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -164,11 +164,18 @@ def train(self, reader, num_passes=1, event_handler=None, feeding=None):
                                                           pass_type)
                 self.__gradient_machine__.eval(pass_evaluator)
                 self.__gradient_machine__.eval(batch_evaluator)
+                event_handler(
+                    v2_event.EndForwardBackward(
+                        pass_id=pass_id,
+                        batch_id=batch_id,
+                        gm=self.__gradient_machine__))
                 for each_param in self.__gradient_machine__.getNonStaticParameters(
                 ):
                     self.__parameter_updater__.update(each_param)
                 cost_sum = out_args.sum()
                 cost = cost_sum / len(data_batch)
+                self.__parameter_updater__.finishBatch(cost)
+                batch_evaluator.finish()
                 event_handler(
                     v2_event.EndIteration(
                         pass_id=pass_id,
@@ -176,8 +183,6 @@ def train(self, reader, num_passes=1, event_handler=None, feeding=None):
                         cost=cost,
                         evaluator=batch_evaluator,
                         gm=self.__gradient_machine__))
-                self.__parameter_updater__.finishBatch(cost)
-                batch_evaluator.finish()
 
             self.__parameter_updater__.finishPass()
             pass_evaluator.finish()

From 597299074efb2e926954219c4afac9a6b189904d Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Mon, 9 Oct 2017 20:11:01 +0800
Subject: [PATCH 24/27] fix bug in REGISTER_OP(reduce_min)

---
 paddle/operators/reduce_op.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc
index 87f66e1e93ece3..55f294a9be6f44 100644
--- a/paddle/operators/reduce_op.cc
+++ b/paddle/operators/reduce_op.cc
@@ -175,7 +175,7 @@ REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker,
 REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad,
             ops::ReduceGradOp);
 
-REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad,
+REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad,
             ops::ReduceGradOp);
 
 #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \

From b14c122a949402169678aa1a154349b2fbd8ddc0 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Mon, 9 Oct 2017 10:28:26 -0700
Subject: [PATCH 25/27] Fix bug

---
 paddle/framework/block_desc.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index 01f50e13936060..509aa235d3ee22 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -74,6 +74,12 @@ void BlockDescBind::Sync() {
     for (auto &op_desc : ops_) {
       op_field.AddAllocated(op_desc->Proto());
     }
+    auto &var_field = *this->desc_->mutable_vars();
+    var_field.Clear();
+    var_field.Reserve(static_cast<int>(vars_.size()));
+    for (auto &var_desc : vars_) {
+      var_field.AddAllocated(var_desc.second->Proto());
+    }
     need_update_ = false;
   }
 }

From f30a1f42f0b90b17c2664d7e9a65070ee1c3a473 Mon Sep 17 00:00:00 2001
From: kavyasrinet
Date: Mon, 9 Oct 2017 10:49:21 -0700
Subject: [PATCH 26/27] Adding relu6 activation function (#4607)

---
 paddle/operators/activation_op.cc             | 16 ++++++++++
 paddle/operators/activation_op.h              | 31 +++++++++++++++++++
 .../v2/framework/tests/test_activation_op.py  | 19 +++++++-----
 3 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 2afa8a68b005f8..43081d23262108 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -201,6 +201,19 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+template <typename AttrType>
+class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  Relu6OpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of Relu6 operator");
+    AddOutput("Y", "Output of Relu6 operator");
+    AddComment("Relu6 activation operator, relu6 = min(max(0, x), 6)");
+    AddAttr<AttrType>("threshold", "The threshold value of Relu6")
+        .SetDefault(static_cast<AttrType>(6));
+  }
+};
+
 template <typename AttrType>
 class PowOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -276,6 +289,9 @@ REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker<float>,
 REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker<float>,
             soft_relu_grad, ops::ActivationOpGrad);
 
+REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker<float>, relu6_grad,
+            ops::ActivationOpGrad);
+
 REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
             ops::ActivationOpGrad);
 
diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h
index 245060174224c5..f127468125c265 100644
--- a/paddle/operators/activation_op.h
+++ b/paddle/operators/activation_op.h
@@ -280,6 +280,36 @@ struct BReluGradFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+// relu6(x) = min(max(0, x), 6)
+template <typename T>
+struct Relu6Functor : public BaseActivationFunctor<T> {
+  float threshold;
+
+  // NOTE: Explicit hides the `BaseActivationFunctor::GetAttrs`
+  // not polymorphism for speed.
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"threshold", &threshold}};
+  }
+
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) const {
+    y.device(d) = x.cwiseMax(static_cast<T>(0)).cwiseMin(threshold);
+  }
+};
+
+template <typename T>
+struct Relu6GradFunctor : public BaseActivationFunctor<T> {
+  float threshold;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"threshold", &threshold}};
+  }
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) const {
+    dx.device(d) =
+        dy * ((x > static_cast<T>(0)) * (x < threshold)).template cast<T>();
+  }
+};
+
 // softsign(x) = x / (1 + |x|)
 template <typename T>
 struct SoftsignFunctor : public BaseActivationFunctor<T> {
@@ -425,5 +455,6 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
   __macro(pow, PowFunctor, PowGradFunctor);                       \
   __macro(stanh, STanhFunctor, STanhGradFunctor);                 \
   __macro(softsign, SoftsignFunctor, SoftsignGradFunctor);        \
+  __macro(relu6, Relu6Functor, Relu6GradFunctor);                 \
   __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor);    \
   __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor)
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py
index 701e1a1aeec274..8b76decaecdcb2 100644
--- a/python/paddle/v2/framework/tests/test_activation_op.py
+++ b/python/paddle/v2/framework/tests/test_activation_op.py
@@ -137,21 +137,26 @@ def test_check_grad(self):
         self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
-class TestLeakyRelu(OpTest):
+class TestRelu6(OpTest):
     def setUp(self):
-        self.op_type = "leaky_relu"
-        alpha = 0.02
-        self.attrs = {'alpha': alpha}
-        self.inputs = {'X': np.random.uniform(-3, 3, [4, 4]).astype("float32")}
+        self.op_type = "relu6"
+        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
+        threshold = 6.0
+        # The same with TestAbs
+        x[np.abs(x) < 0.005] = 0.02
+        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
+
+        self.inputs = {'X': x}
+        self.attrs = {'threshold': threshold}
         self.outputs = {
-            'Y': np.maximum(self.inputs['X'], alpha * self.inputs['X'])
+            'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
         }
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
 class TestSoftRelu(OpTest):

From 4cb5bd90218082998f990d0977f05acef8da61e7 Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Mon, 9 Oct 2017 10:56:56 -0700
Subject: [PATCH 27/27] Implementing the Adamax optimizer operator (#4538)

* Implementing the Adamax optimizer step operator
* Adding unit tests for adamax_op
* Changing learning rate and time step to inputs from attributes
* Changing learning rate and time step to input(tensors)
* Making the Adamax operator conform to naming convention
* Removing Tensor from comments
* Rectifying the Adamax implementation
* Changing Unit Test values and adding comments
* Changing Unit Test to test multiple steps

---
 paddle/operators/adamax_op.cc                 | 139 ++++++++++++++
 paddle/operators/adamax_op.cu                 |  20 ++
 paddle/operators/adamax_op.h                  |  72 +++++++
 .../v2/framework/tests/test_adamax_op.py      | 178 ++++++++++++++++++
 4 files changed, 409 insertions(+)
 create mode 100644 paddle/operators/adamax_op.cc
 create mode 100644 paddle/operators/adamax_op.cu
 create mode 100644 paddle/operators/adamax_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_adamax_op.py

diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc
new file mode 100644
index 00000000000000..c348e0a0b2ba1a
--- /dev/null
+++ b/paddle/operators/adamax_op.cc
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/adamax_op.h"
+
+namespace paddle {
+namespace operators {
+
+class AdamaxOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Param"),
+                   "Input(Param) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Grad"),
+                   "Input(Grad) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Moment"),
+                   "Input(Moment) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
+                   "Input(InfNorm) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
+                   "Input(LearningRate) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
+                   "Input(Beta1Pow) of AdamaxOp should not be null.");
+
+    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
+                   "Output(ParamOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
+                   "Output(MomentOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
+                   "Output(InfNormOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"),
+                   "Output(Beta1PowOut) of AdamaxOp should not be null.");
+
+    auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      "Learning rate should have 1 dimension");
+    auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
+    PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1,
+                      "Beta1 power accumulator should have 1 dimension");
+    auto param_dims = ctx->GetInputDim("Param");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Grad"),
+        "Param and Grad input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Moment"),
+        "Param and Moment input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("InfNorm"),
+        "Param and InfNorm input of AdamaxOp should have same dimension");
+
+    ctx->SetOutputDim("ParamOut", param_dims);
+    ctx->SetOutputDim("MomentOut", param_dims);
+    ctx->SetOutputDim("InfNormOut", param_dims);
+    ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims);
+  }
+};
+
+class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  AdamaxOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("Param", "(Tensor) Input parameter");
+    AddInput("Grad", "(Tensor) Input gradient");
+    AddInput("LearningRate", "(Tensor) Learning rate");
+    AddInput("Moment", "(Tensor) First moment");
+    AddInput("InfNorm",
+             "(Tensor) "
+             "Input exponentially weighted infinity norm");
+    AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator");
+
+    AddOutput("ParamOut", "(Tensor) Output parameter");
+    AddOutput("MomentOut", "(Tensor) Output first moment");
+    AddOutput("InfNormOut",
+              "(Tensor) "
+              "Output exponentially weighted infinity norm");
+    AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator");
+
+    AddAttr<float>("beta1",
+                   "(float, default 0.9) "
+                   "Exponential decay rate for the "
+                   "1st moment estimates.")
+        .SetDefault(0.9f);
+    AddAttr<float>("beta2",
+                   "(float, default 0.999) "
+                   "exponential decay rate for the weighted "
+                   "infinity norm estimates.")
+        .SetDefault(0.999f);
+    AddAttr<float>("epsilon",
+                   "(float, default 1.0e-8) "
+                   "Constant for numerical stability")
+        .SetDefault(1.0e-8f);
+    AddComment(R"DOC(
+Adamax Updates Operator.
+
+This implements the Adamax optimizer from Section 7 of the Adam
+paper[1]. Adamax is a variant of the
+Adam algorithm based on the infinity norm.
+
+Adamax updates:
+
+moment_out = beta1 * moment + (1 - beta1) * grad
+inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad))
+beta1_pow_out = beta1_pow * beta1
+learning_rate_t = learning_rate/(1 - beta1_pow_out)
+param_out = param - learning_rate_t * moment_out/inf_norm_out
+
+The original paper does not have an epsilon attribute.
+However, it is added here for numerical stability
+by preventing divide by 0.
+
+References:
+  [1] Adam: A Method for Stochastic Optimization
+      (https://arxiv.org/abs/1412.6980)
+
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_WITHOUT_GRADIENT(adamax, ops::AdamaxOp, ops::AdamaxOpMaker);
+REGISTER_OP_CPU_KERNEL(adamax,
+                       ops::AdamaxOpKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/adamax_op.cu b/paddle/operators/adamax_op.cu
new file mode 100644
index 00000000000000..fee3b6fc6b6569
--- /dev/null
+++ b/paddle/operators/adamax_op.cu
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. */
+
+#define EIGEN_USE_GPU
+#include "paddle/operators/adamax_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(adamax,
+                       ops::AdamaxOpKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/adamax_op.h b/paddle/operators/adamax_op.h
new file mode 100644
index 00000000000000..9677b1bb786002
--- /dev/null
+++ b/paddle/operators/adamax_op.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename Place, typename T>
+class AdamaxOpKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
+    auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
+    auto inf_norm_out_tensor = ctx.Output<framework::Tensor>("InfNormOut");
+    auto beta1_pow_out_tensor = ctx.Output<framework::Tensor>("Beta1PowOut");
+
+    param_out_tensor->mutable_data<T>(ctx.GetPlace());
+    moment_out_tensor->mutable_data<T>(ctx.GetPlace());
+    inf_norm_out_tensor->mutable_data<T>(ctx.GetPlace());
+    beta1_pow_out_tensor->mutable_data<T>(ctx.GetPlace());
+
+    float beta1 = ctx.Attr<float>("beta1");
+    float beta2 = ctx.Attr<float>("beta2");
+    float epsilon = ctx.Attr<float>("epsilon");
+
+    auto param = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Param"));
+    auto grad = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Grad"));
+    auto moment = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Moment"));
+    auto inf_norm = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("InfNorm"));
+    auto lr = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("LearningRate"));
+    auto beta1_pow = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Beta1Pow"));
+    auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor);
+    auto moment_out = framework::EigenVector<T>::Flatten(*moment_out_tensor);
+    auto inf_norm_out =
+        framework::EigenVector<T>::Flatten(*inf_norm_out_tensor);
+    auto beta1_pow_out =
+        framework::EigenVector<T>::Flatten(*beta1_pow_out_tensor);
+    auto place = ctx.GetEigenDevice<Place>();
+
+    moment_out.device(place) = beta1 * moment + (1 - beta1) * grad;
+    inf_norm_out.device(place) =
+        grad.abs().cwiseMax((beta2 * inf_norm) + epsilon);
+    beta1_pow_out.device(place) = beta1_pow * beta1;
+    auto lr_t = lr / (1 - beta1_pow_out);
+    Eigen::DSizes<int, 1> m_dsize(moment_out_tensor->numel());
+    param_out.device(place) =
+        param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_adamax_op.py b/python/paddle/v2/framework/tests/test_adamax_op.py
new file mode 100644
index 00000000000000..af81075d6ad508
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_adamax_op.py
@@ -0,0 +1,178 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestAdamaxOp1(OpTest):
+    def setUp(self):
+        '''Test Adamax Operator with supplied attributes
+        '''
+        self.op_type = "adamax"
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.78
+        beta2 = 0.899
+        epsilon = 1e-5
+        beta1_pow = beta1**10
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+
+        param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+            self.inputs, self.attrs)
+
+        self.outputs = {
+            'ParamOut': param_out,
+            'MomentOut': moment_out,
+            'InfNormOut': inf_norm_out,
+            'Beta1PowOut': beta1_pow_out
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAdamaxOp2(OpTest):
+    '''Test Adamax Operator with default attributes
+    '''
+
+    def setUp(self):
+        self.op_type = "adamax"
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.9
+        beta2 = 0.999
+        epsilon = 1e-8
+        beta1_pow = beta1**8
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+        param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+            self.inputs, attrs)
+
+        self.outputs = {
+            'ParamOut': param_out,
+            'MomentOut': moment_out,
+            'InfNormOut': inf_norm_out,
+            'Beta1PowOut': beta1_pow_out
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAdamaxOpMultipleSteps(OpTest):
+    def setUp(self):
+        '''Test Adamax Operator with supplied attributes
+        '''
+        self.op_type = "adamax"
+        self.num_steps = 10
+
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.8
+        beta2 = 0.99
+        epsilon = 1e-5
+        beta1_pow = 1
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+
+        param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+            self.inputs, self.attrs)
+
+    def test_check_output(self):
+        for _ in range(self.num_steps):
+            param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+                self.inputs, self.attrs)
+
+            self.outputs = {
+                'ParamOut': param_out,
+                'MomentOut': moment_out,
+                'InfNormOut': inf_norm_out,
+                'Beta1PowOut': beta1_pow_out
+            }
+
+            # Verify output for this step
+            self.check_output()
+
+            # Output of this step becomes input for next step
+            self.inputs['Param'] = param_out
+            self.inputs['Moment'] = moment_out
+            self.inputs['InfNorm'] = inf_norm_out
+            self.inputs['Beta1Pow'] = beta1_pow_out
+
+            # Randomize gradient for next step
+            self.inputs['Grad'] = np.random.uniform(
+                -1, 1, (102, 105)).astype("float32")
+
+
+def adamax_step(inputs, attributes):
+    '''
+    Simulate one step of the adamax optimizer
+    :param inputs: dict of inputs
+    :param attributes: dict of attributes
+    :return tuple: tuple of output param, moment, inf_norm and
+    beta1 power accumulator
+    '''
+    param = inputs['Param']
+    grad = inputs['Grad']
+    moment = inputs['Moment']
+    inf_norm = inputs['InfNorm']
+    lr = inputs['LearningRate']
+    beta1_pow = inputs['Beta1Pow']
+
+    beta1 = attributes['beta1']
+    beta2 = attributes['beta2']
+    epsilon = attributes['epsilon']
+
+    moment_out = beta1 * moment + (1 - beta1) * grad
+    inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
+    beta1_pow_out = beta1_pow * beta1
+    lr_t = (lr / (1 - beta1_pow_out))
+    param_out = param - lr_t * np.divide(moment_out, inf_norm_out)
+
+    return param_out, moment_out, inf_norm_out, beta1_pow_out
+
+
+if __name__ == "__main__":
+    unittest.main()
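
Editor's sketch (not part of the patches above): PATCH 23 adds an EndForwardBackward event to the v2 trainer, emitted after forward/backward but before the parameter update, while EndIteration now fires after finishBatch. A minimal handler that consumes both, assuming the usual `paddle.v2` import style, might look like this; the logging details are illustrative only:

import paddle.v2 as paddle

def event_handler(event):
    # Fires right after forward/backward; parameters have not been updated yet.
    if isinstance(event, paddle.event.EndForwardBackward):
        gm = event.gm  # the GradientMachine for this batch
    # Fires after the parameter update and finishBatch (per the reordering in PATCH 23).
    if isinstance(event, paddle.event.EndIteration):
        if event.batch_id % 100 == 0:
            print("Pass %d, Batch %d, Cost %f" %
                  (event.pass_id, event.batch_id, event.cost))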
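
Editor's sketch (not part of the patches above): PATCH 26 defines relu6(x) = min(max(0, x), 6) with gradient passing only where 0 < x < threshold; the unit test checks the gradient numerically. A small NumPy check of the same forward and analytic gradient, mirroring Relu6Functor and Relu6GradFunctor, could read:

import numpy as np

def relu6(x, threshold=6.0):
    # Forward: min(max(0, x), threshold), as in Relu6Functor.
    return np.minimum(np.maximum(x, 0.0), threshold)

def relu6_grad(x, dy, threshold=6.0):
    # Gradient flows only where 0 < x < threshold, as in Relu6GradFunctor.
    return dy * ((x > 0.0) & (x < threshold)).astype(x.dtype)

x = np.random.uniform(-1.0, 8.0, (4, 10)).astype("float32")
dy = np.ones_like(x)
print(relu6(x))
print(relu6_grad(x, dy))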