
Commit

[cpp-package] Add monitor, optimizers, metrics and bug fixes (apache#6033)

* [cpp-package] Support MSVC and Python 3

* Update docs

* [cpp-package] Add monitor

* Force UTF-8 encoding in MSVC

* [cpp-package] Automatically deploy the compiled library

* [cpp-package] Add RMSProp, Adam, AdaGrad, AdaDelta

* [cpp-package] Fix initializers

* Fix lint

* [cpp-package] Update Makefile

* [cpp-package] Fix constructor order

* Add MAE, MSE, PSNR metrics

* Trigger op_map when loading symbols from file, to force ops to be initialized

* Add missing headers

* [cpp-package] Add inline to satisfy ODR

* Fix CUDA compilation on Windows

* [cpp-package] Update CMakeLists
Xin Li authored and piiswrong committed May 17, 2017
1 parent 3a61a64 commit 4544c58
Showing 21 changed files with 719 additions and 150 deletions.
15 changes: 9 additions & 6 deletions cpp-package/CMakeLists.txt
@@ -1,18 +1,21 @@
-if(USE_CPP_PACKAGE AND NOT MSVC)
+if(USE_CPP_PACKAGE)
 
 set(CPP_PACKAGE_OP_H_HEADER ${CMAKE_CURRENT_LIST_DIR}/include/mxnet-cpp/op.h)
 
+if(MSVC)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8")
+endif(MSVC)
+
 add_custom_target(
   cpp_package_op_h ALL
   BYPRODUCTS ${CPP_PACKAGE_OP_H_HEADER}
   MAIN_DEPENDENCY mxnet
-  DEPENDS mxnet ${CMAKE_CURRENT_SOURCE_DIR}/src/OpWrapperGenerator/OpWrapperGenerator.py
+  DEPENDS mxnet ${CMAKE_CURRENT_SOURCE_DIR}/scripts/OpWrapperGenerator.py
   COMMAND echo "Running: OpWrapperGenerator.py"
-  COMMAND python OpWrapperGenerator.py ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/$<TARGET_FILE:mxnet>
-  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/OpWrapperGenerator/
+  COMMAND python OpWrapperGenerator.py $<TARGET_FILE:mxnet>
+  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/scripts
 )
 
 add_subdirectory(example)
 
 endif()
4 changes: 2 additions & 2 deletions cpp-package/cpp-package.mk
@@ -18,8 +18,8 @@ cpp-package-all: $(CPP_PACKAGE_OP_H_FILE)
 cpp-package-clean:
 	rm -f $(CPP_PACKAGE_OP_H_FILE)
 
-$(CPP_PACKAGE_OP_H_FILE): lib/libmxnet.so cpp-package/src/OpWrapperGenerator/OpWrapperGenerator.py
-	(cd cpp-package/src/OpWrapperGenerator; python OpWrapperGenerator.py $(ROOTDIR)/lib/libmxnet.so)
+$(CPP_PACKAGE_OP_H_FILE): lib/libmxnet.so cpp-package/scripts/OpWrapperGenerator.py
+	(cd cpp-package/scripts; python OpWrapperGenerator.py $(ROOTDIR)/lib/libmxnet.so)
 
 cpp-package-lint:
 	(cd cpp-package; python scripts/lint.py dmlc ${LINT_LANG} include example)
11 changes: 7 additions & 4 deletions cpp-package/example/CMakeLists.txt
@@ -1,12 +1,9 @@
 if(NOT MSVC)
   set(UNITTEST_STATIC_LINK ON)
 endif()
 
 set(CPP_EXAMPLE_LIBS
-  rt
-  ${BEGIN_WHOLE_ARCHIVE} mxnet_static ${END_WHOLE_ARCHIVE}
-  dmlc
+  ${BEGIN_WHOLE_ARCHIVE} mxnet ${END_WHOLE_ARCHIVE}
   ${mxnet_LINKER_LIBS}
 )
@@ -19,6 +16,12 @@ file(GLOB_RECURSE CPP_PACKAGE_HEADERS
   "${CPP_PACKAGE_INCLUDE_DIR}/*.hpp"
 )
 
+add_custom_target(
+  cpp_package_deploy_library ALL
+  DEPENDS mxnet
+  COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:mxnet> $<TARGET_FILE_DIR:mlp>
+)
+
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
 
 add_executable(lenet lenet.cpp ${CPP_PACKAGE_HEADERS})
1 change: 1 addition & 0 deletions cpp-package/include/mxnet-cpp/MxNetCpp.h
@@ -11,6 +11,7 @@
 #include "mxnet-cpp/executor.hpp"
 #include "mxnet-cpp/symbol.hpp"
 #include "mxnet-cpp/ndarray.hpp"
+#include "mxnet-cpp/monitor.hpp"
 #include "mxnet-cpp/operator.hpp"
 #include "mxnet-cpp/optimizer.hpp"
 #include "mxnet-cpp/kvstore.hpp"
1 change: 1 addition & 0 deletions cpp-package/include/mxnet-cpp/executor.h
@@ -24,6 +24,7 @@ class Optimizer;
  * \brief Executor interface
  */
 class Executor {
+  friend class Monitor;
  public:
   Executor(const Symbol &symbol, Context context,
            const std::vector<NDArray> &arg_arrays,
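The friend declaration gives the new Monitor class direct access to the executor's argument arrays. The monitor.hpp diff itself is not rendered on this page, so the following is only a hypothetical sketch: it assumes a C++ interface modeled on the Python mx.mon.Monitor (a batch interval, a regex filter on parameter names, and install/tic/toc_print calls), none of which is confirmed by the hunks shown above.

// Hypothetical usage sketch; the Monitor signatures below are assumptions
// modeled on the Python monitor, not taken from a diff shown on this page.
#include <regex>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

void TrainOneBatch(Executor *exe) {
  Monitor mon(100, std::regex(".*weight"));  // assumed: sample weights every 100 batches
  mon.install(exe);    // assumed: hook the executor so its arrays can be inspected
  mon.tic();           // assumed: start collecting statistics for this batch
  exe->Forward(true);
  exe->Backward();
  mon.toc_print();     // assumed: flush and print the collected statistics
}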
48 changes: 19 additions & 29 deletions cpp-package/include/mxnet-cpp/initializer.h
@@ -75,33 +75,25 @@ class Initializer {
   virtual void InitDefault(NDArray* arr) {}
 };
 
-class Zero : public Initializer {
+class Constant : public Initializer {
  public:
-  Zero() {}
- protected:
-  void InitWeight(NDArray *arr) override {
-    InitZero(arr);
+  explicit Constant(float value)
+    : value(value) {}
+  void operator()(const std::string &name, NDArray *arr) override {
+    (*arr) = value;
   }
+ protected:
+  float value;
 };
 
-class One : public Initializer {
+class Zero : public Constant {
  public:
-  One() {}
- protected:
-  void InitWeight(NDArray *arr) override {
-    InitOne(arr);
-  }
+  Zero(): Constant(0.0f) {}
 };
 
-class Constant : public Initializer {
+class One : public Constant {
  public:
-  explicit Constant(float value)
-    : value(value) {}
- protected:
-  float value;
-  void InitWeight(NDArray *arr) override {
-    (*arr) = value;
-  }
+  One(): Constant(1.0f) {}
 };
 
 class Uniform : public Initializer {
@@ -110,29 +102,28 @@ class Uniform : public Initializer {
     : Uniform(-scale, scale) {}
   Uniform(float begin, float end)
     : begin(begin), end(end) {}
- protected:
-  float begin, end;
-  void InitWeight(NDArray *arr) override {
+  void operator()(const std::string &name, NDArray *arr) override {
     NDArray::SampleUniform(begin, end, arr);
   }
+ protected:
+  float begin, end;
 };
 
 class Normal : public Initializer {
  public:
   Normal(float mu, float sigma)
     : mu(mu), sigma(sigma) {}
- protected:
-  float mu, sigma;
-  void InitWeight(NDArray *arr) override {
+  void operator()(const std::string &name, NDArray *arr) override {
     NDArray::SampleGaussian(mu, sigma, arr);
   }
+ protected:
+  float mu, sigma;
 };
 
 class Bilinear : public Initializer {
  public:
   Bilinear() {}
- protected:
-  void InitWeight(NDArray *arr) override {
+  void operator()(const std::string &name, NDArray *arr) override {
     InitBilinear(arr);
   }
 };
@@ -153,8 +144,7 @@ class Xavier : public Initializer {
                  float magnitude = 3)
     : rand_type(rand_type), factor_type(factor_type), magnitude(magnitude) {}
 
- protected:
-  void InitWeight(NDArray* arr) override {
+  void operator()(const std::string &name, NDArray* arr) override {
     Shape shape(arr->GetShape());
     float hw_scale = 1.0f;
     if (shape.ndim() > 2) {
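With this refactor every initializer is invoked through operator()(name, array) instead of the old protected InitWeight hook, and Zero/One become one-line subclasses of Constant. A minimal sketch of the new call convention, using only constructors visible in the hunks above (the shape and parameter names are illustrative):

// Minimal sketch of the new initializer interface from this commit.
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  NDArray weight(Shape(64, 128), Context::cpu(), false);  // allocate eagerly
  Uniform uniform(-0.07f, 0.07f);
  uniform("fc1_weight", &weight);  // initializers are now plain callables

  NDArray bias(Shape(64), Context::cpu(), false);
  Zero zero;                       // equivalent to Constant(0.0f)
  zero("fc1_bias", &bias);

  NDArray::WaitAll();              // block until the writes have completed
  return 0;
}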
118 changes: 109 additions & 9 deletions cpp-package/include/mxnet-cpp/metric.h
@@ -36,26 +36,27 @@ class EvalMetric {
   float sum_metric = 0.0f;
   int num_inst = 0;
 
-  static bool CheckLabelShapes(NDArray labels, NDArray preds,
-                               Shape shape = Shape(0)) {
-    // TODO(zhangchen-qinyinghua)
-    // inplement this
-    return true;
+  static void CheckLabelShapes(NDArray labels, NDArray preds,
+                               bool strict = false) {
+    if (strict) {
+      CHECK_EQ(Shape(labels.GetShape()), Shape(preds.GetShape()));
+    } else {
+      CHECK_EQ(labels.Size(), preds.Size());
+    }
   }
 };
 
 class Accuracy : public EvalMetric {
  public:
   Accuracy() : EvalMetric("accuracy") {}
 
-  void Update(NDArray labels, NDArray preds) {
+  void Update(NDArray labels, NDArray preds) override {
     CHECK_EQ(labels.GetShape().size(), 1);
     mx_uint len = labels.GetShape()[0];
     std::vector<mx_float> pred_data(len);
     std::vector<mx_float> label_data(len);
     preds.ArgmaxChannel().SyncCopyToCPU(&pred_data, len);
     labels.SyncCopyToCPU(&label_data, len);
-    NDArray::WaitAll();
     for (mx_uint i = 0; i < len; ++i) {
       sum_metric += (pred_data[i] == label_data[i]) ? 1 : 0;
       num_inst += 1;
@@ -67,15 +68,14 @@ class LogLoss : public EvalMetric {
  public:
   LogLoss() : EvalMetric("logloss") {}
 
-  void Update(NDArray labels, NDArray preds) {
+  void Update(NDArray labels, NDArray preds) override {
     static const float epsilon = 1e-15;
     mx_uint len = labels.GetShape()[0];
     mx_uint m = preds.GetShape()[1];
     std::vector<mx_float> pred_data(len * m);
     std::vector<mx_float> label_data(len);
     preds.SyncCopyToCPU(&pred_data, pred_data.size());
     labels.SyncCopyToCPU(&label_data, len);
-    NDArray::WaitAll();
     for (mx_uint i = 0; i < len; ++i) {
       sum_metric +=
           -std::log(std::max(pred_data[i * m + label_data[i]], epsilon));
@@ -84,6 +84,106 @@ class LogLoss : public EvalMetric {
   }
 };
 
+class MAE : public EvalMetric {
+ public:
+  MAE() : EvalMetric("mae") {}
+
+  void Update(NDArray labels, NDArray preds) override {
+    CheckLabelShapes(labels, preds);
+
+    std::vector<mx_float> pred_data;
+    preds.SyncCopyToCPU(&pred_data);
+    std::vector<mx_float> label_data;
+    labels.SyncCopyToCPU(&label_data);
+
+    size_t len = preds.Size();
+    mx_float sum = 0;
+    for (size_t i = 0; i < len; ++i) {
+      sum += std::abs(pred_data[i] - label_data[i]);
+    }
+    sum_metric += sum / len;
+    ++num_inst;
+  }
+};
+
+class MSE : public EvalMetric {
+ public:
+  MSE() : EvalMetric("mse") {}
+
+  void Update(NDArray labels, NDArray preds) override {
+    CheckLabelShapes(labels, preds);
+
+    std::vector<mx_float> pred_data;
+    preds.SyncCopyToCPU(&pred_data);
+    std::vector<mx_float> label_data;
+    labels.SyncCopyToCPU(&label_data);
+
+    size_t len = preds.Size();
+    mx_float sum = 0;
+    for (size_t i = 0; i < len; ++i) {
+      mx_float diff = pred_data[i] - label_data[i];
+      sum += diff * diff;
+    }
+    sum_metric += sum / len;
+    ++num_inst;
+  }
+};
+
+class RMSE : public EvalMetric {
+ public:
+  RMSE() : EvalMetric("rmse") {}
+
+  void Update(NDArray labels, NDArray preds) override {
+    CheckLabelShapes(labels, preds);
+
+    std::vector<mx_float> pred_data;
+    preds.SyncCopyToCPU(&pred_data);
+    std::vector<mx_float> label_data;
+    labels.SyncCopyToCPU(&label_data);
+
+    size_t len = preds.Size();
+    mx_float sum = 0;
+    for (size_t i = 0; i < len; ++i) {
+      mx_float diff = pred_data[i] - label_data[i];
+      sum += diff * diff;
+    }
+    sum_metric += std::sqrt(sum / len);
+    ++num_inst;
+  }
+};
+
+class PSNR : public EvalMetric {
+ public:
+  PSNR() : EvalMetric("psnr") {
+  }
+
+  void Update(NDArray labels, NDArray preds) override {
+    CheckLabelShapes(labels, preds);
+
+    std::vector<mx_float> pred_data;
+    preds.SyncCopyToCPU(&pred_data);
+    std::vector<mx_float> label_data;
+    labels.SyncCopyToCPU(&label_data);
+
+    size_t len = preds.Size();
+    mx_float sum = 0;
+    for (size_t i = 0; i < len; ++i) {
+      mx_float diff = pred_data[i] - label_data[i];
+      sum += diff * diff;
+    }
+    mx_float mse = sum / len;
+    if (mse > 0) {
+      sum_metric += 10 * std::log(255.0f / mse) / log10_;
+    } else {
+      sum_metric += 99.0f;
+    }
+    ++num_inst;
+  }
+
+ private:
+  mx_float log10_ = std::log(10.0f);
+};
+
 }  // namespace cpp
 }  // namespace mxnet
 
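Each new regression metric accumulates a per-batch mean in sum_metric and counts batches in num_inst, so the running value is the average over Update calls (reported by the existing EvalMetric::Get, which sits outside the hunks above). A small worked example with MSE, assuming the headers from this commit; the data is illustrative:

// Worked example for the new MSE metric; values are illustrative only.
#include <iostream>
#include <vector>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  std::vector<mx_float> label_vals = {1.0f, 2.0f, 3.0f};
  std::vector<mx_float> pred_vals  = {1.5f, 2.0f, 2.0f};
  NDArray labels(label_vals, Shape(3), Context::cpu());
  NDArray preds(pred_vals, Shape(3), Context::cpu());

  MSE mse;
  mse.Update(labels, preds);  // squared errors 0.25, 0, 1 -> 1.25 / 3 ≈ 0.4167
  std::cout << "mse: " << mse.Get() << std::endl;
  return 0;
}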
(The remaining changed files are not rendered on this page.)
