priorbox layer for Single Shot Multibox Detection Network #867

Merged
merged 15 commits on Dec 22, 2016
146 changes: 146 additions & 0 deletions paddle/gserver/layers/PriorBox.cpp
@@ -0,0 +1,146 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "Layer.h"
#include "paddle/math/BaseMatrix.h"
#include "paddle/math/Matrix.h"

namespace paddle {
/**
* @brief A layer for generating prior box locations and variances.
* - Input: Exactly two input layers are accepted; they must be
* a data output layer and a convolution output layer.
* - Output: The prior box locations and variances of the input data.
* Reference:
* Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
* Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector
*/

class PriorBoxLayer : public Layer {
Contributor

Please add a doc comment for this class.

Contributor Author

Added.

public:
explicit PriorBoxLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
void forward(PassType passType);
void backward(const UpdateCallback& callback) {}
int numPriors_;
std::vector<int> minSize_;
std::vector<int> maxSize_;
std::vector<float> aspectRatio_;
std::vector<float> variance_;
MatrixPtr buffer_;
};

bool PriorBoxLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
auto pbConf = config_.inputs(0).priorbox_conf();
std::copy(pbConf.min_size().begin(),
pbConf.min_size().end(),
std::back_inserter(minSize_));
std::copy(pbConf.max_size().begin(),
pbConf.max_size().end(),
std::back_inserter(maxSize_));
std::copy(pbConf.aspect_ratio().begin(),
pbConf.aspect_ratio().end(),
std::back_inserter(aspectRatio_));
std::copy(pbConf.variance().begin(),
pbConf.variance().end(),
std::back_inserter(variance_));
Contributor

You could first store config_.inputs(0).priorbox_conf() in a temporary variable, so lines 38-49 don't have to repeat so much.

Contributor Author

OK, will do.
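
For context, here is a minimal before/after sketch of what this suggestion amounts to (the merged code above already applies it; the "before" form is a hypothetical earlier revision, shown only for illustration):

// Before (hypothetical earlier revision): repeat the accessor for every field.
std::copy(config_.inputs(0).priorbox_conf().min_size().begin(),
          config_.inputs(0).priorbox_conf().min_size().end(),
          std::back_inserter(minSize_));
// After: cache the sub-message in a local once and copy each repeated field.
const auto& pbConf = config_.inputs(0).priorbox_conf();
std::copy(pbConf.min_size().begin(), pbConf.min_size().end(),
          std::back_inserter(minSize_));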

// flip
int inputRatioLength = aspectRatio_.size();
for (int index = 0; index < inputRatioLength; index++)
aspectRatio_.push_back(1 / aspectRatio_[index]);
aspectRatio_.push_back(1.);
numPriors_ = aspectRatio_.size();
if (maxSize_.size() > 0) numPriors_++;
return true;
}
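// Illustrative note (not part of the original patch): after the flip loop the
// ratio list holds {r_1, 1/r_1, ..., r_k, 1/r_k, 1}, so
//   numPriors_ = 2 * k + 1 + (maxSize_.empty() ? 0 : 1).
// For example, CPU case 3 of the test below uses aspect_ratio = {2} and no
// max_size, giving numPriors_ = 3 and 3 * 8 output values per cell.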

void PriorBoxLayer::forward(PassType passType) {
Layer::forward(passType);
auto input = getInput(0);
int layerWidth = input.getFrameWidth();
int layerHeight = input.getFrameHeight();

auto image = getInput(1);
int imageWidth = image.getFrameWidth();
int imageHeight = image.getFrameHeight();

float stepW = static_cast<float>(imageWidth) / layerWidth;
float stepH = static_cast<float>(imageHeight) / layerHeight;
int dim = layerHeight * layerWidth * numPriors_ * 4;
reserveOutput(1, dim * 2);
// use a cpu buffer to compute
Matrix::resizeOrCreate(buffer_, 1, dim * 2, false, false);
auto* tmpPtr = buffer_->getData();

int idx = 0;
for (int h = 0; h < layerHeight; ++h) {
for (int w = 0; w < layerWidth; ++w) {
float centerX = (w + 0.5) * stepW;
float centerY = (h + 0.5) * stepH;
int minSize = 0;
for (size_t s = 0; s < minSize_.size(); s++) {
// first prior.
minSize = minSize_[s];
int boxWidth = minSize;
int boxHeight = minSize;
// xmin, ymin, xmax, ymax.
tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
// set the variance.
for (int t = 0; t < 4; t++) tmpPtr[idx++] = variance_[t];

if (maxSize_.size() > 0) {
CHECK_EQ(minSize_.size(), maxSize_.size());
// second prior.
for (size_t s = 0; s < maxSize_.size(); s++) {
int maxSize = maxSize_[s];
boxWidth = boxHeight = sqrt(minSize * maxSize);
tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
// set the variance.
for (int t = 0; t < 4; t++) tmpPtr[idx++] = variance_[t];
}
}
}
// rest of priors.
for (size_t r = 0; r < aspectRatio_.size(); r++) {
float ar = aspectRatio_[r];
if (fabs(ar - 1.) < 1e-6) continue;
float boxWidth = minSize * sqrt(ar);
float boxHeight = minSize / sqrt(ar);
tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
// set the variance.
for (int t = 0; t < 4; t++) tmpPtr[idx++] = variance_[t];
}
}
}
// clip the priors' coordinates so that they lie within [0, 1]
for (int d = 0; d < dim * 2; ++d)
if ((d % 8) < 4)
tmpPtr[d] = std::min(std::max(tmpPtr[d], (float)0.), (float)1.);
MatrixPtr outV = getOutputValue();
outV->copyFrom(buffer_->data_, dim * 2);
}
REGISTER_LAYER(priorbox, PriorBoxLayer);

} // namespace paddle
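
For reference, the following standalone sketch (not part of the patch) reproduces the forward() arithmetic for a single feature-map cell, using the same numbers as the first CPU case of the test added below (1x1 feature map, 300x300 image, min_size 276, max_size 330). It is illustrative only; it prints the expected 0.04 0.04 0.96 0.96 and 0 0 1 1 corners.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const float imageW = 300, imageH = 300;
  const float stepW = imageW / 1, stepH = imageH / 1;  // 1x1 feature map
  const float centerX = (0 + 0.5f) * stepW;            // 150
  const float centerY = (0 + 0.5f) * stepH;            // 150
  // forward() clips coordinates to [0, 1] at the end; we clip inline here.
  auto clip = [](float v) { return std::min(std::max(v, 0.f), 1.f); };
  // First prior: square box with side min_size = 276.
  float w = 276, h = 276;
  std::printf("%g %g %g %g\n",
              clip((centerX - w / 2) / imageW), clip((centerY - h / 2) / imageH),
              clip((centerX + w / 2) / imageW), clip((centerY + h / 2) / imageH));
  // Second prior: square box with side sqrt(min_size * max_size) ~ 301.8,
  // which overflows the image and is clipped to the full [0, 1] range.
  w = h = std::sqrt(276.f * 330.f);
  std::printf("%g %g %g %g\n",
              clip((centerX - w / 2) / imageW), clip((centerY - h / 2) / imageH),
              clip((centerX + w / 2) / imageW), clip((centerY + h / 2) / imageH));
  return 0;
}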
8 changes: 8 additions & 0 deletions paddle/gserver/tests/CMakeLists.txt
@@ -34,6 +34,14 @@ add_unittest_without_exec(test_ConvTrans

add_test(NAME test_ConvTrans
COMMAND test_ConvTrans)
################# test_PriorBox #######################
add_unittest_without_exec(test_PriorBox
test_PriorBox.cpp
LayerGradUtil.cpp
TestUtil.cpp)

add_test(NAME test_PriorBox
COMMAND test_PriorBox)
################# test_ConvUnify #######################
add_unittest_without_exec(test_ConvUnify
test_ConvUnify.cpp
212 changes: 212 additions & 0 deletions paddle/gserver/tests/test_PriorBox.cpp
@@ -0,0 +1,212 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <string>
#include <vector>

#include "LayerGradUtil.h"
#include "TestUtil.h"

using namespace paddle; // NOLINT
using namespace std; // NOLINT

// Do one forward pass of the priorbox layer and check whether its output
// matches the given result.
void doOnePriorBoxTest(size_t feature_map_width,
size_t feature_map_height,
size_t image_width,
size_t image_height,
vector<int> min_size,
vector<int> max_size,
vector<float> aspect_ratio,
vector<float> variance,
bool use_gpu,
MatrixPtr& result) {
// Setting up the priorbox layer
TestConfig configt;
configt.layerConfig.set_type("priorbox");

configt.inputDefs.push_back({INPUT_DATA, "featureMap", 1, 0});
LayerInputConfig* input = configt.layerConfig.add_inputs();
configt.inputDefs.push_back({INPUT_DATA, "image", 1, 0});
configt.layerConfig.add_inputs();
PriorBoxConfig* pb = input->mutable_priorbox_conf();
for (size_t i = 0; i < min_size.size(); i++) pb->add_min_size(min_size[i]);
for (size_t i = 0; i < max_size.size(); i++) pb->add_max_size(max_size[i]);
for (size_t i = 0; i < variance.size(); i++) pb->add_variance(variance[i]);
for (size_t i = 0; i < aspect_ratio.size(); i++)
pb->add_aspect_ratio(aspect_ratio[i]);

// data layer initialize
std::vector<DataLayerPtr> dataLayers;
LayerMap layerMap;
vector<Argument> datas;
initDataLayer(
configt, &dataLayers, &datas, &layerMap, "priorbox", 1, false, use_gpu);
dataLayers[0]->getOutput().setFrameHeight(feature_map_height);
dataLayers[0]->getOutput().setFrameWidth(feature_map_width);
dataLayers[1]->getOutput().setFrameHeight(image_height);
dataLayers[1]->getOutput().setFrameWidth(image_width);

// test layer initialize
std::vector<ParameterPtr> parameters;
LayerPtr priorboxLayer;
initTestLayer(configt, &layerMap, &parameters, &priorboxLayer);
priorboxLayer->forward(PASS_GC);
checkMatrixEqual(priorboxLayer->getOutputValue(), result);
}

TEST(Layer, priorBoxLayerFwd) {
vector<int> minSize;
vector<int> maxSize;
vector<float> aspectRatio;
vector<float> variance;
bool useGpu = false;

minSize.push_back(276);
maxSize.push_back(330);
variance.push_back(0.1);
variance.push_back(0.1);
variance.push_back(0.2);
variance.push_back(0.2);

// CPU case 1.
MatrixPtr result;
float resultData[] = {0.04,
Contributor

Hmm, noticed from #967 that this needs to be changed to real, same below :) (a short sketch of the change follows the first CPU case below)

Contributor Author

Fixed.

0.04,
0.96,
0.96,
0.1,
0.1,
0.2,
0.2,
0,
0,
1,
1,
0.1,
0.1,
0.2,
0.2};
result = Matrix::create(1, 2 * 8, false, useGpu);
result->setData(resultData);
doOnePriorBoxTest(/* feature_map_width */ 1,
/* feature_map_height */ 1,
/* image_width */ 300,
/* image_height */ 300,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
result);
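As a follow-up to the review note above about #967, here is a minimal sketch of what the suggested change would look like, assuming real is Paddle's build-time floating-point typedef (float or double); only the element type changes:

// Suggested form per the review: declare the expected output with Paddle's
// `real` type so the test data matches the configured precision.
real resultData[] = {0.04, 0.04, 0.96, 0.96, 0.1, 0.1, 0.2, 0.2,
                     0,    0,    1,    1,    0.1, 0.1, 0.2, 0.2};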
// CPU case 2.
variance[1] = 0.2;
variance[3] = 0.1;
maxSize.pop_back();
float resultData2[] = {0, 0, 0.595, 0.595, 0.1, 0.2, 0.2, 0.1,
0.405, 0, 1, 0.595, 0.1, 0.2, 0.2, 0.1,
0, 0.405, 0.595, 1, 0.1, 0.2, 0.2, 0.1,
0.405, 0.405, 1, 1, 0.1, 0.2, 0.2, 0.1};
Matrix::resizeOrCreate(result, 1, 4 * 8, false, useGpu);
result->setData(resultData2);
doOnePriorBoxTest(/* feature_map_width */ 2,
/* feature_map_height */ 2,
/* image_width */ 400,
/* image_height */ 400,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
result);
// CPU case 3.
aspectRatio.push_back(2);
float resultData3[] = {0.04, 0.04, 0.96, 0.96, 0.1, 0.2,
0.2, 0.1, 0, 0.17473088, 1, 0.825269,
0.1, 0.2, 0.2, 0.1, 0.17473088, 0,
0.825269, 1, 0.1, 0.2, 0.2, 0.1};
Matrix::resizeOrCreate(result, 1, 3 * 8, false, useGpu);
result->setData(resultData3);
doOnePriorBoxTest(/* feature_map_width */ 1,
/* feature_map_height */ 1,
/* image_width */ 300,
/* image_height */ 300,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
result);

#ifndef PADDLE_ONLY_CPU
// reset the input parameters
variance[1] = 0.1;
variance[3] = 0.2;
maxSize.push_back(330);
aspectRatio.pop_back();
MatrixPtr resultGpu;
useGpu = true;
// GPU case 1.
resultGpu = Matrix::create(1, 2 * 8, false, useGpu);
resultGpu->copyFrom(resultData, 2 * 8);
doOnePriorBoxTest(/* feature_map_width */ 1,
/* feature_map_height */ 1,
/* image_width */ 300,
/* image_height */ 300,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
resultGpu);
// GPU case 2.
variance[1] = 0.2;
variance[3] = 0.1;
maxSize.pop_back();
Matrix::resizeOrCreate(resultGpu, 1, 4 * 8, false, useGpu);
resultGpu->copyFrom(resultData2, 4 * 8);
doOnePriorBoxTest(/* feature_map_width */ 2,
/* feature_map_height */ 2,
/* image_width */ 400,
/* image_height */ 400,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
resultGpu);
// GPU case 3.
aspectRatio.push_back(2);
Matrix::resizeOrCreate(resultGpu, 1, 3 * 8, false, useGpu);
resultGpu->copyFrom(resultData3, 3 * 8);
doOnePriorBoxTest(/* feature_map_width */ 1,
/* feature_map_height */ 1,
/* image_width */ 300,
/* image_height */ 300,
minSize,
maxSize,
aspectRatio,
variance,
useGpu,
resultGpu);
#endif
}

int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
initMain(argc, argv);
return RUN_ALL_TESTS();
}
8 changes: 8 additions & 0 deletions proto/ModelConfig.proto
@@ -248,6 +248,13 @@ message ImageConfig {
required uint32 img_size_y = 9;
}

message PriorBoxConfig {
repeated uint32 min_size = 1;
repeated uint32 max_size = 2;
repeated float aspect_ratio = 3;
repeated float variance = 4;
}

message LayerInputConfig {
required string input_layer_name = 1;
optional string input_parameter_name = 2;
@@ -263,6 +270,7 @@ message LayerInputConfig {
optional BilinearInterpConfig bilinear_interp_conf = 10;
optional MaxOutConfig maxout_conf = 11;
optional SppConfig spp_conf = 12;
optional PriorBoxConfig priorbox_conf = 13;
}

message LayerConfig {