
Commit bff8fb8

Merge pull request jimmie33#6 from jeffdonahue/flatten

add flatten layer

2 parents 2786cf7 + a2bac02

File tree

4 files changed: 173 additions, 0 deletions
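The FlattenLayer added by this merge reshapes a bottom blob of shape (num, channels, height, width) into a top blob of shape (num, channels * height * width, 1, 1) without reordering any values; the 2 × 3 × 6 × 5 bottom blob used in the tests below, for example, comes out as 2 × 90 × 1 × 1. The change lands in four pieces: the class declaration in vision_layers.hpp, the factory registration under the type string "flatten", the layer implementation, and its unit tests.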

include/caffe/vision_layers.hpp (+21)
@@ -89,6 +89,27 @@ class DropoutLayer : public NeuronLayer<Dtype> {
 };
 
 
+template <typename Dtype>
+class FlattenLayer : public Layer<Dtype> {
+ public:
+  explicit FlattenLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  int channels_out_;
+};
+
+
 template <typename Dtype>
 class InnerProductLayer : public Layer<Dtype> {
  public:
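The declaration follows the Layer<Dtype> interface of this era: SetUp checks the blob counts and shapes the top blob, Forward_cpu/Forward_gpu fill it, and the Backward_* methods write diffs into the bottom blob while returning a Dtype loss contribution (zero here, since flatten has no loss of its own). The only state is channels_out_, which caches channels * height * width from SetUp.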

src/caffe/layer_factory.cpp (+2)
@@ -31,6 +31,8 @@ Layer<Dtype>* GetLayer(const LayerParameter& param) {
     return new DropoutLayer<Dtype>(param);
   } else if (type == "euclidean_loss") {
     return new EuclideanLossLayer<Dtype>(param);
+  } else if (type == "flatten") {
+    return new FlattenLayer<Dtype>(param);
   } else if (type == "im2col") {
     return new Im2colLayer<Dtype>(param);
   } else if (type == "infogain_loss") {
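With this branch in place, GetLayer can build a FlattenLayer from any network definition whose layer type string is "flatten". A minimal usage sketch — hypothetical, not part of this commit, and assuming the usual protobuf-generated set_name()/set_type() accessors on LayerParameter:

  // Hypothetical factory usage; the accessor names are assumptions.
  LayerParameter param;
  param.set_name("flat1");    // assumed protobuf-generated accessor
  param.set_type("flatten");  // dispatches to the branch added above
  Layer<float>* layer = GetLayer<float>(param);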

src/caffe/layers/flatten_layer.cpp (+57)
@@ -0,0 +1,57 @@
+// Copyright 2013 Yangqing Jia
+
+#include <vector>
+
+#include "caffe/layer.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top) {
+  CHECK_EQ(bottom.size(), 1) << "Flatten Layer takes a single blob as input.";
+  CHECK_EQ(top->size(), 1) << "Flatten Layer takes a single blob as output.";
+  channels_out_ = bottom[0]->channels() * bottom[0]->height()
+      * bottom[0]->width();
+  (*top)[0]->Reshape(bottom[0]->num(), channels_out_, 1, 1);
+}
+
+template <typename Dtype>
+void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top) {
+  const Dtype* bottom_data = bottom[0]->cpu_data();
+  Dtype* top_data = (*top)[0]->mutable_cpu_data();
+  caffe_copy(bottom[0]->count(), bottom_data, top_data);  // whole blob
+}
+
+template <typename Dtype>
+void FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top) {
+  const Dtype* bottom_data = bottom[0]->gpu_data();
+  Dtype* top_data = (*top)[0]->mutable_gpu_data();
+  caffe_gpu_copy(bottom[0]->count(), bottom_data, top_data);
+}
+
+template <typename Dtype>
+Dtype FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+  const Dtype* top_diff = top[0]->cpu_diff();
+  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+  caffe_copy(top[0]->count(), top_diff, bottom_diff);
+  return Dtype(0.);  // flatten contributes no loss of its own
+}
+
+template <typename Dtype>
+Dtype FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+  const Dtype* top_diff = top[0]->gpu_diff();
+  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+  caffe_gpu_copy(top[0]->count(), top_diff, bottom_diff);
+  return Dtype(0.);
+}
+
+INSTANTIATE_CLASS(FlattenLayer);
+
+}  // namespace caffe
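Flattening is a pure memory-layout reinterpretation: in a row-major blob, element (n, c, h, w) lives at linear offset ((n * C + c) * H + h) * W + w, and the layer leaves that buffer untouched. A small standalone check of the index arithmetic the tests below rely on (the C, H, W constants mirror the 3 × 6 × 5 test blob; they are illustrative, not part of the commit):

  #include <cassert>

  int main() {
    const int C = 3, H = 6, W = 5;
    // Walking the flattened channel index recovers the original
    // (channel, height, width) triple used in test_flatten_layer.cpp.
    for (int c = 0; c < C * H * W; ++c) {
      const int ch = c / (H * W);
      const int hh = (c / W) % H;
      const int ww = c % W;
      assert((ch * H + hh) * W + ww == c);  // same linear offset
    }
    return 0;
  }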

src/caffe/test/test_flatten_layer.cpp (+93)
@@ -0,0 +1,93 @@
+// Copyright 2013 Yangqing Jia
+
+#include <cstring>
+#include <cuda_runtime.h>
+
+#include "gtest/gtest.h"
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+template <typename Dtype>
+class FlattenLayerTest : public ::testing::Test {
+ protected:
+  FlattenLayerTest()
+      : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
+        blob_top_(new Blob<Dtype>()) {
+    // fill the values
+    FillerParameter filler_param;
+    GaussianFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_);
+    blob_bottom_vec_.push_back(blob_bottom_);
+    blob_top_vec_.push_back(blob_top_);
+  }
+  virtual ~FlattenLayerTest() { delete blob_bottom_; delete blob_top_; }
+  Blob<Dtype>* const blob_bottom_;
+  Blob<Dtype>* const blob_top_;
+  vector<Blob<Dtype>*> blob_bottom_vec_;
+  vector<Blob<Dtype>*> blob_top_vec_;
+};
+
+typedef ::testing::Types<float, double> Dtypes;
+TYPED_TEST_CASE(FlattenLayerTest, Dtypes);
+
+TYPED_TEST(FlattenLayerTest, TestSetup) {
+  LayerParameter layer_param;
+  FlattenLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  EXPECT_EQ(this->blob_top_->num(), 2);
+  EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5);
+  EXPECT_EQ(this->blob_top_->height(), 1);
+  EXPECT_EQ(this->blob_top_->width(), 1);
+}
+
+TYPED_TEST(FlattenLayerTest, TestCPU) {
+  LayerParameter layer_param;
+  FlattenLayer<TypeParam> layer(layer_param);
+  Caffe::set_mode(Caffe::CPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  for (int c = 0; c < 3 * 6 * 5; ++c) {
+    EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
+        this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
+  }
+}
+
+TYPED_TEST(FlattenLayerTest, TestGPU) {
+  LayerParameter layer_param;
+  FlattenLayer<TypeParam> layer(layer_param);
+  Caffe::set_mode(Caffe::GPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  for (int c = 0; c < 3 * 6 * 5; ++c) {
+    EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
+        this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
+  }
+}
+
+TYPED_TEST(FlattenLayerTest, TestCPUGradient) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::CPU);
+  FlattenLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+}
+
+TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::GPU);
+  FlattenLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+}
+
+
+}  // namespace caffe
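Because the layer is an identity on the underlying memory, its Jacobian is the identity matrix, and bottom_diff is a straight copy of top_diff; the exhaustive numerical gradient check (step size 1e-2, error threshold 1e-2) therefore exercises little more than the copy plumbing on both devices, but it still guards the reshape bookkeeping against regressions.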
