Skip to content
This repository was archived by the owner on Aug 5, 2022. It is now read-only.

add continuation indicator layer for intel caffe #206

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions compile_caution.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Remember to execute `source ./external/mlsl/l_mlsl_2018.0.003/intel64/bin/mlslvars.sh` before compiling pycaffe.
38 changes: 38 additions & 0 deletions include/caffe/layers/continuation_indicator_layer.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#ifndef CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_
#define CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {
// ContinuationIndicatorLayer produces a (time_step x batch_size) blob of
// sequence-continuation flags for recurrent layers: 0 at the first time
// step of every sequence, 1 at all later time steps (see Forward_cpu).
// It takes no bottom blobs; both dimensions come from
// ContinuationIndicatorParameter (time_step, batch_size).
template <typename Dtype>
class ContinuationIndicatorLayer: public Layer<Dtype> {
public:
explicit ContinuationIndicatorLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
// Reads time_step/batch_size from the layer parameter and validates
// that both are positive.
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// Shapes top[0] to {time_step_, mini_batch_}.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual inline const char* type() const { return "ContinuationIndicator";}
// The layer is a pure data source: no bottoms, exactly one top.
virtual inline int ExactNumBottomBlobs() const { return 0;}
virtual inline int ExactNumTopBlobs() const { return 1;}

protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// NOTE(review): Forward_gpu/Backward_gpu are declared here, but no .cu
// implementation is visible in this change — confirm a CUDA file exists
// or that builds are CPU_ONLY (where STUB_GPU provides the stubs).
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// The output does not depend on any bottom, so backward is a no-op.
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {}
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);
int mini_batch_;  // batch size (N), second output dimension
int time_step_;   // time steps (T), first output dimension
};
}

#endif  // CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_


43 changes: 43 additions & 0 deletions src/caffe/layers/continuation_indicator_layer.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#include "caffe/layers/continuation_indicator_layer.hpp"

namespace caffe {
template <typename Dtype>
void ContinuationIndicatorLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Pull the layer's configuration; both dimensions must be positive,
  // since they fully determine the shape of the produced indicator blob.
  const ContinuationIndicatorParameter& ci_param =
      this->layer_param_.continuation_indicator_param();
  mini_batch_ = ci_param.batch_size();
  time_step_ = ci_param.time_step();
  CHECK_GT(mini_batch_, 0) << "The batch size should be greater than 0.";
  CHECK_GT(time_step_, 0) << "The time step should be greater than 0.";
}
template <typename Dtype>
void ContinuationIndicatorLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // The single top blob is (time_step_ x mini_batch_): one indicator
  // value per (time step, sequence-in-batch) pair.
  vector<int> indicator_shape(2);
  indicator_shape[0] = time_step_;
  indicator_shape[1] = mini_batch_;
  top[0]->Reshape(indicator_shape);
}
template <typename Dtype>
void ContinuationIndicatorLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Sanity-check that Reshape produced the expected (T x N) layout.
  CHECK_EQ(top[0]->shape()[0], time_step_) << "1st dimension of top blob should be same with time step.";
  CHECK_EQ(top[0]->shape()[1], mini_batch_) << "2nd dimension of top blob should be same with batch size.";
  Dtype* top_data = top[0]->mutable_cpu_data();
  // Row-major (T x N): the first row (time step 0) marks the start of
  // every sequence with 0; all subsequent time steps carry 1.
  const int count = time_step_ * mini_batch_;
  for (int idx = 0; idx < count; ++idx) {
    top_data[idx] = idx < mini_batch_ ? Dtype(0) : Dtype(1);
  }
}
#ifdef CPU_ONLY
STUB_GPU(ContinuationIndicatorLayer);
#endif
INSTANTIATE_CLASS(ContinuationIndicatorLayer);
REGISTER_LAYER_CLASS(ContinuationIndicator);
}  // namespace caffe
8 changes: 8 additions & 0 deletions src/caffe/proto/caffe.proto
Original file line number Diff line number Diff line change
Expand Up @@ -610,6 +610,14 @@ message LayerParameter {
optional MnActivationParameter mn_activation_param = 151;
optional MnParamGradCompressParameter mn_grad_compress_param = 156;
optional QuantizationParameter quantization_param = 158;

optional ContinuationIndicatorParameter continuation_indicator_param = 200;
}


// Parameters for ContinuationIndicatorLayer, which emits a
// (time_step x batch_size) blob of sequence-continuation flags.
message ContinuationIndicatorParameter {
// Number of time steps (first output dimension). The layer rejects 0
// via CHECK_GT, so the default must be overridden in the prototxt.
optional uint32 time_step = 1 [default = 0];
// Number of sequences per batch (second output dimension); must also
// be set to a positive value in the prototxt.
optional uint32 batch_size = 2 [default = 0];
}

message MultinodeLayerParameter {
Expand Down