83 changes: 76 additions & 7 deletions paddle/gserver/layers/MKLDNNAddtoLayer.cpp
@@ -62,24 +62,22 @@ void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  if (biases_) {
-    LOG(FATAL) << "not implemented yet";
-  }
-  resetFwdBuffers(inVals_, out);
+  resetFwdBuffers(inVals_, bias, out);
   in = inVals_[0];
 
   std::shared_ptr<sum::primitive_desc> fwdPD;
-  resetFwdPD(fwdPD, inVals_, out);
+  std::shared_ptr<sum::primitive_desc> biasPD;
+  resetFwdPD(fwdPD, biasPD, inVals_, bias, out);
 
-  resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+  resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
 }
 
 void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& in,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  resetBwdBuffers(inGrads_, out);
+  resetBwdBuffers(inGrads_, bias, out);
   in = inGrads_[0];
 
   // backward only need share output grad to input grad
@@ -89,6 +87,20 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
       inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
     }
   }
+
+  // backward bias
+  bwdBias_ = nullptr;
+  if (bias) {
+    std::vector<double> scales(bs_, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
+    auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
+    std::vector<primitive::at> srcs;
+    for (size_t i = 0; i < grads_.size(); ++i) {
+      srcs.push_back(*(grads_[i]));
+    }
+    bwdBias_.reset(new sum(biasPD, srcs, *bias));
+    pipeline.push_back(*bwdBias_);
+  }
 }
 
 void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
@@ -97,7 +109,25 @@ void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
   }
 }
 
+void MKLDNNAddtoLayer::prepareBias(MKLDNNMatrixPtr& bias,
+                                   const MatrixPtr& biasMat,
+                                   const MKLDNNMatrixPtr& out,
+                                   std::vector<MKLDNNMatrixPtr>& outs) {
+  auto pd = MKLDNNMatrix::createPrimitiveDesc(
+      {(int)layerSize_}, memory::format::x, engine_);
+  bias = MKLDNNMatrix::create(pd, biasMat);
+  outs.clear();
+  real* data = out->getData();
+  CHECK_EQ(bs_ * layerSize_, out->getElementCnt());
+  for (int i = 0; i < bs_; ++i) {
+    MatrixPtr tmp =
+        Matrix::create(data + i * layerSize_, 1, layerSize_, false, false);
+    outs.push_back(MKLDNNMatrix::create(bias->getPrimitiveDesc(), tmp));
+  }
+}
+
 void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
@@ -110,10 +140,18 @@ void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   }
 
   resetOutValue(out, inputs[0]->getPrimitiveDesc());
+
+  if (biases_ && biases_->getW()) {
+    prepareBias(bias, biases_->getW(), out, vals_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+                                  std::shared_ptr<sum::primitive_desc>& biasPD,
                                   std::vector<MKLDNNMatrixPtr>& inputs,
+                                  MKLDNNMatrixPtr bias,
                                   MKLDNNMatrixPtr out) {
   std::vector<double> scales(inputs.size(), 1.0);
   std::vector<memory::primitive_desc> srcPDs;
@@ -123,22 +161,47 @@ void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
   CHECK(out);
   pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs));
   CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
+
+  biasPD = nullptr;
+  if (bias) {
+    std::vector<double> scales(2, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(2, bias->getPrimitiveDesc());
+    biasPD.reset(
+        new sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs));
+    CHECK_PRIMITIVE_DESC_EQ(bias, biasPD->dst_primitive_desc());
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPipeline(
     std::vector<primitive>& pipeline,
     std::shared_ptr<sum::primitive_desc>& pd,
+    std::shared_ptr<sum::primitive_desc>& biasPD,
     std::vector<MKLDNNMatrixPtr>& inputs,
+    MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
   std::vector<primitive::at> srcs;
   for (size_t i = 0; i < inputs.size(); i++) {
     srcs.push_back(*(inputs[i]));
   }
   fwd_.reset(new sum(*pd, srcs, *out));
   pipeline.push_back(*fwd_);
+
+  fwdBias_.clear();
+  if (biasPD == nullptr || bias == nullptr) {
+    return;
+  }
+  fwdBias_.resize(vals_.size());
+  for (size_t i = 0; i < vals_.size(); ++i) {
+    std::vector<primitive::at> srcs;
+    srcs.push_back(*(vals_[i]));
+    srcs.push_back(*bias);
+    fwdBias_[i].reset(new sum(*biasPD, srcs, *vals_[i]));
+    pipeline.push_back(*fwdBias_[i]);
+  }
 }
 
 void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   CHECK(outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
@@ -149,6 +212,12 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
     resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
   }
+
+  if (biases_ && biases_->getWGrad()) {
+    prepareBias(bias, biases_->getWGrad(), out, grads_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 }  // namespace paddle
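
Note on the approach above: mkldnn::sum has no native bias input, so the patch builds the bias add out of sum primitives. prepareBias() wraps the bias vector in a 1-D (format::x) MKL-DNN memory and slices the (bs_ x layerSize_) output into bs_ single-row views sharing the bias primitive descriptor; the forward pipeline then runs one two-input sum per row (row += bias), and the backward pass runs a single bs_-input sum that accumulates the per-sample output-gradient rows into the bias gradient. A minimal plain-C++ sketch of the equivalent arithmetic, assuming row-major float data (hypothetical helper names, not the layer's actual code):

#include <cstddef>

// Sketch of what the fwdBias_ primitives compute: each of the bs
// per-row sum primitives adds the bias vector to one row slice of
// the (bs x layerSize) output.
void addBiasForward(float* out, const float* bias,
                    size_t bs, size_t layerSize) {
  for (size_t i = 0; i < bs; ++i) {
    for (size_t j = 0; j < layerSize; ++j) {
      out[i * layerSize + j] += bias[j];
    }
  }
}

// Sketch of what the single bwdBias_ sum primitive computes: the
// bias gradient is the sum of the bs row slices of the output gradient.
void addBiasBackward(float* biasGrad, const float* outGrad,
                     size_t bs, size_t layerSize) {
  for (size_t j = 0; j < layerSize; ++j) {
    biasGrad[j] = 0.0f;
  }
  for (size_t i = 0; i < bs; ++i) {
    for (size_t j = 0; j < layerSize; ++j) {
      biasGrad[j] += outGrad[i * layerSize + j];
    }
  }
}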
22 changes: 21 additions & 1 deletion paddle/gserver/layers/MKLDNNAddtoLayer.h
@@ -32,9 +32,15 @@ class MKLDNNAddtoLayer : public MKLDNNLayer {
   // layer size == ic * ih * iw == oc * oh * ow, and can not be changed
   size_t layerSize_;
 
-  // TODO(TJ): this part has not been optimized by MKL-DNN
   std::unique_ptr<Weight> biases_;
 
+  // buffers for adding bias
+  std::vector<MKLDNNMatrixPtr> vals_;
+  std::vector<MKLDNNMatrixPtr> grads_;
+  // primitives for adding bias
+  std::vector<std::shared_ptr<mkldnn::primitive>> fwdBias_;
+  std::shared_ptr<mkldnn::primitive> bwdBias_;
+
 public:
   explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
 
@@ -91,20 +97,34 @@ class MKLDNNAddtoLayer : public MKLDNNLayer {
    * reset pipeline.
    */
   void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
   void resetFwdPD(std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                  std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                   std::vector<MKLDNNMatrixPtr>& inputs,
+                  MKLDNNMatrixPtr bias,
                   MKLDNNMatrixPtr out);
   void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                         std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                        std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                         std::vector<MKLDNNMatrixPtr>& inputs,
+                        MKLDNNMatrixPtr& bias,
                         MKLDNNMatrixPtr& out);
 
   /**
    * Backward functions: reset buffers(inputs, output, bias)
    */
   void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
+
+  /**
+   * prepare for bias
+   */
+  void prepareBias(MKLDNNMatrixPtr& bias,
+                   const MatrixPtr& biasMat,
+                   const MKLDNNMatrixPtr& out,
+                   std::vector<MKLDNNMatrixPtr>& outs);
 };
 
 }  // namespace paddle
9 changes: 2 additions & 7 deletions paddle/gserver/tests/test_MKLDNN.cpp
@@ -300,13 +300,8 @@ void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
   TestConfig dnnConfig;
   getAddtoConfig(dnnConfig, pm, nInputs);
   dnnConfig.layerConfig.set_type("mkldnn_addto");
-  // TODO(TJ): test with bias
-  for (auto withBias : {false}) {
-    if (withBias) {
-      dnnConfig.biasSize = pm.ic * pm.ih * pm.iw;
-    } else {
-      dnnConfig.biasSize = 0;
-    }
+  for (auto withBias : {false, true}) {
+    dnnConfig.biasSize = withBias ? pm.ic * pm.ih * pm.iw : 0;
     RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
   }
 }
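
The test change closes the old TODO: testAddtoLayer now exercises the layer both with and without bias, sizing the bias to one element per output activation (pm.ic * pm.ih * pm.iw, matching layerSize_ in the layer code above) and comparing the MKL-DNN layer against the reference "addto" layer in both configurations.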