// Oliver - machine learning library.
// written by cubeflix - https://github.com/cubeflix/oliver
//
// DenseLayer.cpp
// Dense layer class.
#include "DenseLayer.h"
#include "Initializer.h"
#include "Network.h"
namespace Oliver {
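    // A dense (fully-connected) layer. The weights have shape
    // (inputSize x outputSize) and the biases form a single row of shape
    // (1 x outputSize), so the layer maps a (batch x inputSize) input to a
    // (batch x outputSize) output.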
    DenseLayer::DenseLayer(unsigned int inputSize, unsigned int outputSize, Initializer* weightInit, Initializer* biasInit)
        : m_trainable(false), m_inputSize(inputSize), m_outputSize(outputSize), m_weightInit(weightInit), m_biasInit(biasInit) {
        if (m_inputSize == 0 || m_outputSize == 0) {
            throw NetworkException("input and output size must not be zero");
        }

        // Allocate the weight and bias matrices.
        m_weights = new Matrix(m_inputSize, m_outputSize);
        m_biases = new Matrix(1, m_outputSize);

        // Training-only state is created later: gradients and optimizers in
        // initTraining(), the input cache in forward().
        m_inputCache = NULL;
        m_weightGrad = NULL;
        m_weightOpt = NULL;
        m_biasGrad = NULL;
        m_biasOpt = NULL;
    }

    DenseLayer::~DenseLayer() {
        delete m_weights;
        delete m_biases;
        if (m_trainable) {
            delete m_inputCache;
            delete m_weightGrad;
            delete m_biasGrad;
            delete m_weightOpt;
            delete m_biasOpt;
        }
    }

    unsigned int DenseLayer::inputSize() {
        return m_inputSize;
    }

    unsigned int DenseLayer::outputSize() {
        return m_outputSize;
    }

    void DenseLayer::initTraining(OptimizerSettings* settings) {
        // Allocate the gradient matrices.
        m_weightGrad = new Matrix(m_inputSize, m_outputSize);
        m_biasGrad = new Matrix(1, m_outputSize);

        // Initialize the weights and biases.
        m_weightInit->init(m_weights);
        m_biasInit->init(m_biases);

        // Create the optimizers.
        m_weightOpt = settings->create(m_weights, m_weightGrad);
        m_biasOpt = settings->create(m_biases, m_biasGrad);
        m_trainable = true;
    }

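    // Forward pass: output = input * weights, with the bias row added to
    // every row of the result. When the layer is trainable, the input is
    // cached so backward() can later compute the weight gradient.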
    void DenseLayer::forward(Matrix* input, Matrix* output, int device) {
        // Ensure the sizes match.
        if (input->rows() != output->rows() || input->cols() != m_inputSize || output->cols() != m_outputSize) {
            throw NetworkException("invalid input and output matrix sizes for layer");
        }
        if (m_trainable) {
            // If we need to train, cache the input, freeing any cache left
            // over from a previous forward pass.
            delete m_inputCache;
            m_inputCache = input->copy();
        }
        dot(input, m_weights, output, device);
        output->addBias(m_biases, device);
    }

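    // Backward pass for output = input * weights + biases:
    //   prevGrad   = outputGrad * weights^T      (gradient w.r.t. the layer input)
    //   weightGrad = inputCache^T * outputGrad   (gradient w.r.t. the weights)
    //   biasGrad   = outputGrad reduced to a single (1 x outputSize) row
    //                (gradient w.r.t. the biases)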
    void DenseLayer::backward(Matrix* outputGrad, Matrix* prevGrad, int device) {
        if (!m_trainable) {
            throw NetworkException("layer not initialized for training");
        }
        if (!m_inputCache) {
            throw NetworkException("forward pass has not been run");
        }

        // Ensure the sizes match.
        if (m_inputCache->rows() != prevGrad->rows() || prevGrad->rows() != outputGrad->rows()) {
            throw NetworkException("invalid input, previous gradient, and output gradient matrix sizes for layer");
        }
        if (m_inputCache->cols() != m_inputSize || prevGrad->cols() != m_inputSize || outputGrad->cols() != m_outputSize) {
            throw NetworkException("invalid input, previous gradient, and output gradient matrix sizes for layer");
        }

        Matrix* weightT = m_weights->transpose(device);
        dot(outputGrad, weightT, prevGrad, device);

        Matrix* xT = m_inputCache->transpose(device);
        dot(xT, outputGrad, m_weightGrad, device);

        sum(outputGrad, m_biasGrad, SumOverColumns, device);

        delete weightT;
        delete xT;
        delete m_inputCache;
        m_inputCache = NULL;
    }

    void DenseLayer::update(int device) {
        if (!m_trainable) {
            throw NetworkException("layer not initialized for training");
        }
        m_weightOpt->update(device);
        m_biasOpt->update(device);
    }
}
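
// Example usage (a minimal sketch, not part of the library). GaussianInitializer,
// ZeroInitializer, and SGDSettings below are assumed names for Initializer and
// OptimizerSettings subclasses; substitute whichever implementations the library
// actually provides. The device argument is shown as 0 only for illustration.
//
//     Oliver::GaussianInitializer weightInit;              // assumed initializer
//     Oliver::ZeroInitializer biasInit;                    // assumed initializer
//     Oliver::SGDSettings settings(0.01f);                 // assumed optimizer settings
//     Oliver::DenseLayer layer(784, 128, &weightInit, &biasInit);
//     layer.initTraining(&settings);
//
//     Oliver::Matrix input(32, 784), output(32, 128);      // one batch of 32 samples
//     Oliver::Matrix outputGrad(32, 128), prevGrad(32, 784);
//     layer.forward(&input, &output, 0);
//     // ... fill outputGrad with the loss gradient ...
//     layer.backward(&outputGrad, &prevGrad, 0);
//     layer.update(0);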