@@ -38,7 +38,6 @@ int FullyConnect::configure(int batch, float learning_rate, float v_param, Layer
     Layer::configure(batch, learning_rate, v_param, prevLayer, phase);
     this->Y.resize(this->batch * this->units);
     this->W.resize(this->input_shape * this->units);
-    this->B = (float *)malloc(sizeof(float));
     if (this->phase == TRAIN) {
         this->E.resize(this->batch * this->input_shape);
         this->delta_buf = (float *)malloc(sizeof(float) * this->batch * this->units);
@@ -50,7 +49,7 @@ int FullyConnect::configure(int batch, float learning_rate, float v_param, Layer
     for (int iu = 0; iu < this->input_shape * this->units; iu++) {
         this->W[iu] = rand(mt);
     }
-    *this->B = rand(mt);
+    this->B = rand(mt);
     return 1;
 }
 
@@ -63,7 +62,7 @@ void FullyConnect::forward(vector<float> *x) {
     for (int b = 0; b < this->batch; b++) {
         for (int i = 0; i < this->input_shape; i++) {
             for (int u = 0; u < this->units; u++) {
-                this->Y[b*this->units + u] += x->at(b*this->input_shape + i) * this->W[i*this->units + u] + *this->B;
+                this->Y[b*this->units + u] += x->at(b*this->input_shape + i) * this->W[i*this->units + u] + this->B;
             }
         }
     }
@@ -115,7 +114,7 @@ void FullyConnect::backward(vector<float> *e) {
 #pragma omp parallel for
     for (int b = 0; b < this->batch; b++) {
         for (int u = 0; u < this->units; u++) {
-            *this->B -= this->learning_rate * e->at(b*this->units + u) * this->batch_inv;
+            this->B -= this->learning_rate * e->at(b*this->units + u) * this->batch_inv;
         }
     }
 
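Net effect of the diff: the single bias term B changes from a heap-allocated float* (with a malloc in configure and dereferences in forward/backward) to a plain float member that is read and written directly. Below is a minimal standalone sketch of that pattern under stated assumptions: only the member names (B, W, Y, units, input_shape, batch, learning_rate, batch_inv) and the loop structure come from the diff; the class skeleton, the local rand distribution, and main are illustrative, not repository code.

// tiny_fc_sketch.cpp -- illustrative sketch, not the repository's FullyConnect class
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

struct TinyFC {
    int batch = 2, input_shape = 3, units = 4;
    float learning_rate = 0.01f, batch_inv = 0.5f;  // batch_inv assumed to be 1.0f / batch
    std::vector<float> W, Y;
    float B = 0.0f;                                  // bias held by value: no malloc/free, no dereference

    void configure(std::mt19937 &mt) {
        std::uniform_real_distribution<float> rand(-1.0f, 1.0f);  // stands in for the diff's rand(mt)
        Y.resize(batch * units);
        W.resize(input_shape * units);
        for (float &w : W) w = rand(mt);
        B = rand(mt);                                // was: *this->B = rand(mt);
    }

    void forward(const std::vector<float> &x) {      // mirrors the diff's triple loop, bias added per term
        std::fill(Y.begin(), Y.end(), 0.0f);
        for (int b = 0; b < batch; b++)
            for (int i = 0; i < input_shape; i++)
                for (int u = 0; u < units; u++)
                    Y[b*units + u] += x[b*input_shape + i] * W[i*units + u] + B;
    }

    void backward(const std::vector<float> &e) {     // was: *this->B -= ...
        for (int b = 0; b < batch; b++)
            for (int u = 0; u < units; u++)
                B -= learning_rate * e[b*units + u] * batch_inv;
    }
};

int main() {
    std::mt19937 mt(42);
    TinyFC fc;
    fc.configure(mt);
    fc.forward(std::vector<float>(fc.batch * fc.input_shape, 1.0f));
    fc.backward(std::vector<float>(fc.batch * fc.units, 0.1f));
    std::printf("B after one update: %f\n", fc.B);
    return 0;
}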