@@ -56,8 +56,8 @@ def get_l2_regularization(self, derivate=False, weights=False):
            return l2_derivate_matrix
        else:
            all_weights_squared = np.sum(np.sum(layer.w ** 2) for layer in self.layers)
-           all_biases_squared = np.sum(np.sum(layer.b ** 2) for layer in self.layers)
-           return self.regularization_factor * (all_weights_squared)
+           # all_biases_squared = np.sum(np.sum(layer.b ** 2) for layer in self.layers)
+           return self.regularization_factor * all_weights_squared

    def get_loss(self, layer, target_y, estimate_y, derivate=False):
        if layer.loss == "L2":
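Side note on the hunk above: the L2 penalty is now computed over the weights only, which matches the common convention of leaving biases out of weight decay. A minimal standalone sketch of the same computation (layer objects with a `w` attribute and a `regularization_factor` scalar are assumed here, mirroring the class but not taken from it); the built-in `sum` is used in the sketch because `np.sum` over a generator only works through a deprecated fallback:

import numpy as np

def l2_penalty(layers, regularization_factor):
    # Sum of squared weights over all layers; biases are intentionally excluded.
    all_weights_squared = sum(np.sum(layer.w ** 2) for layer in layers)
    return regularization_factor * all_weights_squared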
@@ -74,9 +74,9 @@ def get_loss(self, layer, target_y, estimate_y, derivate=False):
            print("Lol")
            if derivate:
                derivate = estimate_y - target_y
-               derivate[(derivate >= -0.000001) & (derivate <= 0.000001)] = 0
-               derivate[(derivate >= 0.999)] = 1
-               derivate[(derivate <= -0.999)] = -1
+               # derivate[(derivate >= -0.000001) & (derivate <= 0.000001)] = 0
+               # derivate[(derivate >= 0.999)] = 1
+               # derivate[(derivate <= -0.999)] = -1
                return derivate
            return loss
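The second hunk stops clipping the loss derivative near 0 and +/-1, so the gradient that flows back is the raw difference. A hedged sketch, assuming the 0.5 * sum((estimate_y - target_y) ** 2) form of the L2 loss so that its derivative with respect to estimate_y is exactly estimate_y - target_y (the function name is illustrative, not the class method):

import numpy as np

def l2_loss(target_y, estimate_y, derivate=False):
    # Derivative of 0.5 * sum((estimate_y - target_y) ** 2) w.r.t. estimate_y.
    if derivate:
        return estimate_y - target_y
    return 0.5 * np.sum((estimate_y - target_y) ** 2)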
@@ -127,8 +127,7 @@ def back_propagation(self, activations, target_y, zs, learning_rate=0.0001):
                layer.w = layer.w - (learning_rate * np.array(last_error).dot(np.transpose(
                    activations[layer_i])) + self.regularization_factor * layer.w)

-               layer.b = layer.b - (learning_rate * last_error + self.regularization_factor *
-                                    layer.b)
+               layer.b = layer.b - (learning_rate * last_error)
            else:
                layer.b = layer.b - (learning_rate * last_error)
                layer.w = layer.w - (learning_rate * np.array(last_error).dot(np.transpose(
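The last hunk keeps L2 decay on the weight update but drops it from the bias update, so biases take a plain gradient step. A compact sketch of the resulting update rule with the same variable names (the helper function itself is illustrative and not part of the class):

import numpy as np

def update_layer(layer, last_error, prev_activation, learning_rate, regularization_factor):
    # Weights: gradient step plus L2 weight decay.
    grad_w = np.array(last_error).dot(np.transpose(prev_activation))
    layer.w = layer.w - (learning_rate * grad_w + regularization_factor * layer.w)
    # Biases: plain gradient step, no decay term.
    layer.b = layer.b - learning_rate * last_error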