@@ -7,18 +7,17 @@ class NeuralNet
     error_threshold: 0.01
   }
 
-  def initialize(shape)
+  def initialize shape
     @shape = shape
-    @output_layer = @shape.length - 1
-    set_initial_weight_values
   end
 
   def run input
     # Input to this method represents the output of the first layer (i.e., the input layer)
     @outputs = [input]
+    set_initial_weight_values if @weights.nil?
 
     # Now calculate output of neurons in subsequent layers:
-    1.upto(@output_layer).each do |layer|
+    1.upto(output_layer).each do |layer|
       source_layer = layer - 1 # i.e., the layer that is feeding into this one
       source_outputs = @outputs[source_layer]
 
@@ -36,7 +35,7 @@ def run input
     end
 
     # Outputs of neurons in the last layer are the final result
-    @outputs[@output_layer]
+    @outputs[output_layer]
   end
 
   def train data, opts = {}
@@ -45,8 +44,9 @@ def train data, opts = {}
     iteration = 0
     error = nil
 
-    set_weight_changes_to_zeros
+    set_initial_weight_values if @weights.nil?
     set_initial_weight_update_values if @weight_update_values.nil?
+    set_weight_changes_to_zeros
     set_previous_gradients_to_zeroes
 
     while iteration < opts[:max_iterations]
@@ -84,15 +84,15 @@ def train_on_batch data
   end
 
   def calculate_training_error ideal_output
-    @outputs[@output_layer].map.with_index do |output, i|
+    @outputs[output_layer].map.with_index do |output, i|
       output - ideal_output[i]
     end
   end
 
   def update_gradients training_error
     deltas = []
     # Starting from output layer and working backwards, backpropagating the training error
-    @output_layer.downto(1).each do |layer|
+    output_layer.downto(1).each do |layer|
       deltas[layer] = []
       source_layer = layer - 1
       source_neurons = @shape[source_layer] + 1 # account for bias neuron
@@ -103,7 +103,7 @@ def update_gradients training_error
         activation_derivative = output * (1.0 - output)
 
         # calculate delta for neuron
-        delta = deltas[layer][neuron] = if layer == @output_layer
+        delta = deltas[layer][neuron] = if layer == output_layer
           # For neurons in output layer, use training error
           -training_error[neuron] * activation_derivative
         else
@@ -132,7 +132,7 @@ def update_gradients training_error
   # Now that we've calculated gradients for the batch, we can use these to update the weights
   # Using the RPROP algorithm - somewhat more complicated than the classic backpropagation algorithm, but much faster
   def update_weights
-    1.upto(@output_layer) do |layer|
+    1.upto(output_layer) do |layer|
       source_layer = layer - 1
       source_neurons = @shape[source_layer] + 1 # account for bias neuron
 
@@ -213,6 +213,10 @@ def build_matrix
     end
   end
 
+  def output_layer
+    @shape.length - 1
+  end
+
   def sigmoid x
     1 / (1 + Math::E**-x)
   end
@@ -230,4 +234,12 @@ def sign x
       x <=> 0 # returns 1 if positive, -1 if negative
     end
   end
+
+  def marshal_dump
+    [@shape, @weights, @weight_update_values]
+  end
+
+  def marshal_load array
+    @shape, @weights, @weight_update_values = array
+  end
 end
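
With the marshal_dump and marshal_load hooks added above, a trained network can be persisted and restored with Ruby's built-in Marshal, and the lazily initialized weights survive the round trip. A rough usage sketch, not part of this commit: the file name, network shape, and inputs are made up for illustration, and the class is assumed to live in neural_net.rb.

require_relative 'neural_net'

net = NeuralNet.new [2, 3, 1]   # hypothetical 2-input, 3-hidden, 1-output shape
net.run [0.5, 0.9]              # first run lazily sets @weights via set_initial_weight_values

File.binwrite 'net.dump', Marshal.dump(net)        # dumps [@shape, @weights, @weight_update_values]
restored = Marshal.load(File.binread('net.dump'))  # marshal_load repopulates those ivars
puts restored.run([0.5, 0.9]).inspect              # restored net runs without re-initializing weights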