loss.rb
require 'backprop/perceptron'

include BackProp

num_inputs = 4
num_examples = 10
net_structure = [4, 4, 1]
gradient_step = 0.1
iterations = 999

# afn = [:tanh, :sigmoid, :relu].sample
afn = :tanh # seems to work better
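# (tanh squashes each neuron's output into (-1, 1); sigmoid into (0, 1);
#  relu zeroes out negative activations)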

# binary classifier; *num_examples* sets of inputs that map to 1 or 0
inputs = BackProp.rand_inputs(num_inputs, num_examples, (-1.0..1.0))
outputs = BackProp.rand_outputs(num_examples, 0.0..1.0)
predictions = []
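
# assumption: net_structure [4, 4, 1] means two hidden layers of 4 neurons
# each, feeding a single output neuron (one prediction per input vector)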
n = MLP.new(num_inputs, net_structure, activation: afn)
puts "Training Cases:"
inputs.each.with_index { |input, i|
puts format("%s = %s", input.join(', '), outputs[i].value.round(3))
}
puts
puts "Neural Net:"
puts n
puts
puts "Press Enter to continue"
gets

iterations.times { |i|
  # 1. apply inputs to the net to yield predictions
  # 2. calculate the loss
  # 3. backward propagate the gradients
  # 4. adjust every neuron in the direction of minimizing loss

  # 1. apply inputs
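  # (assumption: n.apply returns one Value per neuron in the final layer;
  #  with a final layer of size 1, .first picks out the single prediction)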
  predictions = inputs.map { |input| n.apply(input).first }

  # 2. calculate loss
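  # mean squared error over the num_examples training cases, roughly:
  #   loss = (1/N) * sum((outputs[i] - predictions[i])**2)
  # (exact normalization depends on BackProp.mean_squared_error)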
  loss = BackProp.mean_squared_error(outputs, predictions)
  puts loss

  # 3. propagate the derivatives (gradients) backwards
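  # (assumption: backward walks the expression graph from the loss Value,
  #  applying the chain rule to fill in every upstream Value's gradient)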
  loss.backward

  # output every so often
  if i % 100 == 0
    p outputs.map { |f| f.value.round(3) }
    p predictions.map { |f| f.value.round(3) }
    puts
    p n
    gets
  end

  # 4. adjust all weights and biases towards minimizing loss function
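  # (assumption: descend moves each weight and bias a small step,
  #  -gradient_step * gradient, in the direction that reduces the loss)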
  n.descend(gradient_step)
}

p outputs.map(&:value)
p predictions.map { |f| f.value.round(3) }
puts n
p n