Skip to content

Commit bce65a6

Browse files
committed
Merge pull request mnielsen#20 from CesiumLifeJacket/master
removed numpy.vectorize() wrappers
2 parents 84be581 + 8886ab7 commit bce65a6

File tree

2 files changed

+8
-16
lines changed

2 files changed

+8
-16
lines changed

src/network.py

+4-8
Original file line number · Diff line number · Diff line change
@@ -38,7 +38,7 @@ def __init__(self, sizes):
3838
def feedforward(self, a):
3939
"""Return the output of the network if ``a`` is input."""
4040
for b, w in zip(self.biases, self.weights):
41-
a = sigmoid_vec(np.dot(w, a)+b)
41+
a = sigmoid(np.dot(w, a)+b)
4242
return a
4343

4444
def SGD(self, training_data, epochs, mini_batch_size, eta,
@@ -96,11 +96,11 @@ def backprop(self, x, y):
9696
for b, w in zip(self.biases, self.weights):
9797
z = np.dot(w, activation)+b
9898
zs.append(z)
99-
activation = sigmoid_vec(z)
99+
activation = sigmoid(z)
100100
activations.append(activation)
101101
# backward pass
102102
delta = self.cost_derivative(activations[-1], y) * \
103-
sigmoid_prime_vec(zs[-1])
103+
sigmoid_prime(zs[-1])
104104
nabla_b[-1] = delta
105105
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
106106
# Note that the variable l in the loop below is used a little
@@ -111,7 +111,7 @@ def backprop(self, x, y):
111111
# that Python can use negative indices in lists.
112112
for l in xrange(2, self.num_layers):
113113
z = zs[-l]
114-
spv = sigmoid_prime_vec(z)
114+
spv = sigmoid_prime(z)
115115
delta = np.dot(self.weights[-l+1].transpose(), delta) * spv
116116
nabla_b[-l] = delta
117117
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
@@ -136,10 +136,6 @@ def sigmoid(z):
136136
"""The sigmoid function."""
137137
return 1.0/(1.0+np.exp(-z))
138138

139-
sigmoid_vec = np.vectorize(sigmoid)
140-
141139
def sigmoid_prime(z):
142140
"""Derivative of the sigmoid function."""
143141
return sigmoid(z)*(1-sigmoid(z))
144-
145-
sigmoid_prime_vec = np.vectorize(sigmoid_prime)

src/network2.py

+4-8
Original file line number · Diff line number · Diff line change
@@ -36,7 +36,7 @@ def fn(a, y):
3636
@staticmethod
3737
def delta(z, a, y):
3838
"""Return the error delta from the output layer."""
39-
return (a-y) * sigmoid_prime_vec(z)
39+
return (a-y) * sigmoid_prime(z)
4040

4141

4242
class CrossEntropyCost(object):
@@ -123,7 +123,7 @@ def large_weight_initializer(self):
123123
def feedforward(self, a):
124124
"""Return the output of the network if ``a`` is input."""
125125
for b, w in zip(self.biases, self.weights):
126-
a = sigmoid_vec(np.dot(w, a)+b)
126+
a = sigmoid(np.dot(w, a)+b)
127127
return a
128128

129129
def SGD(self, training_data, epochs, mini_batch_size, eta,
@@ -220,7 +220,7 @@ def backprop(self, x, y):
220220
for b, w in zip(self.biases, self.weights):
221221
z = np.dot(w, activation)+b
222222
zs.append(z)
223-
activation = sigmoid_vec(z)
223+
activation = sigmoid(z)
224224
activations.append(activation)
225225
# backward pass
226226
delta = (self.cost).delta(zs[-1], activations[-1], y)
@@ -234,7 +234,7 @@ def backprop(self, x, y):
234234
# that Python can use negative indices in lists.
235235
for l in xrange(2, self.num_layers):
236236
z = zs[-l]
237-
spv = sigmoid_prime_vec(z)
237+
spv = sigmoid_prime(z)
238238
delta = np.dot(self.weights[-l+1].transpose(), delta) * spv
239239
nabla_b[-l] = delta
240240
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
@@ -327,10 +327,6 @@ def sigmoid(z):
327327
"""The sigmoid function."""
328328
return 1.0/(1.0+np.exp(-z))
329329

330-
sigmoid_vec = np.vectorize(sigmoid)
331-
332330
def sigmoid_prime(z):
333331
"""Derivative of the sigmoid function."""
334332
return sigmoid(z)*(1-sigmoid(z))
335-
336-
sigmoid_prime_vec = np.vectorize(sigmoid_prime)

0 commit comments

Comments (0)