
Commit b30dbd1

fix some minor bugs
1 parent 32df45d commit b30dbd1

File tree: 3 files changed, +41 -39 lines changed


assignment1/activation_funcs.py

Lines changed: 10 additions & 8 deletions
@@ -3,14 +3,15 @@
 
 
 def relu(value, derivate=False):
+    new = np.copy(value)
     if derivate:
         # return np.sign(value)
-        value[value <= 0] = 0
-        value[value > 0] = 1
-        return value
-        # return 1 if value > 0 else 0
+        new[new <= 0] = 0
+        new[new > 0] = 1
+        return new
+        # return 1 if new > 0 else 0
     else:
-        return np.maximum(0, value)
+        return np.maximum(0, new)
 
 
 def tanh(x, derivate=False):
@@ -28,7 +29,8 @@ def softmax(value, derivate=False):
 
 
 def linear(value, derivate=False):
+    new = np.copy(value)
     if derivate:
-        value.fill(1)
-        return value
-    return value
+        new.fill(1)
+        return new
+    return new
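
Note on this change: relu and linear previously modified the value array in place when derivate=True, which can corrupt data the caller still needs (for example the stored pre-activations that are fed back into the derivative during backpropagation). Copying into new first keeps the input intact. A minimal standalone sketch of the fixed behaviour (illustrative only, not the repository file):

import numpy as np

def relu(value, derivate=False):
    # Work on a copy so the caller's array is never mutated in place.
    new = np.copy(value)
    if derivate:
        new[new <= 0] = 0
        new[new > 0] = 1
        return new
    return np.maximum(0, new)

z = np.array([-2.0, 0.5, 3.0])
print(relu(z, derivate=True))  # [0. 1. 1.]
print(z)                       # unchanged: [-2.   0.5  3. ]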

assignment1/main.py

Lines changed: 29 additions & 29 deletions
@@ -9,35 +9,35 @@
 def main():
     config = Config("config.txt")
     lr = config.config["learning_rate"]
-    X_train, y_train = read_file(config.config["training"])
-    X_val, y_val = read_file(config.config["validation"])
-    activation_functions = config.config["activations"]
-    loss_type = config.config["loss_type"]
-    l2_regularization_factor = config.config["L2_regularization"]
-
-    layers = config.config["layers"]
-    layers.insert(0, X_train.shape[1])
-
-    if loss_type == "cross_entropy":
-        n_classes = get_num_of_classes(y_train)
-        activation_functions.append("softmax")
-        layers.append(n_classes)
-        y_train = one_hot(y_train, classes=n_classes)
-        y_val = one_hot(y_val, classes=n_classes)
-    else:
-        activation_functions.append("linear")  # TODO: Typically linear, but can also be relu.
-        layers.append(1)
-    network = Network(X_train, y_train, layers, loss_type, activation_functions,
-                      lr, X_val=X_val, y_val=y_val, regularization_factor=l2_regularization_factor)
-
-    # X_data = np.array([[1, 1],
-    #                    [1, 0],
-    #                    [0, 1],
-    #                    [0, 0]])
-    # y_data = np.array([1, 1, 1, 0])
-    # activation_functions = ["relu"]
-    # layers = [2, 1]
-    # network = Network(X_data, y_data, layers, "L2", activation_functions, lr=0.001)
+    # X_train, y_train = read_file(config.config["training"])
+    # X_val, y_val = read_file(config.config["validation"])
+    # activation_functions = config.config["activations"]
+    # loss_type = config.config["loss_type"]
+    # l2_regularization_factor = config.config["L2_regularization"]
+    #
+    # layers = config.config["layers"]
+    # layers.insert(0, X_train.shape[1])
+    #
+    # if loss_type == "cross_entropy":
+    #     n_classes = get_num_of_classes(y_train)
+    #     activation_functions.append("softmax")
+    #     layers.append(n_classes)
+    #     y_train = one_hot(y_train, classes=n_classes)
+    #     y_val = one_hot(y_val, classes=n_classes)
+    # else:
+    #     activation_functions.append("linear")  # TODO: Typically linear, but can also be relu.
+    #     layers.append(1)
+    # network = Network(X_train, y_train, layers, loss_type, activation_functions,
+    #                   lr, X_val=X_val, y_val=y_val, regularization_factor=l2_regularization_factor)
+
+    X_data = np.array([[1, 1],
+                       [1, 0],
+                       [0, 1],
+                       [0, 0]])
+    y_data = np.array([0, 1, 1, 0])
+    activation_functions = ["tanh", "tanh", "linear"]
+    layers = [2, 5, 5, 1]
+    network = Network(X_data, y_data, layers, "L2", activation_functions, lr=0.1)
 
     # X_data = np.array([[0.8, 0.7, 0.2, 1],
     #                    [0, 1, 1, 0],
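
This hunk comments out the config-driven setup and activates a hard-coded XOR sanity check: layers = [2, 5, 5, 1] with tanh on the two hidden layers, a linear output, and the L2 loss; the earlier commented-out toy example used OR targets [1, 1, 1, 0], while the active code uses the XOR targets [0, 1, 1, 0]. A standalone sketch of the shapes this topology implies (random weights, plain NumPy, not the repository's Network class):

import numpy as np

rng = np.random.default_rng(0)
X_data = np.array([[1, 1], [1, 0], [0, 1], [0, 0]], dtype=float)

# layers = [2, 5, 5, 1]  ->  weight shapes (2, 5), (5, 5), (5, 1)
sizes = [2, 5, 5, 1]
weights = [rng.normal(scale=0.5, size=(m, n)) for m, n in zip(sizes[:-1], sizes[1:])]
biases = [np.zeros(n) for n in sizes[1:]]

a = X_data
for i, (W, b) in enumerate(zip(weights, biases)):
    z = a @ W + b
    # tanh on the hidden layers, linear (identity) on the output layer
    a = np.tanh(z) if i < len(weights) - 1 else z

print(a.shape)  # (4, 1): one prediction per XOR input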

assignment1/network.py

Lines changed: 2 additions & 2 deletions
@@ -151,7 +151,7 @@ def train(self):
             if y.shape:  # if the y is an array and not just a single number.
                 y = y.reshape(y.shape[0], 1)
             activations, zs = self.feed_forward(x)
-            loss = self.back_propagation(activations, y, zs, learning_rate=self.lr)
+            loss = self.back_propagation(activations, y, zs, learning_rate=self.lr).squeeze()
             total_loss += loss
 
         training_loss = total_loss / len(self.X_train)
@@ -165,7 +165,7 @@ def train(self):
             if y.shape:  # if the y is an array and not just a single number.
                 y = y.reshape(y.shape[0], 1)
             activations, zs = self.feed_forward(x)
-            loss = self.get_loss(self.layers[-1], y, activations[-1])
+            loss = self.get_loss(self.layers[-1], y, activations[-1]).squeeze()
             total_val_loss += loss
         val_loss = total_val_loss / len(self.X_val)
         val_losses.append(val_loss)
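
The only change in network.py is appending .squeeze() to the per-sample loss. Presumably the loss comes back as a singleton NumPy array (e.g. shape (1, 1)); accumulating that into total_loss would silently turn the running total into an array as well. squeeze() drops the singleton dimensions so the total stays scalar-shaped. A quick standalone illustration (not repository code):

import numpy as np

loss = np.array([[0.25]])                  # e.g. a per-sample L2 loss kept as a (1, 1) array
total_loss = 0.0
total_loss += loss.squeeze()               # 0-d after squeeze, so the running sum stays scalar-shaped
print(total_loss, np.shape(total_loss))    # 0.25 ()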
