 9 |  9 | def main():
10 | 10 |     config = Config("config.txt")
11 | 11 |     lr = config.config["learning_rate"]
12 |    | -     X_train, y_train = read_file(config.config["training"])
13 |    | -     X_val, y_val = read_file(config.config["validation"])
14 |    | -     activation_functions = config.config["activations"]
15 |    | -     loss_type = config.config["loss_type"]
16 |    | -     l2_regularization_factor = config.config["L2_regularization"]
17 |    | -
18 |    | -     layers = config.config["layers"]
19 |    | -     layers.insert(0, X_train.shape[1])
20 |    | -
21 |    | -     if loss_type == "cross_entropy":
22 |    | -         n_classes = get_num_of_classes(y_train)
23 |    | -         activation_functions.append("softmax")
24 |    | -         layers.append(n_classes)
25 |    | -         y_train = one_hot(y_train, classes=n_classes)
26 |    | -         y_val = one_hot(y_val, classes=n_classes)
27 |    | -     else:
28 |    | -         activation_functions.append("linear")  # TODO: Typically linear, could also be relu.
29 |    | -         layers.append(1)
30 |    | -     network = Network(X_train, y_train, layers, loss_type, activation_functions,
31 |    | -                       lr, X_val=X_val, y_val=y_val, regularization_factor=l2_regularization_factor)
32 |    | -
33 |    | -     # X_data = np.array([[1, 1],
34 |    | -     #                    [1, 0],
35 |    | -     #                    [0, 1],
36 |    | -     #                    [0, 0]])
37 |    | -     # y_data = np.array([1, 1, 1, 0])
38 |    | -     # activation_functions = ["relu"]
39 |    | -     # layers = [2, 1]
40 |    | -     # network = Network(X_data, y_data, layers, "L2", activation_functions, lr=0.001)
   | 12 | +     # X_train, y_train = read_file(config.config["training"])
   | 13 | +     # X_val, y_val = read_file(config.config["validation"])
   | 14 | +     # activation_functions = config.config["activations"]
   | 15 | +     # loss_type = config.config["loss_type"]
   | 16 | +     # l2_regularization_factor = config.config["L2_regularization"]
   | 17 | +     #
   | 18 | +     # layers = config.config["layers"]
   | 19 | +     # layers.insert(0, X_train.shape[1])
   | 20 | +     #
   | 21 | +     # if loss_type == "cross_entropy":
   | 22 | +     #     n_classes = get_num_of_classes(y_train)
   | 23 | +     #     activation_functions.append("softmax")
   | 24 | +     #     layers.append(n_classes)
   | 25 | +     #     y_train = one_hot(y_train, classes=n_classes)
   | 26 | +     #     y_val = one_hot(y_val, classes=n_classes)
   | 27 | +     # else:
   | 28 | +     #     activation_functions.append("linear")  # TODO: Typically linear, could also be relu.
   | 29 | +     #     layers.append(1)
   | 30 | +     # network = Network(X_train, y_train, layers, loss_type, activation_functions,
   | 31 | +     #                   lr, X_val=X_val, y_val=y_val, regularization_factor=l2_regularization_factor)
   | 32 | +
   | 33 | +     X_data = np.array([[1, 1],
   | 34 | +                        [1, 0],
   | 35 | +                        [0, 1],
   | 36 | +                        [0, 0]])
   | 37 | +     y_data = np.array([0, 1, 1, 0])
   | 38 | +     activation_functions = ["tanh", "tanh", "linear"]
   | 39 | +     layers = [2, 5, 5, 1]
   | 40 | +     network = Network(X_data, y_data, layers, "L2", activation_functions, lr=0.1)
41 | 41 |
42 | 42 |     # X_data = np.array([[0.8, 0.7, 0.2, 1],
43 | 43 |     #                    [0, 1, 1, 0],
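For context on the cross-entropy branch that this change comments out: it leans on two helpers, get_num_of_classes and one_hot, whose implementations are not part of this diff. The sketch below shows how such helpers are commonly written; treat it as an assumption about their behaviour, not a copy of the repo's actual code.

import numpy as np

def get_num_of_classes(y):
    # Count the distinct integer labels, e.g. [0, 2, 1, 2] -> 3.
    return int(np.unique(y).size)

def one_hot(y, classes):
    # Map integer labels to one-hot rows, e.g. label 1 with classes=3 -> [0, 1, 0].
    encoded = np.zeros((len(y), classes))
    encoded[np.arange(len(y)), np.asarray(y, dtype=int)] = 1
    return encoded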
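The new hard-coded block swaps the config-driven setup for a classic XOR sanity check: a 2-5-5-1 network with tanh hidden layers, a linear output and a squared-error ("L2") loss, on targets a purely linear model cannot fit. Below is a minimal, self-contained numpy sketch of the same experiment, written independently of this repo's Network class; the variable names and training loop are illustrative assumptions, not the project's API.

import numpy as np

rng = np.random.default_rng(0)

X = np.array([[1, 1], [1, 0], [0, 1], [0, 0]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)  # XOR targets, as in the diff

sizes = [2, 5, 5, 1]                      # same layer sizes as the new code
W = [rng.normal(scale=0.5, size=(sizes[i], sizes[i + 1])) for i in range(3)]
b = [np.zeros((1, sizes[i + 1])) for i in range(3)]
lr = 0.1                                  # same learning rate as the diff

for _ in range(20000):
    # Forward pass: tanh on both hidden layers, linear output.
    a1 = np.tanh(X @ W[0] + b[0])
    a2 = np.tanh(a1 @ W[1] + b[1])
    out = a2 @ W[2] + b[2]

    # Gradient of the mean squared-error loss w.r.t. the linear output.
    g = (out - y) / len(X)

    # Backpropagate through the linear layer and the two tanh layers.
    dW2, db2 = a2.T @ g, g.sum(0, keepdims=True)
    g = (g @ W[2].T) * (1 - a2 ** 2)
    dW1, db1 = a1.T @ g, g.sum(0, keepdims=True)
    g = (g @ W[1].T) * (1 - a1 ** 2)
    dW0, db0 = X.T @ g, g.sum(0, keepdims=True)

    # Plain full-batch gradient descent.
    for p, d in zip(W + b, [dW0, dW1, dW2, db0, db1, db2]):
        p -= lr * d

print(np.round(out, 2))  # should end up close to [[0], [1], [1], [0]]

The point of the check is that XOR is not linearly separable, so a run that converges here exercises the hidden layers and the backpropagation path end to end before the config-driven pipeline is re-enabled.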