-
Notifications
You must be signed in to change notification settings - Fork 41
/
Copy pathMain.py
107 lines (89 loc) · 3.61 KB
/
Main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import time
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
import GOA
import PSO
import sys
import settings
def inbuilt_algo(x_train, x_test, y_train, y_test):
    """Baseline: fit scikit-learn's MLPClassifier and print its test accuracy."""
    model = MLPClassifier(max_iter=10000)
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    print("Inbuilt", accuracy_score(y_test, predictions))
def get_dataset_ready(filename):
    """Load a CSV dataset, one-hot encode the labels, and split train/test.

    Parameters
    ----------
    filename : str
        Path to a header-less, comma-separated file whose last column is
        the class label; all preceding columns are features.

    Returns
    -------
    x_train, x_test, y_train, y_test : np.ndarray
        80/20 random split; y_* are one-hot encoded, shape (n, n_classes).

    Side effect: stores the class count in ``settings.no_of_classes``.
    """
    data = pd.read_csv(filename, sep=",", header=None).values
    X = data[:, :-1]
    Y = data[:, -1].reshape(-1, 1)
    unique_classes = np.unique(Y)
    settings.no_of_classes = len(unique_classes)
    print(settings.no_of_classes, "Classes.")
    # Map each label to a column index instead of assuming labels already
    # are the contiguous integers 0, 1, 2, ... — this also supports string
    # or non-contiguous numeric class labels, and is identical to the old
    # behavior when they are contiguous ints.
    class_to_index = {c: i for i, c in enumerate(unique_classes)}
    one_hot_labels = np.zeros((Y.shape[0], len(unique_classes)))
    for i in range(Y.shape[0]):
        one_hot_labels[i, class_to_index[Y[i, 0]]] = 1
    Y = one_hot_labels
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
    return x_train, x_test, y_train, y_test
def scale(x_train, x_test):
    """Standardize features: fit the scaler on the training split only,
    then apply the same transform to both splits."""
    scaler = StandardScaler().fit(x_train)
    return scaler.transform(x_train), scaler.transform(x_test)
def verify(x_test, y_test, optimal_solution):
    """Report the optimal solution found by the optimizer and evaluate it
    on the held-out test data (error, predictions, accuracy)."""
    best_error = optimal_solution[0]
    architecture = optimal_solution[1]
    weights = optimal_solution[2]
    print("\nOPTIMAL ERROR: \n", best_error)
    print("\nOPTIMAL ARCHITECTURE:\n", architecture)
    print("\nOPTIMAL WEIGHT MATRIX:\n", weights)
    # The first architecture component is the feature-selection mask.
    reduced_x_test = GOA.updated_X(x_test, architecture[0])
    output, error = PSO.generate_output_and_error(
        reduced_x_test, y_test, weights, architecture[3], architecture[4])
    print("\n*** RESULTS ON TESTING DATA ***")
    print("\nError:", error)
    print("Prediction:", output.argmax(axis=1))
    print("Accuracy:", accuracy_score(y_test.argmax(axis=1), output.argmax(axis=1)))
def save_log():
    """Tee sys.stdout so everything printed also lands in ``log.txt``.

    Replaces ``sys.stdout`` with a wrapper that forwards each write to the
    real stdout (flushing immediately) and mirrors it into the log file.
    """
    te = open('log.txt', 'w')  # File where you need to keep the logs

    class Unbuffered:
        def __init__(self, stream):
            self.stream = stream

        def write(self, data):
            self.stream.write(data)
            self.stream.flush()
            te.write(data)  # Write the data of stdout here to a text file as well

        def flush(self):
            # print(..., flush=True) and many libraries call
            # sys.stdout.flush(); without this method the wrapper raised
            # AttributeError. Also pushes buffered log data to disk.
            self.stream.flush()
            te.flush()

        def __getattr__(self, name):
            # Delegate anything else (encoding, isatty, ...) to the real stream.
            return getattr(self.stream, name)

    sys.stdout = Unbuffered(sys.stdout)
if __name__ == '__main__':
    # Mirror all console output into log.txt for the rest of the run.
    save_log()
    start_time = time.time()
    dataset_path = "datasets/iris.csv"
    print(dataset_path.split('/')[1])
    # `is 0` compared object identity, which is an implementation detail
    # for ints (SyntaxWarning on Python 3.8+) and wrong for a 0.0 float
    # weight; compare by value instead.
    if settings.arch_penalty_weight == 0:
        print("Running W/O PENALTY.")
    else:
        print("Running With PENALTY.")
    x_train, x_test, y_train, y_test = get_dataset_ready(dataset_path)
    print(len(x_test), "instances for testing.")
    x_train, x_test = scale(x_train, x_test)
    # GOA returns (accuracy, grasshopper, corresponding_weights).
    optimal_solution = GOA.algorithm(x_train, y_train)
    verify(x_test, y_test, optimal_solution)
    print("\nExecution Time:", time.time() - start_time)
    # Use sys.exit: the exit() builtin comes from site and may be absent.
    sys.exit(0)