# dataprocessing.py
import pandas as pd
import seaborn as sns
from joblib import dump
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier

# Silence pandas chained-assignment warnings triggered by the in-place normalization below
pd.options.mode.chained_assignment = None
df = pd.read_csv("landmarks.csv")
labels = pd.read_csv("labels.csv")
# This line was added after running the normalize_dataframe function below
normalized_landmarks = pd.read_csv("normalized_landmarks.csv")
def normalize_dataset(dataSubSet: pd.DataFrame) -> pd.DataFrame:
    for index, row in dataSubSet.iterrows():
        toProces = row[:-1]
        minValue = toProces.min()
        # Make all the values in the row positive and set the minimum value to zero
        toProces -= minValue
        maxValue = toProces.max()
        normalized_row = toProces / maxValue
        # print(normalized_row)
        dataSubSet.loc[index, dataSubSet.columns[:-1]] = normalized_row
    return dataSubSet
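
# The loop above normalizes each row one at a time. As an illustrative sketch only
# (not called anywhere in this script), the same per-row min-max scaling can be
# expressed with vectorized pandas operations; the helper name and the assumption
# that pose_id is the last column mirror normalize_dataset but are otherwise hypothetical.
def normalize_dataset_vectorized(dataSubSet: pd.DataFrame) -> pd.DataFrame:
    values = dataSubSet.iloc[:, :-1]
    # Shift each row so its minimum becomes zero, then divide by the row maximum
    shifted = values.sub(values.min(axis=1), axis=0)
    normalized = shifted.div(shifted.max(axis=1), axis=0)
    result = dataSubSet.copy()
    result.iloc[:, :-1] = normalized
    return result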
# Normalization of the points should be done separately for the x, y, and z values
# Normalize points here
def normalize_dataframe(dataAsDF: pd.DataFrame, extract=False) -> pd.DataFrame:
    xValues = dataAsDF[dataAsDF.columns[dataAsDF.columns.str.startswith("x")].append(
        dataAsDF.columns[dataAsDF.columns.str.startswith("pose_id")]
    )]
    xNormalized = normalize_dataset(xValues)
    yValues = dataAsDF[dataAsDF.columns[dataAsDF.columns.str.startswith("y")].append(
        dataAsDF.columns[dataAsDF.columns.str.startswith("pose_id")]
    )]
    yNormalized = normalize_dataset(yValues)
    zValues = dataAsDF[dataAsDF.columns[dataAsDF.columns.str.startswith("z")].append(
        dataAsDF.columns[dataAsDF.columns.str.startswith("pose_id")]
    )]
    zNormalised = normalize_dataset(zValues)
    normalised_landmarks = pd.merge(pd.merge(xNormalized, yNormalized, on='pose_id'), zNormalised, on='pose_id')
    if extract:
        normalised_landmarks.to_csv("normalized_landmarks.csv")
    return normalised_landmarks
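
# Illustrative sanity check, not part of the original pipeline: confirms that every
# normalized coordinate column lies in [0, 1] after normalize_dataframe has run.
# The helper name is hypothetical; it only assumes the x*/y*/z* column prefixes
# already used above.
def check_normalization(normalized_df: pd.DataFrame) -> bool:
    coord_cols = [c for c in normalized_df.columns if c.startswith(("x", "y", "z"))]
    coords = normalized_df[coord_cols]
    # Every value should fall inside the [0, 1] range produced by min-max scaling
    return bool(((coords >= 0) & (coords <= 1)).all().all())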
def train_modal(processed_df: pd.DataFrame) -> MLPClassifier:
    X_train, X_test, y_train, y_test = train_test_split(processed_df, labels["pose"], test_size=0.07)
    # For the lbfgs and adam solvers the accuracy results are close to each other;
    # for the sgd solver the accuracy is really low
    modal = MLPClassifier(max_iter=500, solver="lbfgs", hidden_layer_sizes=(80,))
    modal.fit(X_train, y_train)
    predictions = modal.predict(X_test)
    print(
        accuracy_score(y_test, predictions),
        "\n",
        confusion_matrix(y_test, predictions)
    )
    cm = confusion_matrix(y_test, predictions, labels=modal.classes_)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm,
                                  display_labels=modal.classes_)
    disp.plot()
    plt.savefig('confusion_matrix.png')
    plt.show()
    return modal
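
# Hedged sketch of the solver comparison described in the comments inside train_modal;
# the helper is hypothetical and not called below, and the split ratio and layer size
# simply mirror train_modal rather than the author's exact experiment.
def compare_solvers(processed_df: pd.DataFrame) -> None:
    X_train, X_test, y_train, y_test = train_test_split(
        processed_df, labels["pose"], test_size=0.07
    )
    for solver in ("lbfgs", "adam", "sgd"):
        clf = MLPClassifier(max_iter=500, solver=solver, hidden_layer_sizes=(80,))
        clf.fit(X_train, y_train)
        # Print the held-out accuracy for each solver side by side
        print(solver, accuracy_score(y_test, clf.predict(X_test)))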
def train_knn_modal(processed_df: pd.DataFrame) -> KNeighborsClassifier:
    X_train, X_test, y_train, y_test = train_test_split(processed_df, labels["pose"], test_size=0.1)
    neigh = KNeighborsClassifier(n_neighbors=3)
    neigh.fit(X_train, y_train)
    predictions = neigh.predict(X_test)
    # The accuracy was near perfect, which seemed too good to be true, so the
    # predictions are written out to knn_accuracy.csv for a manual check
    merged_knn_data = pd.DataFrame({
        'y_test': y_test,
        'predictions': predictions
    })
    merged_knn_data.to_csv("knn_accuracy.csv")
    print(merged_knn_data)
    print(
        accuracy_score(y_test, predictions)
    )
    return neigh
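
# The near-perfect KNN accuracy above was checked manually via knn_accuracy.csv.
# As an additional, purely illustrative check (hypothetical helper, not called in
# this script), k-fold cross-validation gives accuracy estimates that do not depend
# on a single train/test split.
def cross_validate_knn(processed_df: pd.DataFrame, folds: int = 5) -> None:
    from sklearn.model_selection import cross_val_score
    scores = cross_val_score(
        KNeighborsClassifier(n_neighbors=3), processed_df, labels["pose"], cv=folds
    )
    print("KNN cross-validation scores:", scores, "mean:", scores.mean())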
def pickle_modal(modal_to_pickle):
    # Persist the trained model with joblib so it can be loaded later
    dump(modal_to_pickle, "model.joblib")
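
# Counterpart sketch to pickle_modal: loads model.joblib back and predicts on new
# feature rows. Illustrative only and not called here; it assumes the features use
# the same columns (and column order) as the data the model was trained on.
def load_and_predict(features: pd.DataFrame, path: str = "model.joblib"):
    from joblib import load
    modal = load(path)
    return modal.predict(features)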
# The data was normalized and extracted to the file normalized_landmarks.csv
### CALL NORMALIZE_DATAFRAME HERE ###
# normalize_dataframe(df, extract=True)
# Out of curiosity I trained a KNN model too
# train_knn_modal(normalized_landmarks)
### Train modal and pickle it for later use ###
normalized_landmarks = normalized_landmarks.drop(columns=["pose_id", "Unnamed: 0"])
modal_trained = train_modal(normalized_landmarks)
pickle_modal(modal_trained)
"""
Readings of the pickled modal
Accuracy:
0.8762886597938144
Confusion Matrix:
[[16 0 0 0 1 0 0 0 1 0]
[ 0 7 1 0 0 0 0 0 0 0]
[ 0 0 16 0 0 0 0 1 0 0]
[ 0 0 0 5 0 0 0 1 0 2]
[ 0 0 0 0 5 1 0 0 0 0]
[ 0 0 0 0 1 10 1 0 0 0]
[ 0 0 0 0 0 0 8 0 0 0]
[ 0 0 0 0 0 0 0 6 0 0]
[ 0 0 0 0 0 0 0 1 7 0]
[ 1 0 0 0 0 0 0 0 0 5]]
"""