Skip to content

Commit e528d33

Browse files
committed
Update RNN and LSTM model code
1 parent 2213c30 commit e528d33

File tree

5 files changed

+204
-0
lines changed

5 files changed

+204
-0
lines changed

lstm1hl_giang1_script.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
from sklearn.model_selection import ParameterGrid
from model.main.traditional_rnn import Lstm1HL
from utils.IOUtil import read_dataset_file
from utils.SettingPaper import lstm1hl_giang1_paras as param_grid
from utils.SettingPaper import giang1

rv_data = [giang1]
data_file = ["final"]
test_type = "normal"  # "normal": single run; "stability": repeat the run n times
run_times = None

if test_type == "normal":  # For normal test
    run_times = 1
    pathsave = "paper/results/test/"
    all_model_file_name = "log_models"
elif test_type == "stability":  # For stability test (n runs, same parameters)
    run_times = 15
    pathsave = "paper/results/stability/"
    all_model_file_name = "stability_lstm1hl"
else:
    # BUG FIX: the original fell through with `pass`, leaving run_times = None,
    # so range(run_times) below raised TypeError. Fail fast with a clear error.
    raise ValueError("Unknown test_type: {!r} (expected 'normal' or 'stability')".format(test_type))


def train_model(item):
    """Train one Lstm1HL configuration described by a ParameterGrid item.

    Relies on the module-level globals `dataset`, `requirement_variables`,
    `pathsave`, `all_model_file_name` and `test_type` set in the loop below.
    """
    root_base_paras = {
        "dataset": dataset,
        "data_idx": (0.33, 0.33, 0.33),  # train / valid / test split fractions
        "sliding": item["sliding_window"],
        "expand_function": item["expand_function"],
        "multi_output": requirement_variables[2],
        "output_idx": requirement_variables[3],
        "method_statistic": 0,  # 0: sliding window, 1: mean, 2: min-mean-max, 3: min-median-max
        "log_filename": all_model_file_name,
        "path_save_result": pathsave + requirement_variables[4],
        "test_type": test_type,
        "draw": True,
        "print_train": 0  # 0: nothing, else: full detail
    }
    root_rnn_paras = {
        "hidden_sizes": item["hidden_sizes"], "epoch": item["epoch"], "batch_size": item["batch_size"],
        "learning_rate": item["learning_rate"], "activations": item["activations"],
        "optimizer": item["optimizer"], "loss": item["loss"], "dropouts": item["dropouts"]
    }
    md = Lstm1HL(root_base_paras=root_base_paras, root_rnn_paras=root_rnn_paras)
    md._running__()


for _ in range(run_times):
    for loop in range(len(rv_data)):
        requirement_variables = rv_data[loop]
        filename = requirement_variables[0] + data_file[loop] + ".csv"
        dataset = read_dataset_file(filename, requirement_variables[1])
        # Train every combination of hyper-parameters on this dataset.
        for item in list(ParameterGrid(param_grid)):
            train_model(item)
55+
56+

model/main/traditional_rnn.py

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
from keras import backend
2+
from keras.models import Sequential
3+
from keras.layers import Dense, LSTM, Dropout
4+
from model.root.traditional.root_rnn import RootRnn
5+
6+
class Rnn1HL(RootRnn):
    """
    Recurrent Neural Network (1 Hidden Layer).

    One recurrent hidden layer + dropout + a single dense output unit.
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        # Filename encodes the full hyper-parameter set so result files from
        # different configurations never collide.
        self.filename = "RNN-1HL-sliding_{}-net_para_{}".format(
            root_base_paras["sliding"],
            [self.hidden_sizes, self.epoch, self.batch_size, self.learning_rate,
             self.activations, self.optimizer, self.loss, self.dropouts])

    def _training__(self):
        # The RNN architecture
        self.model = Sequential()
        # BUG FIX: the original added an LSTM layer here, which made this
        # "plain RNN" class build the same cell type as Lstm1HL. Use a
        # SimpleRNN cell so the class matches its documented purpose.
        self.model.add(SimpleRNN(units=self.hidden_sizes[0], activation=self.activations[0],
                                 input_shape=(self.X_train.shape[1], 1)))
        self.model.add(Dropout(self.dropouts[0]))
        self.model.add(Dense(units=1, activation=self.activations[1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TensorFlow at two intra-/inter-op threads for predictable CPU use.
        backend.set_session(backend.tf.Session(config=backend.tf.ConfigProto(
            intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)))
        ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch,
                            batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = ml.history["loss"]
25+
26+
27+
class Rnn2HL(RootRnn):
    """
    Recurrent Neural Network (2 Hidden Layers).

    Two stacked recurrent layers, each followed by dropout, then a single
    dense output unit.
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        # CONSISTENCY FIX: include self.dropouts in the filename like Rnn1HL
        # does — this architecture uses dropouts, and omitting them made two
        # configurations differing only in dropout overwrite each other's
        # result files.
        self.filename = "RNN-2HL-sliding_{}-net_para_{}".format(
            root_base_paras["sliding"],
            [self.hidden_sizes, self.epoch, self.batch_size, self.learning_rate,
             self.activations, self.optimizer, self.loss, self.dropouts])

    def _training__(self):
        # The RNN architecture
        self.model = Sequential()
        # BUG FIX: the original stacked LSTM layers here, duplicating Lstm2HL;
        # use SimpleRNN cells so the class matches its documented purpose.
        # return_sequences=True so the second recurrent layer receives the
        # full sequence, not just the last hidden state.
        self.model.add(SimpleRNN(units=self.hidden_sizes[0], return_sequences=True,
                                 input_shape=(self.X_train.shape[1], 1), activation=self.activations[0]))
        self.model.add(Dropout(self.dropouts[0]))
        self.model.add(SimpleRNN(units=self.hidden_sizes[1], activation=self.activations[1]))
        self.model.add(Dropout(self.dropouts[1]))
        self.model.add(Dense(units=1, activation=self.activations[2]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TensorFlow at two intra-/inter-op threads for predictable CPU use.
        backend.set_session(backend.tf.Session(config=backend.tf.ConfigProto(
            intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)))
        ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch,
                            batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = ml.history["loss"]
48+
49+
50+
51+
class Lstm1HL(RootRnn):
    """
    Long-short Term Memory Neural Network (1 Hidden Layer).
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        # Result filename encodes the hyper-parameter set of this run.
        net_para = [self.hidden_sizes, self.epoch, self.batch_size,
                    self.learning_rate, self.activations, self.optimizer, self.loss]
        self.filename = "LSTM-1HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], net_para)

    def _training__(self):
        # Build, compile and fit the single-hidden-layer LSTM.
        net = Sequential()
        net.add(LSTM(units=self.hidden_sizes[0], input_shape=(None, 1), activation=self.activations[0]))
        net.add(Dense(units=1, activation=self.activations[1]))
        net.compile(loss=self.loss, optimizer=self.optimizer)
        self.model = net
        # Cap TensorFlow at two intra-/inter-op threads.
        tf_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2,
                                           inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=tf_config))
        history = net.fit(self.X_train, self.y_train, epochs=self.epoch,
                          batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = history.history["loss"]
69+
70+
71+
class Lstm2HL(RootRnn):
    """
    Long-short Term Memory Neural Network (2 Hidden Layer).
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        # Result filename encodes the hyper-parameter set of this run.
        net_para = [self.hidden_sizes, self.epoch, self.batch_size,
                    self.learning_rate, self.activations, self.optimizer, self.loss]
        self.filename = "LSTM-2HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], net_para)

    def _training__(self):
        # Build, compile and fit the two-hidden-layer LSTM. The first layer
        # returns full sequences so the second LSTM receives them.
        net = Sequential()
        net.add(LSTM(units=self.hidden_sizes[0], return_sequences=True,
                     input_shape=(None, 1), activation=self.activations[0]))
        net.add(LSTM(units=self.hidden_sizes[1], activation=self.activations[1]))
        net.add(Dense(units=1, activation=self.activations[2]))
        net.compile(loss=self.loss, optimizer=self.optimizer)
        self.model = net
        # Cap TensorFlow at two intra-/inter-op threads.
        tf_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2,
                                           inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=tf_config))
        history = net.fit(self.X_train, self.y_train, epochs=self.epoch,
                          batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = history.history["loss"]
89+

model/root/root_base.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ def _preprocessing__(self):
3333
self.time_series = TimeSeries(self.dataset, self.data_idx, self.sliding, self.expand_function, self.output_idx, self.method_statistic)
3434
self.X_train, self.y_train, self.X_valid, self.y_valid, self.X_test, self.y_test = self.time_series._preprocessing_2d__()
3535

36+
def _preprocessing_3d__(self):
    # Build the TimeSeries helper and split the data into 3-D train /
    # valid / test arrays (used by the Keras recurrent models).
    ts = TimeSeries(self.dataset, self.data_idx, self.sliding,
                    self.expand_function, self.output_idx, self.method_statistic)
    self.time_series = ts
    (self.X_train, self.y_train,
     self.X_valid, self.y_valid,
     self.X_test, self.y_test) = ts._preprocessing_3d__()
3639

3740
def _save_results__(self, y_actual=None, y_predict=None, y_actual_normalized=None, y_predict_normalized=None, loss_train=None):
3841
if self.multi_output:

model/root/traditional/root_rnn.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
from model.root.root_base import RootBase
2+
import time
3+
4+
class RootRnn(RootBase):
    """Shared base for the Keras recurrent models (RNN/LSTM variants).

    Stores the network hyper-parameters, then drives the full pipeline:
    preprocessing, training, forecasting, timing and result saving.
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootBase.__init__(self, root_base_paras)
        net = root_rnn_paras
        self.hidden_sizes = net["hidden_sizes"]
        self.epoch = net["epoch"]
        self.batch_size = net["batch_size"]
        self.learning_rate = net["learning_rate"]
        self.activations = net["activations"]
        self.optimizer = net["optimizer"]
        self.loss = net["loss"]
        self.dropouts = net["dropouts"]

    def _forecasting__(self):
        # Evaluate the model on the test set; return actual and predicted
        # values in both the original and the normalized scale.
        y_pred = self.model.predict(self.X_test)
        scaler = self.time_series.minmax_scaler
        pred_inverse = scaler.inverse_transform(y_pred)
        real_inverse = scaler.inverse_transform(self.y_test)
        return real_inverse, pred_inverse, self.y_test, y_pred

    def _running__(self):
        # End-to-end run: preprocess -> train -> forecast -> save, while
        # recording wall-clock timings for each phase.
        t_system = time.time()
        self._preprocessing_3d__()

        t_train = time.time()
        self._training__()
        self.time_total_train = round(time.time() - t_train, 4)
        self.time_epoch = round(self.time_total_train / self.epoch, 4)

        t_predict = time.time()
        y_actual, y_predict, y_actual_normalized, y_predict_normalized = self._forecasting__()
        self.time_predict = round(time.time() - t_predict, 8)

        self.time_system = round(time.time() - t_system, 4)
        if self.test_type == "normal":
            self._save_results__(y_actual, y_predict, y_actual_normalized,
                                 y_predict_normalized, self.loss_train)
        elif self.test_type == "stability":
            self._save_results_ntimes_run__(y_actual, y_predict,
                                            y_actual_normalized, y_predict_normalized)
38+
39+
40+

utils/SettingPaper.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,22 @@
102102
"domain_range": [(-1, 1)] # lower and upper bound
103103
}
104104

105+
####: LSTM-1HL
# Hyper-parameter grid for the Lstm1HL runs; expanded into individual
# configurations by sklearn.model_selection.ParameterGrid in
# lstm1hl_giang1_script.py.
lstm1hl_giang1_paras = {
    # BUG FIX: the runner script reads item["sliding_window"] and
    # item["expand_function"]; the original grid named this key "sliding"
    # and omitted "expand_function" entirely, so every run raised KeyError.
    "sliding_window": [2, 3, 5, 10],
    "expand_function": [None],  # TODO(review): confirm intended expand_function values
    "hidden_sizes": [[5], [10]],
    "activations": [("elu", "elu")],  # 0: elu, 1:relu, 2:tanh, 3:sigmoid
    "learning_rate": [0.0001],
    "epoch": [1000],
    "batch_size": [128],
    "optimizer": ["adam"],  # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer
    "loss": ["mse"],
    "dropouts": [[0.2]]
}
117+
118+
119+
120+
105121

106122
#### : FL-GANN
107123
flgann_paras = {

0 commit comments

Comments
 (0)