
Commit

add ultimate models
ale3otik committed Dec 3, 2017
1 parent 4f0eea3 commit 5d1ccda
Showing 3 changed files with 461 additions and 0 deletions.
122 changes: 122 additions & 0 deletions keras_lstm.py
@@ -0,0 +1,122 @@
# import numpy as np
# import tensorflow as tf
# from keras import backend as K
# from keras import optimizers, regularizers
# from keras.layers import Dense, Activation, Input, LSTM, LSTMCell, RNN, Dropout, multiply, Lambda
# from keras.models import Model, Sequential
# from keras.activations import sigmoid, linear, tanh
# from keras.engine.topology import Layer
# from keras.losses import mean_squared_error

# class PhasedLSTMCell(Layer):
#     """Wraps a phased-LSTM cell so that the last input column is treated as the timestamp."""
#
#     def __init__(self, units, cell, **kwargs):
#         self.units = units
#         self.state_size = units
#         self.cell = cell
#         super(PhasedLSTMCell, self).__init__(**kwargs)
#
#     def build(self, input_shape):
#         self.built = True
#
#     def call(self, inputs, states):
#         # split the last column off as the timestamp channel: (features, times)
#         tuple_inputs = tf.split(inputs, (inputs.shape[-1] - 1, 1), axis=-1)
#         return self.cell(tuple_inputs, states)

# class MyLayer(Layer):
#     """Keras layer that runs tf.contrib.rnn.PhasedLSTMCell over (features, times) via dynamic_rnn."""
#
#     def __init__(self, lstm_units=50, max_sequence_length=100):
#         self.lstm_units = lstm_units
#         self.max_sequence_length = max_sequence_length
#         self.cell = tf.contrib.rnn.PhasedLSTMCell(num_units=self.lstm_units)
#         super(MyLayer, self).__init__()
#
#     def build(self, input_shape):
#         self.shape = input_shape
#         super(MyLayer, self).build(input_shape)
#
#     def call(self, inputs, **kwargs):
#         # the last input column carries the timestamps expected by the phased LSTM
#         features, times = tf.split(inputs, [inputs.get_shape().as_list()[-1] - 1, 1], axis=-1)
#         print('feature', features.shape)
#         print('times', times.shape)
#         lstm_layer, state = tf.nn.dynamic_rnn(self.cell, inputs=(features, times), dtype=tf.float32)
#         print('lstm', lstm_layer.shape)
#         return lstm_layer
#
#     def compute_output_shape(self, input_shape):
#         return (None, self.max_sequence_length, self.cell.output_size)

# def keras_phased_lstm_model(max_sequence_length=None, input_shape=None, lstm_units=50):
#     input = Input(shape=(max_sequence_length, input_shape))
#     timeseries = Input(shape=(max_sequence_length, 1))
#     # timestamps = Input(shape=(max_sequence_length, 1))
#     # cell = tf.contrib.rnn.LSTMCell(num_units=lstm_units)
#     print(input)
#     # cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_units, activation=tf.tanh)
#     # lstm_layer, state = tf.nn.dynamic_rnn(cell, inputs=input, dtype=tf.float32)
#     lstm_layer = MyLayer(50, max_sequence_length)(tf.concat([input, timeseries], axis=-1))
#     dense = Dense(1, input_shape=(max_sequence_length, lstm_units))(lstm_layer)
#     print(dense.shape)
#     model = Model([input, timeseries], dense)
#     optimizer = optimizers.Adam()
#     model.compile(loss='mean_squared_error', optimizer=optimizer)
#     return model

# def keras_lstm_model_1(max_sequence_length=None, input_shape=None, lstm_units=100, eps_reg=1e-2):
#     input = Input(shape=(max_sequence_length, input_shape))
#     # an LSTM layer needs two initial-state tensors (hidden and cell), each of shape (lstm_units,)
#     state_h_input = Input(shape=(lstm_units,))
#     state_c_input = Input(shape=(lstm_units,))
#
#     cell = LSTM(units=lstm_units,
#                 return_sequences=True,
#                 return_state=True)
#
#     # with return_state=True the layer returns the output sequence plus the final h and c states
#     lstm_layer, state_h, state_c = cell(input, initial_state=[state_h_input, state_c_input])
#     dense = Dense(1, input_shape=(max_sequence_length, lstm_units),
#                   kernel_initializer='normal',
#                   kernel_regularizer=regularizers.l2(eps_reg),
#                   activation=linear)(lstm_layer)
#
#     model = Model([input, state_h_input, state_c_input], [dense, state_h, state_c])
#     optimizer = optimizers.Adam(1e-3, clipvalue=10.0)
#     model.compile(loss='mean_squared_error', optimizer=optimizer)
#     return model

# def myloss(y_true, y_pred):
#     print('true', y_true.shape)
#     print('pr', y_pred.shape)
#     return mean_squared_error(y_true, y_pred)
#
# def fake_loss(y_true, y_pred):
#     # always evaluates to zero, so any output trained with this loss is effectively ignored
#     return mean_squared_error(0 * y_true, 0 * y_pred)

# def reccurent_model(input_shape, lstm_units=120, eps_reg=1e-2):
#     input = Input((None, input_shape))
#     input_state1 = Input((lstm_units,))
#     input_state2 = Input((lstm_units,))
#
#     cell = LSTMCell(units=lstm_units, activation=tanh)
#
#     layer = RNN(cell, return_sequences=True, return_state=True, name='rnn')
#     print(layer.cell.state_size)
#
#     outputs, states1, states2 = layer(input, initial_state=(input_state1, input_state2))
#     print(outputs.shape)
#     print(states1.shape)
#     print(states2.shape)
#     densed = Dense(1, input_shape=(None, lstm_units),
#                    kernel_initializer='normal',
#                    kernel_regularizer=regularizers.l2(eps_reg),
#                    activation=linear)(outputs)
#
#     model = Model(inputs=[input, input_state1, input_state2],
#                   outputs=densed)
#     optimizer = optimizers.Adam(1e-3, clipvalue=10.0)
#     model.compile(loss='mean_squared_error', optimizer=optimizer)
#     return model
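#
# A minimal, hypothetical usage sketch (not part of the committed file): it assumes the
# reccurent_model above and random data, and only illustrates the expected input shapes,
# i.e. a feature sequence plus one hidden-state and one cell-state tensor per batch element.
#
# if __name__ == '__main__':
#     lstm_units = 120
#     model = reccurent_model(input_shape=80, lstm_units=lstm_units)
#     batch, timesteps = 4, 50
#     x = np.random.rand(batch, timesteps, 80).astype(np.float32)
#     h0 = np.zeros((batch, lstm_units), dtype=np.float32)  # initial hidden state
#     c0 = np.zeros((batch, lstm_units), dtype=np.float32)  # initial cell state
#     pred = model.predict([x, h0, c0])                      # -> shape (batch, timesteps, 1)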
120 changes: 120 additions & 0 deletions predict_online.py
@@ -0,0 +1,120 @@
#!/usr/bin/python

from __future__ import print_function # for python 2 compatibility
import hackathon_protocol
import os
import pandas as pd
from feature_generator import count_all_features
import numpy as np
from keras.models import load_model
import serg_model

USERNAME = "Andrei_potishe_pliz"
PASSWORD = "antoha322"

CONNECT_IP = os.environ.get("HACKATHON_CONNECT_IP") or "127.0.0.1"
CONNECT_PORT = int(os.environ.get("HACKATHON_CONNECT_PORT") or 12345)


class MyClient(hackathon_protocol.Client):
    def __init__(self, sock):
        super(MyClient, self).__init__(sock)
        # print('on_init')
        self.counter = 0
        self.target_instrument = 'TEA'
        self.send_login(USERNAME, PASSWORD)
        self.last_raw = None
        self.last_tea = None
        self.last_cofee = None
        self.buffer_main_tea = pd.DataFrame()
        self.buffer_with_features_tea = pd.DataFrame()
        self.buffer_main_coffee = pd.DataFrame()
        self.buffer_with_features_coffee = pd.DataFrame()

        # LSTM hidden (h) and cell (c) state carried between consecutive predictions
        self.h = np.array([np.zeros(200, dtype=np.float32)])
        self.c = np.array([np.zeros(200, dtype=np.float32)])

        # Load pre-trained model previously created by create_model.ipynb
        self.model = serg_model.reccurent_model(input_shape=80)
        # self.model = keras_ls .reccurent_model(input_shape=262)
        self.model.load_weights('weights.013-0.029.hdf5')

        print(self.model.summary())

        self.win_size = 50

    def on_header(self, csv_header):
        # print('onheader')
        self.header = {column_name: n for n, column_name in enumerate(csv_header)}
        self.columns = csv_header[2:]
        # print(self.columns)
        # print(len(self.columns))
        # print(csv_header)
        # print("Header:", self.header)

    def on_orderbook(self, cvs_line_values):
        if cvs_line_values[0] == 'COFFEE':
            self.last_cofee = np.array(cvs_line_values[2:], dtype=np.float32)
            # cvs_line_values = cvs_line_values[2:]
            # self.buffer_main_coffee = self.buffer_main_coffee.append(
            #     pd.DataFrame(np.array([np.array(cvs_line_values)]), columns=self.columns), ignore_index=True)

            # if self.buffer_main_coffee.shape[0] >= self.win_size:
            #     # features = count_all_features(self.buffer_main_coffee.iloc[-self.win_size:])
            #     to_append = pd.DataFrame(np.concatenate([np.array(cvs_line_values), np.array([])])).T

            #     self.buffer_with_features_coffee = self.buffer_with_features_coffee.append(to_append, ignore_index=True)
            #     if self.buffer_with_features_coffee.shape[0] > self.win_size:
            #         self.buffer_with_features_coffee = self.buffer_with_features_coffee.iloc[-self.win_size:]

        if cvs_line_values[0] == 'TEA':
            self.last_tea = np.array(cvs_line_values[2:], dtype=np.float32)
            # cvs_line_values = cvs_line_values[2:]
            # self.buffer_main_tea = self.buffer_main_tea.append(
            #     pd.DataFrame(np.array([np.array(cvs_line_values)]), columns=self.columns), ignore_index=True)

            # # if self.buffer_main_tea.shape[0] >= self.win_size:
            # #     features = count_all_features(self.buffer_main_tea.iloc[-self.win_size:])
            # #     to_append = pd.DataFrame(np.concatenate([np.array(cvs_line_values), features])).T
            # if self.buffer_main_tea.shape[0] >= self.win_size:
            #     # features = count_all_features(self.buffer_main_tea.iloc[-self.win_size:])
            #     to_append = pd.DataFrame(np.concatenate([np.array(cvs_line_values), np.array([])])).T

            #     self.buffer_with_features_tea = self.buffer_with_features_tea.append(to_append, ignore_index=True)
            #     if self.buffer_with_features_tea.shape[0] > self.win_size:
            #         self.buffer_with_features_tea = self.buffer_with_features_tea.iloc[-self.win_size:]

    def make_prediction(self):
        # print('make_prediction')
        # input = np.concatenate([self.buffer_with_features_tea, self.buffer_with_features_coffee], axis=-1)
        input = np.concatenate([self.last_tea, self.last_cofee])
        # reshape to (batch=1, timesteps=1, features) so the model consumes one tick at a time
        input = input.reshape((1, 1, input.shape[0]))
        # the model returns the prediction sequence plus updated h and c states,
        # which are fed back in on the next call
        prediction, self.h, self.c = self.model.predict([input, self.h, self.c])

        # prediction has shape (1, timesteps, 1); take the last timestep as the volatility estimate
        answer = prediction[0, :, 0][-1]
        self.send_volatility(float(answer))

    def on_score(self, items_processed, time_elapsed, score_value):
        print('on_score')
        print("Completed! items processed: %d, time elapsed: %.3f sec, score: %.6f" % (
            items_processed, time_elapsed, score_value))
        self.stop()

def on_connected(sock):
    client = MyClient(sock)
    client.run()


def main():
    hackathon_protocol.tcp_connect(CONNECT_IP, CONNECT_PORT, on_connected)


if __name__ == '__main__':
    main()
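
Run note (an assumption, not stated in the commit): with a hackathon simulator listening on HACKATHON_CONNECT_IP / HACKATHON_CONNECT_PORT (default 127.0.0.1:12345), and serg_model.py plus weights.013-0.029.hdf5 placed next to this script, the client is started with: python predict_online.py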