# multi-layer.py
from hist_feature_test import *  # expected to provide X_train/y_train, X_validation/y_validation and X_test/y_test
import numpy as np
import tensorflow as tf

# helper function to draw a random sample of (X, y) pairs
def get_sample(num_samples, X_data, y_data):
    positions = np.arange(len(y_data))
    np.random.shuffle(positions)
    X_sample = []
    y_sample = []
    for posi in positions[:num_samples]:
        X_sample.append(X_data[posi])
        y_sample.append(y_data[posi])
    return X_sample, y_sample
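
# An equivalent sketch of the sampling above using NumPy fancy indexing
# (assumes X_data and y_data are NumPy arrays rather than plain lists):
#   idx = np.random.choice(len(y_data), num_samples, replace=False)
#   return X_data[idx], y_data[idx]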
######################## creating the model architecture #######################################
# input placeholder: batches of 324-dimensional feature vectors
x = tf.placeholder(tf.float32, [None, 324])
# output placeholder: one-hot labels over the 10 classes
y_ = tf.placeholder(tf.float32, [None, 10])
# weights and biases of the first hidden layer: 324 -> 200
W1 = tf.Variable(tf.random_normal([324, 200], stddev=0.35))
b1 = tf.Variable(tf.random_normal([200], stddev=0.35))
# weights and biases of the second hidden layer: 200 -> 100
W2 = tf.Variable(tf.random_normal([200, 100], stddev=0.35))
b2 = tf.Variable(tf.random_normal([100], stddev=0.35))
# weights and biases of the output layer: 100 -> 10
W3 = tf.Variable(tf.random_normal([100, 10], stddev=0.35))
b3 = tf.Variable(tf.random_normal([10], stddev=0.35))
# first hidden layer activation
#hidden_layer = tf.nn.softmax(tf.matmul(x, W1) + b1)
hidden_layer = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
# second hidden layer activation
#hidden_layer2 = tf.nn.softmax(tf.matmul(hidden_layer, W2) + b2)
hidden_layer2 = tf.nn.sigmoid(tf.matmul(hidden_layer, W2) + b2)
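# (alternative sketch, not used above: ReLU is a common choice for hidden
# layers and often trains faster than sigmoid on deeper networks)
#hidden_layer = tf.nn.relu(tf.matmul(x, W1) + b1)
#hidden_layer2 = tf.nn.relu(tf.matmul(hidden_layer, W2) + b2)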
# output of the network
y_estimated = tf.nn.softmax(tf.matmul(hidden_layer2, W3) + b3)
# loss function: cross-entropy between the true and estimated distributions
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_estimated), reduction_indices=[1]))
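# A numerically safer sketch (tf.log(y_estimated) can hit log(0) when the
# softmax saturates): keep the last layer as raw logits and let TensorFlow
# fuse softmax and cross-entropy, e.g.
#   logits = tf.matmul(hidden_layer2, W3) + b3
#   cross_entropy = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))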
# how to train the model
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
#train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# how to evaluate the model: a prediction counts as correct when the argmax
# of the estimated distribution matches the argmax of the one-hot label
correct_prediction = tf.equal(tf.argmax(y_estimated, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
######################## training the model #######################################
# op that assigns an initial value to each variable (in this case the Ws and bs)
init = tf.global_variables_initializer()
# a session encapsulates the environment in which TensorFlow runs
sess = tf.Session()
sess.run(init)
batch_size = 1000
for i in range(10000):  # train for 10,000 iterations
    # draw a random mini-batch from the training set
    X_sample, y_sample = get_sample(batch_size, X_train, y_train)
    # run one gradient-descent step on the mini-batch
    sess.run(train_step, feed_dict={x: X_sample, y_: y_sample})
    # periodically report accuracy on the validation set
    if i % 100 == 0:
        print(i, ":", sess.run(accuracy, feed_dict={x: X_validation, y_: y_validation}))
print("\n\n\n")
print("TEST RESULT:", sess.run(accuracy, feed_dict={x: X_test, y_: y_test}))