Skip to content

Commit

Permalink
make the architecture more dynamic
Browse files (browse the repository at this point in the history)
Authored by SakhriHoussem, May 26, 2018
1 parent ede58b0 commit a3f036c
Show file tree
Hide file tree
Showing 7 changed files with 100 additions and 117 deletions.
71 changes: 4 additions & 67 deletions confusion_matrix.py
Original file line number Diff line number Diff line change
@@ -1,68 +1,3 @@
# """
# Simple tester for the vgg19_trainable
# """
# import numpy as np
# import tensorflow as tf
#
# from dataSetGenerator import dataSetGenerator
# from dataSetGenerator import picShow
# from dataSetGenerator import confusion_matrix
# from dataSetGenerator import draw_confusion_matrix
# from dataSetGenerator import draw_table
# from vgg19 import vgg19_trainable as vgg19
#
# batch,labels,classes = dataSetGenerator("C:\\Users\\shous\Desktop\datasets\\UCMerced_LandUse\\Images")
#
# # with tf.device('/device:GPU:0'):
# # with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
#
# with tf.device('/cpu:0'):
# with tf.Session() as sess:
#
# images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# true_out = tf.placeholder(tf.float32, [None, len(classes)])
# train_mode = tf.placeholder(tf.bool)
#
# vgg = vgg19.Vgg19('Weights/VGG19_21C.npy',len(classes))
# vgg.build(images,train_mode)
#
# # print number of variables used: 139754605 variables, i.e. ideal size = 548MB
# # print('number of variables used:',vgg.get_var_count())
#
# sess.run(tf.global_variables_initializer())
#
# # test classification
# prob = sess.run(vgg.prob, feed_dict={images: batch[:20], train_mode: False})
# picShow(batch[:10],labels[:10], classes, None, prob)
#
# # simple 1-step training
# cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
# train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
#
# correct_prediction = tf.equal(tf.argmax(prob), tf.argmax(true_out))
# acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#
# conf_mat = np.zeros((len(classes),len(classes)))
# batch_size = 20
# batche_num = len(batch)
# indice = np.random.permutation(batche_num)
# for i in range(int(batche_num/batch_size)):
# min_batch = indice[i*batch_size:(i+1)*batch_size]
# prob = sess.run(vgg.prob, feed_dict={images: batch[min_batch], true_out: labels[min_batch], train_mode: True})
# tru = labels[min_batch]
# conf_mat += confusion_matrix(prob,tru,classes)
# print("Iteration %d" % i)
# draw_confusion_matrix(conf_mat,classes)
# draw_table(conf_mat,classes)
# np.save('Data/confusion_matrix.npy',conf_mat)
#
# # test save
# vgg.save_npy(sess, 'Weights/VGG19_21C.npy')
#
# # test classification again, should have a higher probability about tiger
# # prob = sess.run(vgg.prob, feed_dict={images: batch[:10], train_mode: False})
# # picShow(batch[:10],labels[:10], classes, None, prob)

"""
Draw Confusion Matrix for the vgg19
"""
Expand All @@ -79,13 +14,15 @@
conf_mat = np.zeros((len(classes),len(classes)))
batch_size = 10
batche_num = len(batch)
classes_num = len(classes)
rib = batch.shape[1] # picture Rib
indice = np.random.permutation(batche_num)
with tf.device('/device:cpu:0'):
# with tf.device('/device:GPU:0'):
# with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=int(environ['NUMBER_OF_PROCESSORS']))) as sess:
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg19.Vgg19("Weights/VGG19_21C.npy") #set the path
images = tf.placeholder(tf.float32, [None, rib, rib, 3])
vgg = vgg19.Vgg19("Weights/VGG19_"+str(classes_num)+"C.npy") #set the path
with tf.name_scope("content_vgg"):
vgg.build(images)
for i in range(int(batche_num/batch_size)):
Expand Down
2 changes: 1 addition & 1 deletion dataSetGenerator.py

Large diffs are not rendered by default.

41 changes: 27 additions & 14 deletions train_vgg16.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,30 +2,33 @@
Simple tester for the vgg16_trainable
"""
from os import environ

import numpy as np
import tensorflow as tf

from dataSetGenerator import loadClasses
from dataSetGenerator import picShow
from dataSetGenerator import loadClasses,picShow,append
from vgg16 import vgg16_trainable as vgg16

#path= "C:/Users/{}/Desktop/UCMerced_LandUse/Images/".format(getlogin())
#batch, labels, classes = dataSetGenerator(path,True,224,80)
batch = np.load("DataSets/UCMerced_LandUse_dataTrain.npy")
labels = np.load("DataSets/UCMerced_LandUse_labelsTrain.npy")
classes = loadClasses("DataSets/UCMerced_LandUse.txt")
classes_num = len(classes)
rib = batch.shape[1] # picture Rib
# with tf.device('/device:GPU:0'):
# with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
with tf.device('/cpu:0'):
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=int(environ['NUMBER_OF_PROCESSORS']))) as sess:


images = tf.placeholder(tf.float32, [None, 224, 224, 3])
true_out = tf.placeholder(tf.float32, [None, len(classes)])
images = tf.placeholder(tf.float32, [None, rib, rib, 3])
true_out = tf.placeholder(tf.float32, [None, classes_num])
train_mode = tf.placeholder(tf.bool)

vgg = vgg16.Vgg16('Weights/VGG16_21C.npy',21)
try:
vgg = vgg16.Vgg16('Weights/VGG16_'+str(classes_num)+'C.npy',21)
except:
print('Weights/VGG16_'+str(classes_num)+'C.npy Not Exist')
vgg = vgg16.Vgg16(None,21)
vgg.build(images,train_mode)

# print number of variables used: 143667240 variables, i.e. ideal size = 548MB
Expand All @@ -46,21 +49,31 @@
batch_size = 10
epochs = 30
batche_num = batch.shape[0]
costs = []
accs = []
for _ in range(epochs):
print("******************* ", _, " *******************")
indice = np.random.permutation(batche_num)
counter = 0
for i in range(int(batche_num/batch_size)):
min_batch = indice[i*batch_size:(i+1)*batch_size]
cur_cost, cur_train,cur_acc= sess.run([cost, train,acc], feed_dict={images: batch[min_batch], true_out: labels[min_batch], train_mode: True})
print("Iteration %d loss:\n%s" % (i, cur_cost))
with open('Data/cost16_21C.txt', 'a') as f:
f.write(str(cur_cost)+'\n')
with open('Data/acc16_21C.txt', 'a') as f:
f.write(str(cur_acc)+'\n')

# test save
vgg.save_npy(sess, 'Weights/VGG16_21C.npy')
accs.append(cur_acc)
costs.append(cur_cost)
counter += 1
if counter % 100 == 0:
# save graph data
append(costs,'Data/cost16_'+str(classes_num)+'C.txt')
append(accs,'Data/acc16_'+str(classes_num)+'C.txt')
# save Weights
vgg.save_npy(sess, 'Weights/VGG16_'+str(classes_num)+'C.npy')

# save graph data
append(costs,'Data/cost16_'+str(classes_num)+'C.txt')
append(accs,'Data/acc16_'+str(classes_num)+'C.txt')
# save Weights
vgg.save_npy(sess, 'Weights/VGG16_'+str(classes_num)+'C.npy')
# test classification again, should have a higher probability about tiger
prob = sess.run(vgg.prob, feed_dict={images: batch[:10], train_mode: False})
picShow(batch[:10],labels[:10], classes,None,prob)
Expand Down
45 changes: 30 additions & 15 deletions train_vgg19.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,34 @@
import numpy as np
import tensorflow as tf

from dataSetGenerator import loadClasses
from dataSetGenerator import append
from dataSetGenerator import picShow
from vgg19 import vgg19_trainable as vgg19

#path= "C:/Users/{}/Desktop/UCMerced_LandUse/Images/".format(getlogin())
#batch, labels, classes = dataSetGenerator(path,True,224,80)
# batch = np.load("DataSets/RSSCN7_dataTrain.npy")
# labels = np.load("DataSets/RSSCN7_labelsTrain.npy")
classes = np.load("DataSets/UCMerced_LandUse_DU_classes.npy")
batch = np.load("DataSets/UCMerced_LandUse_DU_dataTrain.npy")
labels = np.load("DataSets/UCMerced_LandUse_DU_labelsTrain.npy")
classes = loadClasses("DataSets/UCMerced_LandUse.txt")
# classes = np.load("DataSets/RSSCN7_classes.npy")
classes_num = len(classes)
rib = batch.shape[1]
with tf.device('/device:GPU:0'):
# with tf.device('/cpu:0'):
# with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=int(environ['NUMBER_OF_PROCESSORS']))) as sess:

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
true_out = tf.placeholder(tf.float32, [None, len(classes)])
images = tf.placeholder(tf.float32, [None, rib, rib, 3])
true_out = tf.placeholder(tf.float32, [None, classes_num])
train_mode = tf.placeholder(tf.bool)

vgg = vgg19.Vgg19('Weights/VGG19_21C.npy',len(classes))
try:
vgg = vgg19.Vgg19('Weights/VGG19_'+str(classes_num)+'C.npy',classes_num)
except:
print('Weights/VGG19_'+str(classes_num)+'C.npy Not Exist')
vgg = vgg19.Vgg19(None,classes_num)
vgg.build(images,train_mode)

# print number of variables used: 143667240 variables, i.e. ideal size = 548MB
Expand All @@ -46,24 +54,31 @@
batch_size = 10
epochs = 30
batche_num = len(batch)
accs = []
costs = []
for _ in range(epochs):
print("******************* ", _, " *******************")
indice = np.random.permutation(batche_num)
s = 0
counter = 0
for i in range(int(batche_num/batch_size)):
min_batch = indice[i*batch_size:(i+1)*batch_size]
cur_cost, cur_train,cur_acc= sess.run([cost, train,acc], feed_dict={images: batch[min_batch], true_out: labels[min_batch], train_mode: True})
print("Iteration %d loss:\n%s" % (i, cur_cost))
with open('Data/cost19_21C.txt', 'a') as f:
f.write(str(cur_cost)+'\n')
with open('Data/acc19_21C.txt', 'a') as f:
f.write(str(cur_acc)+'\n')
s += 1
if s % 100 == 0:
accs.append(cur_acc)
costs.append(cur_cost)
counter += 1
if counter % 100 == 0:
# save graph data
append(costs,'Data/cost19_'+str(classes_num)+'C.txt')
append(accs,'Data/acc19_'+str(classes_num)+'C.txt')
# save Weights
vgg.save_npy(sess, 'Weights/VGG19_21C.npy')
vgg.save_npy(sess, 'Weights/VGG19_'+str(classes_num)+'C.npy')

# save graph data
append(costs,'Data/cost19_'+str(classes_num)+'C.txt')
append(accs,'Data/acc19_'+str(classes_num)+'C.txt')
# save Weights
vgg.save_npy(sess, 'Weights/VGG19_21C.npy')
vgg.save_npy(sess, 'Weights/VGG19_'+str(classes_num)+'C.npy')

# test classification again, should have a higher probability about tiger
prob = sess.run(vgg.prob, feed_dict={images: batch[:10], train_mode: False})
Expand Down
16 changes: 11 additions & 5 deletions train_vgg19_distibuted.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
batch = np.load("DataSets/UCMerced_LandUse_dataTrain.npy")
labels = np.load("DataSets/UCMerced_LandUse_labelsTrain.npy")
classes = loadClasses("DataSets/UCMerced_LandUse.txt")
classes_num = len(classes)
rib = batch.shape[1] # picture Rib

workers = ['DESKTOP-07HFBQN','FOUZI-PC']
pss = ['DELL-MINI']
Expand All @@ -28,10 +30,14 @@
# cluster=cluster)):
with tf.device("/job:ps/task:"+str(index)):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
images = tf.placeholder(tf.float32, [None, rib, rib, 3])
true_out = tf.placeholder(tf.float32, [None, len(classes)])
train_mode = tf.placeholder(tf.bool)
vgg = vgg19.Vgg19('Weights/VGG19_21C.npy')
try:
vgg = vgg19.Vgg19('Weights/VGG19_'+str(classes_num)+'C.npy',len(classes))
except:
vgg = vgg19.Vgg19(None,len(classes))

vgg.build(images,train_mode)

global_step = tf.train.get_or_create_global_step()
Expand Down Expand Up @@ -70,9 +76,9 @@
# with tf.device(tf.train.replica_device_setter(
# worker_device="/job:ps/task:"+str(index),
# cluster=cluster)):
append(costs,'Data/cost19_21C_D')
append(accs,'Data/acc19_21C_D')
vgg.save_npy(sess, 'Weights/VGG19_21C_D.npy')
append(costs,'Data/cost19_'+str(classes_num)+'C_D')
append(accs,'Data/acc19_'+str(classes_num)+'C_D')
vgg.save_npy(sess, 'Weights/VGG19_'+str(classes_num)+'C_D.npy')
# test classification
prob = sess.run(vgg.prob, feed_dict={images: batch[:10], train_mode: False})
picShow(batch[:10],labels[:10], classes, None, prob)
Expand Down
21 changes: 13 additions & 8 deletions vgg16/vgg16_trainable.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,20 +27,21 @@ def build(self, rgb, train_mode=None):
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
:param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
"""
self.rib = rgb.get_shape().as_list()[1]

rgb_scaled = rgb * 255.0

# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
assert red.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
assert green.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
assert blue.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
assert bgr.get_shape().as_list()[1:] == [self.rib, self.rib, 3]

self.conv1_1 = self.conv_layer(bgr, 3, 64, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
Expand All @@ -64,8 +65,10 @@ def build(self, rgb, train_mode=None):
self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')

self.fc6 = self.fc_layer(self.pool5, 25088, 4096, "fc6") # 25088 = ((224 // (2 ** 5)) ** 2) * 512
try:
self.fc6 = self.fc_layer(self.pool5, np.prod(self.pool5.get_shape().as_list()[1:]), 4096, "fc6") # 25088 = ((224 // (2 ** 5)) ** 2) * 512
except:
self.fc6 = self.fc_layer(self.pool5, np.prod(self.pool5.get_shape().as_list()[1:]), 4096, "fc6_") # 25088 = ((224 // (2 ** 5)) ** 2) * 512
self.relu6 = tf.nn.relu(self.fc6)
if train_mode is not None:
self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
Expand All @@ -78,8 +81,10 @@ def build(self, rgb, train_mode=None):
self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
elif self.trainable:
self.relu7 = tf.nn.dropout(self.relu7, self.dropout)

self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8")
try:
self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8")
except:
self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8_")

self.prob = tf.nn.softmax(self.fc8, name="prob")

Expand Down
21 changes: 14 additions & 7 deletions vgg19/vgg19_trainable.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,20 +28,21 @@ def build(self, rgb, train_mode=None):
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
:param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
"""
self.rib = rgb.get_shape().as_list()[1]

rgb_scaled = rgb * 255.0

# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
assert red.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
assert green.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
assert blue.get_shape().as_list()[1:] == [self.rib, self.rib, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
assert bgr.get_shape().as_list()[1:] == [self.rib, self.rib, 3]

self.conv1_1 = self.conv_layer(bgr, 3, 64, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
Expand All @@ -68,8 +69,12 @@ def build(self, rgb, train_mode=None):
self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, 512, 512, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
try:
self.fc6 = self.fc_layer(self.pool5, np.prod(self.pool5.get_shape().as_list()[1:]), 4096, "fc6") # 25088 = ((400 // (2 ** 5)) ** 2) * 512
except:
self.fc6 = self.fc_layer(self.pool5,np.prod(self.pool5.get_shape().as_list()[1:]), 4096, "fc6_") # 25088 = ((400 // (2 ** 5)) ** 2) * 512


self.fc6 = self.fc_layer(self.pool5, 25088, 4096, "fc6") # 25088 = ((224 // (2 ** 5)) ** 2) * 512
self.relu6 = tf.nn.relu(self.fc6)
if train_mode is not None:
self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
Expand All @@ -82,8 +87,10 @@ def build(self, rgb, train_mode=None):
self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
elif self.trainable:
self.relu7 = tf.nn.dropout(self.relu7, self.dropout)

self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8")
try:
self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8")
except:
self.fc8 = self.fc_layer(self.relu7, 4096, self.output, "fc8_")

self.prob = tf.nn.softmax(self.fc8, name="prob")

Expand Down

0 comments on commit a3f036c

Please sign in to comment.