Commit 927f810

Changing network layers to handle multiple inputs at once

1 parent bf4902c · commit 927f810

File tree: 3 files changed (image.py, network.py, operators.py), +97 -80 lines changed
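
The pattern behind this commit: every layer function now takes a list of tensors instead of a single tensor, creates its variables once, and applies them to each element of the list, so all inputs share the same weights. Below is a minimal sketch of that pattern (hypothetical shared_linear, not code from this commit; TensorFlow 1.x API as used in this repo):

import tensorflow as tf

def shared_linear(tensors, output_size, name='shared_linear'):
    """Create weight/bias once, then apply them to every input in the list."""
    with tf.variable_scope(name):
        weight = tf.get_variable('weight', [int(tensors[0].get_shape()[-1]), output_size])
        bias = tf.get_variable('bias', [output_size])
        return [tf.matmul(t, weight) + bias for t in tensors]

# Both logits below are computed with the same 'weight' and 'bias' variables:
fake = tf.placeholder(tf.float32, [None, 8])
real = tf.placeholder(tf.float32, [None, 8])
fake_logit, real_logit = shared_linear([fake, real], 1)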

image.py

Lines changed: 6 additions & 4 deletions

@@ -58,7 +58,7 @@ def start_threads(self):
             t.start()
         if(self.pools > 1):
             print('Processing input images')
-            self.pool = [[self.queue.get() for _ in range(self.batch_size)] for _ in range(self.pools)]
+            self.pool = [[] for _ in range(self.pools)]
 
     def stop_threads(self):
         """Stop the threads that are generating image variations (freeing memory)"""
@@ -67,14 +67,16 @@ def stop_threads(self):
 
     def get_batch(self):
         """Get a batch of images as arrays"""
-        if self.closing:
+        if self.closing: #Start threads
             self.start_threads()
             self.event.set()
+        if len(self.pool[self.pool_index]) == 0: #Check and fill image pool
+            self.pool[self.pool_index] = [self.queue.get() for _ in range(self.batch_size)]
         images = self.pool[self.pool_index]
-        for i in range(self.pool_renew):
+        for i in range(self.pool_renew): #Replace old images
             self.pool[self.pool_index][(self.pool_iteration+i)%self.batch_size] = self.queue.get()
         self.pool_index += 1
-        if self.pool_index == self.pools:
+        if self.pool_index == self.pools: #Cycle indexes
             self.pool_index = 0
             self.pool_iteration = (self.pool_iteration+self.pool_renew)%self.batch_size
         self.event.clear()
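
Net effect of the image.py change: pools start empty and are filled from the queue only when first requested, instead of pre-filling every pool in start_threads; each get_batch call then replaces pool_renew of the oldest entries. A self-contained sketch of that fill-and-renew behavior (hypothetical LazyPool, with a plain queue.Queue standing in for the threaded image pipeline):

from queue import Queue

class LazyPool:
    def __init__(self, source: Queue, pools: int, batch_size: int, renew: int):
        self.source = source
        self.pool = [[] for _ in range(pools)]  # start empty, fill on demand
        self.batch_size, self.renew = batch_size, renew
        self.index = self.iteration = 0

    def get_batch(self):
        if len(self.pool[self.index]) == 0:  # first use: fill this pool
            self.pool[self.index] = [self.source.get() for _ in range(self.batch_size)]
        batch = self.pool[self.index]
        for i in range(self.renew):  # replace the oldest entries in place
            batch[(self.iteration + i) % self.batch_size] = self.source.get()
        self.index += 1
        if self.index == len(self.pool):  # cycle indexes, advance renewal cursor
            self.index = 0
            self.iteration = (self.iteration + self.renew) % self.batch_size
        return batch

q = Queue()
for n in range(64):
    q.put(n)
print(LazyPool(q, pools=2, batch_size=4, renew=1).get_batch())  # [4, 1, 2, 3]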

network.py

Lines changed: 39 additions & 39 deletions

@@ -56,7 +56,7 @@ def __init__(self, name, setup=True, image_size=64, colors=3, batch_size=64, dir
         self._dis_conv = discriminator_convolutions
         self._dis_width = discriminator_base_width
         self._class_depth = classification_depth
-        self.dropout = dropout
+        self._dropout = dropout
         #Training variables
         self.learning_rate = (learning_rate, learning_momentum, learning_momentum2)
         self._y_offset = y_offset
@@ -77,22 +77,21 @@ def __init__(self, name, setup=True, image_size=64, colors=3, batch_size=64, dir
         self.image_input_scaled = tf.subtract(tf.to_float(self.image_input)/127.5, 1, name='image_scaling')
         self.generator_output = None
         self.image_output = self.image_grid_output = None
-        self.image_logit = self.generated_logit = None
         self.variation_updater = self.image_variation = None
         self.generator_solver = self.discriminator_solver = None
         if setup:
             self.setup_network()
 
     def setup_network(self):
         """Initialize the network if it is not done in the constructor"""
-        self.__generator__()
-        self.setup_output()
-        self.discriminator(self.generator_output, self._dis_conv, self._dis_width, self._class_depth)
-        g_loss, d_loss, d_loss_real, d_loss_fake = self.loss_functions(self.image_logit, self.generated_logit, self._y_offset)
+        self.generator_output = self.__generator__([self.generator_input])[0]
+        self.__output__()
+        gen_logit, image_logit = self.__discriminator__([self.generator_output, self.image_input_scaled])
+        g_loss, d_loss, d_loss_real, d_loss_fake = self.loss_functions(image_logit, gen_logit, self._y_offset)
         self.generator_solver, self.discriminator_solver = self.solver_functions(g_loss, d_loss, *self.learning_rate)
 
 
-    def __generator__(self):
+    def __generator__(self, input_tensors):
         """Create a Generator Network"""
         conv_layers = self._gen_conv
         conv_size = self._gen_width
@@ -101,13 +100,15 @@ def __generator__(self):
         conv_image_size = self.image_size // (2**conv_layers)
         assert conv_image_size*(2**conv_layers) == self.image_size, "Images must be a multiple of two (or at least divisible by 2**num_of_conv_layers_plus_one)"
         #Input Layer
-        prev_layer = expand_relu(self.generator_input, [-1, conv_image_size, conv_image_size, conv_size*2**(conv_layers-1)], 'expand')
+        prev_layer = expand_relu(input_tensors, [-1, conv_image_size, conv_image_size, conv_size*2**(conv_layers-1)], 'expand')
         #Conv layers
         for i in range(conv_layers-1):
             prev_layer = conv2d_transpose(prev_layer, self.batch_size, 2**(conv_layers-i-2)*conv_size, 'convolution_%d'%i)
-        self.generator_output = conv2d_transpose_tanh(prev_layer, self.batch_size, self.colors, 'output')
+        return conv2d_transpose_tanh(prev_layer, self.batch_size, self.colors, 'output')
 
-    def setup_output(self):
+    def __output__(self):
+        if self.log:
+            self.__variation_summary__()
         with tf.name_scope('output'):
             with tf.name_scope("image_list") as scope:
                 self.image_output = tf.cast((self.generator_output + 1) * 127.5, tf.uint8, name=scope)
@@ -129,41 +130,40 @@ def setup_output(self):
             self.image_grid_output = tf.cast(grid, tf.uint8, name=scope)
 
 
-    def discriminator(self, generator_output, conv_layers, conv_size, class_layers):
+    def __discriminator__(self, input_tensors):
         """Create a Discriminator Network"""
+        conv_layers, conv_size, class_layers = self._dis_conv, self._dis_width, self._class_depth
         image_size = self.image_size
         with tf.variable_scope('discriminator') as scope:
             conv_output_size = ((image_size//(2**conv_layers))**2) * conv_size * conv_layers
             class_output_size = 2**int(math.log(conv_output_size//2, 2))
             #Create Layers
-            def create_network(layer, summary=True):
-                #Convolutional layers
-                for i in range(conv_layers):
-                    layer = conv2d(layer, conv_size*(i+1), name='convolution_%d'%i, norm=(i != 0), summary=summary)
-                layer = tf.reshape(layer, [-1, conv_output_size])
-                #Classification layers
-                for i in range(class_layers):
-                    layer = relu_dropout(layer, class_output_size, self.dropout, 'classification_%d'%i, summary=summary)
-                return linear(layer, 1, 'output', summary=summary)
-            self.generated_logit = create_network(generator_output)
-            scope.reuse_variables()
-            self.image_logit = create_network(self.image_input_scaled, False)
-            if self.log:
-                with tf.variable_scope('pixel_variation'):
-                    #Pixel Variations
-                    img_tot_var = tf.image.total_variation(self.image_input_scaled)
-                    gen_tot_var = tf.image.total_variation(generator_output)
-                    image_variation = tf.reduce_sum(img_tot_var)
-                    gener_variation = tf.reduce_sum(gen_tot_var)
-                    tf.summary.histogram('images', img_tot_var)
-                    tf.summary.histogram('generated', gen_tot_var)
-                    tf.summary.scalar('images', image_variation)
-                    tf.summary.scalar('generated', gener_variation)
-                    ema = tf.train.ExponentialMovingAverage(decay=0.95, num_updates=self.iterations)
-                    ema_apply = ema.apply([image_variation])
-                    self.variation_updater = tf.group(ema_apply)
-                    self.image_variation = ema.average(image_variation)
-                    tf.summary.scalar('images_averaged', self.image_variation)
+            prev_layer = input_tensors
+            for i in range(conv_layers): #Convolutional layers
+                prev_layer = conv2d(prev_layer, conv_size*(i+1), name='convolution_%d'%i, norm=(i != 0))
+            prev_layer = [tf.reshape(layer, [-1, conv_output_size]) for layer in prev_layer]
+            for i in range(class_layers): #Classification layers
+                prev_layer = relu_dropout(prev_layer, class_output_size, self._dropout, 'classification_%d'%i)
+            prev_layer = linear(prev_layer, 1, 'output')
+            return prev_layer
+
+    def __variation_summary__(self):
+        """Create summaries for pixel variation"""
+        with tf.variable_scope('pixel_variation'):
+            #Pixel Variations
+            img_tot_var = tf.image.total_variation(self.image_input_scaled)
+            gen_tot_var = tf.image.total_variation(self.generator_output)
+            image_variation = tf.reduce_sum(img_tot_var)
+            gener_variation = tf.reduce_sum(gen_tot_var)
+            tf.summary.histogram('images', img_tot_var)
+            tf.summary.histogram('generated', gen_tot_var)
+            tf.summary.scalar('images', image_variation)
+            tf.summary.scalar('generated', gener_variation)
+            ema = tf.train.ExponentialMovingAverage(decay=0.95, num_updates=self.iterations)
+            ema_apply = ema.apply([image_variation])
+            self.variation_updater = tf.group(ema_apply)
+            self.image_variation = ema.average(image_variation)
+            tf.summary.scalar('images_averaged', self.image_variation)
 
 
     def loss_functions(self, real_logit, fake_logit, y_offset=0):
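
The practical consequence in network.py: the discriminator graph is no longer built twice. Before, create_network ran once on generated images and again on real images after scope.reuse_variables(); now one call over [self.generator_output, self.image_input_scaled] returns both logits, and sharing falls out of each layer creating its variables once and looping over its inputs. A sketch contrasting the two styles (hypothetical names, TF 1.x):

import tensorflow as tf

fake = tf.placeholder(tf.float32, [None, 8])
real = tf.placeholder(tf.float32, [None, 8])

# Old style: build the tower twice, flipping the scope into reuse mode between calls.
with tf.variable_scope('old_disc') as scope:
    fake_logit = tf.matmul(fake, tf.get_variable('w', [8, 1]))
    scope.reuse_variables()
    real_logit = tf.matmul(real, tf.get_variable('w', [8, 1]))  # same 'w', reused

# New style (this commit): one pass over a list; variables are created exactly once.
with tf.variable_scope('new_disc'):
    w = tf.get_variable('w', [8, 1])
    fake_logit2, real_logit2 = [tf.matmul(t, w) for t in (fake, real)]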

operators.py

Lines changed: 52 additions & 37 deletions

@@ -43,65 +43,80 @@ def lrelu(tensor, leak: float=0.2):
 
 # Network Layers
 
-def conv2d(tensor, output_size: int, name: str='conv2d', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
+def conv2d(tensors, output_size: int, name: str='conv2d', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a convolutional layer"""
     with tf.variable_scope(name):
-        weight, bias = weight_bias([5, 5, int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
-        conv = tf.nn.conv2d(tensor, weight, [1, 2, 2, 1], "SAME")
-        if norm:
-            conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
-        return lrelu(tf.nn.bias_add(conv, bias))
+        weight, bias = weight_bias([5, 5, int(tensors[0].get_shape()[-1]), output_size], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            conv = tf.nn.conv2d(tensor, weight, [1, 2, 2, 1], "SAME")
+            if norm:
+                conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
+            output.append(lrelu(tf.nn.bias_add(conv, bias)))
+        return output
 
 def relu(tensor, output_size: int, name: str='relu', stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a relu layer"""
     with tf.variable_scope(name):
         weight, bias = weight_bias([int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
         return tf.nn.relu(tf.matmul(tensor, weight) + bias)
 
-def relu_dropout(tensor, output_size: int, dropout: float=0.4, name: str='relu_dropout', stddev: float=0.02, term: float=0.01, summary: bool=True):
+def relu_dropout(tensors, output_size: int, dropout: float=0.4, name: str='relu_dropout', stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a relu layer with dropout"""
     with tf.variable_scope(name):
-        weight, bias = weight_bias([int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
-        relu_layer = tf.nn.relu(tf.matmul(tensor, weight) + bias)
-        return tf.nn.dropout(relu_layer, dropout)
-
-def linear(tensor, output_size: int, name: str='linear', stddev: float=0.02, term: float=0.01, summary: bool=True):
+        weight, bias = weight_bias([int(tensors[0].get_shape()[-1]), output_size], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            relu_layer = tf.nn.relu(tf.matmul(tensor, weight) + bias)
+            output.append(tf.nn.dropout(relu_layer, dropout))
+        return output
+
+def linear(tensors, output_size: int, name: str='linear', stddev: float=0.02, term: float=0.01, summary: bool=True):
     '''Create a fully connected layer'''
     with tf.variable_scope(name):
-        weight, bias = weight_bias([tensor.get_shape()[-1], output_size], stddev, term, summary)
-        return tf.matmul(tensor, weight) + bias
+        weight, bias = weight_bias([tensors[0].get_shape()[-1], output_size], stddev, term, summary)
+        return [tf.matmul(tensor, weight) + bias for tensor in tensors]
 
-def conv2d_transpose(tensor, batch_size=1, conv_size=32, name: str='conv2d_transpose', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
+def conv2d_transpose(tensors, batch_size=1, conv_size=32, name: str='conv2d_transpose', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a transpose convolutional layer"""
     with tf.variable_scope(name):
-        tensor_shape = tensor.get_shape()
+        tensor_shape = tensors[0].get_shape()
         filt, bias = filter_bias([5, 5, conv_size, tensor_shape[-1]], stddev, term, summary)
         conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
-        deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
-        if norm:
-            deconv = tf.contrib.layers.batch_norm(deconv, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
-        return tf.nn.relu(tf.nn.bias_add(deconv, bias))
-
-def conv2d_transpose_tanh(tensor, batch_size=1, conv_size=32, name: str='conv2d_transpose_tanh', stddev: float=0.02, summary: bool=True):
+        output = []
+        for tensor in tensors:
+            deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
+            if norm:
+                deconv = tf.contrib.layers.batch_norm(deconv, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
+            output.append(tf.nn.relu(tf.nn.bias_add(deconv, bias)))
+        return output
+
+def conv2d_transpose_tanh(tensors, batch_size=1, conv_size=32, name: str='conv2d_transpose_tanh', stddev: float=0.02, summary: bool=True):
     """Create a transpose convolutional layer"""
     with tf.variable_scope(name):
-        tensor_shape = tensor.get_shape()
+        tensor_shape = tensors[0].get_shape()
         filt = tf.get_variable('filter', [5, 5, conv_size, tensor_shape[-1]], tf.float32, tf.random_normal_initializer(0, stddev), trainable=True)
-        conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
-        deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
-        return tf.nn.tanh(deconv)
-
-def expand_relu(tensor, out_shape, name: str='expand_relu', norm: bool=True, stddev: float=0.2, term: float=0.01, summary: bool=True):
+        output = []
+        for tensor in tensors:
+            conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
+            deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
+            output.append(tf.nn.tanh(deconv))
+        return output
+
+def expand_relu(tensors, out_shape, name: str='expand_relu', norm: bool=True, stddev: float=0.2, term: float=0.01, summary: bool=True):
     """Create a layer that expands an input to a shape"""
     with tf.variable_scope(name) as scope:
-        weight, bias = weight_bias([tensor.get_shape()[-1], np.prod(out_shape[1:])], stddev, term, summary)
-        lin = tf.matmul(tensor, weight) + bias
-        reshape = tf.reshape(lin, out_shape)
-        if norm:
-            reshape = tf.contrib.layers.batch_norm(reshape, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope=scope, is_training=True, epsilon=0.00001)
-        return tf.nn.relu(reshape)
+        weight, bias = weight_bias([tensors[0].get_shape()[-1], np.prod(out_shape[1:])], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            lin = tf.matmul(tensor, weight) + bias
+            reshape = tf.reshape(lin, out_shape)
+            if norm:
+                reshape = tf.contrib.layers.batch_norm(reshape, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope=scope, is_training=True, epsilon=0.00001)
+            output.append(tf.nn.relu(reshape))
+        return output
 
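
Taken together, the operators now consume and return lists, so a caller can push a pair of towers (e.g. generated and real images) through the same variables end to end. A hedged usage sketch (assumes operators.py and its weight_bias/filter_bias helpers are importable; shapes are illustrative):

import tensorflow as tf
from operators import conv2d, linear

generated = tf.placeholder(tf.float32, [64, 32, 32, 3])
real = tf.placeholder(tf.float32, [64, 32, 32, 3])

towers = conv2d([generated, real], 16, name='conv_0', norm=False)  # one set of conv weights
towers = [tf.reshape(t, [64, -1]) for t in towers]                 # flatten each tower
gen_logit, real_logit = linear(towers, 1, name='out')              # shared output layer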
